/[gxemul]/upstream/0.4.3/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.4.3/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 33 - (show annotations)
Mon Oct 8 16:21:06 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 15262 byte(s)
0.4.3
1 /*
2 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.97 2006/09/07 11:44:01 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device's
54 * read/write access function is called.
55 *
56 * This function should not be called with cpu == NULL.
57 *
58 * Returns one of the following:
59 * MEMORY_ACCESS_FAILED
60 * MEMORY_ACCESS_OK
61 *
62 * (MEMORY_ACCESS_FAILED is 0.)
63 */
64 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
65 unsigned char *data, size_t len, int writeflag, int misc_flags)
66 {
67 #ifdef MEM_ALPHA
68 const int offset_mask = 0x1fff;	/* 8 KB page offset on Alpha */
69 #else
70 const int offset_mask = 0xfff;	/* 4 KB page offset elsewhere */
71 #endif
72
73 #ifndef MEM_USERLAND
74 int ok = 1;
75 #endif
76 uint64_t paddr;
77 int cache, no_exceptions, offset;
78 unsigned char *memblock;
79 int dyntrans_device_danger = 0;
80
81 no_exceptions = misc_flags & NO_EXCEPTIONS;
82 cache = misc_flags & CACHE_FLAGS_MASK;
83
84 #ifdef MEM_X86
85 /* Real-mode wrap-around: */
86 if (REAL_MODE && !(misc_flags & PHYSICAL)) {
87 if ((vaddr & 0xffff) + len > 0x10000) {
88 /* Do one byte at a time: */
89 int res = 0;
90 size_t i;
91 for (i=0; i<len; i++)
92 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
93 writeflag, misc_flags);
94 return res;
95 }
96 }
97
98 /* Crossing a page boundary? Then do one byte at a time: */
99 if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
100 && cpu->cd.x86.cr[0] & X86_CR0_PG) {
101 /*
102 * For WRITES: Read ALL BYTES FIRST and write them back!!!
103 * Then do a write of all the new bytes. This is to make sure
104 * that both pages around the boundary are writable so that
105 * there is no "partial write" performed.
106 */
107 int res = 0;
108 size_t i;
109 if (writeflag == MEM_WRITE) {
110 unsigned char tmp;
111 for (i=0; i<len; i++) {
112 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
113 MEM_READ, misc_flags);
114 if (!res)
115 return 0;
116 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
117 MEM_WRITE, misc_flags);
118 if (!res)
119 return 0;
120 }
121 for (i=0; i<len; i++) {
122 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
123 MEM_WRITE, misc_flags);
124 if (!res)
125 return 0;
126 }
127 } else {
128 for (i=0; i<len; i++) {
129 /* Do one byte at a time: */
130 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
131 writeflag, misc_flags);
132 if (!res) {
133 if (cache == CACHE_INSTRUCTION) {
134 fatal("FAILED instruction "
135 "fetch across page boundar"
136 "y: todo. vaddr=0x%08x\n",
137 (int)vaddr);
138 cpu->running = 0;
139 }
140 return 0;
141 }
142 }
143 }
144 return res;
145 }
146 #endif /* X86 */
147
148
149 #ifdef MEM_USERLAND
150 #ifdef MEM_ALPHA
151 paddr = vaddr;
152 #else
153 paddr = vaddr & 0x7fffffff;
154 #endif
155 #else /* !MEM_USERLAND */
156 if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
157 paddr = vaddr;
158 } else {
159 ok = cpu->translate_v2p(cpu, vaddr, &paddr,
160 (writeflag? FLAG_WRITEFLAG : 0) +
161 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
162 #ifdef MEM_X86
163 + (misc_flags & NO_SEGMENTATION)
164 #endif
165 #ifdef MEM_ARM
166 + (misc_flags & MEMORY_USER_ACCESS)
167 #endif
168 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
169
170 /*
171 * If the translation caused an exception, or was invalid in
172 * some way, then simply return without doing the memory
173 * access:
174 */
175 if (!ok)
176 return MEMORY_ACCESS_FAILED;
177 }
178
179
180 #ifdef MEM_X86
181 /* DOS debugging :-) */
182 if (!quiet_mode && !(misc_flags & PHYSICAL)) {
183 if (paddr >= 0x400 && paddr <= 0x4ff)
184 debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
185 MEM_WRITE? "writing to" : "reading from",
186 (int)paddr);
187 #if 0
188 if (paddr >= 0xf0000 && paddr <= 0xfffff)
189 debug("{ BIOS ACCESS: %s 0x%x }\n",
190 writeflag == MEM_WRITE? "writing to" :
191 "reading from", (int)paddr);
192 #endif
193 }
194 #endif
195 #endif /* !MEM_USERLAND */
196
197
198 #ifndef MEM_USERLAND
199 /*
200 * Memory mapped device?
201 *
202 * TODO: if paddr < base, but len enough, then the device should
203 * still be written to!
204 */
205 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
206 uint64_t orig_paddr = paddr;
207 int i, start, end, res;
208
209 /*
210 * Really really slow, but unfortunately necessary. This is
211 * to avoid the following scenario:
212 *
213 * a) offsets 0x000..0x123 are normal memory
214 * b) offsets 0x124..0x777 are a device
215 *
216 * 1) a read is done from offset 0x100. the page is
217 * added to the dyntrans system as a "RAM" page
218 * 2) a dyntranslated read is done from offset 0x200,
219 * which should access the device, but since the
220 * entire page is added, it will access non-existent
221 * RAM instead, without warning.
222 *
223 * Setting dyntrans_device_danger = 1 on accesses which are
224 * on _any_ offset on pages that are device mapped avoids
225 * this problem, but it is probably not very fast.
226 *
227 * TODO: Convert this into a quick (multi-level, 64-bit)
228 * address space lookup, to find dangerous pages.
229 */
230 #if 1
231 for (i=0; i<mem->n_mmapped_devices; i++)
232 if (paddr >= (mem->devices[i].baseaddr & ~offset_mask)&&
233 paddr <= ((mem->devices[i].endaddr-1)|offset_mask)){
234 dyntrans_device_danger = 1;
235 break;
236 }
237 #endif
238
239 start = 0; end = mem->n_mmapped_devices - 1;
240 i = mem->last_accessed_device;
241
242 /* Binary search through the device array (assumed sorted by
243 base address), starting at the last accessed device: */
244 do {
245 if (paddr >= mem->devices[i].baseaddr &&
246 paddr < mem->devices[i].endaddr) {
247 /* Found a device, let's access it: */
248 mem->last_accessed_device = i;
249
250 paddr -= mem->devices[i].baseaddr;
251 if (paddr + len > mem->devices[i].length)
252 len = mem->devices[i].length - paddr;
253
254 if (cpu->update_translation_table != NULL &&
255 !(ok & MEMORY_NOT_FULL_PAGE) &&
256 mem->devices[i].flags & DM_DYNTRANS_OK) {
257 int wf = writeflag == MEM_WRITE? 1 : 0;
258 unsigned char *host_addr;
259
260 if (!(mem->devices[i].flags &
261 DM_DYNTRANS_WRITE_OK))
262 wf = 0;
263
264 if (writeflag && wf) {
265 if (paddr < mem->devices[i].
266 dyntrans_write_low)
267 mem->devices[i].
268 dyntrans_write_low =
269 paddr &~offset_mask;
270 if (paddr >= mem->devices[i].
271 dyntrans_write_high)
272 mem->devices[i].
273 dyntrans_write_high =
274 paddr | offset_mask;
275 }
276
277 if (mem->devices[i].flags &
278 DM_EMULATED_RAM) {
279 /* MEM_WRITE to force the page
280 to be allocated, if it
281 wasn't already */
282 uint64_t *pp = (uint64_t *)mem->
283 devices[i].dyntrans_data;
284 uint64_t p = orig_paddr - *pp;
285 host_addr =
286 memory_paddr_to_hostaddr(
287 mem, p & ~offset_mask,
288 MEM_WRITE);
289 } else {
290 host_addr = mem->devices[i].
291 dyntrans_data +
292 (paddr & ~offset_mask);
293 }
294
295 cpu->update_translation_table(cpu,
296 vaddr & ~offset_mask, host_addr,
297 wf, orig_paddr & ~offset_mask);
298 }
299
300 res = 0;
301 if (!no_exceptions || (mem->devices[i].flags &
302 DM_READS_HAVE_NO_SIDE_EFFECTS))
303 res = mem->devices[i].f(cpu, mem, paddr,
304 data, len, writeflag,
305 mem->devices[i].extra);
306
307 if (res == 0)
308 res = -1;
309
310 #ifndef MEM_X86
311 /*
312 * If accessing the memory mapped device
313 * failed, then return with a DBE exception.
314 */
315 if (res <= 0 && !no_exceptions) {
316 debug("%s device '%s' addr %08lx "
317 "failed\n", writeflag?
318 "writing to" : "reading from",
319 mem->devices[i].name, (long)paddr);
320 #ifdef MEM_MIPS
321 mips_cpu_exception(cpu, EXCEPTION_DBE,
322 0, vaddr, 0, 0, 0, 0);
323 #endif
324 return MEMORY_ACCESS_FAILED;
325 }
326 #endif
327 goto do_return_ok;
328 }
329
330 if (paddr < mem->devices[i].baseaddr)
331 end = i - 1;
332 if (paddr >= mem->devices[i].endaddr)
333 start = i + 1;
334 i = (start + end) >> 1;
335 } while (start <= end);
336 }
337
338
339 #ifdef MEM_MIPS
340 /*
341 * Data and instruction cache emulation:
342 */
343
344 switch (cpu->cd.mips.cpu_type.mmu_model) {
345 case MMU3K:
346 /* if not uncached address (TODO: generalize this) */
347 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
348 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
349 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
350 if (memory_cache_R3000(cpu, cache, paddr,
351 writeflag, len, data))
352 goto do_return_ok;
353 }
354 break;
355 default:
356 /* R4000 etc */
357 /* TODO */
358 ;
359 }
360 #endif /* MEM_MIPS */
361
362
363 /* Outside of physical RAM? */
364 if (paddr >= mem->physical_max) {
365 #ifdef MEM_MIPS
366 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
367 /* Ok, this is PROM stuff */
368 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
369 /* Sprite reads from this area of memory... */
370 /* TODO: is this still correct? */
371 if (writeflag == MEM_READ)
372 memset(data, 0, len);
373 goto do_return_ok;
374 } else
375 #endif /* MIPS */
376 {
377 if (paddr >= mem->physical_max) {
378 uint64_t offset, old_pc = cpu->pc;
379 char *symbol;
380
381 /* This allows for example OS kernels to probe
382 memory a few KBs past the end of memory,
383 without giving too many warnings. */
384 if (!quiet_mode && !no_exceptions && paddr >=
385 mem->physical_max + 0x40000) {
386 fatal("[ memory_rw(): writeflag=%i ",
387 writeflag);
388 if (writeflag) {
389 unsigned int i;
390 debug("data={", writeflag);
391 if (len > 16) {
392 int start2 = len-16;
393 for (i=0; i<16; i++)
394 debug("%s%02x",
395 i?",":"",
396 data[i]);
397 debug(" .. ");
398 if (start2 < 16)
399 start2 = 16;
400 for (i=start2; i<len;
401 i++)
402 debug("%s%02x",
403 i?",":"",
404 data[i]);
405 } else
406 for (i=0; i<len; i++)
407 debug("%s%02x",
408 i?",":"",
409 data[i]);
410 debug("}");
411 }
412
413 fatal(" paddr=0x%llx >= physical_max"
414 "; pc=", (long long)paddr);
415 if (cpu->is_32bit)
416 fatal("0x%08x",(int)old_pc);
417 else
418 fatal("0x%016llx",
419 (long long)old_pc);
420 symbol = get_symbol_name(
421 &cpu->machine->symbol_context,
422 old_pc, &offset);
423 fatal(" <%s> ]\n",
424 symbol? symbol : " no symbol ");
425 }
426 }
427
428 if (writeflag == MEM_READ) {
429 #ifdef MEM_X86
430 /* Reading non-existent memory on x86: */
431 memset(data, 0xff, len);
432 #else
433 /* Return all zeroes? (Or 0xff? TODO) */
434 memset(data, 0, len);
435 #endif
436
437 #ifdef MEM_MIPS
438 /*
439 * For real data/instruction accesses, cause
440 * an exception on an illegal read:
441 */
442 if (cache != CACHE_NONE && cpu->machine->
443 dbe_on_nonexistant_memaccess &&
444 !no_exceptions) {
445 if (paddr >= mem->physical_max &&
446 paddr < mem->physical_max+1048576)
447 mips_cpu_exception(cpu,
448 EXCEPTION_DBE, 0, vaddr, 0,
449 0, 0, 0);
450 }
451 #endif /* MEM_MIPS */
452 }
453
454 /* Hm? Shouldn't there be a DBE exception for
455 invalid writes as well? TODO */
456
457 goto do_return_ok;
458 }
459 }
460
461 #endif /* ifndef MEM_USERLAND */
462
463
464 /*
465 * Uncached access:
466 *
467 * 1) Translate the physical address to a host address.
468 *
469 * 2) Insert this virtual->physical->host translation into the
470 * fast translation arrays (using update_translation_table()).
471 *
472 * 3) If this was a Write, then invalidate any code translations
473 * in that page.
474 */
475 memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
476 writeflag);
477 if (memblock == NULL) {
478 if (writeflag == MEM_READ)
479 memset(data, 0, len);
480 goto do_return_ok;
481 }
482
483 offset = paddr & offset_mask;
484
485 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
486 #ifdef MEM_MIPS
487 /* Ugly hack for R2000/R3000 caches: */
488 && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
489 !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
490 #endif
491 #ifndef MEM_MIPS
492 /* && !(misc_flags & MEMORY_USER_ACCESS) */
493 #ifndef MEM_USERLAND
494 && !(ok & MEMORY_NOT_FULL_PAGE)
495 #endif
496 #endif
497 && !no_exceptions)
498 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
499 memblock, (misc_flags & MEMORY_USER_ACCESS) |
500 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
501 (cache == CACHE_INSTRUCTION?
502 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
503 #else
504 (writeflag == MEM_WRITE? 1 : 0),
505 #endif
506 paddr & ~offset_mask);
507
508 /*
509 * If writing, then invalidate code translations for the (physical)
510 * page address:
511 */
512 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
513 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
514
515 if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
516 printf("Write over memblock boundary?\n");
517 exit(1);
518 }
519
520 if (writeflag == MEM_WRITE) {
521 /* Ugly optimization, but it works: */
522 if (len == sizeof(uint32_t) && (offset & 3)==0
523 && ((size_t)data&3)==0)
524 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
525 else if (len == sizeof(uint8_t))
526 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
527 else
528 memcpy(memblock + offset, data, len);
529 } else {
530 /* Ugly optimization, but it works: */
531 if (len == sizeof(uint32_t) && (offset & 3)==0
532 && ((size_t)data&3)==0)
533 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
534 else if (len == sizeof(uint8_t))
535 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
536 else
537 memcpy(data, memblock + offset, len);
538 }
539
540
541 do_return_ok:
542 return MEMORY_ACCESS_OK;
543 }
543

  ViewVC Help
Powered by ViewVC 1.1.26