/[gxemul]/upstream/0.3.8/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.3.8/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 23 - (show annotations)
Mon Oct 8 16:19:43 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 17875 byte(s)
0.3.8
1 /*
2 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.82 2005/12/31 15:48:32 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device's
54 * read/write access function is called.
55 *
56 * If instruction latency/delay support is enabled, then
57 * cpu->instruction_delay is increased by the number of instructions to
58 * delay execution.
59 *
60 * This function should not be called with cpu == NULL.
61 *
62 * Returns one of the following:
63 * MEMORY_ACCESS_FAILED
64 * MEMORY_ACCESS_OK
65 *
66 * (MEMORY_ACCESS_FAILED is 0.)
67 */
68 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
69 unsigned char *data, size_t len, int writeflag, int misc_flags)
70 {
/* Page-offset mask: 0x1fff on Alpha, 0xfff elsewhere. Used throughout
to split addresses into page base (addr & ~offset_mask) and in-page
offset (addr & offset_mask). */
71 #ifdef MEM_ALPHA
72 const int offset_mask = 0x1fff;
73 #else
74 const int offset_mask = 0xfff;
75 #endif
76
77 #ifndef MEM_USERLAND
78 int ok = 1;
79 #endif
80 uint64_t paddr;
81 int cache, no_exceptions, offset;
82 unsigned char *memblock;
83 #ifdef MEM_MIPS
84 int bintrans_cached = cpu->machine->bintrans_enable;
85 #endif
/* Set to 1 when the accessed page overlaps a memory mapped device;
such pages must not be handed to the dyntrans fast path. */
86 int dyntrans_device_danger = 0;
87
88 no_exceptions = misc_flags & NO_EXCEPTIONS;
89 cache = misc_flags & CACHE_FLAGS_MASK;
90
91 #ifdef MEM_X86
92 /* Real-mode wrap-around: */
93 if (REAL_MODE && !(misc_flags & PHYSICAL)) {
94 if ((vaddr & 0xffff) + len > 0x10000) {
95 /* Do one byte at a time: */
96 int res = 0;
97 size_t i;
98 for (i=0; i<len; i++)
99 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
100 writeflag, misc_flags);
101 return res;
102 }
103 }
104
105 /* Crossing a page boundary? Then do one byte at a time: */
106 if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
107 && cpu->cd.x86.cr[0] & X86_CR0_PG) {
108 /* For WRITES: Read ALL BYTES FIRST and write them back!!!
109 Then do a write of all the new bytes. This is to make sure
110 that both pages around the boundary are writable so we don't
111 do a partial write. */
112 int res = 0;
113 size_t i;
114 if (writeflag == MEM_WRITE) {
115 unsigned char tmp;
116 for (i=0; i<len; i++) {
117 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
118 MEM_READ, misc_flags);
119 if (!res)
120 return 0;
121 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
122 MEM_WRITE, misc_flags);
123 if (!res)
124 return 0;
125 }
126 for (i=0; i<len; i++) {
127 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
128 MEM_WRITE, misc_flags);
129 if (!res)
130 return 0;
131 }
132 } else {
133 for (i=0; i<len; i++) {
134 /* Do one byte at a time: */
135 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
136 writeflag, misc_flags);
137 if (!res) {
138 if (cache == CACHE_INSTRUCTION) {
139 fatal("FAILED instruction "
140 "fetch across page boundar"
141 "y: todo. vaddr=0x%08x\n",
142 (int)vaddr);
143 cpu->running = 0;
144 }
145 return 0;
146 }
147 }
148 }
149 return res;
150 }
151 #endif /* X86 */
152
153 #ifdef MEM_MIPS
/* Invalidate the cached bintrans instruction-page info; it is
re-established further down once paddr is known. */
154 if (bintrans_cached) {
155 if (cache == CACHE_INSTRUCTION) {
156 cpu->cd.mips.pc_bintrans_host_4kpage = NULL;
157 cpu->cd.mips.pc_bintrans_paddr_valid = 0;
158 }
159 }
160 #endif /* MEM_MIPS */
161
162 #ifdef MEM_USERLAND
/* Userland emulation: no MMU; the virtual address is used (almost)
directly as the physical address. */
163 #ifdef MEM_ALPHA
164 paddr = vaddr;
165 #else
166 paddr = vaddr & 0x7fffffff;
167 #endif
168 goto have_paddr;
169 #endif
170
171 #ifndef MEM_USERLAND
172 #ifdef MEM_MIPS
173 /*
174 * For instruction fetch, are we on the same page as the last
175 * instruction we fetched?
176 *
177 * NOTE: There's no need to check this stuff here if this address
178 * is known to be in host ram, as it's done at instruction fetch
179 * time in cpu.c! Only check if _host_4k_page == NULL.
180 */
181 if (cache == CACHE_INSTRUCTION &&
182 cpu->cd.mips.pc_last_host_4k_page == NULL &&
183 (vaddr & ~0xfff) == cpu->cd.mips.pc_last_virtual_page) {
184 paddr = cpu->cd.mips.pc_last_physical_page | (vaddr & 0xfff);
185 goto have_paddr;
186 }
187 #endif /* MEM_MIPS */
188
/* Either the caller passed a physical address already, or there is no
address translation function: use vaddr as-is.  Otherwise run the
CPU-specific virtual-to-physical translation. */
189 if (misc_flags & PHYSICAL || cpu->translate_address == NULL) {
190 paddr = vaddr;
191 #ifdef MEM_ALPHA
192 /* paddr &= 0x1fffffff; For testalpha */
193 paddr &= 0x000003ffffffffffULL;
194 #endif
195 } else {
196 ok = cpu->translate_address(cpu, vaddr, &paddr,
197 (writeflag? FLAG_WRITEFLAG : 0) +
198 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
199 #ifdef MEM_X86
200 + (misc_flags & NO_SEGMENTATION)
201 #endif
202 #ifdef MEM_ARM
203 + (misc_flags & MEMORY_USER_ACCESS)
204 #endif
205 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
206 /* If the translation caused an exception, or was invalid in
207 some way, we simply return without doing the memory
208 access: */
209 if (!ok)
210 return MEMORY_ACCESS_FAILED;
211 }
212
213
214 #ifdef MEM_X86
215 /* DOS debugging :-) */
216 if (!quiet_mode && !(misc_flags & PHYSICAL)) {
217 if (paddr >= 0x400 && paddr <= 0x4ff)
218 debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
219 MEM_WRITE? "writing to" : "reading from",
220 (int)paddr);
221 #if 0
222 if (paddr >= 0xf0000 && paddr <= 0xfffff)
223 debug("{ BIOS ACCESS: %s 0x%x }\n",
224 writeflag == MEM_WRITE? "writing to" :
225 "reading from", (int)paddr);
226 #endif
227 }
228 #endif
229
230 #ifdef MEM_MIPS
231 /*
232 * If correct cache emulation is enabled, and we need to simulate
233 * cache misses even from the instruction cache, we can't run directly
234 * from a host page. :-/
235 */
236 #if defined(ENABLE_CACHE_EMULATION) && defined(ENABLE_INSTRUCTION_DELAYS)
237 #else
238 if (cache == CACHE_INSTRUCTION) {
239 cpu->cd.mips.pc_last_virtual_page = vaddr & ~0xfff;
240 cpu->cd.mips.pc_last_physical_page = paddr & ~0xfff;
241 cpu->cd.mips.pc_last_host_4k_page = NULL;
242
243 /* _last_host_4k_page will be set to the actual host page
244 further down, if the page is actually in host ram */
245 }
246 #endif
247 #endif /* MEM_MIPS */
248 #endif /* ifndef MEM_USERLAND */
249
250
251 #if defined(MEM_MIPS) || defined(MEM_USERLAND)
252 have_paddr:
253 #endif
254
255
256 #ifdef MEM_MIPS
257 /* TODO: How about bintrans vs cache emulation? */
258 if (bintrans_cached) {
259 if (cache == CACHE_INSTRUCTION) {
260 cpu->cd.mips.pc_bintrans_paddr_valid = 1;
261 cpu->cd.mips.pc_bintrans_paddr = paddr;
262 }
263 }
264 #endif /* MEM_MIPS */
265
266
267
268 #ifndef MEM_USERLAND
269 /*
270 * Memory mapped device?
271 *
272 * TODO: if paddr < base, but len enough, then the device should
273 * still be written to!
274 */
275 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
276 uint64_t orig_paddr = paddr;
277 int i, start, end, res;
278
279 /*
280 * Really really slow, but unfortunately necessary. This is
281 * to avoid the following scenario:
282 *
283 * a) offsets 0x000..0x123 are normal memory
284 * b) offsets 0x124..0x777 are a device
285 *
286 * 1) a read is done from offset 0x100. the page is
287 * added to the dyntrans system as a "RAM" page
288 * 2) a dyntranslated read is done from offset 0x200,
289 * which should access the device, but since the
290 * entire page is added, it will access non-existent
291 * RAM instead, without warning.
292 *
293 * Setting dyntrans_device_danger = 1 on accesses which are
294 * on _any_ offset on pages that are device mapped avoids
295 * this problem, but it is probably not very fast.
296 *
297 * TODO: Convert this into a quick (multi-level, 64-bit)
298 * address space lookup, to find dangerous pages.
299 */
300 #if 1
301 for (i=0; i<mem->n_mmapped_devices; i++)
302 if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
303 paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
304 dyntrans_device_danger = 1;
305 break;
306 }
307 #endif
308
/* Binary search for the device containing paddr, seeded with the
most recently accessed device. NOTE(review): this assumes
dev_baseaddr[] is kept sorted by base address — verify at the
registration site. */
309 start = 0; end = mem->n_mmapped_devices - 1;
310 i = mem->last_accessed_device;
311
312 /* Scan through all devices: */
313 do {
314 if (paddr >= mem->dev_baseaddr[i] &&
315 paddr < mem->dev_endaddr[i]) {
316 /* Found a device, let's access it: */
317 mem->last_accessed_device = i;
318
/* From here on, paddr is the offset into the device,
and len is clamped to the device's length. */
319 paddr -= mem->dev_baseaddr[i];
320 if (paddr + len > mem->dev_length[i])
321 len = mem->dev_length[i] - paddr;
322
323 if (cpu->update_translation_table != NULL &&
324 !(ok & MEMORY_NOT_FULL_PAGE) &&
325 mem->dev_flags[i] & DM_DYNTRANS_OK) {
326 int wf = writeflag == MEM_WRITE? 1 : 0;
327 unsigned char *host_addr;
328
329 if (!(mem->dev_flags[i] &
330 DM_DYNTRANS_WRITE_OK))
331 wf = 0;
332
/* Track the low/high watermarks of dyntrans-visible
writes into this device, page-aligned. */
333 if (writeflag && wf) {
334 if (paddr < mem->
335 dev_dyntrans_write_low[i])
336 mem->
337 dev_dyntrans_write_low
338 [i] = paddr &
339 ~offset_mask;
340 if (paddr >= mem->
341 dev_dyntrans_write_high[i])
342 mem->
343 dev_dyntrans_write_high
344 [i] = paddr |
345 offset_mask;
346 }
347
348 if (mem->dev_flags[i] &
349 DM_EMULATED_RAM) {
350 /* MEM_WRITE to force the page
351 to be allocated, if it
352 wasn't already */
353 uint64_t *pp = (uint64_t *)
354 mem->dev_dyntrans_data[i];
355 uint64_t p = orig_paddr - *pp;
356 host_addr =
357 memory_paddr_to_hostaddr(
358 mem, p, MEM_WRITE)
359 + (p & ~offset_mask
360 & ((1 <<
361 BITS_PER_MEMBLOCK) - 1));
362 } else {
363 host_addr =
364 mem->dev_dyntrans_data[i] +
365 (paddr & ~offset_mask);
366 }
367 cpu->update_translation_table(cpu,
368 vaddr & ~offset_mask, host_addr,
369 wf, orig_paddr & ~offset_mask);
370 }
371
/* Call the device handler, unless exceptions are suppressed
and reads through this device have side effects. */
372 res = 0;
373 if (!no_exceptions || (mem->dev_flags[i] &
374 DM_READS_HAVE_NO_SIDE_EFFECTS))
375 res = mem->dev_f[i](cpu, mem, paddr,
376 data, len, writeflag,
377 mem->dev_extra[i]);
378
379 #ifdef ENABLE_INSTRUCTION_DELAYS
/* Map res==0 to -1: abs(-1)-1 == 0 so no extra delay is
added, and res stays <= 0 so the failure check below
still fires. */
380 if (res == 0)
381 res = -1;
382
383 #ifdef MEM_MIPS
384 cpu->cd.mips.instruction_delay +=
385 ( (abs(res) - 1) *
386 cpu->cd.mips.cpu_type.instrs_per_cycle );
387 #endif
388 #endif
389
390 #ifndef MEM_X86
391 /*
392 * If accessing the memory mapped device
393 * failed, then return with a DBE exception.
394 */
395 if (res <= 0 && !no_exceptions) {
396 debug("%s device '%s' addr %08lx "
397 "failed\n", writeflag?
398 "writing to" : "reading from",
399 mem->dev_name[i], (long)paddr);
400 #ifdef MEM_MIPS
401 mips_cpu_exception(cpu, EXCEPTION_DBE,
402 0, vaddr, 0, 0, 0, 0);
403 #endif
404 return MEMORY_ACCESS_FAILED;
405 }
406 #endif
407 goto do_return_ok;
408 }
409
/* Not this device: narrow the binary-search window. */
410 if (paddr < mem->dev_baseaddr[i])
411 end = i - 1;
412 if (paddr >= mem->dev_endaddr[i])
413 start = i + 1;
414 i = (start + end) >> 1;
415 } while (start <= end);
416 }
417
418
419 #ifdef MEM_MIPS
420 /*
421 * Data and instruction cache emulation:
422 */
423
424 switch (cpu->cd.mips.cpu_type.mmu_model) {
425 case MMU3K:
426 /* if not uncached address (TODO: generalize this) */
427 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
428 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
429 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
430 if (memory_cache_R3000(cpu, cache, paddr,
431 writeflag, len, data))
432 goto do_return_ok;
433 }
434 break;
435 default:
436 /* R4000 etc */
437 /* TODO */
438 ;
439 }
440 #endif /* MEM_MIPS */
441
442
443 /* Outside of physical RAM? */
444 if (paddr >= mem->physical_max) {
445 #ifdef MEM_MIPS
446 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
447 /* Ok, this is PROM stuff */
448 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
449 /* Sprite reads from this area of memory... */
450 /* TODO: is this still correct? */
451 if (writeflag == MEM_READ)
452 memset(data, 0, len);
453 goto do_return_ok;
454 } else
455 #endif /* MIPS */
456 {
457 if (paddr >= mem->physical_max) {
458 char *symbol;
459 uint64_t offset;
460 #ifdef MEM_MIPS
461 uint64_t old_pc = cpu->cd.mips.pc_last;
462 #else
463 uint64_t old_pc = cpu->pc;
464 #endif
465
466 /* This allows for example OS kernels to probe
467 memory a few KBs past the end of memory,
468 without giving too many warnings. */
469 if (!quiet_mode && !no_exceptions && paddr >=
470 mem->physical_max + 0x40000) {
470 fatal("[ memory_rw(): writeflag=%i ",
472 writeflag);
473 if (writeflag) {
474 unsigned int i;
475 debug("data={", writeflag);
/* For long writes, print the first and last 16 bytes
with " .. " in between. */
476 if (len > 16) {
477 int start2 = len-16;
478 for (i=0; i<16; i++)
479 debug("%s%02x",
480 i?",":"",
481 data[i]);
482 debug(" .. ");
483 if (start2 < 16)
484 start2 = 16;
485 for (i=start2; i<len;
486 i++)
487 debug("%s%02x",
488 i?",":"",
489 data[i]);
490 } else
491 for (i=0; i<len; i++)
492 debug("%s%02x",
493 i?",":"",
494 data[i]);
495 debug("}");
496 }
497
498 fatal(" paddr=0x%llx >= physical_max"
499 "; pc=", (long long)paddr);
500 if (cpu->is_32bit)
501 fatal("0x%08x",(int)old_pc);
502 else
503 fatal("0x%016llx",
504 (long long)old_pc);
505 symbol = get_symbol_name(
506 &cpu->machine->symbol_context,
507 old_pc, &offset);
508 fatal(" <%s> ]\n",
509 symbol? symbol : " no symbol ");
510 }
511
512 if (cpu->machine->single_step_on_bad_addr) {
513 fatal("[ unimplemented access to "
514 "0x%llx, pc=0x",(long long)paddr);
515 if (cpu->is_32bit)
516 fatal("%08x ]\n",
517 (int)old_pc);
518 else
519 fatal("%016llx ]\n",
520 (long long)old_pc);
521 single_step = 1;
522 }
523 }
524
525 if (writeflag == MEM_READ) {
526 #ifdef MEM_X86
527 /* Reading non-existent memory on x86: */
528 memset(data, 0xff, len);
529 #else
530 /* Return all zeroes? (Or 0xff? TODO) */
531 memset(data, 0, len);
532 #endif
533
534 #ifdef MEM_MIPS
535 /*
536 * For real data/instruction accesses, cause
537 * an exception on an illegal read:
538 */
539 if (cache != CACHE_NONE && cpu->machine->
540 dbe_on_nonexistant_memaccess &&
541 !no_exceptions) {
542 if (paddr >= mem->physical_max &&
543 paddr < mem->physical_max+1048576)
544 mips_cpu_exception(cpu,
545 EXCEPTION_DBE, 0, vaddr, 0,
546 0, 0, 0);
547 }
548 #endif /* MEM_MIPS */
549 }
550
551 /* Hm? Shouldn't there be a DBE exception for
552 invalid writes as well? TODO */
553
554 goto do_return_ok;
555 }
556 }
557
558 #endif /* ifndef MEM_USERLAND */
559
560
561 /*
562 * Uncached access:
563 *
564 * 1) Translate the physical address to a host address.
565 *
566 * 2) Insert this virtual->physical->host translation into the
567 * fast translation arrays (using update_translation_table()).
568 *
569 * 3) If this was a Write, then invalidate any code translations
570 * in that page.
571 */
572 memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
573 if (memblock == NULL) {
574 if (writeflag == MEM_READ)
575 memset(data, 0, len);
576 goto do_return_ok;
577 }
578
579 offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);
580
581 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
582 #ifndef MEM_MIPS
583 /* && !(misc_flags & MEMORY_USER_ACCESS) */
584 #ifndef MEM_USERLAND
585 && !(ok & MEMORY_NOT_FULL_PAGE)
586 #endif
587 #endif
588 && !no_exceptions)
589 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
590 memblock + (offset & ~offset_mask),
591 (misc_flags & MEMORY_USER_ACCESS) |
592 #ifndef MEM_MIPS
593 (cache == CACHE_INSTRUCTION? TLB_CODE : 0) |
594 #endif
595 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
596 (cache == CACHE_INSTRUCTION?
597 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
598 #else
599 (writeflag == MEM_WRITE? 1 : 0),
600 #endif
601 paddr & ~offset_mask);
602
603 /* Invalidate code translations for the page we are writing to. */
604 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
605 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
606
607 if (writeflag == MEM_WRITE) {
608 /* Ugly optimization, but it works: */
/* NOTE(review): a closing ")" appears to be missing at the end of the
next line in this copy — compare the identical condition in the read
path below. Likely a transcription artifact; verify against the
original file. */
609 if (len == sizeof(uint32_t) && (offset & 3)==0
610 && ((size_t)data&3)==0
611 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
612 else if (len == sizeof(uint8_t))
613 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
614 else
615 memcpy(memblock + offset, data, len);
616 } else {
617 /* Ugly optimization, but it works: */
618 if (len == sizeof(uint32_t) && (offset & 3)==0
619 && ((size_t)data&3)==0)
620 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
621 else if (len == sizeof(uint8_t))
622 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
623 else
624 memcpy(data, memblock + offset, len);
625
626 #ifdef MEM_MIPS
/* Instruction fetch from host RAM: remember the host page so the
fast fetch path in cpu.c (see comment near the top) can be used. */
627 if (cache == CACHE_INSTRUCTION) {
628 cpu->cd.mips.pc_last_host_4k_page = memblock
629 + (offset & ~offset_mask);
630 if (bintrans_cached) {
631 cpu->cd.mips.pc_bintrans_host_4kpage =
632 cpu->cd.mips.pc_last_host_4k_page;
633 }
634 }
635 #endif /* MIPS */
636 }
637
638
639 do_return_ok:
640 return MEMORY_ACCESS_OK;
641 }
642

  ViewVC Help
Powered by ViewVC 1.1.26