/[gxemul]/upstream/0.3.8/src/memory_rw.c



Revision 23
Mon Oct 8 16:19:43 2007 UTC by dpavlin
File MIME type: text/plain
File size: 17875 bytes
Log message: 0.3.8
/*
 *  Copyright (C) 2003-2006  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory_rw.c,v 1.82 2005/12/31 15:48:32 debug Exp $
 *
 *  Generic memory_rw(), with special hacks for specific CPU families.
 *
 *  Example for inclusion from memory_mips.c:
 *
 *      MEMORY_RW should be mips_memory_rw
 *      MEM_MIPS should be defined
 */
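
/*
 *  (Illustrative sketch, not part of the original file: under the
 *  conventions described above, a per-architecture wrapper such as
 *  memory_mips.c would boil down to something like the following;
 *  the real wrapper may differ in detail.)
 *
 *      #define MEM_MIPS
 *      #define MEMORY_RW mips_memory_rw
 *      #include "memory_rw.c"
 */
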
/*
 *  memory_rw():
 *
 *  Read or write data from/to memory.
 *
 *      cpu             the cpu doing the read/write
 *      mem             the memory object to use
 *      vaddr           the virtual address
 *      data            a pointer to the data to be written to memory, or
 *                      a placeholder for data when reading from memory
 *      len             the length of the 'data' buffer
 *      writeflag       set to MEM_READ or MEM_WRITE
 *      misc_flags      CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 *  If the address indicates access to a memory mapped device, that device's
 *  read/write access function is called.
 *
 *  If instruction latency/delay support is enabled, then
 *  cpu->instruction_delay is increased by the number of instructions by
 *  which to delay execution.
 *
 *  This function should not be called with cpu == NULL.
 *
 *  Returns one of the following:
 *      MEMORY_ACCESS_FAILED
 *      MEMORY_ACCESS_OK
 *
 *  (MEMORY_ACCESS_FAILED is 0.)
 */
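
/*
 *  (Illustrative call, not part of the original file: in a MIPS build,
 *  where MEMORY_RW expands to mips_memory_rw, a 4-byte data read could
 *  look roughly like this; the surrounding variables are hypothetical.)
 *
 *      unsigned char buf[4];
 *      if (mips_memory_rw(cpu, cpu->mem, vaddr, buf, sizeof(buf),
 *          MEM_READ, CACHE_DATA) == MEMORY_ACCESS_FAILED)
 *              return;    (the access caused an exception)
 */
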
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
    unsigned char *data, size_t len, int writeflag, int misc_flags)
{
#ifdef MEM_ALPHA
    const int offset_mask = 0x1fff;
#else
    const int offset_mask = 0xfff;
#endif

#ifndef MEM_USERLAND
    int ok = 1;
#endif
    uint64_t paddr;
    int cache, no_exceptions, offset;
    unsigned char *memblock;
#ifdef MEM_MIPS
    int bintrans_cached = cpu->machine->bintrans_enable;
#endif
    int dyntrans_device_danger = 0;

    no_exceptions = misc_flags & NO_EXCEPTIONS;
    cache = misc_flags & CACHE_FLAGS_MASK;
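
    /*
     *  (Descriptive note, not in the original: misc_flags multiplexes
     *  the cache selector, extracted with CACHE_FLAGS_MASK, together
     *  with orthogonal option bits such as NO_EXCEPTIONS and PHYSICAL;
     *  this assumes those constants occupy disjoint bit ranges.)
     */
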
#ifdef MEM_X86
    /*  Real-mode wrap-around:  */
    if (REAL_MODE && !(misc_flags & PHYSICAL)) {
        if ((vaddr & 0xffff) + len > 0x10000) {
            /*  Do one byte at a time:  */
            int res = 0;
            size_t i;
            for (i=0; i<len; i++)
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    writeflag, misc_flags);
            return res;
        }
    }
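
    /*
     *  (Illustrative note, not in the original: with e.g. a segment
     *  offset of 0xffff and len == 2, the two single-byte recursive
     *  calls let the offset wrap back to 0x0000 during address
     *  translation, matching 8086-style real-mode behavior.)
     */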

    /*  Crossing a page boundary? Then do one byte at a time:  */
    if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
        && cpu->cd.x86.cr[0] & X86_CR0_PG) {
        /*  For WRITES: Read ALL BYTES FIRST and write them back!!!
            Then do a write of all the new bytes. This is to make sure
            that both pages around the boundary are writable, so we
            don't end up doing a partial write.  */
        int res = 0;
        size_t i;
        if (writeflag == MEM_WRITE) {
            unsigned char tmp;
            for (i=0; i<len; i++) {
                res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
                    MEM_READ, misc_flags);
                if (!res)
                    return 0;
                res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
                    MEM_WRITE, misc_flags);
                if (!res)
                    return 0;
            }
            for (i=0; i<len; i++) {
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    MEM_WRITE, misc_flags);
                if (!res)
                    return 0;
            }
        } else {
            for (i=0; i<len; i++) {
                /*  Do one byte at a time:  */
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    writeflag, misc_flags);
                if (!res) {
                    if (cache == CACHE_INSTRUCTION) {
                        fatal("FAILED instruction "
                            "fetch across page boundar"
                            "y: todo. vaddr=0x%08x\n",
                            (int)vaddr);
                        cpu->running = 0;
                    }
                    return 0;
                }
            }
        }
        return res;
    }
#endif  /*  X86  */

#ifdef MEM_MIPS
    if (bintrans_cached) {
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_bintrans_host_4kpage = NULL;
            cpu->cd.mips.pc_bintrans_paddr_valid = 0;
        }
    }
#endif  /*  MEM_MIPS  */

#ifdef MEM_USERLAND
#ifdef MEM_ALPHA
    paddr = vaddr;
#else
    paddr = vaddr & 0x7fffffff;
#endif
    goto have_paddr;
#endif

#ifndef MEM_USERLAND
#ifdef MEM_MIPS
    /*
     *  For instruction fetch, are we on the same page as the last
     *  instruction we fetched?
     *
     *  NOTE: There's no need to check this stuff here if this address
     *  is known to be in host ram, as it's done at instruction fetch
     *  time in cpu.c!  Only check if _host_4k_page == NULL.
     */
    if (cache == CACHE_INSTRUCTION &&
        cpu->cd.mips.pc_last_host_4k_page == NULL &&
        (vaddr & ~0xfff) == cpu->cd.mips.pc_last_virtual_page) {
        paddr = cpu->cd.mips.pc_last_physical_page | (vaddr & 0xfff);
        goto have_paddr;
    }
#endif  /*  MEM_MIPS  */

    if (misc_flags & PHYSICAL || cpu->translate_address == NULL) {
        paddr = vaddr;
#ifdef MEM_ALPHA
        /*  paddr &= 0x1fffffff;  For testalpha  */
        paddr &= 0x000003ffffffffffULL;
#endif
    } else {
        ok = cpu->translate_address(cpu, vaddr, &paddr,
            (writeflag? FLAG_WRITEFLAG : 0) +
            (no_exceptions? FLAG_NOEXCEPTIONS : 0)
#ifdef MEM_X86
            + (misc_flags & NO_SEGMENTATION)
#endif
#ifdef MEM_ARM
            + (misc_flags & MEMORY_USER_ACCESS)
#endif
            + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
        /*  If the translation caused an exception, or was invalid in
            some way, we simply return without doing the memory
            access:  */
        if (!ok)
            return MEMORY_ACCESS_FAILED;
    }
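
    /*
     *  (Descriptive note, not in the original: the translation flags
     *  above are combined with '+' rather than '|'; the two are
     *  equivalent under the assumption that each flag value occupies
     *  its own disjoint bits.)
     */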

#ifdef MEM_X86
    /*  DOS debugging :-)  */
    if (!quiet_mode && !(misc_flags & PHYSICAL)) {
        if (paddr >= 0x400 && paddr <= 0x4ff)
            debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
                MEM_WRITE? "writing to" : "reading from",
                (int)paddr);
#if 0
        if (paddr >= 0xf0000 && paddr <= 0xfffff)
            debug("{ BIOS ACCESS: %s 0x%x }\n",
                writeflag == MEM_WRITE? "writing to" :
                "reading from", (int)paddr);
#endif
    }
#endif

#ifdef MEM_MIPS
    /*
     *  If correct cache emulation is enabled, and we need to simulate
     *  cache misses even from the instruction cache, we can't run directly
     *  from a host page. :-/
     */
#if defined(ENABLE_CACHE_EMULATION) && defined(ENABLE_INSTRUCTION_DELAYS)
#else
    if (cache == CACHE_INSTRUCTION) {
        cpu->cd.mips.pc_last_virtual_page = vaddr & ~0xfff;
        cpu->cd.mips.pc_last_physical_page = paddr & ~0xfff;
        cpu->cd.mips.pc_last_host_4k_page = NULL;

        /*  _last_host_4k_page will be set to 1 further down,
            if the page is actually in host ram  */
    }
#endif
#endif  /*  MEM_MIPS  */
#endif  /*  ifndef MEM_USERLAND  */


#if defined(MEM_MIPS) || defined(MEM_USERLAND)
have_paddr:
#endif


#ifdef MEM_MIPS
    /*  TODO: How about bintrans vs cache emulation?  */
    if (bintrans_cached) {
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_bintrans_paddr_valid = 1;
            cpu->cd.mips.pc_bintrans_paddr = paddr;
        }
    }
#endif  /*  MEM_MIPS  */



#ifndef MEM_USERLAND
    /*
     *  Memory mapped device?
     *
     *  TODO: if paddr < base, but the access is long enough to reach
     *  into the device, then the device should still be written to!
     */
    if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
        uint64_t orig_paddr = paddr;
        int i, start, end, res;

        /*
         *  Really really slow, but unfortunately necessary. This is
         *  to avoid the following scenario:
         *
         *      a) offsets 0x000..0x123 are normal memory
         *      b) offsets 0x124..0x777 are a device
         *
         *      1) a read is done from offset 0x100. the page is
         *         added to the dyntrans system as a "RAM" page
         *      2) a dyntranslated read is done from offset 0x200,
         *         which should access the device, but since the
         *         entire page is added, it will access non-existent
         *         RAM instead, without warning.
         *
         *  Setting dyntrans_device_danger = 1 on accesses which are
         *  on _any_ offset on pages that are device mapped avoids
         *  this problem, but it is probably not very fast.
         *
         *  TODO: Convert this into a quick (multi-level, 64-bit)
         *  address space lookup, to find dangerous pages.
         */
#if 1
        for (i=0; i<mem->n_mmapped_devices; i++)
            if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
                paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
                dyntrans_device_danger = 1;
                break;
            }
#endif
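
        /*
         *  (Descriptive note, not in the original: the linear scan
         *  above flags an access as dangerous whenever it falls
         *  anywhere on a dyntrans page, offset_mask wide, that any
         *  device overlaps, regardless of which device the access
         *  actually hits.)
         */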

        start = 0; end = mem->n_mmapped_devices - 1;
        i = mem->last_accessed_device;

        /*  Binary search through the devices:  */
        do {
            if (paddr >= mem->dev_baseaddr[i] &&
                paddr < mem->dev_endaddr[i]) {
                /*  Found a device, let's access it:  */
                mem->last_accessed_device = i;

                paddr -= mem->dev_baseaddr[i];
                if (paddr + len > mem->dev_length[i])
                    len = mem->dev_length[i] - paddr;

                if (cpu->update_translation_table != NULL &&
                    !(ok & MEMORY_NOT_FULL_PAGE) &&
                    mem->dev_flags[i] & DM_DYNTRANS_OK) {
                    int wf = writeflag == MEM_WRITE? 1 : 0;
                    unsigned char *host_addr;

                    if (!(mem->dev_flags[i] & DM_DYNTRANS_WRITE_OK))
                        wf = 0;

                    if (writeflag && wf) {
                        if (paddr < mem->dev_dyntrans_write_low[i])
                            mem->dev_dyntrans_write_low[i] =
                                paddr & ~offset_mask;
                        if (paddr >= mem->dev_dyntrans_write_high[i])
                            mem->dev_dyntrans_write_high[i] =
                                paddr | offset_mask;
                    }

                    if (mem->dev_flags[i] & DM_EMULATED_RAM) {
                        /*  MEM_WRITE to force the page to be
                            allocated, if it wasn't already  */
                        uint64_t *pp = (uint64_t *)
                            mem->dev_dyntrans_data[i];
                        uint64_t p = orig_paddr - *pp;
                        host_addr =
                            memory_paddr_to_hostaddr(mem, p, MEM_WRITE)
                            + (p & ~offset_mask
                            & ((1 << BITS_PER_MEMBLOCK) - 1));
                    } else {
                        host_addr = mem->dev_dyntrans_data[i] +
                            (paddr & ~offset_mask);
                    }
                    cpu->update_translation_table(cpu,
                        vaddr & ~offset_mask, host_addr,
                        wf, orig_paddr & ~offset_mask);
                }

                res = 0;
                if (!no_exceptions || (mem->dev_flags[i] &
                    DM_READS_HAVE_NO_SIDE_EFFECTS))
                    res = mem->dev_f[i](cpu, mem, paddr,
                        data, len, writeflag,
                        mem->dev_extra[i]);

#ifdef ENABLE_INSTRUCTION_DELAYS
                if (res == 0)
                    res = -1;

#ifdef MEM_MIPS
                cpu->cd.mips.instruction_delay +=
                    ( (abs(res) - 1) *
                    cpu->cd.mips.cpu_type.instrs_per_cycle );
#endif
#endif

#ifndef MEM_X86
                /*
                 *  If accessing the memory mapped device
                 *  failed, then return with a DBE exception.
                 */
                if (res <= 0 && !no_exceptions) {
                    debug("%s device '%s' addr %08lx "
                        "failed\n", writeflag?
                        "writing to" : "reading from",
                        mem->dev_name[i], (long)paddr);
#ifdef MEM_MIPS
                    mips_cpu_exception(cpu, EXCEPTION_DBE,
                        0, vaddr, 0, 0, 0, 0);
#endif
                    return MEMORY_ACCESS_FAILED;
                }
#endif
                goto do_return_ok;
            }

            if (paddr < mem->dev_baseaddr[i])
                end = i - 1;
            if (paddr >= mem->dev_endaddr[i])
                start = i + 1;
            i = (start + end) >> 1;
        } while (start <= end);
    }
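
    /*
     *  (Descriptive note, not in the original: the search above is a
     *  binary search, seeded with the most recently accessed device as
     *  the first probe; it relies on dev_baseaddr[]/dev_endaddr[] being
     *  kept sorted in ascending address order by the device
     *  registration code.)
     */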


#ifdef MEM_MIPS
    /*
     *  Data and instruction cache emulation:
     */

    switch (cpu->cd.mips.cpu_type.mmu_model) {
    case MMU3K:
        /*  if not an uncached address (TODO: generalize this)  */
        if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
            !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
              (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
            if (memory_cache_R3000(cpu, cache, paddr,
                writeflag, len, data))
                goto do_return_ok;
        }
        break;
    default:
        /*  R4000 etc  */
        /*  TODO  */
        ;
    }
#endif  /*  MEM_MIPS  */


    /*  Outside of physical RAM?  */
    if (paddr >= mem->physical_max) {
#ifdef MEM_MIPS
        if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
            /*  Ok, this is PROM stuff  */
        } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
            /*  Sprite reads from this area of memory...  */
            /*  TODO: is this still correct?  */
            if (writeflag == MEM_READ)
                memset(data, 0, len);
            goto do_return_ok;
        } else
#endif  /*  MIPS  */
        {
            if (paddr >= mem->physical_max) {
                char *symbol;
                uint64_t offset;
#ifdef MEM_MIPS
                uint64_t old_pc = cpu->cd.mips.pc_last;
#else
                uint64_t old_pc = cpu->pc;
#endif

                /*  This allows for example OS kernels to probe
                    memory a few KBs past the end of memory,
                    without giving too many warnings.  */
                if (!quiet_mode && !no_exceptions && paddr >=
                    mem->physical_max + 0x40000) {
                    fatal("[ memory_rw(): writeflag=%i ",
                        writeflag);
                    if (writeflag) {
                        unsigned int i;
                        debug("data={");
                        if (len > 16) {
                            int start2 = len-16;
                            for (i=0; i<16; i++)
                                debug("%s%02x",
                                    i?",":"", data[i]);
                            debug(" .. ");
                            if (start2 < 16)
                                start2 = 16;
                            for (i=start2; i<len; i++)
                                debug("%s%02x",
                                    i?",":"", data[i]);
                        } else
                            for (i=0; i<len; i++)
                                debug("%s%02x",
                                    i?",":"", data[i]);
                        debug("}");
                    }

                    fatal(" paddr=0x%llx >= physical_max"
                        "; pc=", (long long)paddr);
                    if (cpu->is_32bit)
                        fatal("0x%08x", (int)old_pc);
                    else
                        fatal("0x%016llx",
                            (long long)old_pc);
                    symbol = get_symbol_name(
                        &cpu->machine->symbol_context,
                        old_pc, &offset);
                    fatal(" <%s> ]\n",
                        symbol? symbol : " no symbol ");
                }

                if (cpu->machine->single_step_on_bad_addr) {
                    fatal("[ unimplemented access to "
                        "0x%llx, pc=0x", (long long)paddr);
                    if (cpu->is_32bit)
                        fatal("%08x ]\n", (int)old_pc);
                    else
                        fatal("%016llx ]\n",
                            (long long)old_pc);
                    single_step = 1;
                }
            }

            if (writeflag == MEM_READ) {
#ifdef MEM_X86
                /*  Reading non-existent memory on x86:  */
                memset(data, 0xff, len);
#else
                /*  Return all zeroes? (Or 0xff? TODO)  */
                memset(data, 0, len);
#endif

#ifdef MEM_MIPS
                /*
                 *  For real data/instruction accesses, cause
                 *  an exception on an illegal read:
                 */
                if (cache != CACHE_NONE && cpu->machine->
                    dbe_on_nonexistant_memaccess &&
                    !no_exceptions) {
                    if (paddr >= mem->physical_max &&
                        paddr < mem->physical_max+1048576)
                        mips_cpu_exception(cpu,
                            EXCEPTION_DBE, 0, vaddr, 0,
                            0, 0, 0);
                }
#endif  /*  MEM_MIPS  */
            }

            /*  Hm? Shouldn't there be a DBE exception for
                invalid writes as well? TODO  */

            goto do_return_ok;
        }
    }

#endif  /*  ifndef MEM_USERLAND  */


    /*
     *  Uncached access:
     *
     *  1)  Translate the physical address to a host address.
     *
     *  2)  Insert this virtual->physical->host translation into the
     *      fast translation arrays (using update_translation_table()).
     *
     *  3)  If this was a Write, then invalidate any code translations
     *      in that page.
     */
    memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
    if (memblock == NULL) {
        if (writeflag == MEM_READ)
            memset(data, 0, len);
        goto do_return_ok;
    }

    offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);

    if (cpu->update_translation_table != NULL && !dyntrans_device_danger
#ifndef MEM_MIPS
        /*  && !(misc_flags & MEMORY_USER_ACCESS)  */
#ifndef MEM_USERLAND
        && !(ok & MEMORY_NOT_FULL_PAGE)
#endif
#endif
        && !no_exceptions)
        cpu->update_translation_table(cpu, vaddr & ~offset_mask,
            memblock + (offset & ~offset_mask),
            (misc_flags & MEMORY_USER_ACCESS) |
#ifndef MEM_MIPS
            (cache == CACHE_INSTRUCTION? TLB_CODE : 0) |
#endif
#if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
            (cache == CACHE_INSTRUCTION?
            (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
#else
            (writeflag == MEM_WRITE? 1 : 0),
#endif
            paddr & ~offset_mask);

    /*  Invalidate code translations for the page we are writing to.  */
    if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
        cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);

    if (writeflag == MEM_WRITE) {
        /*  Ugly optimization, but it works:  */
        if (len == sizeof(uint32_t) && (offset & 3)==0
            && ((size_t)data&3)==0)
            *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
        else if (len == sizeof(uint8_t))
            *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
        else
            memcpy(memblock + offset, data, len);
    } else {
        /*  Ugly optimization, but it works:  */
        if (len == sizeof(uint32_t) && (offset & 3)==0
            && ((size_t)data&3)==0)
            *(uint32_t *)data = *(uint32_t *)(memblock + offset);
        else if (len == sizeof(uint8_t))
            *(uint8_t *)data = *(uint8_t *)(memblock + offset);
        else
            memcpy(data, memblock + offset, len);

#ifdef MEM_MIPS
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_last_host_4k_page = memblock
                + (offset & ~offset_mask);
            if (bintrans_cached) {
                cpu->cd.mips.pc_bintrans_host_4kpage =
                    cpu->cd.mips.pc_last_host_4k_page;
            }
        }
#endif  /*  MIPS  */
    }
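
    /*
     *  (Descriptive note, not in the original: the 4-byte and 1-byte
     *  fast paths above are plain byte-order-preserving copies, which
     *  is safe because 'data' holds the bytes exactly as they appear
     *  in emulated memory; any endianness conversion is the caller's
     *  responsibility.)
     */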


do_return_ok:
    return MEMORY_ACCESS_OK;
}
