/[gxemul]/trunk/src/memory_rw.c
Revision 16 - Mon Oct 8 16:19:01 2007 UTC, by dpavlin

++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.988 2005/10/11 03:53:57 debug Exp $

==============  RELEASE 0.3.6  ==============

20051008	The bug was not due to faulty ARM documentation after all,
		but was related to those parts of the code.
		Fixing the RTC (dev_mc146818) to work with CATS.
20051009	Rewriting the R() function; now there are 8192 automatically
		generated smaller functions doing the same thing, but hopefully
		faster. This also fixes some bugs which were triggered when
		trying to compile GXemul inside itself. :-)
		Adding a dummy dev_lpt.
20051010	Small hack to not update virtual translation tables if memory
		accesses are done with the NO_EXCEPTION flag; a time reduction
		of almost a factor of 2 for a full NetBSD/cats install. :-)
20051011	Passing -A as the default boot arg for CATS (works fine with
		OpenBSD/cats).

==============  RELEASE 0.3.6.1  ==============


/*
 *  Copyright (C) 2003-2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory_rw.c,v 1.65 2005/10/10 18:43:36 debug Exp $
 *
 *  Generic memory_rw(), with special hacks for specific CPU families.
 *
 *  Example for inclusion from memory_mips.c:
 *
 *	MEMORY_RW should be mips_memory_rw
 *	MEM_MIPS should be defined
 */
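/*
 *  A minimal sketch of such an including file (hypothetical; shown only to
 *  illustrate the template mechanism — the real memory_mips.c may differ):
 *
 *	#define MEM_MIPS
 *	#define MEMORY_RW mips_memory_rw
 *	#include "memory_rw.c"
 *	#undef MEMORY_RW
 *	#undef MEM_MIPS
 */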


/*
 *  memory_rw():
 *
 *  Read or write data from/to memory.
 *
 *	cpu		the cpu doing the read/write
 *	mem		the memory object to use
 *	vaddr		the virtual address
 *	data		a pointer to the data to be written to memory, or
 *			a placeholder for data when reading from memory
 *	len		the length of the 'data' buffer
 *	writeflag	set to MEM_READ or MEM_WRITE
 *	cache_flags	CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 *  If the address indicates access to a memory mapped device, that device's
 *  read/write access function is called.
 *
 *  If instruction latency/delay support is enabled, then
 *  cpu->instruction_delay is increased by the number of instructions to
 *  delay execution.
 *
 *  This function should not be called with cpu == NULL.
 *
 *  Returns one of the following:
 *	MEMORY_ACCESS_FAILED
 *	MEMORY_ACCESS_OK
 *
 *  (MEMORY_ACCESS_FAILED is 0.)
 */
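/*
 *  Example call (a sketch only; assumes a MIPS configuration, where this
 *  template expands to mips_memory_rw(), and a cpu/mem pair already set up
 *  by the machine code):
 *
 *	unsigned char buf[sizeof(uint32_t)];
 *	if (mips_memory_rw(cpu, cpu->mem, vaddr, buf, sizeof(buf),
 *	    MEM_READ, CACHE_DATA) == MEMORY_ACCESS_FAILED) {
 *		... the access failed; an exception may have been raised ...
 *	}
 */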
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
	unsigned char *data, size_t len, int writeflag, int cache_flags)
{
#ifdef MEM_ALPHA
	const int offset_mask = 0x1fff;
#else
	const int offset_mask = 0xfff;
#endif

#ifndef MEM_USERLAND
	int ok = 1;
#endif
	uint64_t paddr;
	int cache, no_exceptions, offset;
	unsigned char *memblock;
#ifdef MEM_MIPS
	int bintrans_cached = cpu->machine->bintrans_enable;
#endif
	int bintrans_device_danger = 0;

	no_exceptions = cache_flags & NO_EXCEPTIONS;
	cache = cache_flags & CACHE_FLAGS_MASK;

#ifdef MEM_X86
	/*  Real-mode wrap-around:  */
	if (REAL_MODE && !(cache_flags & PHYSICAL)) {
		if ((vaddr & 0xffff) + len > 0x10000) {
			/*  Do one byte at a time:  */
			int res = 0, i;
			for (i=0; i<len; i++)
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, cache_flags);
			return res;
		}
	}

	/*  Crossing a page boundary? Then do one byte at a time:  */
	if ((vaddr & 0xfff) + len > 0x1000 && !(cache_flags & PHYSICAL)
	    && cpu->cd.x86.cr[0] & X86_CR0_PG) {
		/*  For WRITES: Read ALL BYTES FIRST and write them back!!!
		    Then do a write of all the new bytes. This is to make sure
		    that both pages around the boundary are writable so we
		    don't do a partial write.  */
		int res = 0, i;
		if (writeflag == MEM_WRITE) {
			unsigned char tmp;
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_READ, cache_flags);
				if (!res)
					return 0;
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_WRITE, cache_flags);
				if (!res)
					return 0;
			}
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    MEM_WRITE, cache_flags);
				if (!res)
					return 0;
			}
		} else {
			for (i=0; i<len; i++) {
				/*  Do one byte at a time:  */
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, cache_flags);
				if (!res) {
					if (cache == CACHE_INSTRUCTION) {
						fatal("FAILED instruction "
						    "fetch across page boundar"
						    "y: todo. vaddr=0x%08x\n",
						    (int)vaddr);
						cpu->running = 0;
					}
					return 0;
				}
			}
		}
		return res;
	}
#endif	/*  X86  */

#ifdef MEM_MIPS
	if (bintrans_cached) {
		if (cache == CACHE_INSTRUCTION) {
			cpu->cd.mips.pc_bintrans_host_4kpage = NULL;
			cpu->cd.mips.pc_bintrans_paddr_valid = 0;
		}
	}
#endif	/*  MEM_MIPS  */

#ifdef MEM_USERLAND
#ifdef MEM_ALPHA
	paddr = vaddr;
#else
	paddr = vaddr & 0x7fffffff;
#endif
	goto have_paddr;
#endif

#ifndef MEM_USERLAND
#ifdef MEM_MIPS
	/*
	 *  For instruction fetch, are we on the same page as the last
	 *  instruction we fetched?
	 *
	 *  NOTE: There's no need to check this stuff here if this address
	 *  is known to be in host ram, as it's done at instruction fetch
	 *  time in cpu.c!  Only check if _host_4k_page == NULL.
	 */
	if (cache == CACHE_INSTRUCTION &&
	    cpu->cd.mips.pc_last_host_4k_page == NULL &&
	    (vaddr & ~0xfff) == cpu->cd.mips.pc_last_virtual_page) {
		paddr = cpu->cd.mips.pc_last_physical_page | (vaddr & 0xfff);
		goto have_paddr;
	}
#endif	/*  MEM_MIPS  */

	if (cache_flags & PHYSICAL || cpu->translate_address == NULL) {
		paddr = vaddr;

#ifdef MEM_ALPHA
		/*  paddr &= 0x1fffffff;  For testalpha  */
		paddr &= 0x000003ffffffffffULL;
#endif

#ifdef MEM_IA64
		/*  For testia64  */
		paddr &= 0x3fffffff;
#endif

#ifdef MEM_PPC
		if (cpu->cd.ppc.bits == 32)
			paddr &= 0xffffffff;
#endif

#ifdef MEM_SH
		paddr &= 0xffffffff;
#endif
	} else {
		ok = cpu->translate_address(cpu, vaddr, &paddr,
		    (writeflag? FLAG_WRITEFLAG : 0) +
		    (no_exceptions? FLAG_NOEXCEPTIONS : 0)
#ifdef MEM_X86
		    + (cache_flags & NO_SEGMENTATION)
#endif
#ifdef MEM_ARM
		    + (cache_flags & MEMORY_USER_ACCESS)
#endif
		    + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
		/*  If the translation caused an exception, or was invalid in
		    some way, we simply return without doing the memory
		    access:  */
		if (!ok)
			return MEMORY_ACCESS_FAILED;
	}


#ifdef MEM_X86
	/*  DOS debugging :-)  */
	if (!quiet_mode && !(cache_flags & PHYSICAL)) {
		if (paddr >= 0x400 && paddr <= 0x4ff)
			debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
			    MEM_WRITE? "writing to" : "reading from",
			    (int)paddr);
#if 0
		if (paddr >= 0xf0000 && paddr <= 0xfffff)
			debug("{ BIOS ACCESS: %s 0x%x }\n",
			    writeflag == MEM_WRITE? "writing to" :
			    "reading from", (int)paddr);
#endif
	}
#endif

#ifdef MEM_MIPS
	/*
	 *  If correct cache emulation is enabled, and we need to simulate
	 *  cache misses even from the instruction cache, we can't run
	 *  directly from a host page. :-/
	 */
#if defined(ENABLE_CACHE_EMULATION) && defined(ENABLE_INSTRUCTION_DELAYS)
#else
	if (cache == CACHE_INSTRUCTION) {
		cpu->cd.mips.pc_last_virtual_page = vaddr & ~0xfff;
		cpu->cd.mips.pc_last_physical_page = paddr & ~0xfff;
		cpu->cd.mips.pc_last_host_4k_page = NULL;

		/*  _last_host_4k_page will be set to 1 further down,
		    if the page is actually in host ram  */
	}
#endif
#endif	/*  MEM_MIPS  */
#endif	/*  ifndef MEM_USERLAND  */


#if defined(MEM_MIPS) || defined(MEM_USERLAND)
have_paddr:
#endif


#ifdef MEM_MIPS
	/*  TODO: How about bintrans vs cache emulation?  */
	if (bintrans_cached) {
		if (cache == CACHE_INSTRUCTION) {
			cpu->cd.mips.pc_bintrans_paddr_valid = 1;
			cpu->cd.mips.pc_bintrans_paddr = paddr;
		}
	}
#endif	/*  MEM_MIPS  */



#ifndef MEM_USERLAND
	/*
	 *  Memory mapped device?
	 *
	 *  TODO: this is utterly slow.
	 *  TODO2: if paddr<base, but len enough, then we should write
	 *  to a device to
	 */
	if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
		uint64_t orig_paddr = paddr;
		int i, start, res;

		/*
		 *  Really really slow, but unfortunately necessary. This is
		 *  to avoid the following scenario:
		 *
		 *	a) offsets 0x000..0x123 are normal memory
		 *	b) offsets 0x124..0x777 are a device
		 *
		 *	1) a read is done from offset 0x100. the page is
		 *	   added to the bintrans system as a "RAM" page
		 *	2) a bintranslated read is done from offset 0x200,
		 *	   which should access the device, but since the
		 *	   entire page is added, it will access non-existent
		 *	   RAM instead, without warning.
		 *
		 *  Setting bintrans_device_danger = 1 on accesses which are
		 *  on _any_ offset on pages that are device mapped avoids
		 *  this problem, but it is probably not very fast.
		 */
		for (i=0; i<mem->n_mmapped_devices; i++)
			if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
			    paddr <= ((mem->dev_baseaddr[i] +
			    mem->dev_length[i] - 1) | offset_mask)) {
				bintrans_device_danger = 1;
				break;
			}
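		/*
		 *  (Worked example, assuming the non-Alpha offset_mask of
		 *  0xfff: a device at base 0x124 with length 0x654 rounds
		 *  outward to the whole page 0x000..0xfff, so any access
		 *  anywhere in that page sets bintrans_device_danger.)
		 */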

		i = start = mem->last_accessed_device;

		/*  Scan through all devices:  */
		do {
			if (paddr >= mem->dev_baseaddr[i] &&
			    paddr < mem->dev_baseaddr[i] +
			    mem->dev_length[i]) {
				/*  Found a device, let's access it:  */
				mem->last_accessed_device = i;

				paddr -= mem->dev_baseaddr[i];
				if (paddr + len > mem->dev_length[i])
					len = mem->dev_length[i] - paddr;

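				/*
				 *  If the device allows direct (dyntrans)
				 *  access to its memory, map the page into
				 *  the translation table; for writes, also
				 *  widen the device's dirty-range watermarks
				 *  so it can later flush what was touched:
				 */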
				if (cpu->update_translation_table != NULL &&
				    mem->dev_flags[i] & MEM_DYNTRANS_OK) {
					int wf = writeflag == MEM_WRITE? 1 : 0;

					if (writeflag) {
						if (paddr < mem->
						    dev_dyntrans_write_low[i])
							mem->
							    dev_dyntrans_write_low[i]
							    = paddr & ~offset_mask;
						if (paddr >= mem->
						    dev_dyntrans_write_high[i])
							mem->
							    dev_dyntrans_write_high[i]
							    = paddr | offset_mask;
					}

					if (!(mem->dev_flags[i] &
					    MEM_DYNTRANS_WRITE_OK))
						wf = 0;

					cpu->update_translation_table(cpu,
					    vaddr & ~offset_mask,
					    mem->dev_dyntrans_data[i] +
					    (paddr & ~offset_mask),
					    wf, orig_paddr & ~offset_mask);
				}

				res = 0;
				if (!no_exceptions || (mem->dev_flags[i] &
				    MEM_READING_HAS_NO_SIDE_EFFECTS))
					res = mem->dev_f[i](cpu, mem, paddr,
					    data, len, writeflag,
					    mem->dev_extra[i]);

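				/*
				 *  (By convention in this code, a device
				 *  handler's return value is nonpositive on
				 *  failure; its magnitude minus one is used
				 *  below as the access latency in cycles.)
				 */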
#ifdef ENABLE_INSTRUCTION_DELAYS
				if (res == 0)
					res = -1;

				cpu->cd.mips.instruction_delay +=
				    ( (abs(res) - 1) *
				    cpu->cd.mips.cpu_type.instrs_per_cycle );
#endif

#ifndef MEM_X86
				/*
				 *  If accessing the memory mapped device
				 *  failed, then return with a DBE exception.
				 */
				if (res <= 0 && !no_exceptions) {
					debug("%s device '%s' addr %08lx "
					    "failed\n", writeflag?
					    "writing to" : "reading from",
					    mem->dev_name[i], (long)paddr);
#ifdef MEM_MIPS
					mips_cpu_exception(cpu, EXCEPTION_DBE,
					    0, vaddr, 0, 0, 0, 0);
#endif
					return MEMORY_ACCESS_FAILED;
				}
#endif
				goto do_return_ok;
			}

			i ++;
			if (i == mem->n_mmapped_devices)
				i = 0;
		} while (i != start);
	}


#ifdef MEM_MIPS
	/*
	 *  Data and instruction cache emulation:
	 */
	switch (cpu->cd.mips.cpu_type.mmu_model) {
	case MMU3K:
		/*  if not uncached address  (TODO: generalize this)  */
		if (!(cache_flags & PHYSICAL) && cache != CACHE_NONE &&
		    !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
		    (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
			if (memory_cache_R3000(cpu, cache, paddr,
			    writeflag, len, data))
				goto do_return_ok;
		}
		break;
	default:
		/*  R4000 etc  */
		/*  TODO  */
		;
	}
#endif	/*  MEM_MIPS  */


	/*  Outside of physical RAM?  */
	if (paddr >= mem->physical_max) {
#ifdef MEM_MIPS
		if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
			/*  Ok, this is PROM stuff  */
		} else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
			/*  Sprite reads from this area of memory...  */
			/*  TODO: is this still correct?  */
			if (writeflag == MEM_READ)
				memset(data, 0, len);
			goto do_return_ok;
		} else
#endif	/*  MIPS  */
		{
			if (paddr >= mem->physical_max) {
				char *symbol;
				uint64_t old_pc;
				uint64_t offset;

#ifdef MEM_MIPS
				old_pc = cpu->cd.mips.pc_last;
#else
				/*  Default instruction size on most
				    RISC archs is 32 bits:  */
				old_pc = cpu->pc - sizeof(uint32_t);
#endif

				/*  This allows for example OS kernels to probe
				    memory a few KBs past the end of memory,
				    without giving too many warnings.  */
				if (!quiet_mode && !no_exceptions && paddr >=
				    mem->physical_max + 0x40000) {
					fatal("[ memory_rw(): writeflag=%i ",
					    writeflag);
					if (writeflag) {
						unsigned int i;
						debug("data={");
						if (len > 16) {
							int start2 = len-16;
							for (i=0; i<16; i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
							debug(" .. ");
							if (start2 < 16)
								start2 = 16;
							for (i=start2; i<len;
							    i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
						} else
							for (i=0; i<len; i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
						debug("}");
					}

					fatal(" paddr=0x%llx >= physical_max"
					    "; pc=", (long long)paddr);
					if (cpu->is_32bit)
						fatal("0x%08x",(int)old_pc);
					else
						fatal("0x%016llx",
						    (long long)old_pc);
					symbol = get_symbol_name(
					    &cpu->machine->symbol_context,
					    old_pc, &offset);
					fatal(" <%s> ]\n",
					    symbol? symbol : " no symbol ");
				}

				if (cpu->machine->single_step_on_bad_addr) {
					fatal("[ unimplemented access to "
					    "0x%llx, pc=0x",(long long)paddr);
					if (cpu->is_32bit)
						fatal("%08x ]\n",
						    (int)old_pc);
					else
						fatal("%016llx ]\n",
						    (long long)old_pc);
					single_step = 1;
				}
			}

			if (writeflag == MEM_READ) {
#ifdef MEM_X86
				/*  Reading non-existent memory on x86:  */
				memset(data, 0xff, len);
#else
				/*  Return all zeroes? (Or 0xff? TODO)  */
				memset(data, 0, len);
#endif

#ifdef MEM_MIPS
				/*
				 *  For real data/instruction accesses, cause
				 *  an exception on an illegal read:
				 */
				if (cache != CACHE_NONE && cpu->machine->
				    dbe_on_nonexistant_memaccess &&
				    !no_exceptions) {
					if (paddr >= mem->physical_max &&
					    paddr < mem->physical_max+1048576)
						mips_cpu_exception(cpu,
						    EXCEPTION_DBE, 0, vaddr, 0,
						    0, 0, 0);
				}
#endif	/*  MEM_MIPS  */
			}

			/*  Hm? Shouldn't there be a DBE exception for
			    invalid writes as well? TODO  */

			goto do_return_ok;
		}
	}

#endif	/*  ifndef MEM_USERLAND  */


	/*
	 *  Uncached access:
	 */
	memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
	if (memblock == NULL) {
		if (writeflag == MEM_READ)
			memset(data, 0, len);
		goto do_return_ok;
	}

	offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);
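	/*  (offset is now paddr's position within its host memory block;
	    blocks are 1 << BITS_PER_MEMBLOCK bytes large.)  */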

	if (cpu->update_translation_table != NULL && !bintrans_device_danger
	    && !no_exceptions)
		cpu->update_translation_table(cpu, vaddr & ~offset_mask,
		    memblock + (offset & ~offset_mask),
#if 0
		    cache == CACHE_INSTRUCTION?
		    (writeflag == MEM_WRITE? 1 : 0)
		    : ok - 1,
#else
		    writeflag == MEM_WRITE? 1 : 0,
#endif
		    paddr & ~offset_mask);

	if (writeflag == MEM_WRITE &&
	    cpu->invalidate_code_translation != NULL)
		cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);

	if (writeflag == MEM_WRITE) {
		/*  Ugly optimization, but it works:  */
		if (len == sizeof(uint32_t) && (offset & 3)==0
		    && ((size_t)data&3)==0)
			*(uint32_t *)(memblock + offset) = *(uint32_t *)data;
		else if (len == sizeof(uint8_t))
			*(uint8_t *)(memblock + offset) = *(uint8_t *)data;
		else
			memcpy(memblock + offset, data, len);
	} else {
		/*  Ugly optimization, but it works:  */
		if (len == sizeof(uint32_t) && (offset & 3)==0
		    && ((size_t)data&3)==0)
			*(uint32_t *)data = *(uint32_t *)(memblock + offset);
		else if (len == sizeof(uint8_t))
			*(uint8_t *)data = *(uint8_t *)(memblock + offset);
		else
			memcpy(data, memblock + offset, len);

#ifdef MEM_MIPS
		if (cache == CACHE_INSTRUCTION) {
			cpu->cd.mips.pc_last_host_4k_page = memblock
			    + (offset & ~offset_mask);
			if (bintrans_cached) {
				cpu->cd.mips.pc_bintrans_host_4kpage =
				    cpu->cd.mips.pc_last_host_4k_page;
			}
		}
#endif	/*  MIPS  */
	}


do_return_ok:
	return MEMORY_ACCESS_OK;
}
