/[gxemul]/upstream/0.4.1/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.4.1/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 29 - (show annotations)
Mon Oct 8 16:20:32 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 15142 byte(s)
0.4.1
1 /*
2 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.93 2006/07/14 16:33:27 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device'
54 * read/write access function is called.
55 *
56 * This function should not be called with cpu == NULL.
57 *
58 * Returns one of the following:
59 * MEMORY_ACCESS_FAILED
60 * MEMORY_ACCESS_OK
61 *
62 * (MEMORY_ACCESS_FAILED is 0.)
63 */
64 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
65 unsigned char *data, size_t len, int writeflag, int misc_flags)
66 {
67 #ifdef MEM_ALPHA
68 const int offset_mask = 0x1fff;
69 #else
70 const int offset_mask = 0xfff;
71 #endif
72
73 #ifndef MEM_USERLAND
74 int ok = 1;
75 #endif
76 uint64_t paddr;
77 int cache, no_exceptions, offset;
78 unsigned char *memblock;
79 int dyntrans_device_danger = 0;
80
81 no_exceptions = misc_flags & NO_EXCEPTIONS;
82 cache = misc_flags & CACHE_FLAGS_MASK;
83
84 #ifdef MEM_X86
85 /* Real-mode wrap-around: */
86 if (REAL_MODE && !(misc_flags & PHYSICAL)) {
87 if ((vaddr & 0xffff) + len > 0x10000) {
88 /* Do one byte at a time: */
89 int res = 0;
90 size_t i;
91 for (i=0; i<len; i++)
92 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
93 writeflag, misc_flags);
94 return res;
95 }
96 }
97
98 /* Crossing a page boundary? Then do one byte at a time: */
99 if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
100 && cpu->cd.x86.cr[0] & X86_CR0_PG) {
101 /* For WRITES: Read ALL BYTES FIRST and write them back!!!
102 Then do a write of all the new bytes. This is to make sure
103 than both pages around the boundary are writable so we don't
104 do a partial write. */
105 int res = 0;
106 size_t i;
107 if (writeflag == MEM_WRITE) {
108 unsigned char tmp;
109 for (i=0; i<len; i++) {
110 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
111 MEM_READ, misc_flags);
112 if (!res)
113 return 0;
114 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
115 MEM_WRITE, misc_flags);
116 if (!res)
117 return 0;
118 }
119 for (i=0; i<len; i++) {
120 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
121 MEM_WRITE, misc_flags);
122 if (!res)
123 return 0;
124 }
125 } else {
126 for (i=0; i<len; i++) {
127 /* Do one byte at a time: */
128 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
129 writeflag, misc_flags);
130 if (!res) {
131 if (cache == CACHE_INSTRUCTION) {
132 fatal("FAILED instruction "
133 "fetch across page boundar"
134 "y: todo. vaddr=0x%08x\n",
135 (int)vaddr);
136 cpu->running = 0;
137 }
138 return 0;
139 }
140 }
141 }
142 return res;
143 }
144 #endif /* X86 */
145
146
147 #ifdef MEM_USERLAND
148 #ifdef MEM_ALPHA
149 paddr = vaddr;
150 #else
151 paddr = vaddr & 0x7fffffff;
152 #endif
153 #else /* !MEM_USERLAND */
154 if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
155 paddr = vaddr;
156 } else {
157 ok = cpu->translate_v2p(cpu, vaddr, &paddr,
158 (writeflag? FLAG_WRITEFLAG : 0) +
159 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
160 #ifdef MEM_X86
161 + (misc_flags & NO_SEGMENTATION)
162 #endif
163 #ifdef MEM_ARM
164 + (misc_flags & MEMORY_USER_ACCESS)
165 #endif
166 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
167 /* If the translation caused an exception, or was invalid in
168 some way, we simply return without doing the memory
169 access: */
170 if (!ok)
171 return MEMORY_ACCESS_FAILED;
172 }
173
174
175 #ifdef MEM_X86
176 /* DOS debugging :-) */
177 if (!quiet_mode && !(misc_flags & PHYSICAL)) {
178 if (paddr >= 0x400 && paddr <= 0x4ff)
179 debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
180 MEM_WRITE? "writing to" : "reading from",
181 (int)paddr);
182 #if 0
183 if (paddr >= 0xf0000 && paddr <= 0xfffff)
184 debug("{ BIOS ACCESS: %s 0x%x }\n",
185 writeflag == MEM_WRITE? "writing to" :
186 "reading from", (int)paddr);
187 #endif
188 }
189 #endif
190 #endif /* !MEM_USERLAND */
191
192
193 #ifndef MEM_USERLAND
194 /*
195 * Memory mapped device?
196 *
197 * TODO: if paddr < base, but len enough, then the device should
198 * still be written to!
199 */
200 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
201 uint64_t orig_paddr = paddr;
202 int i, start, end, res;
203
204 /*
205 * Really really slow, but unfortunately necessary. This is
206 * to avoid the folowing scenario:
207 *
208 * a) offsets 0x000..0x123 are normal memory
209 * b) offsets 0x124..0x777 are a device
210 *
211 * 1) a read is done from offset 0x100. the page is
212 * added to the dyntrans system as a "RAM" page
213 * 2) a dyntranslated read is done from offset 0x200,
214 * which should access the device, but since the
215 * entire page is added, it will access non-existant
216 * RAM instead, without warning.
217 *
218 * Setting dyntrans_device_danger = 1 on accesses which are
219 * on _any_ offset on pages that are device mapped avoids
220 * this problem, but it is probably not very fast.
221 *
222 * TODO: Convert this into a quick (multi-level, 64-bit)
223 * address space lookup, to find dangerous pages.
224 */
225 #if 1
226 for (i=0; i<mem->n_mmapped_devices; i++)
227 if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
228 paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
229 dyntrans_device_danger = 1;
230 break;
231 }
232 #endif
233
234 start = 0; end = mem->n_mmapped_devices - 1;
235 i = mem->last_accessed_device;
236
237 /* Scan through all devices: */
238 do {
239 if (paddr >= mem->dev_baseaddr[i] &&
240 paddr < mem->dev_endaddr[i]) {
241 /* Found a device, let's access it: */
242 mem->last_accessed_device = i;
243
244 paddr -= mem->dev_baseaddr[i];
245 if (paddr + len > mem->dev_length[i])
246 len = mem->dev_length[i] - paddr;
247
248 if (cpu->update_translation_table != NULL &&
249 !(ok & MEMORY_NOT_FULL_PAGE) &&
250 mem->dev_flags[i] & DM_DYNTRANS_OK) {
251 int wf = writeflag == MEM_WRITE? 1 : 0;
252 unsigned char *host_addr;
253
254 if (!(mem->dev_flags[i] &
255 DM_DYNTRANS_WRITE_OK))
256 wf = 0;
257
258 if (writeflag && wf) {
259 if (paddr < mem->
260 dev_dyntrans_write_low[i])
261 mem->
262 dev_dyntrans_write_low
263 [i] = paddr &
264 ~offset_mask;
265 if (paddr >= mem->
266 dev_dyntrans_write_high[i])
267 mem->
268 dev_dyntrans_write_high
269 [i] = paddr |
270 offset_mask;
271 }
272
273 if (mem->dev_flags[i] &
274 DM_EMULATED_RAM) {
275 /* MEM_WRITE to force the page
276 to be allocated, if it
277 wasn't already */
278 uint64_t *pp = (uint64_t *)
279 mem->dev_dyntrans_data[i];
280 uint64_t p = orig_paddr - *pp;
281 host_addr =
282 memory_paddr_to_hostaddr(
283 mem, p & ~offset_mask,
284 MEM_WRITE);
285 } else {
286 host_addr =
287 mem->dev_dyntrans_data[i] +
288 (paddr & ~offset_mask);
289 }
290
291 cpu->update_translation_table(cpu,
292 vaddr & ~offset_mask, host_addr,
293 wf, orig_paddr & ~offset_mask);
294 }
295
296 res = 0;
297 if (!no_exceptions || (mem->dev_flags[i] &
298 DM_READS_HAVE_NO_SIDE_EFFECTS))
299 res = mem->dev_f[i](cpu, mem, paddr,
300 data, len, writeflag,
301 mem->dev_extra[i]);
302
303 if (res == 0)
304 res = -1;
305
306 #ifndef MEM_X86
307 /*
308 * If accessing the memory mapped device
309 * failed, then return with a DBE exception.
310 */
311 if (res <= 0 && !no_exceptions) {
312 debug("%s device '%s' addr %08lx "
313 "failed\n", writeflag?
314 "writing to" : "reading from",
315 mem->dev_name[i], (long)paddr);
316 #ifdef MEM_MIPS
317 mips_cpu_exception(cpu, EXCEPTION_DBE,
318 0, vaddr, 0, 0, 0, 0);
319 #endif
320 return MEMORY_ACCESS_FAILED;
321 }
322 #endif
323 goto do_return_ok;
324 }
325
326 if (paddr < mem->dev_baseaddr[i])
327 end = i - 1;
328 if (paddr >= mem->dev_endaddr[i])
329 start = i + 1;
330 i = (start + end) >> 1;
331 } while (start <= end);
332 }
333
334
335 #ifdef MEM_MIPS
336 /*
337 * Data and instruction cache emulation:
338 */
339
340 switch (cpu->cd.mips.cpu_type.mmu_model) {
341 case MMU3K:
342 /* if not uncached addess (TODO: generalize this) */
343 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
344 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
345 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
346 if (memory_cache_R3000(cpu, cache, paddr,
347 writeflag, len, data))
348 goto do_return_ok;
349 }
350 break;
351 default:
352 /* R4000 etc */
353 /* TODO */
354 ;
355 }
356 #endif /* MEM_MIPS */
357
358
359 /* Outside of physical RAM? */
360 if (paddr >= mem->physical_max) {
361 #ifdef MEM_MIPS
362 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
363 /* Ok, this is PROM stuff */
364 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
365 /* Sprite reads from this area of memory... */
366 /* TODO: is this still correct? */
367 if (writeflag == MEM_READ)
368 memset(data, 0, len);
369 goto do_return_ok;
370 } else
371 #endif /* MIPS */
372 {
373 if (paddr >= mem->physical_max) {
374 uint64_t offset, old_pc = cpu->pc;
375 char *symbol;
376
377 /* This allows for example OS kernels to probe
378 memory a few KBs past the end of memory,
379 without giving too many warnings. */
380 if (!quiet_mode && !no_exceptions && paddr >=
381 mem->physical_max + 0x40000) {
382 fatal("[ memory_rw(): writeflag=%i ",
383 writeflag);
384 if (writeflag) {
385 unsigned int i;
386 debug("data={", writeflag);
387 if (len > 16) {
388 int start2 = len-16;
389 for (i=0; i<16; i++)
390 debug("%s%02x",
391 i?",":"",
392 data[i]);
393 debug(" .. ");
394 if (start2 < 16)
395 start2 = 16;
396 for (i=start2; i<len;
397 i++)
398 debug("%s%02x",
399 i?",":"",
400 data[i]);
401 } else
402 for (i=0; i<len; i++)
403 debug("%s%02x",
404 i?",":"",
405 data[i]);
406 debug("}");
407 }
408
409 fatal(" paddr=0x%llx >= physical_max"
410 "; pc=", (long long)paddr);
411 if (cpu->is_32bit)
412 fatal("0x%08x",(int)old_pc);
413 else
414 fatal("0x%016llx",
415 (long long)old_pc);
416 symbol = get_symbol_name(
417 &cpu->machine->symbol_context,
418 old_pc, &offset);
419 fatal(" <%s> ]\n",
420 symbol? symbol : " no symbol ");
421 }
422 }
423
424 if (writeflag == MEM_READ) {
425 #ifdef MEM_X86
426 /* Reading non-existant memory on x86: */
427 memset(data, 0xff, len);
428 #else
429 /* Return all zeroes? (Or 0xff? TODO) */
430 memset(data, 0, len);
431 #endif
432
433 #ifdef MEM_MIPS
434 /*
435 * For real data/instruction accesses, cause
436 * an exceptions on an illegal read:
437 */
438 if (cache != CACHE_NONE && cpu->machine->
439 dbe_on_nonexistant_memaccess &&
440 !no_exceptions) {
441 if (paddr >= mem->physical_max &&
442 paddr < mem->physical_max+1048576)
443 mips_cpu_exception(cpu,
444 EXCEPTION_DBE, 0, vaddr, 0,
445 0, 0, 0);
446 }
447 #endif /* MEM_MIPS */
448 }
449
450 /* Hm? Shouldn't there be a DBE exception for
451 invalid writes as well? TODO */
452
453 goto do_return_ok;
454 }
455 }
456
457 #endif /* ifndef MEM_USERLAND */
458
459
460 /*
461 * Uncached access:
462 *
463 * 1) Translate the physical address to a host address.
464 *
465 * 2) Insert this virtual->physical->host translation into the
466 * fast translation arrays (using update_translation_table()).
467 *
468 * 3) If this was a Write, then invalidate any code translations
469 * in that page.
470 */
471 memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
472 writeflag);
473 if (memblock == NULL) {
474 if (writeflag == MEM_READ)
475 memset(data, 0, len);
476 goto do_return_ok;
477 }
478
479 offset = paddr & offset_mask;
480
481 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
482 #ifdef MEM_MIPS
483 /* Ugly hack for R2000/R3000 caches: */
484 && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
485 !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
486 #endif
487 #ifndef MEM_MIPS
488 /* && !(misc_flags & MEMORY_USER_ACCESS) */
489 #ifndef MEM_USERLAND
490 && !(ok & MEMORY_NOT_FULL_PAGE)
491 #endif
492 #endif
493 && !no_exceptions)
494 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
495 memblock, (misc_flags & MEMORY_USER_ACCESS) |
496 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
497 (cache == CACHE_INSTRUCTION?
498 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
499 #else
500 (writeflag == MEM_WRITE? 1 : 0),
501 #endif
502 paddr & ~offset_mask);
503
504 /* Invalidate code translations for the page we are writing to. */
505 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
506 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
507
508 if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
509 printf("Write over memblock boundary?\n");
510 exit(1);
511 }
512
513 if (writeflag == MEM_WRITE) {
514 /* Ugly optimization, but it works: */
515 if (len == sizeof(uint32_t) && (offset & 3)==0
516 && ((size_t)data&3)==0)
517 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
518 else if (len == sizeof(uint8_t))
519 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
520 else
521 memcpy(memblock + offset, data, len);
522 } else {
523 /* Ugly optimization, but it works: */
524 if (len == sizeof(uint32_t) && (offset & 3)==0
525 && ((size_t)data&3)==0)
526 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
527 else if (len == sizeof(uint8_t))
528 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
529 else
530 memcpy(data, memblock + offset, len);
531 }
532
533
534 do_return_ok:
535 return MEMORY_ACCESS_OK;
536 }
537

  ViewVC Help
Powered by ViewVC 1.1.26