/[gxemul]/trunk/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /trunk/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 26 - (show annotations)
Mon Oct 8 16:20:10 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 15097 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1264 2006/06/25 11:08:04 debug Exp $
20060624	Replacing the error-prone machine type initialization stuff
		with something more reasonable.
		Finally removing the old "cpu_run" kludge; moving around stuff
		in machine.c and emul.c to better suit the dyntrans system.
		Various minor dyntrans cleanups (renaming translate_address to
		translate_v2p, and experimenting with template physpages).
20060625	Removing the speed hack which separated the vph entries into
		two halves (code vs data); things seem a lot more stable now.
		Minor performance hack: R2000/R3000 cache isolation now only
		clears address translations when going into isolation, not
		when going out of it.
		Fixing the MIPS interrupt problems by letting mtc0 immediately
		cause interrupts.

==============  RELEASE 0.4.0.1  ==============


1 /*
2 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.90 2006/06/25 00:15:44 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device'
54 * read/write access function is called.
55 *
56 * This function should not be called with cpu == NULL.
57 *
58 * Returns one of the following:
59 * MEMORY_ACCESS_FAILED
60 * MEMORY_ACCESS_OK
61 *
62 * (MEMORY_ACCESS_FAILED is 0.)
63 */
64 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
65 unsigned char *data, size_t len, int writeflag, int misc_flags)
66 {
67 #ifdef MEM_ALPHA
68 const int offset_mask = 0x1fff;
69 #else
70 const int offset_mask = 0xfff;
71 #endif
72
73 #ifndef MEM_USERLAND
74 int ok = 1;
75 #endif
76 uint64_t paddr;
77 int cache, no_exceptions, offset;
78 unsigned char *memblock;
79 int dyntrans_device_danger = 0;
80
81 no_exceptions = misc_flags & NO_EXCEPTIONS;
82 cache = misc_flags & CACHE_FLAGS_MASK;
83
84 #ifdef MEM_X86
85 /* Real-mode wrap-around: */
86 if (REAL_MODE && !(misc_flags & PHYSICAL)) {
87 if ((vaddr & 0xffff) + len > 0x10000) {
88 /* Do one byte at a time: */
89 int res = 0;
90 size_t i;
91 for (i=0; i<len; i++)
92 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
93 writeflag, misc_flags);
94 return res;
95 }
96 }
97
98 /* Crossing a page boundary? Then do one byte at a time: */
99 if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
100 && cpu->cd.x86.cr[0] & X86_CR0_PG) {
101 /* For WRITES: Read ALL BYTES FIRST and write them back!!!
102 Then do a write of all the new bytes. This is to make sure
103 than both pages around the boundary are writable so we don't
104 do a partial write. */
105 int res = 0;
106 size_t i;
107 if (writeflag == MEM_WRITE) {
108 unsigned char tmp;
109 for (i=0; i<len; i++) {
110 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
111 MEM_READ, misc_flags);
112 if (!res)
113 return 0;
114 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
115 MEM_WRITE, misc_flags);
116 if (!res)
117 return 0;
118 }
119 for (i=0; i<len; i++) {
120 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
121 MEM_WRITE, misc_flags);
122 if (!res)
123 return 0;
124 }
125 } else {
126 for (i=0; i<len; i++) {
127 /* Do one byte at a time: */
128 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
129 writeflag, misc_flags);
130 if (!res) {
131 if (cache == CACHE_INSTRUCTION) {
132 fatal("FAILED instruction "
133 "fetch across page boundar"
134 "y: todo. vaddr=0x%08x\n",
135 (int)vaddr);
136 cpu->running = 0;
137 }
138 return 0;
139 }
140 }
141 }
142 return res;
143 }
144 #endif /* X86 */
145
146
147 #ifdef MEM_USERLAND
148 #ifdef MEM_ALPHA
149 paddr = vaddr;
150 #else
151 paddr = vaddr & 0x7fffffff;
152 #endif
153 #else /* !MEM_USERLAND */
154 if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
155 paddr = vaddr;
156 } else {
157 ok = cpu->translate_v2p(cpu, vaddr, &paddr,
158 (writeflag? FLAG_WRITEFLAG : 0) +
159 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
160 #ifdef MEM_X86
161 + (misc_flags & NO_SEGMENTATION)
162 #endif
163 #ifdef MEM_ARM
164 + (misc_flags & MEMORY_USER_ACCESS)
165 #endif
166 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
167 /* If the translation caused an exception, or was invalid in
168 some way, we simply return without doing the memory
169 access: */
170 if (!ok)
171 return MEMORY_ACCESS_FAILED;
172 }
173
174
175 #ifdef MEM_X86
176 /* DOS debugging :-) */
177 if (!quiet_mode && !(misc_flags & PHYSICAL)) {
178 if (paddr >= 0x400 && paddr <= 0x4ff)
179 debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
180 MEM_WRITE? "writing to" : "reading from",
181 (int)paddr);
182 #if 0
183 if (paddr >= 0xf0000 && paddr <= 0xfffff)
184 debug("{ BIOS ACCESS: %s 0x%x }\n",
185 writeflag == MEM_WRITE? "writing to" :
186 "reading from", (int)paddr);
187 #endif
188 }
189 #endif
190 #endif /* !MEM_USERLAND */
191
192
193 #ifndef MEM_USERLAND
194 /*
195 * Memory mapped device?
196 *
197 * TODO: if paddr < base, but len enough, then the device should
198 * still be written to!
199 */
200 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
201 uint64_t orig_paddr = paddr;
202 int i, start, end, res;
203
204 /*
205 * Really really slow, but unfortunately necessary. This is
206 * to avoid the folowing scenario:
207 *
208 * a) offsets 0x000..0x123 are normal memory
209 * b) offsets 0x124..0x777 are a device
210 *
211 * 1) a read is done from offset 0x100. the page is
212 * added to the dyntrans system as a "RAM" page
213 * 2) a dyntranslated read is done from offset 0x200,
214 * which should access the device, but since the
215 * entire page is added, it will access non-existant
216 * RAM instead, without warning.
217 *
218 * Setting dyntrans_device_danger = 1 on accesses which are
219 * on _any_ offset on pages that are device mapped avoids
220 * this problem, but it is probably not very fast.
221 *
222 * TODO: Convert this into a quick (multi-level, 64-bit)
223 * address space lookup, to find dangerous pages.
224 */
225 #if 1
226 for (i=0; i<mem->n_mmapped_devices; i++)
227 if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
228 paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
229 dyntrans_device_danger = 1;
230 break;
231 }
232 #endif
233
234 start = 0; end = mem->n_mmapped_devices - 1;
235 i = mem->last_accessed_device;
236
237 /* Scan through all devices: */
238 do {
239 if (paddr >= mem->dev_baseaddr[i] &&
240 paddr < mem->dev_endaddr[i]) {
241 /* Found a device, let's access it: */
242 mem->last_accessed_device = i;
243
244 paddr -= mem->dev_baseaddr[i];
245 if (paddr + len > mem->dev_length[i])
246 len = mem->dev_length[i] - paddr;
247
248 if (cpu->update_translation_table != NULL &&
249 !(ok & MEMORY_NOT_FULL_PAGE) &&
250 mem->dev_flags[i] & DM_DYNTRANS_OK) {
251 int wf = writeflag == MEM_WRITE? 1 : 0;
252 unsigned char *host_addr;
253
254 if (!(mem->dev_flags[i] &
255 DM_DYNTRANS_WRITE_OK))
256 wf = 0;
257
258 if (writeflag && wf) {
259 if (paddr < mem->
260 dev_dyntrans_write_low[i])
261 mem->
262 dev_dyntrans_write_low
263 [i] = paddr &
264 ~offset_mask;
265 if (paddr >= mem->
266 dev_dyntrans_write_high[i])
267 mem->
268 dev_dyntrans_write_high
269 [i] = paddr |
270 offset_mask;
271 }
272
273 if (mem->dev_flags[i] &
274 DM_EMULATED_RAM) {
275 /* MEM_WRITE to force the page
276 to be allocated, if it
277 wasn't already */
278 uint64_t *pp = (uint64_t *)
279 mem->dev_dyntrans_data[i];
280 uint64_t p = orig_paddr - *pp;
281 host_addr =
282 memory_paddr_to_hostaddr(
283 mem, p, MEM_WRITE)
284 + (p & ~offset_mask
285 & ((1 <<
286 BITS_PER_MEMBLOCK) - 1));
287 } else {
288 host_addr =
289 mem->dev_dyntrans_data[i] +
290 (paddr & ~offset_mask);
291 }
292 cpu->update_translation_table(cpu,
293 vaddr & ~offset_mask, host_addr,
294 wf, orig_paddr & ~offset_mask);
295 }
296
297 res = 0;
298 if (!no_exceptions || (mem->dev_flags[i] &
299 DM_READS_HAVE_NO_SIDE_EFFECTS))
300 res = mem->dev_f[i](cpu, mem, paddr,
301 data, len, writeflag,
302 mem->dev_extra[i]);
303
304 if (res == 0)
305 res = -1;
306
307 #ifndef MEM_X86
308 /*
309 * If accessing the memory mapped device
310 * failed, then return with a DBE exception.
311 */
312 if (res <= 0 && !no_exceptions) {
313 debug("%s device '%s' addr %08lx "
314 "failed\n", writeflag?
315 "writing to" : "reading from",
316 mem->dev_name[i], (long)paddr);
317 #ifdef MEM_MIPS
318 mips_cpu_exception(cpu, EXCEPTION_DBE,
319 0, vaddr, 0, 0, 0, 0);
320 #endif
321 return MEMORY_ACCESS_FAILED;
322 }
323 #endif
324 goto do_return_ok;
325 }
326
327 if (paddr < mem->dev_baseaddr[i])
328 end = i - 1;
329 if (paddr >= mem->dev_endaddr[i])
330 start = i + 1;
331 i = (start + end) >> 1;
332 } while (start <= end);
333 }
334
335
336 #ifdef MEM_MIPS
337 /*
338 * Data and instruction cache emulation:
339 */
340
341 switch (cpu->cd.mips.cpu_type.mmu_model) {
342 case MMU3K:
343 /* if not uncached addess (TODO: generalize this) */
344 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
345 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
346 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
347 if (memory_cache_R3000(cpu, cache, paddr,
348 writeflag, len, data))
349 goto do_return_ok;
350 }
351 break;
352 default:
353 /* R4000 etc */
354 /* TODO */
355 ;
356 }
357 #endif /* MEM_MIPS */
358
359
360 /* Outside of physical RAM? */
361 if (paddr >= mem->physical_max) {
362 #ifdef MEM_MIPS
363 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
364 /* Ok, this is PROM stuff */
365 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
366 /* Sprite reads from this area of memory... */
367 /* TODO: is this still correct? */
368 if (writeflag == MEM_READ)
369 memset(data, 0, len);
370 goto do_return_ok;
371 } else
372 #endif /* MIPS */
373 {
374 if (paddr >= mem->physical_max) {
375 uint64_t offset, old_pc = cpu->pc;
376 char *symbol;
377
378 /* This allows for example OS kernels to probe
379 memory a few KBs past the end of memory,
380 without giving too many warnings. */
381 if (!quiet_mode && !no_exceptions && paddr >=
382 mem->physical_max + 0x40000) {
383 fatal("[ memory_rw(): writeflag=%i ",
384 writeflag);
385 if (writeflag) {
386 unsigned int i;
387 debug("data={", writeflag);
388 if (len > 16) {
389 int start2 = len-16;
390 for (i=0; i<16; i++)
391 debug("%s%02x",
392 i?",":"",
393 data[i]);
394 debug(" .. ");
395 if (start2 < 16)
396 start2 = 16;
397 for (i=start2; i<len;
398 i++)
399 debug("%s%02x",
400 i?",":"",
401 data[i]);
402 } else
403 for (i=0; i<len; i++)
404 debug("%s%02x",
405 i?",":"",
406 data[i]);
407 debug("}");
408 }
409
410 fatal(" paddr=0x%llx >= physical_max"
411 "; pc=", (long long)paddr);
412 if (cpu->is_32bit)
413 fatal("0x%08x",(int)old_pc);
414 else
415 fatal("0x%016llx",
416 (long long)old_pc);
417 symbol = get_symbol_name(
418 &cpu->machine->symbol_context,
419 old_pc, &offset);
420 fatal(" <%s> ]\n",
421 symbol? symbol : " no symbol ");
422 }
423 }
424
425 if (writeflag == MEM_READ) {
426 #ifdef MEM_X86
427 /* Reading non-existant memory on x86: */
428 memset(data, 0xff, len);
429 #else
430 /* Return all zeroes? (Or 0xff? TODO) */
431 memset(data, 0, len);
432 #endif
433
434 #ifdef MEM_MIPS
435 /*
436 * For real data/instruction accesses, cause
437 * an exceptions on an illegal read:
438 */
439 if (cache != CACHE_NONE && cpu->machine->
440 dbe_on_nonexistant_memaccess &&
441 !no_exceptions) {
442 if (paddr >= mem->physical_max &&
443 paddr < mem->physical_max+1048576)
444 mips_cpu_exception(cpu,
445 EXCEPTION_DBE, 0, vaddr, 0,
446 0, 0, 0);
447 }
448 #endif /* MEM_MIPS */
449 }
450
451 /* Hm? Shouldn't there be a DBE exception for
452 invalid writes as well? TODO */
453
454 goto do_return_ok;
455 }
456 }
457
458 #endif /* ifndef MEM_USERLAND */
459
460
461 /*
462 * Uncached access:
463 *
464 * 1) Translate the physical address to a host address.
465 *
466 * 2) Insert this virtual->physical->host translation into the
467 * fast translation arrays (using update_translation_table()).
468 *
469 * 3) If this was a Write, then invalidate any code translations
470 * in that page.
471 */
472 memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
473 if (memblock == NULL) {
474 if (writeflag == MEM_READ)
475 memset(data, 0, len);
476 goto do_return_ok;
477 }
478
479 offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);
480
481 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
482 #ifdef MEM_MIPS
483 /* Ugly hack for R2000/R3000 caches: */
484 && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
485 !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
486 #endif
487 #ifndef MEM_MIPS
488 /* && !(misc_flags & MEMORY_USER_ACCESS) */
489 #ifndef MEM_USERLAND
490 && !(ok & MEMORY_NOT_FULL_PAGE)
491 #endif
492 #endif
493 && !no_exceptions)
494 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
495 memblock + (offset & ~offset_mask),
496 (misc_flags & MEMORY_USER_ACCESS) |
497 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
498 (cache == CACHE_INSTRUCTION?
499 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
500 #else
501 (writeflag == MEM_WRITE? 1 : 0),
502 #endif
503 paddr & ~offset_mask);
504
505 /* Invalidate code translations for the page we are writing to. */
506 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
507 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
508
509 if (writeflag == MEM_WRITE) {
510 /* Ugly optimization, but it works: */
511 if (len == sizeof(uint32_t) && (offset & 3)==0
512 && ((size_t)data&3)==0)
513 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
514 else if (len == sizeof(uint8_t))
515 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
516 else
517 memcpy(memblock + offset, data, len);
518 } else {
519 /* Ugly optimization, but it works: */
520 if (len == sizeof(uint32_t) && (offset & 3)==0
521 && ((size_t)data&3)==0)
522 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
523 else if (len == sizeof(uint8_t))
524 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
525 else
526 memcpy(data, memblock + offset, len);
527 }
528
529
530 do_return_ok:
531 return MEMORY_ACCESS_OK;
532 }
533

  ViewVC Help
Powered by ViewVC 1.1.26