
Contents of /upstream/0.4.6/src/memory_rw.c



Revision 43
Mon Oct 8 16:22:43 2007 UTC by dpavlin
File MIME type: text/plain
File size: 11814 byte(s)
0.4.6
/*
 * Copyright (C) 2003-2007 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory_rw.c,v 1.107 2007/06/12 03:49:11 debug Exp $
 *
 * Generic memory_rw(), with special hacks for specific CPU families.
 *
 * Example for inclusion from memory_mips.c:
 *
 *	MEMORY_RW should be mips_memory_rw
 *	MEM_MIPS should be defined
 *
 *
 * TODO: Clean up the "ok" variable usage!
 */

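
/*
 * A minimal sketch (illustrative only, not verbatim from any particular
 * file) of how a CPU family might pull this file in, following the note
 * above; the real memory_mips.c may look different:
 *
 *	#define MEMORY_RW	mips_memory_rw
 *	#define MEM_MIPS
 *	#include "memory_rw.c"
 */
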
/*
 * memory_rw():
 *
 * Read or write data from/to memory.
 *
 *	cpu		the cpu doing the read/write
 *	mem		the memory object to use
 *	vaddr		the virtual address
 *	data		a pointer to the data to be written to memory, or
 *			a placeholder for data when reading from memory
 *	len		the length of the 'data' buffer
 *	writeflag	set to MEM_READ or MEM_WRITE
 *	misc_flags	CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 * If the address indicates access to a memory mapped device, that device's
 * read/write access function is called.
 *
 * This function should not be called with cpu == NULL.
 *
 * Returns one of the following:
 *	MEMORY_ACCESS_FAILED
 *	MEMORY_ACCESS_OK
 *
 * (MEMORY_ACCESS_FAILED is 0.)
 */
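
/*
 * A minimal usage sketch (an illustration, not from the original source):
 * a MIPS caller might read one word of guest memory roughly like this,
 * assuming "cpu" and "cpu->mem" have already been set up by the
 * machine/emulation code:
 *
 *	unsigned char buf[4];
 *	if (mips_memory_rw(cpu, cpu->mem, vaddr, buf, sizeof(buf),
 *	    MEM_READ, CACHE_DATA) == MEMORY_ACCESS_FAILED) {
 *		... the access failed (an exception may already have been
 *		... raised), so abort the current emulated instruction
 *	}
 */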
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
	unsigned char *data, size_t len, int writeflag, int misc_flags)
{
#ifdef MEM_ALPHA
	const int offset_mask = 0x1fff;
#else
	const int offset_mask = 0xfff;
#endif

#ifndef MEM_USERLAND
	int ok = 2;
#endif
	uint64_t paddr;
	int cache, no_exceptions, offset;
	unsigned char *memblock;
	int dyntrans_device_danger = 0;

	no_exceptions = misc_flags & NO_EXCEPTIONS;
	cache = misc_flags & CACHE_FLAGS_MASK;


#ifdef MEM_USERLAND
#ifdef MEM_ALPHA
	paddr = vaddr;
#else
	paddr = vaddr & 0x7fffffff;
#endif
#else	/* !MEM_USERLAND */
	if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
		paddr = vaddr;
	} else {
		ok = cpu->translate_v2p(cpu, vaddr, &paddr,
		    (writeflag? FLAG_WRITEFLAG : 0) +
		    (no_exceptions? FLAG_NOEXCEPTIONS : 0)
		    + (misc_flags & MEMORY_USER_ACCESS)
		    + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));

		/*
		 * If the translation caused an exception, or was invalid in
		 * some way, then simply return without doing the memory
		 * access:
		 */
		if (!ok)
			return MEMORY_ACCESS_FAILED;
	}

#endif	/* !MEM_USERLAND */

#ifndef MEM_USERLAND
	/*
	 * Memory mapped device?
	 *
	 * TODO: if paddr < base, but len enough, then the device should
	 * still be written to!
	 */
	if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
		uint64_t orig_paddr = paddr;
		int i, start, end, res;

#if 0

TODO: The correct solution for this is to add RAM devices _around_ the
dangerous device. The solution below incurs a slowdown for _everything_,
not just the device in question.

		/*
		 * Really really slow, but unfortunately necessary. This is
		 * to avoid the following scenario:
		 *
		 *	a) offsets 0x000..0x123 are normal memory
		 *	b) offsets 0x124..0x777 are a device
		 *
		 *	1) a read is done from offset 0x100. the page is
		 *	   added to the dyntrans system as a "RAM" page
		 *	2) a dyntranslated read is done from offset 0x200,
		 *	   which should access the device, but since the
		 *	   entire page is added, it will access non-existent
		 *	   RAM instead, without warning.
		 *
		 * Setting dyntrans_device_danger = 1 on accesses which are
		 * on _any_ offset on pages that are device mapped avoids
		 * this problem, but it is probably not very fast.
		 *
		 * TODO: Convert this into a quick (multi-level, 64-bit)
		 * address space lookup, to find dangerous pages.
		 */
		for (i=0; i<mem->n_mmapped_devices; i++)
			if (paddr >= (mem->devices[i].baseaddr & ~offset_mask)&&
			    paddr <= ((mem->devices[i].endaddr-1)|offset_mask)){
				dyntrans_device_danger = 1;
				break;
			}
#endif

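		/*
		 * The scan below is a binary search over devices[], which
		 * presumably is kept sorted by baseaddr by the device
		 * registration code; last_accessed_device only provides
		 * the initial guess.
		 */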
		start = 0; end = mem->n_mmapped_devices - 1;
		i = mem->last_accessed_device;

		/* Scan through all devices: */
		do {
			if (paddr >= mem->devices[i].baseaddr &&
			    paddr < mem->devices[i].endaddr) {
				/* Found a device, let's access it: */
				mem->last_accessed_device = i;

				paddr -= mem->devices[i].baseaddr;
				if (paddr + len > mem->devices[i].length)
					len = mem->devices[i].length - paddr;

				if (cpu->update_translation_table != NULL &&
				    !(ok & MEMORY_NOT_FULL_PAGE) &&
				    mem->devices[i].flags & DM_DYNTRANS_OK) {
					int wf = writeflag == MEM_WRITE? 1 : 0;
					unsigned char *host_addr;

					if (!(mem->devices[i].flags &
					    DM_DYNTRANS_WRITE_OK))
						wf = 0;

					if (writeflag && wf) {
						if (paddr < mem->devices[i].
						    dyntrans_write_low)
							mem->devices[i].
							    dyntrans_write_low =
							    paddr &~offset_mask;
						if (paddr >= mem->devices[i].
						    dyntrans_write_high)
							mem->devices[i].
							    dyntrans_write_high =
							    paddr | offset_mask;
					}

					if (mem->devices[i].flags &
					    DM_EMULATED_RAM) {
						/* MEM_WRITE to force the page
						   to be allocated, if it
						   wasn't already */
						uint64_t *pp = (uint64_t *)mem->
						    devices[i].dyntrans_data;
						uint64_t p = orig_paddr - *pp;
						host_addr =
						    memory_paddr_to_hostaddr(
						    mem, p & ~offset_mask,
						    MEM_WRITE);
					} else {
						host_addr = mem->devices[i].
						    dyntrans_data +
						    (paddr & ~offset_mask);
					}

					cpu->update_translation_table(cpu,
					    vaddr & ~offset_mask, host_addr,
					    wf, orig_paddr & ~offset_mask);
				}

				res = 0;
				if (!no_exceptions || (mem->devices[i].flags &
				    DM_READS_HAVE_NO_SIDE_EFFECTS))
					res = mem->devices[i].f(cpu, mem, paddr,
					    data, len, writeflag,
					    mem->devices[i].extra);

				if (res == 0)
					res = -1;

				/*
				 * If accessing the memory mapped device
				 * failed, then return with a DBE exception.
				 */
				if (res <= 0 && !no_exceptions) {
					debug("%s device '%s' addr %08lx "
					    "failed\n", writeflag?
					    "writing to" : "reading from",
					    mem->devices[i].name, (long)paddr);
#ifdef MEM_MIPS
					mips_cpu_exception(cpu,
					    cache == CACHE_INSTRUCTION?
					    EXCEPTION_IBE : EXCEPTION_DBE,
					    0, vaddr, 0, 0, 0, 0);
#endif
					return MEMORY_ACCESS_FAILED;
				}
				goto do_return_ok;
			}

			if (paddr < mem->devices[i].baseaddr)
				end = i - 1;
			if (paddr >= mem->devices[i].endaddr)
				start = i + 1;
			i = (start + end) >> 1;
		} while (start <= end);
	}

#ifdef MEM_MIPS
	/*
	 * Data and instruction cache emulation:
	 */

	switch (cpu->cd.mips.cpu_type.mmu_model) {
	case MMU3K:
		/* if not uncached address (TODO: generalize this) */
		if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
		    !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
		    (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
			if (memory_cache_R3000(cpu, cache, paddr,
			    writeflag, len, data))
				goto do_return_ok;
		}
		break;
	default:
		/* R4000 etc */
		/* TODO */
		;
	}
#endif	/* MEM_MIPS */

	/* Outside of physical RAM? */
	if (paddr >= mem->physical_max) {
#ifdef MEM_MIPS
		if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
			/* Ok, this is PROM stuff */
		} else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
			/* Sprite reads from this area of memory... */
			/* TODO: is this still correct? */
			if (writeflag == MEM_READ)
				memset(data, 0, len);
			goto do_return_ok;
		} else
#endif /* MIPS */
		{
			if (paddr >= mem->physical_max && !no_exceptions)
				memory_warn_about_unimplemented_addr
				    (cpu, mem, writeflag, paddr, data, len);

			if (writeflag == MEM_READ) {
				/* Return all zeroes? (Or 0xff? TODO) */
				memset(data, 0, len);

#if 0
/*
 * NOTE: This code prevents a PROM image from a real 5000/200 from booting.
 * I think I introduced it because it was how some guest OS (NetBSD?) detected
 * the amount of RAM on some machine.
 *
 * TODO: Figure out if it is not needed anymore, and remove it completely.
 */
#ifdef MEM_MIPS
				/*
				 * For real data/instruction accesses, cause
				 * an exception on an illegal read:
				 */
				if (cache != CACHE_NONE && !no_exceptions &&
				    paddr >= mem->physical_max &&
				    paddr < mem->physical_max+1048576) {
					mips_cpu_exception(cpu,
					    EXCEPTION_DBE, 0, vaddr, 0,
					    0, 0, 0);
				}
#endif /* MEM_MIPS */
#endif
			}

			/* Hm? Shouldn't there be a DBE exception for
			   invalid writes as well? TODO */

			goto do_return_ok;
		}
	}

#endif	/* ifndef MEM_USERLAND */


	/*
	 * Uncached access:
	 *
	 * 1) Translate the physical address to a host address.
	 *
	 * 2) Insert this virtual->physical->host translation into the
	 *    fast translation arrays (using update_translation_table()).
	 *
	 * 3) If this was a Write, then invalidate any code translations
	 *    in that page.
	 */
	memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
	    writeflag);
	if (memblock == NULL) {
		if (writeflag == MEM_READ)
			memset(data, 0, len);
		goto do_return_ok;
	}

	offset = paddr & offset_mask;

	if (cpu->update_translation_table != NULL && !dyntrans_device_danger
#ifdef MEM_MIPS
	    /* Ugly hack for R2000/R3000 caches: */
	    && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
	    !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
#endif
#ifndef MEM_USERLAND
	    && !(ok & MEMORY_NOT_FULL_PAGE)
#endif
	    && !no_exceptions)
		cpu->update_translation_table(cpu, vaddr & ~offset_mask,
		    memblock, (misc_flags & MEMORY_USER_ACCESS) |
#if !defined(MEM_USERLAND)
		    (cache == CACHE_INSTRUCTION?
		    (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
#else
		    (writeflag == MEM_WRITE? 1 : 0),
#endif
		    paddr & ~offset_mask);

	/*
	 * If writing, or if mapping a page where writing is ok later on,
	 * then invalidate code translations for the (physical) page address:
	 */

	if ((writeflag == MEM_WRITE
#if !defined(MEM_USERLAND)
	    || (ok == 2 && cache == CACHE_DATA)
#endif
	    ) && cpu->invalidate_code_translation != NULL)
		cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);

	if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
		printf("Write over memblock boundary?\n");
		exit(1);
	}

	/* And finally, read or write the data: */
	if (writeflag == MEM_WRITE)
		memcpy(memblock + offset, data, len);
	else
		memcpy(data, memblock + offset, len);

do_return_ok:
	return MEMORY_ACCESS_OK;
}
