/[gxemul]/upstream/0.4.4/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.4.4/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 35 - (show annotations)
Mon Oct 8 16:21:26 2007 UTC (16 years, 7 months ago) by dpavlin
File MIME type: text/plain
File size: 11974 byte(s)
0.4.4
1 /*
2 * Copyright (C) 2003-2007 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.101 2007/02/10 14:04:51 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device'
54 * read/write access function is called.
55 *
56 * This function should not be called with cpu == NULL.
57 *
58 * Returns one of the following:
59 * MEMORY_ACCESS_FAILED
60 * MEMORY_ACCESS_OK
61 *
62 * (MEMORY_ACCESS_FAILED is 0.)
63 */
64 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
65 unsigned char *data, size_t len, int writeflag, int misc_flags)
66 {
67 #ifdef MEM_ALPHA
68 const int offset_mask = 0x1fff;
69 #else
70 const int offset_mask = 0xfff;
71 #endif
72
73 #ifndef MEM_USERLAND
74 int ok = 1;
75 #endif
76 uint64_t paddr;
77 int cache, no_exceptions, offset;
78 unsigned char *memblock;
79 int dyntrans_device_danger = 0;
80
81 no_exceptions = misc_flags & NO_EXCEPTIONS;
82 cache = misc_flags & CACHE_FLAGS_MASK;
83
84
85 #ifdef MEM_USERLAND
86 #ifdef MEM_ALPHA
87 paddr = vaddr;
88 #else
89 paddr = vaddr & 0x7fffffff;
90 #endif
91 #else /* !MEM_USERLAND */
92 if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
93 paddr = vaddr;
94 } else {
95 ok = cpu->translate_v2p(cpu, vaddr, &paddr,
96 (writeflag? FLAG_WRITEFLAG : 0) +
97 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
98 #ifdef MEM_ARM
99 + (misc_flags & MEMORY_USER_ACCESS)
100 #endif
101 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
102
103 /*
104 * If the translation caused an exception, or was invalid in
105 * some way, then simply return without doing the memory
106 * access:
107 */
108 if (!ok)
109 return MEMORY_ACCESS_FAILED;
110 }
111
112 #endif /* !MEM_USERLAND */
113
114
115 #ifndef MEM_USERLAND
116 /*
117 * Memory mapped device?
118 *
119 * TODO: if paddr < base, but len enough, then the device should
120 * still be written to!
121 */
122 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
123 uint64_t orig_paddr = paddr;
124 int i, start, end, res;
125
126 #if 0
127
128 TODO: The correct solution for this is to add RAM devices _around_ the
129 dangerous device. The solution below incurs a slowdown for _everything_,
130 not just the device in question.
131
132 /*
133 * Really really slow, but unfortunately necessary. This is
134 * to avoid the folowing scenario:
135 *
136 * a) offsets 0x000..0x123 are normal memory
137 * b) offsets 0x124..0x777 are a device
138 *
139 * 1) a read is done from offset 0x100. the page is
140 * added to the dyntrans system as a "RAM" page
141 * 2) a dyntranslated read is done from offset 0x200,
142 * which should access the device, but since the
143 * entire page is added, it will access non-existant
144 * RAM instead, without warning.
145 *
146 * Setting dyntrans_device_danger = 1 on accesses which are
147 * on _any_ offset on pages that are device mapped avoids
148 * this problem, but it is probably not very fast.
149 *
150 * TODO: Convert this into a quick (multi-level, 64-bit)
151 * address space lookup, to find dangerous pages.
152 */
153 for (i=0; i<mem->n_mmapped_devices; i++)
154 if (paddr >= (mem->devices[i].baseaddr & ~offset_mask)&&
155 paddr <= ((mem->devices[i].endaddr-1)|offset_mask)){
156 dyntrans_device_danger = 1;
157 break;
158 }
159 #endif
160
161 start = 0; end = mem->n_mmapped_devices - 1;
162 i = mem->last_accessed_device;
163
164 /* Scan through all devices: */
165 do {
166 if (paddr >= mem->devices[i].baseaddr &&
167 paddr < mem->devices[i].endaddr) {
168 /* Found a device, let's access it: */
169 mem->last_accessed_device = i;
170
171 paddr -= mem->devices[i].baseaddr;
172 if (paddr + len > mem->devices[i].length)
173 len = mem->devices[i].length - paddr;
174
175 if (cpu->update_translation_table != NULL &&
176 !(ok & MEMORY_NOT_FULL_PAGE) &&
177 mem->devices[i].flags & DM_DYNTRANS_OK) {
178 int wf = writeflag == MEM_WRITE? 1 : 0;
179 unsigned char *host_addr;
180
181 if (!(mem->devices[i].flags &
182 DM_DYNTRANS_WRITE_OK))
183 wf = 0;
184
185 if (writeflag && wf) {
186 if (paddr < mem->devices[i].
187 dyntrans_write_low)
188 mem->devices[i].
189 dyntrans_write_low =
190 paddr &~offset_mask;
191 if (paddr >= mem->devices[i].
192 dyntrans_write_high)
193 mem->devices[i].
194 dyntrans_write_high =
195 paddr | offset_mask;
196 }
197
198 if (mem->devices[i].flags &
199 DM_EMULATED_RAM) {
200 /* MEM_WRITE to force the page
201 to be allocated, if it
202 wasn't already */
203 uint64_t *pp = (uint64_t *)mem->
204 devices[i].dyntrans_data;
205 uint64_t p = orig_paddr - *pp;
206 host_addr =
207 memory_paddr_to_hostaddr(
208 mem, p & ~offset_mask,
209 MEM_WRITE);
210 } else {
211 host_addr = mem->devices[i].
212 dyntrans_data +
213 (paddr & ~offset_mask);
214 }
215
216 cpu->update_translation_table(cpu,
217 vaddr & ~offset_mask, host_addr,
218 wf, orig_paddr & ~offset_mask);
219 }
220
221 res = 0;
222 if (!no_exceptions || (mem->devices[i].flags &
223 DM_READS_HAVE_NO_SIDE_EFFECTS))
224 res = mem->devices[i].f(cpu, mem, paddr,
225 data, len, writeflag,
226 mem->devices[i].extra);
227
228 if (res == 0)
229 res = -1;
230
231 /*
232 * If accessing the memory mapped device
233 * failed, then return with a DBE exception.
234 */
235 if (res <= 0 && !no_exceptions) {
236 debug("%s device '%s' addr %08lx "
237 "failed\n", writeflag?
238 "writing to" : "reading from",
239 mem->devices[i].name, (long)paddr);
240 #ifdef MEM_MIPS
241 mips_cpu_exception(cpu, EXCEPTION_DBE,
242 0, vaddr, 0, 0, 0, 0);
243 #endif
244 return MEMORY_ACCESS_FAILED;
245 }
246 goto do_return_ok;
247 }
248
249 if (paddr < mem->devices[i].baseaddr)
250 end = i - 1;
251 if (paddr >= mem->devices[i].endaddr)
252 start = i + 1;
253 i = (start + end) >> 1;
254 } while (start <= end);
255 }
256
257
258 #ifdef MEM_MIPS
259 /*
260 * Data and instruction cache emulation:
261 */
262
263 switch (cpu->cd.mips.cpu_type.mmu_model) {
264 case MMU3K:
265 /* if not uncached addess (TODO: generalize this) */
266 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
267 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
268 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
269 if (memory_cache_R3000(cpu, cache, paddr,
270 writeflag, len, data))
271 goto do_return_ok;
272 }
273 break;
274 default:
275 /* R4000 etc */
276 /* TODO */
277 ;
278 }
279 #endif /* MEM_MIPS */
280
281
282 /* Outside of physical RAM? */
283 if (paddr >= mem->physical_max) {
284 #ifdef MEM_MIPS
285 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
286 /* Ok, this is PROM stuff */
287 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
288 /* Sprite reads from this area of memory... */
289 /* TODO: is this still correct? */
290 if (writeflag == MEM_READ)
291 memset(data, 0, len);
292 goto do_return_ok;
293 } else
294 #endif /* MIPS */
295 {
296 if (paddr >= mem->physical_max && !no_exceptions)
297 memory_warn_about_unimplemented_addr
298 (cpu, mem, writeflag, paddr, data, len);
299
300 if (writeflag == MEM_READ) {
301 /* Return all zeroes? (Or 0xff? TODO) */
302 memset(data, 0, len);
303
304 #ifdef MEM_MIPS
305 /*
306 * For real data/instruction accesses, cause
307 * an exceptions on an illegal read:
308 */
309 if (cache != CACHE_NONE && cpu->machine->
310 dbe_on_nonexistant_memaccess &&
311 !no_exceptions) {
312 if (paddr >= mem->physical_max &&
313 paddr < mem->physical_max+1048576)
314 mips_cpu_exception(cpu,
315 EXCEPTION_DBE, 0, vaddr, 0,
316 0, 0, 0);
317 }
318 #endif /* MEM_MIPS */
319 }
320
321 /* Hm? Shouldn't there be a DBE exception for
322 invalid writes as well? TODO */
323
324 goto do_return_ok;
325 }
326 }
327
328 #endif /* ifndef MEM_USERLAND */
329
330
331 /*
332 * Uncached access:
333 *
334 * 1) Translate the physical address to a host address.
335 *
336 * 2) Insert this virtual->physical->host translation into the
337 * fast translation arrays (using update_translation_table()).
338 *
339 * 3) If this was a Write, then invalidate any code translations
340 * in that page.
341 */
342 memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
343 writeflag);
344 if (memblock == NULL) {
345 if (writeflag == MEM_READ)
346 memset(data, 0, len);
347 goto do_return_ok;
348 }
349
350 offset = paddr & offset_mask;
351
352 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
353 #ifdef MEM_MIPS
354 /* Ugly hack for R2000/R3000 caches: */
355 && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
356 !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
357 #endif
358 #ifndef MEM_MIPS
359 /* && !(misc_flags & MEMORY_USER_ACCESS) */
360 #ifndef MEM_USERLAND
361 && !(ok & MEMORY_NOT_FULL_PAGE)
362 #endif
363 #endif
364 && !no_exceptions)
365 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
366 memblock, (misc_flags & MEMORY_USER_ACCESS) |
367 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
368 (cache == CACHE_INSTRUCTION?
369 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
370 #else
371 (writeflag == MEM_WRITE? 1 : 0),
372 #endif
373 paddr & ~offset_mask);
374
375 /*
376 * If writing, then invalidate code translations for the (physical)
377 * page address:
378 */
379 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
380 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
381
382 if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
383 printf("Write over memblock boundary?\n");
384 exit(1);
385 }
386
387 if (writeflag == MEM_WRITE) {
388 /* Ugly optimization, but it works: */
389 if (len == sizeof(uint32_t) && (offset & 3)==0
390 && ((size_t)data&3)==0)
391 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
392 else if (len == sizeof(uint8_t))
393 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
394 else
395 memcpy(memblock + offset, data, len);
396 } else {
397 /* Ugly optimization, but it works: */
398 if (len == sizeof(uint32_t) && (offset & 3)==0
399 && ((size_t)data&3)==0)
400 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
401 else if (len == sizeof(uint8_t))
402 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
403 else
404 memcpy(data, memblock + offset, len);
405 }
406
407
408 do_return_ok:
409 return MEMORY_ACCESS_OK;
410 }
411

  ViewVC Help
Powered by ViewVC 1.1.26