/[gxemul]/upstream/0.4.4/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Annotation of /upstream/0.4.4/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 35 - (hide annotations)
Mon Oct 8 16:21:26 2007 UTC (16 years, 7 months ago) by dpavlin
File MIME type: text/plain
File size: 11974 byte(s)
0.4.4
1 dpavlin 2 /*
2 dpavlin 34 * Copyright (C) 2003-2007 Anders Gavare. All rights reserved.
3 dpavlin 2 *
4     * Redistribution and use in source and binary forms, with or without
5     * modification, are permitted provided that the following conditions are met:
6     *
7     * 1. Redistributions of source code must retain the above copyright
8     * notice, this list of conditions and the following disclaimer.
9     * 2. Redistributions in binary form must reproduce the above copyright
10     * notice, this list of conditions and the following disclaimer in the
11     * documentation and/or other materials provided with the distribution.
12     * 3. The name of the author may not be used to endorse or promote products
13     * derived from this software without specific prior written permission.
14     *
15     * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16     * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17     * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18     * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19     * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20     * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21     * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22     * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23     * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24     * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25     * SUCH DAMAGE.
26     *
27     *
28 dpavlin 34 * $Id: memory_rw.c,v 1.101 2007/02/10 14:04:51 debug Exp $
29 dpavlin 2 *
30     * Generic memory_rw(), with special hacks for specific CPU families.
31     *
32     * Example for inclusion from memory_mips.c:
33     *
34     * MEMORY_RW should be mips_memory_rw
35     * MEM_MIPS should be defined
36     */
37    
38    
39     /*
40     * memory_rw():
41     *
42     * Read or write data from/to memory.
43     *
44     * cpu the cpu doing the read/write
45     * mem the memory object to use
46     * vaddr the virtual address
47     * data a pointer to the data to be written to memory, or
48     * a placeholder for data when reading from memory
49     * len the length of the 'data' buffer
50     * writeflag set to MEM_READ or MEM_WRITE
51 dpavlin 20 * misc_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 dpavlin 2 *
53     * If the address indicates access to a memory mapped device, that device's
54     * read/write access function is called.
55     *
56     * This function should not be called with cpu == NULL.
57     *
58     * Returns one of the following:
59     * MEMORY_ACCESS_FAILED
60     * MEMORY_ACCESS_OK
61     *
62     * (MEMORY_ACCESS_FAILED is 0.)
63     */
64     int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
65 dpavlin 20 unsigned char *data, size_t len, int writeflag, int misc_flags)
66 dpavlin 2 {
67 dpavlin 12 #ifdef MEM_ALPHA
68     const int offset_mask = 0x1fff;
69     #else
70     const int offset_mask = 0xfff;
71     #endif
72    
73 dpavlin 2 #ifndef MEM_USERLAND
74     int ok = 1;
75     #endif
76     uint64_t paddr;
77     int cache, no_exceptions, offset;
78     unsigned char *memblock;
79 dpavlin 22 int dyntrans_device_danger = 0;
80 dpavlin 12
81 dpavlin 20 no_exceptions = misc_flags & NO_EXCEPTIONS;
82     cache = misc_flags & CACHE_FLAGS_MASK;
83 dpavlin 2
84 dpavlin 4
85 dpavlin 2 #ifdef MEM_USERLAND
86 dpavlin 12 #ifdef MEM_ALPHA
87     paddr = vaddr;
88     #else
89 dpavlin 2 paddr = vaddr & 0x7fffffff;
90 dpavlin 12 #endif
91 dpavlin 24 #else /* !MEM_USERLAND */
92 dpavlin 26 if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
93 dpavlin 2 paddr = vaddr;
94     } else {
95 dpavlin 26 ok = cpu->translate_v2p(cpu, vaddr, &paddr,
96 dpavlin 2 (writeflag? FLAG_WRITEFLAG : 0) +
97     (no_exceptions? FLAG_NOEXCEPTIONS : 0)
98 dpavlin 14 #ifdef MEM_ARM
99 dpavlin 20 + (misc_flags & MEMORY_USER_ACCESS)
100 dpavlin 14 #endif
101 dpavlin 2 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
102 dpavlin 32
103     /*
104     * If the translation caused an exception, or was invalid in
105     * some way, then simply return without doing the memory
106     * access:
107     */
108 dpavlin 2 if (!ok)
109     return MEMORY_ACCESS_FAILED;
110     }
111    
112 dpavlin 24 #endif /* !MEM_USERLAND */
113 dpavlin 6
114 dpavlin 2
115     #ifndef MEM_USERLAND
116     /*
117     * Memory mapped device?
118     *
119 dpavlin 22 * TODO: if paddr < base, but len enough, then the device should
120     * still be written to!
121 dpavlin 2 */
122     if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
123     uint64_t orig_paddr = paddr;
124 dpavlin 22 int i, start, end, res;
125 dpavlin 4
126 dpavlin 34 #if 0
127    
128     TODO: The correct solution for this is to add RAM devices _around_ the
129     dangerous device. The solution below incurs a slowdown for _everything_,
130     not just the device in question.
131    
132 dpavlin 4 /*
133     * Really really slow, but unfortunately necessary. This is
134     * to avoid the folowing scenario:
135     *
136     * a) offsets 0x000..0x123 are normal memory
137     * b) offsets 0x124..0x777 are a device
138     *
139     * 1) a read is done from offset 0x100. the page is
140 dpavlin 22 * added to the dyntrans system as a "RAM" page
141     * 2) a dyntranslated read is done from offset 0x200,
142 dpavlin 4 * which should access the device, but since the
143     * entire page is added, it will access non-existant
144     * RAM instead, without warning.
145     *
146 dpavlin 22 * Setting dyntrans_device_danger = 1 on accesses which are
147 dpavlin 4 * on _any_ offset on pages that are device mapped avoids
148     * this problem, but it is probably not very fast.
149 dpavlin 22 *
150     * TODO: Convert this into a quick (multi-level, 64-bit)
151     * address space lookup, to find dangerous pages.
152 dpavlin 4 */
153 dpavlin 12 for (i=0; i<mem->n_mmapped_devices; i++)
154 dpavlin 32 if (paddr >= (mem->devices[i].baseaddr & ~offset_mask)&&
155     paddr <= ((mem->devices[i].endaddr-1)|offset_mask)){
156 dpavlin 22 dyntrans_device_danger = 1;
157 dpavlin 12 break;
158     }
159 dpavlin 22 #endif
160 dpavlin 4
161 dpavlin 22 start = 0; end = mem->n_mmapped_devices - 1;
162     i = mem->last_accessed_device;
163 dpavlin 2
164     /* Scan through all devices: */
165     do {
166 dpavlin 32 if (paddr >= mem->devices[i].baseaddr &&
167     paddr < mem->devices[i].endaddr) {
168 dpavlin 2 /* Found a device, let's access it: */
169     mem->last_accessed_device = i;
170    
171 dpavlin 32 paddr -= mem->devices[i].baseaddr;
172     if (paddr + len > mem->devices[i].length)
173     len = mem->devices[i].length - paddr;
174 dpavlin 2
175 dpavlin 12 if (cpu->update_translation_table != NULL &&
176 dpavlin 20 !(ok & MEMORY_NOT_FULL_PAGE) &&
177 dpavlin 32 mem->devices[i].flags & DM_DYNTRANS_OK) {
178 dpavlin 2 int wf = writeflag == MEM_WRITE? 1 : 0;
179 dpavlin 18 unsigned char *host_addr;
180 dpavlin 2
181 dpavlin 32 if (!(mem->devices[i].flags &
182 dpavlin 20 DM_DYNTRANS_WRITE_OK))
183 dpavlin 18 wf = 0;
184    
185     if (writeflag && wf) {
186 dpavlin 32 if (paddr < mem->devices[i].
187     dyntrans_write_low)
188     mem->devices[i].
189     dyntrans_write_low =
190     paddr &~offset_mask;
191     if (paddr >= mem->devices[i].
192     dyntrans_write_high)
193     mem->devices[i].
194     dyntrans_write_high =
195     paddr | offset_mask;
196 dpavlin 2 }
197    
198 dpavlin 32 if (mem->devices[i].flags &
199 dpavlin 20 DM_EMULATED_RAM) {
200 dpavlin 18 /* MEM_WRITE to force the page
201     to be allocated, if it
202     wasn't already */
203 dpavlin 32 uint64_t *pp = (uint64_t *)mem->
204     devices[i].dyntrans_data;
205 dpavlin 18 uint64_t p = orig_paddr - *pp;
206     host_addr =
207     memory_paddr_to_hostaddr(
208 dpavlin 28 mem, p & ~offset_mask,
209     MEM_WRITE);
210 dpavlin 18 } else {
211 dpavlin 32 host_addr = mem->devices[i].
212     dyntrans_data +
213 dpavlin 18 (paddr & ~offset_mask);
214     }
215 dpavlin 28
216 dpavlin 12 cpu->update_translation_table(cpu,
217 dpavlin 18 vaddr & ~offset_mask, host_addr,
218 dpavlin 12 wf, orig_paddr & ~offset_mask);
219 dpavlin 2 }
220    
221 dpavlin 6 res = 0;
222 dpavlin 32 if (!no_exceptions || (mem->devices[i].flags &
223 dpavlin 20 DM_READS_HAVE_NO_SIDE_EFFECTS))
224 dpavlin 32 res = mem->devices[i].f(cpu, mem, paddr,
225 dpavlin 6 data, len, writeflag,
226 dpavlin 32 mem->devices[i].extra);
227 dpavlin 2
228     if (res == 0)
229     res = -1;
230    
231     /*
232     * If accessing the memory mapped device
233     * failed, then return with a DBE exception.
234     */
235 dpavlin 6 if (res <= 0 && !no_exceptions) {
236 dpavlin 2 debug("%s device '%s' addr %08lx "
237     "failed\n", writeflag?
238     "writing to" : "reading from",
239 dpavlin 32 mem->devices[i].name, (long)paddr);
240 dpavlin 2 #ifdef MEM_MIPS
241     mips_cpu_exception(cpu, EXCEPTION_DBE,
242     0, vaddr, 0, 0, 0, 0);
243     #endif
244     return MEMORY_ACCESS_FAILED;
245     }
246     goto do_return_ok;
247     }
248    
249 dpavlin 32 if (paddr < mem->devices[i].baseaddr)
250 dpavlin 22 end = i - 1;
251 dpavlin 32 if (paddr >= mem->devices[i].endaddr)
252 dpavlin 22 start = i + 1;
253     i = (start + end) >> 1;
254     } while (start <= end);
255 dpavlin 2 }
256    
257    
258     #ifdef MEM_MIPS
259     /*
260     * Data and instruction cache emulation:
261     */
262    
263     switch (cpu->cd.mips.cpu_type.mmu_model) {
264     case MMU3K:
265     /* if not uncached addess (TODO: generalize this) */
266 dpavlin 20 if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
267 dpavlin 2 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
268     (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
269     if (memory_cache_R3000(cpu, cache, paddr,
270     writeflag, len, data))
271     goto do_return_ok;
272     }
273     break;
274     default:
275     /* R4000 etc */
276     /* TODO */
277     ;
278     }
279     #endif /* MEM_MIPS */
280    
281    
282     /* Outside of physical RAM? */
283     if (paddr >= mem->physical_max) {
284 dpavlin 6 #ifdef MEM_MIPS
285     if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
286 dpavlin 2 /* Ok, this is PROM stuff */
287     } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
288     /* Sprite reads from this area of memory... */
289     /* TODO: is this still correct? */
290     if (writeflag == MEM_READ)
291     memset(data, 0, len);
292     goto do_return_ok;
293 dpavlin 6 } else
294     #endif /* MIPS */
295     {
296 dpavlin 34 if (paddr >= mem->physical_max && !no_exceptions)
297     memory_warn_about_unimplemented_addr
298     (cpu, mem, writeflag, paddr, data, len);
299 dpavlin 12
300 dpavlin 2 if (writeflag == MEM_READ) {
301     /* Return all zeroes? (Or 0xff? TODO) */
302     memset(data, 0, len);
303    
304     #ifdef MEM_MIPS
305     /*
306     * For real data/instruction accesses, cause
307     * an exceptions on an illegal read:
308     */
309     if (cache != CACHE_NONE && cpu->machine->
310 dpavlin 6 dbe_on_nonexistant_memaccess &&
311     !no_exceptions) {
312 dpavlin 2 if (paddr >= mem->physical_max &&
313     paddr < mem->physical_max+1048576)
314     mips_cpu_exception(cpu,
315     EXCEPTION_DBE, 0, vaddr, 0,
316     0, 0, 0);
317     }
318     #endif /* MEM_MIPS */
319     }
320    
321     /* Hm? Shouldn't there be a DBE exception for
322     invalid writes as well? TODO */
323    
324     goto do_return_ok;
325     }
326     }
327    
328     #endif /* ifndef MEM_USERLAND */
329    
330    
331     /*
332     * Uncached access:
333 dpavlin 18 *
334     * 1) Translate the physical address to a host address.
335     *
336     * 2) Insert this virtual->physical->host translation into the
337     * fast translation arrays (using update_translation_table()).
338     *
339     * 3) If this was a Write, then invalidate any code translations
340     * in that page.
341 dpavlin 2 */
342 dpavlin 28 memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
343     writeflag);
344 dpavlin 2 if (memblock == NULL) {
345     if (writeflag == MEM_READ)
346     memset(data, 0, len);
347     goto do_return_ok;
348     }
349    
350 dpavlin 28 offset = paddr & offset_mask;
351 dpavlin 2
352 dpavlin 22 if (cpu->update_translation_table != NULL && !dyntrans_device_danger
353 dpavlin 26 #ifdef MEM_MIPS
354     /* Ugly hack for R2000/R3000 caches: */
355     && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
356     !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
357     #endif
358 dpavlin 18 #ifndef MEM_MIPS
359 dpavlin 20 /* && !(misc_flags & MEMORY_USER_ACCESS) */
360 dpavlin 18 #ifndef MEM_USERLAND
361     && !(ok & MEMORY_NOT_FULL_PAGE)
362     #endif
363     #endif
364 dpavlin 16 && !no_exceptions)
365 dpavlin 12 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
366 dpavlin 28 memblock, (misc_flags & MEMORY_USER_ACCESS) |
367 dpavlin 20 #if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
368 dpavlin 18 (cache == CACHE_INSTRUCTION?
369 dpavlin 20 (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
370 dpavlin 2 #else
371 dpavlin 18 (writeflag == MEM_WRITE? 1 : 0),
372 dpavlin 2 #endif
373 dpavlin 12 paddr & ~offset_mask);
374 dpavlin 2
375 dpavlin 32 /*
376     * If writing, then invalidate code translations for the (physical)
377     * page address:
378     */
379 dpavlin 20 if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
380 dpavlin 14 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
381    
382 dpavlin 28 if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
383     printf("Write over memblock boundary?\n");
384     exit(1);
385     }
386    
387 dpavlin 2 if (writeflag == MEM_WRITE) {
388 dpavlin 12 /* Ugly optimization, but it works: */
389     if (len == sizeof(uint32_t) && (offset & 3)==0
390     && ((size_t)data&3)==0)
391 dpavlin 2 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
392     else if (len == sizeof(uint8_t))
393     *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
394     else
395     memcpy(memblock + offset, data, len);
396     } else {
397 dpavlin 12 /* Ugly optimization, but it works: */
398     if (len == sizeof(uint32_t) && (offset & 3)==0
399     && ((size_t)data&3)==0)
400 dpavlin 2 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
401     else if (len == sizeof(uint8_t))
402     *(uint8_t *)data = *(uint8_t *)(memblock + offset);
403     else
404     memcpy(data, memblock + offset, len);
405     }
406    
407    
408     do_return_ok:
409     return MEMORY_ACCESS_OK;
410     }
411    

  ViewVC Help
Powered by ViewVC 1.1.26