/[gxemul]/trunk/src/cpus/memory_arm.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/src/cpus/memory_arm.c



Revision 26
Mon Oct 8 16:20:10 2007 UTC by dpavlin
File MIME type: text/plain
File size: 8065 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1264 2006/06/25 11:08:04 debug Exp $
20060624	Replacing the error-prone machine type initialization stuff
		with something more reasonable.
		Finally removing the old "cpu_run" kludge; moving around stuff
		in machine.c and emul.c to better suit the dyntrans system.
		Various minor dyntrans cleanups (renaming translate_address to
		translate_v2p, and experimenting with template physpages).
20060625	Removing the speed hack which separated the vph entries into
		two halves (code vs data); things seem a lot more stable now.
		Minor performance hack: R2000/R3000 cache isolation now only
		clears address translations when going into isolation, not
		when going out of it.
		Fixing the MIPS interrupt problems by letting mtc0 immediately
		cause interrupts.

==============  RELEASE 0.4.0.1  ==============


/*
 * Copyright (C) 2005-2006 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory_arm.c,v 1.35 2006/06/24 21:47:23 debug Exp $
 *
 *
 * TODO/NOTE: The B and/or C bits could also cause the return value to
 * be MEMORY_NOT_FULL_PAGE, to make sure it doesn't get entered into the
 * translation arrays. TODO: Find out if this is a good thing to do.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "arm_cpu_types.h"
#include "cpu.h"
#include "memory.h"
#include "misc.h"

#include "armreg.h"

extern int quiet_mode;


/*
 * arm_translate_v2p():
 *
 * Address translation with the MMU disabled. (Just treat the virtual address
 * as a physical address.)
 */
int arm_translate_v2p(struct cpu *cpu, uint64_t vaddr64,
    uint64_t *return_paddr, int flags)
{
        *return_paddr = vaddr64 & 0xffffffff;

        return 2;
}


/*
 * arm_check_access():
 *
 * Helper function. Returns 0 for no access, 1 for read-only, and 2 for
 * read/write.
 */
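/*
 * The AP field is interpreted below roughly as on ARMv4/v5:
 * ap=0: access is controlled by the S and R bits of the control register
 *       (S set: privileged read-only; R set: read-only for everyone;
 *       neither: no access; both: UNPREDICTABLE),
 * ap=1: privileged read/write, no user access,
 * ap=2: privileged read/write, user read-only,
 * ap=3: read/write for everyone.
 */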
static int arm_check_access(struct cpu *cpu, int ap, int dav, int user)
{
        int s, r;

        switch (dav) {
        case 0: /* No access at all. */
                return 0;
        case 1: /* Normal access check. */
                break;
        case 2: fatal("arm_check_access(): dav value 2 shouldn't be used\n");
                exit(1);
        case 3: /* Anything is allowed. */
                return 2;
        }

        switch (ap) {
        case 0: s = (cpu->cd.arm.control & ARM_CONTROL_S)? 1 : 0;
                r = (cpu->cd.arm.control & ARM_CONTROL_R)? 2 : 0;
                switch (s + r) {
                case 0: return 0;
                case 1: return user? 0 : 1;
                case 2: return 1;
                }
                fatal("arm_check_access: UNPREDICTABLE s+r value!\n");
                return 0;
        case 1: return user? 0 : 2;
        case 2: return user? 1 : 2;
        }

        /* "case 3": */
        return 2;
}


/*
 * arm_translate_v2p_mmu():
 *
 * Don't call this function if userland_emul is non-NULL, or if cpu is NULL.
 *
 * Return values:
 *    0  Failure
 *    1  Success, the page is readable only
 *    2  Success, the page is read/write
 *
 * If this is a 1KB page access, or an access to a page whose subpages have
 * differing access permissions, then the return value is ORed with
 * MEMORY_NOT_FULL_PAGE.
 */
int arm_translate_v2p_mmu(struct cpu *cpu, uint64_t vaddr64,
    uint64_t *return_paddr, int flags)
{
        unsigned char *q;
        uint32_t addr, d=0, d2 = (uint32_t)(int32_t)-1, ptba, vaddr = vaddr64;
        int instr = flags & FLAG_INSTR;
        int writeflag = (flags & FLAG_WRITEFLAG)? 1 : 0;
        int useraccess = flags & MEMORY_USER_ACCESS;
        int no_exceptions = flags & FLAG_NOEXCEPTIONS;
        int user = (cpu->cd.arm.cpsr & ARM_FLAG_MODE) == ARM_MODE_USR32;
        int domain, dav, ap0,ap1,ap2,ap3, ap = 0, access = 0;
        int fs = 2; /* fault status (2 = terminal exception) */
        int subpage = 0;

        if (useraccess)
                user = 1;

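        /*
         * Byte offset of the first-level descriptor: the top 12 bits of the
         * virtual address index a table of 4-byte entries, i.e. this is
         * (vaddr >> 20) * 4.
         */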
        addr = ((vaddr & 0xfff00000ULL) >> 18);

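        /*
         * Refresh the cached host pointer to the translation table whenever
         * the TTB register has changed since the last lookup:
         */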
        if (cpu->cd.arm.translation_table == NULL ||
            cpu->cd.arm.ttb != cpu->cd.arm.last_ttb) {
                uint32_t ofs;
                cpu->cd.arm.translation_table = memory_paddr_to_hostaddr(
                    cpu->mem, cpu->cd.arm.ttb & 0x0fffffff, 0);
                if (cpu->cd.arm.translation_table != NULL) {
                        ofs = cpu->cd.arm.ttb & ((1 << BITS_PER_MEMBLOCK) - 1);
                        cpu->cd.arm.translation_table += ofs;
                }
                cpu->cd.arm.last_ttb = cpu->cd.arm.ttb;
        }

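        /*
         * Read the first-level descriptor directly from host memory,
         * byte-swapping it if the host's and the emulated CPU's endianness
         * differ:
         */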
        if (cpu->cd.arm.translation_table != NULL) {
                d = *(uint32_t *)(cpu->cd.arm.translation_table + addr);
#ifdef HOST_LITTLE_ENDIAN
                if (cpu->byte_order == EMUL_BIG_ENDIAN)
#else
                if (cpu->byte_order == EMUL_LITTLE_ENDIAN)
#endif
                        d = ((d & 0xff) << 24) | ((d & 0xff00) << 8) |
                            ((d & 0xff0000) >> 8) | ((d & 0xff000000) >> 24);
        }

        /* Get the domain from the descriptor, and the Domain Access Value: */
        domain = (d >> 5) & 15;
        dav = (cpu->cd.arm.dacr >> (domain * 2)) & 3;

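        /*
         * The two low bits of the first-level descriptor select its type:
         * 0 = fault, 1 = coarse page table, 2 = section (1 MB), 3 = fine
         * page table (not implemented here).
         */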
        switch (d & 3) {

        case 0: domain = 0;
                fs = FAULT_TRANS_S;
                goto exception_return;

        case 1: /* Coarse Pagetable: */
                if (dav == 0) {
                        fs = FAULT_DOMAIN_P;
                        goto exception_return;
                }
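                /*
                 * The coarse page table base is in the top 22 bits of the
                 * descriptor; bits 19..12 of the virtual address select one
                 * of its 256 4-byte entries:
                 */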
                ptba = d & 0xfffffc00;
                addr = ptba + ((vaddr & 0x000ff000) >> 10);

                q = memory_paddr_to_hostaddr(cpu->mem, addr & 0x0fffffff, 0);
                if (q == NULL) {
                        printf("arm_translate_v2p_mmu(): could not get a host"
                            " address for the coarse page table\n");
                        exit(1);
                }
                d2 = *(uint32_t *)(q + (addr & ((1 << BITS_PER_MEMBLOCK) - 1)));
#ifdef HOST_LITTLE_ENDIAN
                if (cpu->byte_order == EMUL_BIG_ENDIAN)
#else
                if (cpu->byte_order == EMUL_LITTLE_ENDIAN)
#endif
                        d2 = ((d2 & 0xff) << 24) | ((d2 & 0xff00) << 8) |
                            ((d2 & 0xff0000) >> 8) | ((d2 & 0xff000000) >> 24);

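                /*
                 * The two low bits of the second-level descriptor select the
                 * page size: 0 = fault, 1 = large (64KB) page, 2 = small
                 * (4KB) page, 3 = tiny (1KB) page, or an extended small
                 * (4KB) page on XScale:
                 */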
                switch (d2 & 3) {
                case 0: fs = FAULT_TRANS_P;
                        goto exception_return;
                case 1: /* Large (64KB) page: */
                        ap = (d2 >> 4) & 255;
                        switch (vaddr & 0x0000c000) {
                        case 0x4000: ap >>= 2; break;
                        case 0x8000: ap >>= 4; break;
                        case 0xc000: ap >>= 6; break;
                        }
                        ap &= 3;
                        *return_paddr = (d2 & 0xffff0000)|(vaddr & 0x0000ffff);
                        break;
                case 3: if (cpu->cd.arm.cpu_type.flags & ARM_XSCALE) {
                                /* Extended small (4KB) page (XScale): */
                                subpage = 0;
                        } else {
                                /* Tiny (1KB) page: */
                                subpage = 1;
                                ap = (d2 >> 4) & 3;
                                *return_paddr = (d2 & 0xfffffc00) |
                                    (vaddr & 0x000003ff);
                                break;
                        }
                        /* NOTE: Fall-through for XScale! */
                case 2: /* Small (4KB) page: */
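                        /*
                         * Small-page descriptors carry four AP fields, one
                         * per 1KB subpage; virtual address bits 11..10
                         * select the one that applies:
                         */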
                        ap3 = (d2 >> 10) & 3;
                        ap2 = (d2 >> 8) & 3;
                        ap1 = (d2 >> 6) & 3;
                        ap0 = (d2 >> 4) & 3;
                        switch (vaddr & 0x00000c00) {
                        case 0x000: ap = ap0; break;
                        case 0x400: ap = ap1; break;
                        case 0x800: ap = ap2; break;
                        default: ap = ap3;
                        }
                        /* NOTE: Ugly hack for XScale: */
                        if ((d2 & 3) == 3) {
                                /* Treated as 4KB page: */
                                ap = ap0;
                        } else {
                                if (ap0 != ap1 || ap0 != ap2 || ap0 != ap3)
                                        subpage = 1;
                        }
                        *return_paddr = (d2 & 0xfffff000)|(vaddr & 0x00000fff);
                        break;
                }
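                /*
                 * writeflag is 0 for reads and 1 for writes; access is 1 for
                 * read-only and 2 for read/write, so "access > writeflag"
                 * means the requested access is permitted:
                 */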
                access = arm_check_access(cpu, ap, dav, user);
                if (access > writeflag)
                        return access | (subpage? MEMORY_NOT_FULL_PAGE : 0);
                fs = FAULT_PERM_P;
                goto exception_return;

        case 2: /* Section descriptor: */
                if (dav == 0) {
                        fs = FAULT_DOMAIN_S;
                        goto exception_return;
                }
                *return_paddr = (d & 0xfff00000) | (vaddr & 0x000fffff);
                ap = (d >> 10) & 3;
                access = arm_check_access(cpu, ap, dav, user);
                if (access > writeflag)
                        return access;
                fs = FAULT_PERM_S;
                goto exception_return;

        default:fatal("TODO: descriptor for vaddr 0x%08x: 0x%08x ("
                    "unimplemented type %i)\n", vaddr, d, d&3);
                exit(1);
        }

exception_return:
        if (no_exceptions)
                return 0;

        if (!quiet_mode) {
                fatal("{ arm memory fault: vaddr=0x%08x domain=%i dav=%i ap=%i "
                    "access=%i user=%i", (int)vaddr, domain, dav, ap,
                    access, user);
                fatal(" d=0x%08x d2=0x%08x pc=0x%08x }\n", d, d2, (int)cpu->pc);
        }

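        /*
         * Instruction fetches raise a prefetch abort; data accesses record
         * the faulting address in FAR and the domain (bits 7..4) plus fault
         * status (bits 3..0) in FSR before raising a data abort:
         */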
        if (instr)
                arm_exception(cpu, ARM_EXCEPTION_PREF_ABT);
        else {
                cpu->cd.arm.far = vaddr;
                cpu->cd.arm.fsr = (domain << 4) | fs;
                arm_exception(cpu, ARM_EXCEPTION_DATA_ABT);
        }

        return 0;
}
