1 |
/* |
/* |
2 |
* Copyright (C) 2005 Anders Gavare. All rights reserved. |
* Copyright (C) 2005-2006 Anders Gavare. All rights reserved. |
3 |
* |
* |
4 |
* Redistribution and use in source and binary forms, with or without |
* Redistribution and use in source and binary forms, with or without |
5 |
* modification, are permitted provided that the following conditions are met: |
* modification, are permitted provided that the following conditions are met: |
25 |
* SUCH DAMAGE. |
* SUCH DAMAGE. |
26 |
* |
* |
27 |
* |
* |
28 |
* $Id: memory_arm.c,v 1.23 2005/10/07 15:19:48 debug Exp $ |
* $Id: memory_arm.c,v 1.34 2006/06/02 18:11:38 debug Exp $ |
29 |
* |
* |
30 |
* |
* |
31 |
* TODO/NOTE: There are probably two solutions to the subpage access |
* TODO/NOTE: The B and/or C bits could also cause the return value to |
32 |
* permission problem: |
* be MEMORY_NOT_FULL_PAGE, to make sure it doesn't get entered into the |
33 |
* |
* translation arrays. TODO: Find out if this is a good thing to do. |
|
* a) the obvious (almost trivial) solution is to decrease the native page |
|
|
* size from 4 KB to 1 KB. That would ruin the rest of the translation |
|
|
* system though. (It would be infeasible to hold the entire address |
|
|
* space in 1-level tables.) |
|
|
* |
|
|
* b) to return something else than just 0, 1, or 2 from arm_memory_rw(). |
|
|
* Perhaps |4, which would indicate that the vaddr => paddr conversion |
|
|
* was done, but that it should not be entered into the cache. This could |
|
|
* also be used in combination with the B and C bits (which are currently |
|
|
* ignored). |
|
|
* |
|
|
* b would probably be the best solution. |
|
34 |
*/ |
*/ |
35 |
|
|
36 |
#include <stdio.h> |
#include <stdio.h> |
37 |
#include <stdlib.h> |
#include <stdlib.h> |
38 |
#include <string.h> |
#include <string.h> |
39 |
|
|
40 |
|
#include "arm_cpu_types.h" |
41 |
#include "cpu.h" |
#include "cpu.h" |
42 |
#include "memory.h" |
#include "memory.h" |
43 |
#include "misc.h" |
#include "misc.h" |
48 |
|
|
49 |
|
|
/*
 *  arm_translate_address():
 *
 *  Address translation with the MMU disabled: the virtual address maps
 *  directly onto the physical address space.
 */
int arm_translate_address(struct cpu *cpu, uint64_t vaddr64,
	uint64_t *return_addr, int flags)
{
	/*  Identity mapping; only the low 32 bits are significant:  */
	uint64_t paddr = vaddr64 & 0xffffffff;
	*return_addr = paddr;

	/*  2 = success, the page is read/write:  */
	return 2;
}
61 |
|
|
62 |
|
|
63 |
|
/* |
64 |
* arm_check_access(): |
* arm_check_access(): |
65 |
* |
* |
66 |
* Helper function. Returns 0 for no access, 1 for read-only, and 2 for |
* Helper function. Returns 0 for no access, 1 for read-only, and 2 for |
101 |
|
|
102 |
|
|
103 |
/* |
/* |
104 |
* arm_translate_address(): |
* arm_translate_address_mmu(): |
105 |
* |
* |
106 |
* Don't call this function is userland_emul is non-NULL, or cpu is NULL. |
* Don't call this function is userland_emul is non-NULL, or cpu is NULL. |
107 |
* |
* |
109 |
* 0 Failure |
* 0 Failure |
110 |
* 1 Success, the page is readable only |
* 1 Success, the page is readable only |
111 |
* 2 Success, the page is read/write |
* 2 Success, the page is read/write |
112 |
|
* |
113 |
|
* If this is a 1KB page access, then the return value is ORed with |
114 |
|
* MEMORY_NOT_FULL_PAGE. |
115 |
*/ |
*/ |
116 |
int arm_translate_address(struct cpu *cpu, uint64_t vaddr64, |
int arm_translate_address_mmu(struct cpu *cpu, uint64_t vaddr64, |
117 |
uint64_t *return_addr, int flags) |
uint64_t *return_addr, int flags) |
118 |
{ |
{ |
119 |
unsigned char descr[4]; |
unsigned char *q; |
120 |
uint32_t addr, d, d2 = (uint32_t)(int32_t)-1, ptba, vaddr = vaddr64; |
uint32_t addr, d=0, d2 = (uint32_t)(int32_t)-1, ptba, vaddr = vaddr64; |
|
int d2_in_use = 0, d_in_use = 1; |
|
121 |
int instr = flags & FLAG_INSTR; |
int instr = flags & FLAG_INSTR; |
122 |
int writeflag = (flags & FLAG_WRITEFLAG)? 1 : 0; |
int writeflag = (flags & FLAG_WRITEFLAG)? 1 : 0; |
123 |
int useraccess = flags & MEMORY_USER_ACCESS; |
int useraccess = flags & MEMORY_USER_ACCESS; |
125 |
int user = (cpu->cd.arm.cpsr & ARM_FLAG_MODE) == ARM_MODE_USR32; |
int user = (cpu->cd.arm.cpsr & ARM_FLAG_MODE) == ARM_MODE_USR32; |
126 |
int domain, dav, ap0,ap1,ap2,ap3, ap = 0, access = 0; |
int domain, dav, ap0,ap1,ap2,ap3, ap = 0, access = 0; |
127 |
int fs = 2; /* fault status (2 = terminal exception) */ |
int fs = 2; /* fault status (2 = terminal exception) */ |
128 |
|
int subpage = 0; |
|
if (!(cpu->cd.arm.control & ARM_CONTROL_MMU)) { |
|
|
*return_addr = vaddr; |
|
|
return 2; |
|
|
} |
|
129 |
|
|
130 |
if (useraccess) |
if (useraccess) |
131 |
user = 1; |
user = 1; |
132 |
|
|
133 |
addr = cpu->cd.arm.ttb + ((vaddr & 0xfff00000ULL) >> 18); |
addr = ((vaddr & 0xfff00000ULL) >> 18); |
134 |
if (!cpu->memory_rw(cpu, cpu->mem, addr, &descr[0], |
|
135 |
sizeof(descr), MEM_READ, PHYSICAL | NO_EXCEPTIONS)) { |
if (cpu->cd.arm.translation_table == NULL || |
136 |
fatal("arm_translate_address(): huh?\n"); |
cpu->cd.arm.ttb != cpu->cd.arm.last_ttb) { |
137 |
exit(1); |
uint32_t ofs; |
138 |
|
cpu->cd.arm.translation_table = memory_paddr_to_hostaddr( |
139 |
|
cpu->mem, cpu->cd.arm.ttb & 0x0fffffff, 0); |
140 |
|
if (cpu->cd.arm.translation_table != NULL) { |
141 |
|
ofs = cpu->cd.arm.ttb & ((1 << BITS_PER_MEMBLOCK) - 1); |
142 |
|
cpu->cd.arm.translation_table += ofs; |
143 |
|
} |
144 |
|
cpu->cd.arm.last_ttb = cpu->cd.arm.ttb; |
145 |
} |
} |
|
if (cpu->byte_order == EMUL_LITTLE_ENDIAN) |
|
|
d = descr[0] + (descr[1] << 8) + (descr[2] << 16) |
|
|
+ (descr[3] << 24); |
|
|
else |
|
|
d = descr[3] + (descr[2] << 8) + (descr[1] << 16) |
|
|
+ (descr[0] << 24); |
|
146 |
|
|
147 |
/* fatal("vaddr=0x%08x ttb=0x%08x addr=0x%08x d=0x%08x\n", |
if (cpu->cd.arm.translation_table != NULL) { |
148 |
vaddr, cpu->cd.arm.ttb, addr, d); */ |
d = *(uint32_t *)(cpu->cd.arm.translation_table + addr); |
149 |
|
#ifdef HOST_LITTLE_ENDIAN |
150 |
|
if (cpu->byte_order == EMUL_BIG_ENDIAN) |
151 |
|
#else |
152 |
|
if (cpu->byte_order == EMUL_LITTLE_ENDIAN) |
153 |
|
#endif |
154 |
|
d = ((d & 0xff) << 24) | ((d & 0xff00) << 8) | |
155 |
|
((d & 0xff0000) >> 8) | ((d & 0xff000000) >> 24); |
156 |
|
} |
157 |
|
|
158 |
/* Get the domain from the descriptor, and the Domain Access Value: */ |
/* Get the domain from the descriptor, and the Domain Access Value: */ |
159 |
domain = (d >> 5) & 15; |
domain = (d >> 5) & 15; |
161 |
|
|
162 |
switch (d & 3) { |
switch (d & 3) { |
163 |
|
|
164 |
case 0: d_in_use = 0; |
case 0: domain = 0; |
|
domain = 0; |
|
165 |
fs = FAULT_TRANS_S; |
fs = FAULT_TRANS_S; |
166 |
goto exception_return; |
goto exception_return; |
167 |
|
|
168 |
case 1: /* Course Pagetable: */ |
case 1: /* Course Pagetable: */ |
169 |
|
if (dav == 0) { |
170 |
|
fs = FAULT_DOMAIN_P; |
171 |
|
goto exception_return; |
172 |
|
} |
173 |
ptba = d & 0xfffffc00; |
ptba = d & 0xfffffc00; |
174 |
addr = ptba + ((vaddr & 0x000ff000) >> 10); |
addr = ptba + ((vaddr & 0x000ff000) >> 10); |
175 |
if (!cpu->memory_rw(cpu, cpu->mem, addr, &descr[0], |
|
176 |
sizeof(descr), MEM_READ, PHYSICAL | NO_EXCEPTIONS)) { |
q = memory_paddr_to_hostaddr(cpu->mem, addr & 0x0fffffff, 0); |
177 |
fatal("arm_translate_address(): huh 2?\n"); |
if (q == NULL) { |
178 |
|
printf("arm memory blah blah adfh asfg asdgasdg\n"); |
179 |
exit(1); |
exit(1); |
180 |
} |
} |
181 |
|
d2 = *(uint32_t *)(q + (addr & ((1 << BITS_PER_MEMBLOCK) - 1))); |
182 |
|
#ifdef HOST_LITTLE_ENDIAN |
183 |
|
if (cpu->byte_order == EMUL_BIG_ENDIAN) |
184 |
|
#else |
185 |
if (cpu->byte_order == EMUL_LITTLE_ENDIAN) |
if (cpu->byte_order == EMUL_LITTLE_ENDIAN) |
186 |
d2 = descr[0] + (descr[1] << 8) + (descr[2] << 16) |
#endif |
187 |
+ (descr[3] << 24); |
d2 = ((d2 & 0xff) << 24) | ((d2 & 0xff00) << 8) | |
188 |
else |
((d2 & 0xff0000) >> 8) | ((d2 & 0xff000000) >> 24); |
|
d2 = descr[3] + (descr[2] << 8) + (descr[1] << 16) |
|
|
+ (descr[0] << 24); |
|
|
d2_in_use = 1; |
|
189 |
|
|
190 |
switch (d2 & 3) { |
switch (d2 & 3) { |
191 |
case 0: fs = FAULT_TRANS_P; |
case 0: fs = FAULT_TRANS_P; |
200 |
ap &= 3; |
ap &= 3; |
201 |
*return_addr = (d2 & 0xffff0000) | (vaddr & 0x0000ffff); |
*return_addr = (d2 & 0xffff0000) | (vaddr & 0x0000ffff); |
202 |
break; |
break; |
203 |
|
case 3: if (cpu->cd.arm.cpu_type.flags & ARM_XSCALE) { |
204 |
|
/* 4KB page (Xscale) */ |
205 |
|
subpage = 0; |
206 |
|
} else { |
207 |
|
/* 1KB page */ |
208 |
|
subpage = 1; |
209 |
|
ap = (d2 >> 4) & 3; |
210 |
|
*return_addr = (d2 & 0xfffffc00) | |
211 |
|
(vaddr & 0x000003ff); |
212 |
|
break; |
213 |
|
} |
214 |
|
/* NOTE: Fall-through for XScale! */ |
215 |
case 2: /* 4KB page: */ |
case 2: /* 4KB page: */ |
216 |
ap3 = (d2 >> 10) & 3; |
ap3 = (d2 >> 10) & 3; |
217 |
ap2 = (d2 >> 8) & 3; |
ap2 = (d2 >> 8) & 3; |
223 |
case 0x800: ap = ap2; break; |
case 0x800: ap = ap2; break; |
224 |
default: ap = ap3; |
default: ap = ap3; |
225 |
} |
} |
226 |
#if 0 |
/* NOTE: Ugly hack for XScale: */ |
227 |
if ((ap0 != ap1 || ap0 != ap2 || ap0 != ap3) && |
if ((d2 & 3) == 3) { |
228 |
!no_exceptions) |
/* Treated as 4KB page: */ |
229 |
fatal("WARNING: vaddr = 0x%08x, small page, but" |
ap = ap0; |
230 |
" different access permissions for the sub" |
} else { |
231 |
"pages! This is not really implemented " |
if (ap0 != ap1 || ap0 != ap2 || ap0 != ap3) |
232 |
"yet.\n", (int)vaddr); |
subpage = 1; |
233 |
#endif |
} |
234 |
*return_addr = (d2 & 0xfffff000) | (vaddr & 0x00000fff); |
*return_addr = (d2 & 0xfffff000) | (vaddr & 0x00000fff); |
235 |
break; |
break; |
|
case 3: /* 1KB page: */ |
|
|
fatal("WARNING: 1 KB page! Not implemented yet.\n"); |
|
|
ap = (d2 >> 4) & 3; |
|
|
*return_addr = (d2 & 0xfffffc00) | (vaddr & 0x000003ff); |
|
|
break; |
|
|
} |
|
|
if (dav == 0) { |
|
|
fs = FAULT_DOMAIN_P; |
|
|
goto exception_return; |
|
236 |
} |
} |
237 |
access = arm_check_access(cpu, ap, dav, user); |
access = arm_check_access(cpu, ap, dav, user); |
238 |
if (access > writeflag) |
if (access > writeflag) |
239 |
return access; |
return access | (subpage? MEMORY_NOT_FULL_PAGE : 0); |
240 |
fs = FAULT_PERM_P; |
fs = FAULT_PERM_P; |
241 |
goto exception_return; |
goto exception_return; |
242 |
|
|
243 |
case 2: /* Section descriptor: */ |
case 2: /* Section descriptor: */ |
|
*return_addr = (d & 0xfff00000) | (vaddr & 0x000fffff); |
|
244 |
if (dav == 0) { |
if (dav == 0) { |
245 |
fs = FAULT_DOMAIN_S; |
fs = FAULT_DOMAIN_S; |
246 |
goto exception_return; |
goto exception_return; |
247 |
} |
} |
248 |
|
*return_addr = (d & 0xfff00000) | (vaddr & 0x000fffff); |
249 |
ap = (d >> 10) & 3; |
ap = (d >> 10) & 3; |
250 |
access = arm_check_access(cpu, ap, dav, user); |
access = arm_check_access(cpu, ap, dav, user); |
251 |
if (access > writeflag) |
if (access > writeflag) |
266 |
fatal("{ arm memory fault: vaddr=0x%08x domain=%i dav=%i ap=%i " |
fatal("{ arm memory fault: vaddr=0x%08x domain=%i dav=%i ap=%i " |
267 |
"access=%i user=%i", (int)vaddr, domain, dav, ap, |
"access=%i user=%i", (int)vaddr, domain, dav, ap, |
268 |
access, user); |
access, user); |
269 |
if (d_in_use) |
fatal(" d=0x%08x d2=0x%08x pc=0x%08x }\n", d, d2, (int)cpu->pc); |
|
fatal(" d=0x%08x", d); |
|
|
if (d2_in_use) |
|
|
fatal(" d2=0x%08x", d2); |
|
|
fatal(" }\n"); |
|
270 |
} |
} |
271 |
|
|
272 |
if (instr) |
if (instr) |