/[gxemul]/upstream/0.4.4/src/cpus/memory_sh.c
Revision 35 - Mon Oct 8 16:21:26 2007 UTC by dpavlin
0.4.4
/*
 * Copyright (C) 2006-2007 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory_sh.c,v 1.15 2007/01/29 18:06:37 debug Exp $
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"

#include "sh4_exception.h"
#include "sh4_mmu.h"


/*
 * translate_via_mmu():
 *
 * Scan the UTLB for a matching virtual address. If a match was found, then
 * check permission bits etc. If everything was ok, then return the physical
 * page address, otherwise cause an exception.
 *
 * The implementation should (hopefully) be quite complete, except for lack
 * of "Multiple matching entries" detection. (On a real CPU, these would
 * cause exceptions.)
 *
 * Same return values as sh_translate_v2p().
 */
static int translate_via_mmu(struct cpu *cpu, uint32_t vaddr,
    uint64_t *return_paddr, int flags)
{
        int wf = flags & FLAG_WRITEFLAG;
        int i, urb, urc, require_asid_match, cur_asid, expevt = 0;
        uint32_t hi, lo = 0, mask = 0;
        int sh;         /* Shared */
        int d;          /* Dirty bit */
        int v;          /* Valid bit */
        int pr;         /* Protection */
        int i_start;

        cur_asid = cpu->cd.sh.pteh & SH4_PTEH_ASID_MASK;
        require_asid_match = !(cpu->cd.sh.mmucr & SH4_MMUCR_SV)
            || !(cpu->cd.sh.sr & SH_SR_MD);

        if (!(flags & FLAG_NOEXCEPTIONS)) {
                /*
                 * Increase URC every time the UTLB is accessed. (Note:
                 * According to the SH4 manual, the URC should not be
                 * increased when running the ldtlb instruction. Perhaps this
                 * is a good place? Perhaps it is better to just set it to a
                 * random value? TODO: Find out.)
                 */
                urb = (cpu->cd.sh.mmucr & SH4_MMUCR_URB_MASK) >>
                    SH4_MMUCR_URB_SHIFT;
                urc = (cpu->cd.sh.mmucr & SH4_MMUCR_URC_MASK) >>
                    SH4_MMUCR_URC_SHIFT;

                /* fatal("urc = %i ==> ", urc); */
                urc ++;
                if (urc == SH_N_UTLB_ENTRIES || (urb > 0 && urc == urb))
                        urc = 0;
                /* fatal("%i\n", urc); */

                cpu->cd.sh.mmucr &= ~SH4_MMUCR_URC_MASK;
                cpu->cd.sh.mmucr |= (urc << SH4_MMUCR_URC_SHIFT);
        }

        /*
         * When doing Instruction lookups, the ITLB should be scanned first.
         * This is done by using negative i. (Ugly hack, but works.)
         */
        if (flags & FLAG_INSTR)
                i_start = -SH_N_ITLB_ENTRIES;
        else
                i_start = 0;

        for (i=i_start; i<SH_N_UTLB_ENTRIES; i++) {
                if (i<0) {
                        hi = cpu->cd.sh.itlb_hi[i + SH_N_ITLB_ENTRIES];
                        lo = cpu->cd.sh.itlb_lo[i + SH_N_ITLB_ENTRIES];
                } else {
                        hi = cpu->cd.sh.utlb_hi[i];
                        lo = cpu->cd.sh.utlb_lo[i];
                }
                mask = 0xfff00000;

                v = lo & SH4_PTEL_V;

                switch (lo & SH4_PTEL_SZ_MASK) {
                case SH4_PTEL_SZ_1K:  mask = 0xfffffc00; break;
                case SH4_PTEL_SZ_4K:  mask = 0xfffff000; break;
                case SH4_PTEL_SZ_64K: mask = 0xffff0000; break;
                /* case SH4_PTEL_SZ_1M: mask = 0xfff00000; break; */
                }

                if (!v || (hi & mask) != (vaddr & mask))
                        continue;

                sh = lo & SH4_PTEL_SH;

                if (!sh && require_asid_match) {
                        int asid = hi & SH4_PTEH_ASID_MASK;
                        if (asid != cur_asid)
                                continue;
                }

                /* Note/TODO: Checking for multiple matches is not implemented. */

                break;
        }

        /* Virtual address not found? Then it's a TLB miss. */
        if (i == SH_N_UTLB_ENTRIES)
                goto tlb_miss;

        /* Matching address found! Let's see if it is readable/writable, etc: */
        d = lo & SH4_PTEL_D;
        pr = (lo & SH4_PTEL_PR_MASK) >> SH4_PTEL_PR_SHIFT;

        *return_paddr = (vaddr & ~mask) | (lo & mask & 0x1fffffff);
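        /*
         * Illustrative example (hypothetical values): for a 4 KB page,
         * mask = 0xfffff000. If vaddr = 0x10123456 and the PTEL word maps
         * the page to physical base 0x0c044000, then
         *   paddr = (0x10123456 & 0x00000fff) | (0x0c044000 & 0x1fffffff)
         *         = 0x0c044456.
         */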

        if (flags & FLAG_INSTR) {
                /*
                 * Instruction access:
                 *
                 * If a matching entry wasn't found in the ITLB, but in the
                 * UTLB, then copy it to a random place in the ITLB.
                 */
                if (i >= 0) {
                        int r = random() % SH_N_ITLB_ENTRIES;

                        /* NOTE: Make sure that the old mapping for
                           that itlb entry is invalidated: */
                        cpu->invalidate_translation_caches(cpu,
                            cpu->cd.sh.itlb_hi[r] & ~0xfff, INVALIDATE_VADDR);

                        cpu->invalidate_code_translation(cpu,
                            cpu->cd.sh.utlb_lo[i] & ~0xfff, INVALIDATE_PADDR);

                        cpu->cd.sh.itlb_hi[r] = cpu->cd.sh.utlb_hi[i];
                        cpu->cd.sh.itlb_lo[r] = cpu->cd.sh.utlb_lo[i];
                }

                /* Permission checks: */
                if (cpu->cd.sh.sr & SH_SR_MD)
                        return 1;
                if (!(pr & 2))
                        goto protection_violation;

                return 1;
        }

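        /*
         * PTEL protection (PR) field values, as implemented by the checks
         * below:
         *
         *   pr = 0:  kernel read-only, no user access
         *   pr = 1:  kernel read/write, no user access
         *   pr = 2:  read-only for both kernel and user
         *   pr = 3:  read/write for both kernel and user
         */
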
        /* Data access: */
        if (cpu->cd.sh.sr & SH_SR_MD) {
                /* Kernel access: */
                switch (pr) {
                case 0:
                case 2: if (wf)
                                goto protection_violation;
                        return 1;
                case 1:
                case 3: if (wf && !d)
                                goto initial_write_exception;
                        return 1;
                }
        }

        /* User access */
        switch (pr) {
        case 0:
        case 1: goto protection_violation;
        case 2: if (wf)
                        goto protection_violation;
                return 1;
        case 3: if (wf && !d)
                        goto initial_write_exception;
                return 1;
        }


tlb_miss:
        expevt = wf? EXPEVT_TLB_MISS_ST : EXPEVT_TLB_MISS_LD;
        goto exception;

protection_violation:
        expevt = wf? EXPEVT_TLB_PROT_ST : EXPEVT_TLB_PROT_LD;
        goto exception;

initial_write_exception:
        expevt = EXPEVT_TLB_MOD;
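        /* FALLTHROUGH: continue into the exception code below. */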


exception:
        if (flags & FLAG_NOEXCEPTIONS) {
                *return_paddr = 0;
                return 2;
        }

        sh_exception(cpu, expevt, 0, vaddr);

        return 0;
}


/*
 * sh_translate_v2p():
 *
 * Return values:
 *
 *   0  No access to the virtual address.
 *   1  return_paddr contains the physical address, the page is
 *      available as read-only.
 *   2  Same as 1, but the page is available as read/write.
 */
int sh_translate_v2p(struct cpu *cpu, uint64_t vaddr, uint64_t *return_paddr,
    int flags)
{
        int user = cpu->cd.sh.sr & SH_SR_MD? 0 : 1;

        vaddr = (uint32_t)vaddr;

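        /*
         * SH-4 address regions handled below (summary):
         *
         *   0x00000000..0x7fffffff  U0/P0: user space, translated by the MMU
         *   0x80000000..0xbfffffff  P1/P2: direct-mapped physical memory
         *   0xc0000000..0xdfffffff  P3:    kernel virtual memory, translated
         *   0xe0000000..0xe3ffffff  store queue region
         *   0xf0000000..0xffffffff  P4:    special registers
         */
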
        /* U0/P0: Userspace addresses, or P3: Kernel virtual memory. */
        if (!(vaddr & 0x80000000) ||
            (vaddr >= 0xc0000000 && vaddr < 0xe0000000)) {
                /* Address translation turned off? */
                if (!(cpu->cd.sh.mmucr & SH4_MMUCR_AT)) {
                        /* Then return raw physical address: */
                        *return_paddr = vaddr & 0x1fffffff;
                        return 2;
                }

                /* Perform translation via the MMU: */
                return translate_via_mmu(cpu, vaddr, return_paddr, flags);
        }

        /* Store queue region: */
        if (vaddr >= 0xe0000000 && vaddr < 0xe4000000) {
                /* Note/TODO: Take SH4_MMUCR_SQMD into account. */
                *return_paddr = vaddr;
                return 2;
        }

        if (user) {
                if (flags & FLAG_NOEXCEPTIONS) {
                        *return_paddr = 0;
                        return 2;
                }

                fatal("Userspace tried to access non-user space memory."
                    " TODO: cause exception! (vaddr=0x%08"PRIx32")\n",
                    (uint32_t) vaddr);
                exit(1);
        }

        /* P1,P2: Direct-mapped physical memory. */
        if (vaddr >= 0x80000000 && vaddr < 0xc0000000) {
                *return_paddr = vaddr & 0x1fffffff;
                return 2;
        }

        if (flags & FLAG_INSTR) {
                fatal("TODO: instr at 0x%08"PRIx32"\n", (uint32_t)vaddr);
                exit(1);
        }

        /* P4: Special registers mapped at 0xf0000000 .. 0xffffffff: */
        if ((vaddr & 0xf0000000) == 0xf0000000) {
                *return_paddr = vaddr;
                return 2;
        }

        if (flags & FLAG_NOEXCEPTIONS) {
                *return_paddr = 0;
                return 2;
        }

        /* TODO */
        fatal("Unimplemented SH vaddr 0x%08"PRIx32"\n", (uint32_t)vaddr);
        exit(1);
}

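For reference, here is a minimal sketch of how a caller might use the return
values documented for sh_translate_v2p() above. Only sh_translate_v2p(), its
0/1/2 return convention, and the FLAG_* values used elsewhere in this file come
from the source; the helper function, its name, and its behaviour are
hypothetical, and the sketch assumes the same headers as the file above.

/*
 * Hypothetical caller (sketch, not part of gxemul): translate a virtual
 * address for a data read and report whether it is accessible.
 */
static int sketch_probe_read(struct cpu *cpu, uint32_t vaddr)
{
        uint64_t paddr;
        int res = sh_translate_v2p(cpu, vaddr, &paddr, 0);

        if (res == 0) {
                /* No access; an SH exception has already been generated. */
                return 0;
        }

        /* res == 1: the page is read-only, res == 2: read/write.
           In both cases paddr now holds the physical address. */
        /* ... read from physical memory at paddr here ... */
        return res;
}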
