/[gxemul]/trunk/src/cpus/cpu_arm_instr_loadstore.c

Contents of /trunk/src/cpus/cpu_arm_instr_loadstore.c



Revision 16
Mon Oct 8 16:19:01 2007 UTC by dpavlin
File MIME type: text/plain
File size: 12346 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.988 2005/10/11 03:53:57 debug Exp $

==============  RELEASE 0.3.6  ==============

20051008	The bug turned out not to be caused by faulty ARM documentation
		after all, but by those parts of the code.
		Fixing the RTC (dev_mc146818) to work with CATS.
20051009	Rewriting the R() function; there are now 8192 automatically
		generated smaller functions doing the same thing, but hopefully
		faster (see the sketch of this technique after these entries).
		This also fixes some bugs which were triggered when trying to
		compile GXemul inside itself. :-)
		Adding a dummy dev_lpt.
20051010	Small hack to not update virtual translation tables if memory
		accesses are done with the NO_EXCEPTION flag; a time reduction
		of almost a factor of 2 for a full NetBSD/cats install. :-)
20051011	Passing -A as the default boot arg for CATS (works fine with
		OpenBSD/cats).
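
The 20051009 entry refers to a common emulator technique: instead of one
generic handler that tests flags at run time, a template is expanded many
times at compile time, once per combination of options, so each generated
function contains only the code for one exact load/store variant. The file
listed below is itself written as such a template (the A__B, A__H, A__L,
A__SIGNED, A__P, A__U and A__W defines select the variant). Below is a
minimal, self-contained sketch of the same idea, using macro expansion
instead of repeated inclusion of a separate file; the names DEFINE_LOAD,
load_byte, load_half and load_word are purely hypothetical:

#include <stdint.h>
#include <stdio.h>

/*  Shared "template" body; each expansion is specialized at compile time
    for one access size.  */
#define DEFINE_LOAD(NAME, TYPE)						\
	static uint32_t NAME(const uint8_t *mem, uint32_t addr)	\
	{								\
		TYPE v = 0;						\
		unsigned i;						\
		for (i = 0; i < sizeof(TYPE); i++)			\
			v |= (TYPE)((uint32_t)mem[addr + i] << (8 * i)); \
		return (uint32_t)v;					\
	}

DEFINE_LOAD(load_byte, uint8_t)		/*  one small function per variant  */
DEFINE_LOAD(load_half, uint16_t)
DEFINE_LOAD(load_word, uint32_t)

int main(void)
{
	uint8_t mem[4] = { 0x78, 0x56, 0x34, 0x12 };
	printf("%02x %04x %08x\n",
	    load_byte(mem, 0), load_half(mem, 0), load_word(mem, 0));
	return 0;
}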

==============  RELEASE 0.3.6.1  ==============


/*
 *  Copyright (C) 2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: cpu_arm_instr_loadstore.c,v 1.9 2005/10/09 21:32:07 debug Exp $
 *
 *
 *  TODO:  Many things...
 *
 *    o)  Big-endian ARM loads/stores.
 *
 *    o)  Alignment checks!
 *
 *    o)  Native load/store if the endianness is the same as the host's.
 *
 *    o)  "Base Updated Abort Model", which updates the base register
 *        even if the memory access failed.
 *
 *    o)  Some ARM implementations use pc+8, some use pc+12 for stores?
 *
 *    o)  All load/store variants with the PC register are not really
 *        valid. (E.g. a byte load into the PC register. What should that
 *        accomplish?)
 *
 *    o)  Perhaps an optimization for the case when offset = 0, because
 *        that's quite common, and also when the Reg expression is just
 *        a simple, non-rotated register (0..14).
 */


/*
 *  General load/store, by using memory_rw(). If at all possible, memory_rw()
 *  then inserts the page into the translation array, so that the fast
 *  load/store routine below can be used for further accesses.
 */
void A__NAME__general(struct cpu *cpu, struct arm_instr_call *ic)
{
#if !defined(A__P) && defined(A__W)
        const int memory_rw_flags = CACHE_DATA | MEMORY_USER_ACCESS;
#else
        const int memory_rw_flags = CACHE_DATA;
#endif
#ifdef A__REG
        uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
            = (void *)(size_t)ic->arg[1];
#endif
#ifdef A__B
        unsigned char data[1];
#else
#ifdef A__H
        unsigned char data[2];
#else
        unsigned char data[4];
#endif
#endif
        uint32_t addr, low_pc, offset =
#ifndef A__U
            -
#endif
#ifdef A__REG
            reg_func(cpu, ic);
#else
            ic->arg[1];
#endif

        low_pc = ((size_t)ic - (size_t)cpu->cd.arm.
            cur_ic_page) / sizeof(struct arm_instr_call);
        cpu->cd.arm.r[ARM_PC] &= ~((ARM_IC_ENTRIES_PER_PAGE-1)
            << ARM_INSTR_ALIGNMENT_SHIFT);
        cpu->cd.arm.r[ARM_PC] += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
        cpu->pc = cpu->cd.arm.r[ARM_PC];

        addr = reg(ic->arg[0])
#ifdef A__P
            + offset
#endif
            ;

#ifdef A__L
        /*  Load:  */
        if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
            MEM_READ, memory_rw_flags)) {
                /*  load failed, an exception was generated  */
                return;
        }
#ifdef A__B
        reg(ic->arg[2]) =
#ifdef A__SIGNED
            (int32_t)(int8_t)
#endif
            data[0];
#else
#ifdef A__H
        reg(ic->arg[2]) =
#ifdef A__SIGNED
            (int32_t)(int16_t)
#endif
            (data[0] + (data[1] << 8));
#else
        reg(ic->arg[2]) = data[0] + (data[1] << 8) +
            (data[2] << 16) + (data[3] << 24);
#endif
#endif
#else
        /*  Store:  */
#ifdef A__B
        data[0] = reg(ic->arg[2]);
#else
#ifdef A__H
        data[0] = reg(ic->arg[2]);
        data[1] = reg(ic->arg[2]) >> 8;
#else
        data[0] = reg(ic->arg[2]);
        data[1] = reg(ic->arg[2]) >> 8;
        data[2] = reg(ic->arg[2]) >> 16;
        data[3] = reg(ic->arg[2]) >> 24;
#endif
#endif
        if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
            MEM_WRITE, memory_rw_flags)) {
                /*  store failed, an exception was generated  */
                return;
        }
#endif

#ifdef A__P
#ifdef A__W
        reg(ic->arg[0]) = addr;
#endif
#else   /*  post-index writeback  */
        reg(ic->arg[0]) = addr + offset;
#endif
}
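

/*
 *  The A__P, A__U and A__W defines above correspond to the ARM addressing
 *  forms: A__U selects whether the offset is added or subtracted, A__P
 *  selects pre-indexing (offset applied before the access), and A__W asks
 *  for base-register write-back (!A__P combined with A__W is the user-mode
 *  "T" variant, which is why it selects MEMORY_USER_ACCESS above). Without
 *  A__P, the base register is always updated after the access
 *  (post-indexing). A minimal stand-alone sketch of these write-back rules,
 *  in plain C with hypothetical names, kept inside #if 0 so that it is
 *  never compiled:
 */
#if 0
#include <stdint.h>

static uint32_t effective_address(uint32_t *base, uint32_t offset,
	int pre_index, int writeback)
{
	/*  Pre-indexed/offset forms apply the offset before the access:  */
	uint32_t addr = pre_index ? *base + offset : *base;

	/*  ...the actual load or store would use addr here...  */

	if (!pre_index)
		*base += offset;	/*  post-index: always write back  */
	else if (writeback)
		*base = addr;		/*  pre-index with "!": write back  */

	return addr;
}
#endif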


/*
 *  Fast load/store, if the page is in the translation array.
 */
void A__NAME(struct cpu *cpu, struct arm_instr_call *ic)
{
#if !defined(A__P) && defined(A__W)
        /*  T-bit: userland access. Use the general routine for that.  */
        A__NAME__general(cpu, ic);
#else
#ifdef A__REG
        uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
            = (void *)(size_t)ic->arg[1];
#endif
        uint32_t offset =
#ifndef A__U
            -
#endif
#ifdef A__REG
            reg_func(cpu, ic);
#else
            ic->arg[1];
#endif
        uint32_t addr = reg(ic->arg[0])
#ifdef A__P
            + offset
#endif
            ;
        unsigned char *page = cpu->cd.arm.
#ifdef A__L
            host_load
#else
            host_store
#endif
            [addr >> 12];

        if (page == NULL) {
                A__NAME__general(cpu, ic);
        } else {
#ifdef A__L
#ifdef A__B
                reg(ic->arg[2]) =
#ifdef A__SIGNED
                    (int32_t)(int8_t)
#endif
                    page[addr & 0xfff];
#else
#ifdef A__H
                reg(ic->arg[2]) =
#ifdef A__SIGNED
                    (int32_t)(int16_t)
#endif
                    (page[addr & 0xfff] + (page[(addr & 0xfff) + 1] << 8));
#else
                reg(ic->arg[2]) = page[addr & 0xfff] +
                    (page[(addr & 0xfff) + 1] << 8) +
                    (page[(addr & 0xfff) + 2] << 16) +
                    (page[(addr & 0xfff) + 3] << 24);
#endif
#endif
#else
#ifdef A__B
                page[addr & 0xfff] = reg(ic->arg[2]);
#else
#ifdef A__H
                page[addr & 0xfff] = reg(ic->arg[2]);
                page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
#else
                page[addr & 0xfff] = reg(ic->arg[2]);
                page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
                page[(addr & 0xfff)+2] = reg(ic->arg[2]) >> 16;
                page[(addr & 0xfff)+3] = reg(ic->arg[2]) >> 24;
#endif
#endif
#endif

                /*  Index Write-back:  */
#ifdef A__P
#ifdef A__W
                reg(ic->arg[0]) = addr;
#endif
#else
                /*  post-index writeback  */
                reg(ic->arg[0]) = addr + offset;
#endif
        }
#endif  /*  not T-bit  */
}
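

/*
 *  The host_load and host_store arrays above act as a software translation
 *  array: one host pointer per emulated 4 KB page, indexed by addr >> 12,
 *  where a NULL entry means "no direct mapping; fall back to the general
 *  memory_rw() routine". A minimal sketch of that kind of lookup, with
 *  hypothetical names and sizes, kept inside #if 0 so that it is never
 *  compiled:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define N_PAGES (1 << 20)		/*  one pointer per 4 KB page  */
static uint8_t *host_page[N_PAGES];	/*  NULL = not directly mapped  */

uint8_t slow_load_byte(uint32_t addr);	/*  hypothetical slow path  */

static uint8_t fast_load_byte(uint32_t addr)
{
	uint8_t *page = host_page[addr >> 12];
	if (page == NULL)
		return slow_load_byte(addr);	/*  e.g. via memory_rw()  */
	return page[addr & 0xfff];		/*  direct host access  */
}
#endif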


/*
 *  Special case when loading or storing the ARM's PC register, or when the PC
 *  register is used as the base address register.
 *
 *    o)  Loads into the PC register cause a branch. If an exception occurred
 *        during the load, then the PC register should already point to the
 *        exception handler, in which case we simply recalculate the pointers
 *        a second time (no harm is done by doing that).
 *
 *        TODO: A tiny performance optimization would be to separate the two
 *        cases: a load where arg[0] = PC, and the case where arg[2] = PC.
 *
 *    o)  Stores store "PC of the current instruction + 12". The solution I
 *        have chosen is to calculate this value and place it into a
 *        temporary variable (tmp_pc), which is then used for the store.
 */
void A__NAME_PC(struct cpu *cpu, struct arm_instr_call *ic)
{
#ifdef A__L
        /*  Load:  */
        if (ic->arg[0] == (size_t)(&cpu->cd.arm.tmp_pc)) {
                /*  tmp_pc = current PC + 8:  */
                uint32_t low_pc, tmp;
                low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
                    sizeof(struct arm_instr_call);
                tmp = cpu->cd.arm.r[ARM_PC] & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
                    ARM_INSTR_ALIGNMENT_SHIFT);
                tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
                cpu->cd.arm.tmp_pc = tmp + 8;
        }
        A__NAME(cpu, ic);
        if (ic->arg[2] == (size_t)(&cpu->cd.arm.r[ARM_PC])) {
                cpu->pc = cpu->cd.arm.r[ARM_PC];
                arm_pc_to_pointers(cpu);
        }
#else
        /*  Store:  */
        uint32_t low_pc, tmp;
        /*  Calculate tmp from this instruction's PC + 12  */
        low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
            sizeof(struct arm_instr_call);
        tmp = cpu->cd.arm.r[ARM_PC] & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
            ARM_INSTR_ALIGNMENT_SHIFT);
        tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
        cpu->cd.arm.tmp_pc = tmp + 12;
        A__NAME(cpu, ic);
#endif
}
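

/*
 *  The +8 and +12 above reflect how the PC is observed on ARM: used as an
 *  operand or base register, the PC yields the address of the current
 *  instruction plus 8, while a store of the PC writes the current
 *  instruction's address plus 12 on the implementations this emulator
 *  assumes (see the TODO note in the file header). A tiny sketch of the two
 *  observable values, with hypothetical names, kept inside #if 0 so that it
 *  is never compiled:
 */
#if 0
#include <stdint.h>

/*  Value seen when the PC is used as a base/operand register:  */
static uint32_t pc_as_operand(uint32_t instr_addr)
{
	return instr_addr + 8;
}

/*  Value written to memory by a store of the PC, on a "+12" implementation:  */
static uint32_t pc_as_store_data(uint32_t instr_addr)
{
	return instr_addr + 12;
}
#endif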


#ifndef A__NOCONDITIONS
/*  Load/stores with all registers except the PC register:  */
void A__NAME__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z) A__NAME(cpu, ic); }
void A__NAME__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C) A__NAME(cpu, ic); }
void A__NAME__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME(cpu, ic); }
void A__NAME__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_N) A__NAME(cpu, ic); }
void A__NAME__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_N)) A__NAME(cpu, ic); }
void A__NAME__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_V) A__NAME(cpu, ic); }
void A__NAME__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_V)) A__NAME(cpu, ic); }

void A__NAME__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z ||
    !(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME(cpu, ic); }
void A__NAME__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) ||
    (cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }


/*  Load/stores with the PC register:  */
void A__NAME_PC__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_N) A__NAME_PC(cpu, ic); }
void A__NAME_PC__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_N)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_V) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_V)) A__NAME_PC(cpu, ic); }

void A__NAME_PC__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z ||
    !(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) ||
    (cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
#endif
