/[gxemul]/trunk/src/cpus/cpu_arm_instr_loadstore.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Annotation of /trunk/src/cpus/cpu_arm_instr_loadstore.c



Revision 18
Mon Oct 8 16:19:11 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 13756 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1004 2005/10/27 14:01:10 debug Exp $
20051011        Passing -A as the default boot arg for CATS (works fine with
                OpenBSD/cats).
20051012	Fixing the VGA cursor offset bug, and speeding up framebuffer
		redraws if character cells contain the same thing as during
		the last redraw.
20051013	Adding a slow strd ARM instruction hack.
20051017	Minor updates: Adding a dummy i80321 Verde controller (for
		XScale emulation), fixing the disassembly of the ARM "ldrd"
		instruction, adding "support" for less-than-4KB pages for ARM
		(by not adding them to translation tables).
20051020	Continuing on some HPCarm stuff. A NetBSD/hpcarm kernel prints
		some boot messages on an emulated Jornada 720.
		Making dev_ram work better with dyntrans (speeds up some things
		quite a bit).
20051021	Automatically generating some of the most common ARM load/store
		multiple instructions.
20051022	Better statistics gathering for the ARM load/store multiple.
		Various other dyntrans and device updates.
20051023	Various minor updates.
20051024	Continuing; minor device and dyntrans fine-tuning. Adding the
		first "reasonable" instruction combination hacks for ARM (the
		cores of NetBSD/cats' memset and memcpy).
20051025	Fixing a dyntrans-related bug in dev_vga. Also changing the
		dyntrans low/high access notification to only be updated on
		writes, not reads. Hopefully it will be enough. (dev_vga in
		charcell mode now seems to work correctly with both reads and
		writes.)
		Experimenting with gathering dyntrans statistics (which parts
		of emulated RAM are actually executed), and adding instruction
		combination hacks for cache cleaning and a part of NetBSD's
		scanc() function.
20051026	Adding a bitmap for ARM emulation which indicates if a page is
		(specifically) user accessible; loads and stores with the t-
		flag set can now use the translation arrays, which results in
		a measurable speedup.
20051027	Dyntrans updates; adding an extra bitmap array for 32-bit
		emulation modes, speeding up the check whether a physical page
		has any code translations or not (O(n) -> O(1)). Doing a
		similar reduction of O(n) to O(1) by avoiding the scan through
		the translation entries on a translation update (32-bit mode
		only).
		Various other minor hacks.
20051029	Quick release, without any testing at all.

==============  RELEASE 0.3.6.2  ==============


/*
 *  Copyright (C) 2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: cpu_arm_instr_loadstore.c,v 1.15 2005/10/27 14:01:13 debug Exp $
 *
 *
 *  TODO:  Many things...
 *
 *  o)  Big-endian ARM loads/stores.
 *
 *  o)  Alignment checks!
 *
 *  o)  Native load/store if the endianness is the same as the host's
 *      (only implemented for little endian, so far, and it assumes that
 *      alignment is correct!)
 *
 *  o)  "Base Updated Abort Model", which updates the base register
 *      even if the memory access failed.
 *
 *  o)  Some ARM implementations use pc+8, some use pc+12 for stores?
 *
 *  o)  Not all load/store variants with the PC register are really
 *      valid. (E.g. what should a byte load into the PC register
 *      accomplish?)
 *
 *  o)  Perhaps an optimization for the case when offset = 0, because
 *      that's quite common, and also when the Reg expression is just
 *      a simple, non-rotated register (0..14).
 */


#if defined(A__SIGNED) && defined(A__H) && !defined(A__L)
#define A__STRD
#endif


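/*
 *  Note (editorial illustration, not part of the original file):  this file
 *  is a template.  It is compiled many times, each time with a different
 *  combination of the A__* macros defined, producing one specialized handler
 *  per load/store variant:  A__L selects load (otherwise store), A__B byte
 *  and A__H halfword accesses (word otherwise), A__SIGNED sign-extension,
 *  A__U "up" (add the offset, otherwise subtract it), A__P pre-indexed
 *  addressing, A__W writeback, and A__REG a register-form offset instead of
 *  an immediate.  The function names below are invented for illustration;
 *  the real name macros are defined by gxemul's build-time generated
 *  wrapper (an assumption here), and only a subset of them is shown.
 */
#if 0
#define A__NAME			arm_instr_load_byte_example	/*  assumed  */
#define A__NAME_PC		arm_instr_load_byte_example_pc	/*  assumed  */
#define A__NAME__general	arm_instr_load_byte_example__general
#define A__L		/*  load, not store  */
#define A__B		/*  byte-sized access  */
#define A__U		/*  add the offset to the base register  */
#define A__P		/*  pre-indexed addressing  */
#include "cpu_arm_instr_loadstore.c"
#undef A__P
#undef A__U
#undef A__B
#undef A__L
#endif

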
/*
 *  General load/store, by using memory_rw(). If at all possible, memory_rw()
 *  then inserts the page into the translation array, so that the fast
 *  load/store routine below can be used for further accesses.
 */
void A__NAME__general(struct cpu *cpu, struct arm_instr_call *ic)
{
#if !defined(A__P) && defined(A__W)
	const int memory_rw_flags = CACHE_DATA | MEMORY_USER_ACCESS;
#else
	const int memory_rw_flags = CACHE_DATA;
#endif

#ifdef A__REG
	uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
	    = (void *)(size_t)ic->arg[1];
#endif

#ifdef A__STRD
	unsigned char data[8];
	const int datalen = 8;
#else
#ifdef A__B
	unsigned char data[1];
	const int datalen = 1;
#else
#ifdef A__H
	unsigned char data[2];
	const int datalen = 2;
#else
	const int datalen = 4;
#ifdef HOST_LITTLE_ENDIAN
	unsigned char *data = (unsigned char *) ic->arg[2];
#else
	unsigned char data[4];
#endif
#endif
#endif
#endif

	uint32_t addr, low_pc, offset =
#ifndef A__U
	    -
#endif
#ifdef A__REG
	    reg_func(cpu, ic);
#else
	    ic->arg[1];
#endif

	low_pc = ((size_t)ic - (size_t)cpu->cd.arm.
	    cur_ic_page) / sizeof(struct arm_instr_call);
	cpu->cd.arm.r[ARM_PC] &= ~((ARM_IC_ENTRIES_PER_PAGE-1)
	    << ARM_INSTR_ALIGNMENT_SHIFT);
	cpu->cd.arm.r[ARM_PC] += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
	cpu->pc = cpu->cd.arm.r[ARM_PC];

	addr = reg(ic->arg[0])
#ifdef A__P
	    + offset
#endif
	    ;

#ifdef A__L
	/*  Load:  */
	if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen,
	    MEM_READ, memory_rw_flags)) {
		/*  load failed, an exception was generated  */
		return;
	}
#ifdef A__B
	reg(ic->arg[2]) =
#ifdef A__SIGNED
	    (int32_t)(int8_t)
#endif
	    data[0];
#else
#ifdef A__H
	reg(ic->arg[2]) =
#ifdef A__SIGNED
	    (int32_t)(int16_t)
#endif
	    (data[0] + (data[1] << 8));
#else
#ifdef HOST_LITTLE_ENDIAN
	/*  Nothing.  */
#else
	reg(ic->arg[2]) = data[0] + (data[1] << 8) +
	    (data[2] << 16) + (data[3] << 24);
#endif
#endif
#endif
#else
	/*  Store:  */
#if !defined(A__B) && !defined(A__H) && defined(HOST_LITTLE_ENDIAN)
#ifdef A__STRD
	*(uint32_t *)data = reg(ic->arg[2]);
	*(uint32_t *)(data + 4) = reg(ic->arg[2] + 4);
#endif
#else
	data[0] = reg(ic->arg[2]);
#ifndef A__B
	data[1] = reg(ic->arg[2]) >> 8;
#if !defined(A__H) || defined(A__STRD)
	data[1] = reg(ic->arg[2]) >> 8;
	data[2] = reg(ic->arg[2]) >> 16;
	data[3] = reg(ic->arg[2]) >> 24;
#ifdef A__STRD
	data[4] = reg(ic->arg[2] + 4);
	data[5] = reg(ic->arg[2] + 4) >> 8;
	data[6] = reg(ic->arg[2] + 4) >> 16;
	data[7] = reg(ic->arg[2] + 4) >> 24;
#endif
#endif
#endif
#endif
	if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen,
	    MEM_WRITE, memory_rw_flags)) {
		/*  store failed, an exception was generated  */
		return;
	}
#endif

#ifdef A__P
#ifdef A__W
	reg(ic->arg[0]) = addr;
#endif
#else	/*  post-index writeback  */
	reg(ic->arg[0]) = addr + offset;
#endif
}
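

/*
 *  Note (editorial sketch, inferred from the code above and not part of the
 *  original file):  the arm_instr_call argument layout used by these
 *  handlers appears to be
 *
 *	arg[0] = pointer to the base register,
 *	arg[1] = immediate offset, or (with A__REG) a pointer to a function
 *	         that computes the register-form offset,
 *	arg[2] = pointer to the destination/source register (or, for the PC
 *	         special case below, to cpu->cd.arm.tmp_pc).
 *
 *  A hypothetical translator setup for "ldr r3,[r1,#8]" might therefore
 *  look like this (the handler name and the 'f' field name are assumed):
 */
#if 0
	ic->f      = arm_instr_load_word_example;	/*  assumed name  */
	ic->arg[0] = (size_t) &cpu->cd.arm.r[1];	/*  base register r1  */
	ic->arg[1] = 8;					/*  immediate offset  */
	ic->arg[2] = (size_t) &cpu->cd.arm.r[3];	/*  destination r3  */
#endif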


/*
 *  Fast load/store, if the page is in the translation array.
 */
void A__NAME(struct cpu *cpu, struct arm_instr_call *ic)
{
#ifdef A__STRD
	/*  Chicken out, let's do this unoptimized for now:  */
	A__NAME__general(cpu, ic);
#else
#ifdef A__REG
	uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
	    = (void *)(size_t)ic->arg[1];
#endif
	uint32_t offset =
#ifndef A__U
	    -
#endif
#ifdef A__REG
	    reg_func(cpu, ic);
#else
	    ic->arg[1];
#endif
	uint32_t addr = reg(ic->arg[0])
#ifdef A__P
	    + offset
#endif
	    ;
	unsigned char *page = cpu->cd.arm.
#ifdef A__L
	    host_load
#else
	    host_store
#endif
	    [addr >> 12];


#if !defined(A__P) && defined(A__W)
	/*
	 *  T-bit: userland access: check the corresponding bit in the
	 *  is_userpage array. If it is set, then we're ok. Otherwise: use the
	 *  generic function.
	 */
	unsigned char x = cpu->cd.arm.is_userpage[addr >> 15];
	if (!(x & (1 << ((addr >> 12) & 7))))
		A__NAME__general(cpu, ic);
	else
#endif


	if (page == NULL) {
		A__NAME__general(cpu, ic);
	} else {
#ifdef A__L
#ifdef A__B
		reg(ic->arg[2]) =
#ifdef A__SIGNED
		    (int32_t)(int8_t)
#endif
		    page[addr & 0xfff];
#else
#ifdef A__H
		reg(ic->arg[2]) =
#ifdef A__SIGNED
		    (int32_t)(int16_t)
#endif
		    (page[addr & 0xfff] + (page[(addr & 0xfff) + 1] << 8));
#else
#ifdef HOST_LITTLE_ENDIAN
		reg(ic->arg[2]) = *(uint32_t *)(page + (addr & 0xffc));
#else
		reg(ic->arg[2]) = page[addr & 0xfff] +
		    (page[(addr & 0xfff) + 1] << 8) +
		    (page[(addr & 0xfff) + 2] << 16) +
		    (page[(addr & 0xfff) + 3] << 24);
#endif
#endif
#endif
#else
#ifdef A__B
		page[addr & 0xfff] = reg(ic->arg[2]);
#else
#ifdef A__H
		page[addr & 0xfff] = reg(ic->arg[2]);
		page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
#else
#ifdef HOST_LITTLE_ENDIAN
		*(uint32_t *)(page + (addr & 0xffc)) = reg(ic->arg[2]);
#else
		page[addr & 0xfff] = reg(ic->arg[2]);
		page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
		page[(addr & 0xfff)+2] = reg(ic->arg[2]) >> 16;
		page[(addr & 0xfff)+3] = reg(ic->arg[2]) >> 24;
#endif
#endif
#endif
#endif

		/*  Index Write-back:  */
#ifdef A__P
#ifdef A__W
		reg(ic->arg[0]) = addr;
#endif
#else
		/*  post-index writeback  */
		reg(ic->arg[0]) = addr + offset;
#endif
	}
#endif	/*  not STRD  */
}
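

/*
 *  Note (editorial sketch, not part of the original file):  the is_userpage
 *  test above packs one bit per 4 KB page, eight pages per byte, so a full
 *  32-bit address space needs 2^32 / 4096 / 8 = 128 KB of bitmap.  Setting
 *  and testing the bit for a page could look like this (helper names are
 *  invented for illustration):
 */
#if 0
static void example_set_user_page(struct cpu *cpu, uint32_t addr)
{
	cpu->cd.arm.is_userpage[addr >> 15] |= 1 << ((addr >> 12) & 7);
}

static int example_is_user_page(struct cpu *cpu, uint32_t addr)
{
	return cpu->cd.arm.is_userpage[addr >> 15] & (1 << ((addr >> 12) & 7));
}
#endif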


/*
 *  Special case when loading or storing the ARM's PC register, or when the PC
 *  register is used as the base address register.
 *
 *  o)  Loads into the PC register cause a branch. If an exception occurred
 *      during the load, then the PC register should already point to the
 *      exception handler, in which case we simply recalculate the pointers a
 *      second time (no harm is done by doing that).
 *
 *      TODO: A tiny performance optimization would be to separate the two
 *      cases: a load where arg[0] = PC, and the case where arg[2] = PC.
 *
 *  o)  Stores store "PC of the current instruction + 12". The solution I have
 *      chosen is to calculate this value and place it into a temporary
 *      variable (tmp_pc), which is then used for the store.
 */
void A__NAME_PC(struct cpu *cpu, struct arm_instr_call *ic)
{
#ifdef A__L
	/*  Load:  */
	if (ic->arg[0] == (size_t)(&cpu->cd.arm.tmp_pc)) {
		/*  tmp_pc = current PC + 8:  */
		uint32_t low_pc, tmp;
		low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
		    sizeof(struct arm_instr_call);
		tmp = cpu->cd.arm.r[ARM_PC] & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
		    ARM_INSTR_ALIGNMENT_SHIFT);
		tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
		cpu->cd.arm.tmp_pc = tmp + 8;
	}
	A__NAME(cpu, ic);
	if (ic->arg[2] == (size_t)(&cpu->cd.arm.r[ARM_PC])) {
		cpu->pc = cpu->cd.arm.r[ARM_PC];
		if (cpu->machine->show_trace_tree)
			cpu_functioncall_trace(cpu, cpu->pc);
		quick_pc_to_pointers(cpu);
	}
#else
	/*  Store:  */
	uint32_t low_pc, tmp;
	/*  Calculate tmp from this instruction's PC + 12  */
	low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
	    sizeof(struct arm_instr_call);
	tmp = cpu->cd.arm.r[ARM_PC] & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
	    ARM_INSTR_ALIGNMENT_SHIFT);
	tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
	cpu->cd.arm.tmp_pc = tmp + 12;
	A__NAME(cpu, ic);
#endif
}
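

/*
 *  Note (editorial sketch, not part of the original file):  both branches
 *  above recover the emulated PC of the current instruction from its slot
 *  index within the current instruction-call page, the same calculation as
 *  in A__NAME__general.  Factored out, it would look like this (helper name
 *  invented); loads of the PC as a base then use this value + 8, stores + 12:
 */
#if 0
static uint32_t example_pc_of_ic(struct cpu *cpu, struct arm_instr_call *ic)
{
	uint32_t low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
	    sizeof(struct arm_instr_call);
	return (cpu->cd.arm.r[ARM_PC] & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
	    ARM_INSTR_ALIGNMENT_SHIFT)) + (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
}
#endif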


#ifndef A__NOCONDITIONS
/*  Load/stores with all registers except the PC register:  */
void A__NAME__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z) A__NAME(cpu, ic); }
void A__NAME__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C) A__NAME(cpu, ic); }
void A__NAME__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME(cpu, ic); }
void A__NAME__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_N) A__NAME(cpu, ic); }
void A__NAME__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_N)) A__NAME(cpu, ic); }
void A__NAME__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_V) A__NAME(cpu, ic); }
void A__NAME__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_V)) A__NAME(cpu, ic); }

void A__NAME__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z ||
    !(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME(cpu, ic); }
void A__NAME__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }
void A__NAME__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) ||
    (cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME(cpu, ic); }


/*  Load/stores with the PC register:  */
void A__NAME_PC__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_N) A__NAME_PC(cpu, ic); }
void A__NAME_PC__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_N)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_V) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.cpsr & ARM_FLAG_V)) A__NAME_PC(cpu, ic); }

void A__NAME_PC__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_C &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.cpsr & ARM_FLAG_Z ||
    !(cpu->cd.arm.cpsr & ARM_FLAG_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) ==
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) &&
    !(cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.cpsr & ARM_FLAG_N)?1:0) !=
    ((cpu->cd.arm.cpsr & ARM_FLAG_V)?1:0) ||
    (cpu->cd.arm.cpsr & ARM_FLAG_Z)) A__NAME_PC(cpu, ic); }
#endif
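

/*
 *  Note (editorial sketch, not part of the original file):  the conditional
 *  wrappers above correspond to the ARM condition field in bits 31..28 of
 *  the instruction word (EQ=0x0, NE=0x1, CS=0x2, CC=0x3, MI=0x4, PL=0x5,
 *  VS=0x6, VC=0x7, HI=0x8, LS=0x9, GE=0xa, LT=0xb, GT=0xc, LE=0xd).
 *  Condition 0xe ("always") can use the unconditional A__NAME / A__NAME_PC
 *  directly.  A dispatcher picking a handler might therefore do something
 *  like the following (illustration only, not gxemul's actual translator):
 */
#if 0
	switch (iword >> 28) {
	case 0x0:  ic->f = A__NAME__eq;  break;
	case 0x1:  ic->f = A__NAME__ne;  break;
	/*  ...  */
	case 0xd:  ic->f = A__NAME__le;  break;
	case 0xe:  ic->f = A__NAME;      break;		/*  always  */
	}
#endif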


#ifdef A__STRD
#undef A__STRD
#endif
