/[pearpc]/src/cpu/cpu_jitc_x86/ppc_opc.cc
This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!
ViewVC logotype

Annotation of /src/cpu/cpu_jitc_x86/ppc_opc.cc

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1 - (hide annotations)
Wed Sep 5 17:11:21 2007 UTC (16 years, 7 months ago) by dpavlin
File size: 55948 byte(s)
import upstream CVS
1 dpavlin 1 /*
2     * PearPC
3     * ppc_opc.cc
4     *
5     * Copyright (C) 2003, 2004 Sebastian Biallas (sb@biallas.net)
6     * Copyright (C) 2004 Daniel Foesch (dfoesch@cs.nmsu.edu)
7     *
8     * This program is free software; you can redistribute it and/or modify
9     * it under the terms of the GNU General Public License version 2 as
10     * published by the Free Software Foundation.
11     *
12     * This program is distributed in the hope that it will be useful,
13     * but WITHOUT ANY WARRANTY; without even the implied warranty of
14     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15     * GNU General Public License for more details.
16     *
17     * You should have received a copy of the GNU General Public License
18     * along with this program; if not, write to the Free Software
19     * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20     */
21    
22     #include "debug/tracers.h"
23     #include "io/pic/pic.h"
24     #include "info.h"
25     #include "ppc_cpu.h"
26     #include "ppc_exc.h"
27     #include "ppc_mmu.h"
28     #include "ppc_opc.h"
29     #include "ppc_dec.h"
30    
31     #include "jitc.h"
32     #include "jitc_asm.h"
33     #include "x86asm.h"
34    
// Snapshot taken at the last writeDEC(): the ideal timebase at write time
// and the DEC value written.  readDEC() derives the current DEC from these.
static uint64 gDECwriteITB;
static uint64 gDECwriteValue;
37    
38     static void readDEC()
39     {
40     uint64 itb = ppc_get_cpu_ideal_timebase() - gDECwriteITB;
41     gCPU.dec = gDECwriteValue - itb;
42     // PPC_OPC_WARN("read dec=%08x\n", gCPU.dec);
43     }
44    
45     static void FASTCALL writeDEC(uint32 newdec)
46     {
47     // PPC_OPC_WARN("write dec=%08x\n", newdec);
48     if (!(gCPU.dec & 0x80000000) && (newdec & 0x80000000)) {
49     gCPU.dec = newdec;
50     sys_set_timer(gDECtimer, 0, 0, false);
51     } else {
52     gCPU.dec = newdec;
53     /*
54     * 1000000000ULL and gCPU.dec are both smaller than 2^32
55     * so this expression can't overflow
56     */
57     uint64 q = 1000000000ULL*gCPU.dec / gClientTimeBaseFrequency;
58    
59     // FIXME: Occasionally, ppc seems to generate very large dec values
60     // as a result of a memory overwrite or something else. Let's handle
61     // that until we figure out why.
62     if (q > 20 * 1000 * 1000) {
63     PPC_OPC_WARN("write dec > 20 millisec := %08x (%qu)\n", gCPU.dec, q);
64     q = 10 * 1000 * 1000;
65     sys_set_timer(gDECtimer, 0, q, false);
66     } else {
67     sys_set_timer(gDECtimer, 0, q, false);
68     }
69     }
70     gDECwriteValue = gCPU.dec;
71     gDECwriteITB = ppc_get_cpu_ideal_timebase();
72     }
73    
74     static void FASTCALL writeTBL(uint32 newtbl)
75     {
76     uint64 tbBase = ppc_get_cpu_timebase();
77     gCPU.tb = (tbBase & 0xffffffff00000000ULL) | (uint64)newtbl;
78     }
79    
80     static void FASTCALL writeTBU(uint32 newtbu)
81     {
82     uint64 tbBase = ppc_get_cpu_timebase();
83     gCPU.tb = ((uint64)newtbu << 32) | (tbBase & 0xffffffff);
84     }
85    
/*
 * Install a new value into the MSR.
 *
 * Invalidates the software TLB (a change to MSR may change address
 * translation / privilege state), warns about MSR bits this emulator
 * does not implement, and silently drops MSR[POW] (power management).
 */
void ppc_set_msr(uint32 newmsr)
{
/*	if ((newmsr & MSR_EE) && !(gCPU.msr & MSR_EE)) {
		if (pic_check_interrupt()) {
			gCPU.exception_pending = true;
			gCPU.ext_exception = true;
		}
	}*/
	ppc_mmu_tlb_invalidate();
#ifndef PPC_CPU_ENABLE_SINGLESTEP
	// built without single-step support: warn if the guest asks for it
	if (newmsr & MSR_SE) {
		SINGLESTEP("");
		PPC_CPU_WARN("MSR[SE] (singlestep enable) set, but compiled w/o SE support.\n");
	}
#else
	gCPU.singlestep_ignore = true;
#endif
	if (newmsr & PPC_CPU_UNSUPPORTED_MSR_BITS) {
		PPC_CPU_ERR("unsupported bits in MSR set: %08x @%08x\n", newmsr & PPC_CPU_UNSUPPORTED_MSR_BITS, gCPU.pc);
	}
	if (newmsr & MSR_POW) {
		// power-down request is ignored; clear the bit instead
		// doze();
		newmsr &= ~MSR_POW;
	}
	gCPU.msr = newmsr;
}
113    
/*
 * Emit a runtime privilege check into the translated block.
 *
 * The generated code tests MSR[PR]; if set (user mode) it flushes dirty
 * registers and jumps to ppc_program_exception_asm with a
 * privileged-instruction program exception, otherwise it falls through.
 * The check is emitted at most once per block (cached via
 * gJITC.checkedPriviledge).
 */
void ppc_opc_gen_check_privilege()
{
	if (!gJITC.checkedPriviledge) {
		// the exception path destroys flags/carry and FPU/vector state
		jitcClobberCarryAndFlags();
		jitcFloatRegisterClobberAll();
		jitcFlushVectorRegister();
		NativeReg msr = jitcGetClientRegisterMapping(PPC_MSR);
		if (msr == REG_NO) {
			// MSR not mapped to a native register: test it in memory
			asmTESTDMemImm((uint32)&gCPU.msr, MSR_PR);
		} else {
			asmALURegImm(X86_TEST, msr, MSR_PR);
		}
		NativeAddress fixup = asmJxxFixup(X86_Z);
		// user-mode path: raise the program exception
		jitcFlushRegisterDirty();
		asmALURegImm(X86_MOV, ECX, PPC_EXC_PROGRAM_PRIV);
		asmALURegImm(X86_MOV, EDX, gJITC.current_opc);
		asmALURegImm(X86_MOV, ESI, gJITC.pc);
		asmJMP((NativeAddress)ppc_program_exception_asm);
		asmResolveFixup(fixup, asmHERE());
		gJITC.checkedPriviledge = true;
	}
}
136    
/*
 * Emit code that transfers control to client address gJITC.pc + li.
 *
 * Near targets (effective address < 4096, i.e. within the zero page
 * handling of the translator) use a patchable 7+6+5+5-byte sequence that
 * the JIT rewrites in place later; far targets take the generic
 * ppc_new_pc_rel_asm path.
 */
static inline void ppc_opc_gen_set_pc_rel(uint32 li)
{
	li += gJITC.pc;
	if (li < 4096) {
		/*
		 * We assure here 7+6+5+5 bytes, to have enough space for
		 * four instructions (since we want to modify them)
		 */
		jitcEmitAssure(7+6+5+5);

		asmMOVRegImm_NoFlags(EAX, li);
		asmCALL((NativeAddress)ppc_heartbeat_ext_rel_asm);
		asmMOVRegImm_NoFlags(EAX, li);
		asmCALL((NativeAddress)ppc_new_pc_this_page_asm);
		asmNOP(3);
	} else {
		asmALURegImm(X86_MOV, EAX, li);
		asmJMP((NativeAddress)ppc_new_pc_rel_asm);
	}
}
157    
158     /*
159     * bx Branch
160     * .435
161     */
162     void ppc_opc_bx()
163     {
164     uint32 li;
165     PPC_OPC_TEMPL_I(gCPU.current_opc, li);
166     if (!(gCPU.current_opc & PPC_OPC_AA)) {
167     li += gCPU.pc;
168     }
169     if (gCPU.current_opc & PPC_OPC_LK) {
170     gCPU.lr = gCPU.pc + 4;
171     }
172     gCPU.npc = li;
173     }
174    
/*
 * JIT translation of bx (unconditional branch).
 * Ends the translated block: control always leaves via ppc_new_pc_*.
 */
JITCFlow ppc_opc_gen_bx()
{
	uint32 li;
	PPC_OPC_TEMPL_I(gJITC.current_opc, li);
	jitcClobberAll();
	if (gJITC.current_opc & PPC_OPC_LK) {
		// lr = current_code_base + pc + 4 (client return address)
		asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
		asmALURegImm(X86_ADD, EAX, gJITC.pc+4);
		asmMOVDMemReg((uint32)&gCPU.lr, EAX);
	}
	if (gJITC.current_opc & PPC_OPC_AA) {
		// absolute target
		asmALURegImm(X86_MOV, EAX, li);
		asmJMP((NativeAddress)ppc_new_pc_asm);
	} else {
		ppc_opc_gen_set_pc_rel(li);
	}
	return flowEndBlockUnreachable;
}
193    
194     /*
195     * bcx Branch Conditional
196     * .436
197     */
198     void ppc_opc_bcx()
199     {
200     uint32 BO, BI, BD;
201     PPC_OPC_TEMPL_B(gCPU.current_opc, BO, BI, BD);
202     if (!(BO & 4)) {
203     gCPU.ctr--;
204     }
205     bool bo2 = (BO & 2);
206     bool bo8 = (BO & 8); // branch condition true
207     bool cr = (gCPU.cr & (1<<(31-BI)));
208     if (((BO & 4) || ((gCPU.ctr!=0) ^ bo2))
209     && ((BO & 16) || (!(cr ^ bo8)))) {
210     if (!(gCPU.current_opc & PPC_OPC_AA)) {
211     BD += gCPU.pc;
212     }
213     if (gCPU.current_opc & PPC_OPC_LK) {
214     gCPU.lr = gCPU.pc + 4;
215     }
216     gCPU.npc = BD;
217     }
218     }
/*
 * JIT translation of bcx (branch conditional).
 *
 * Four shapes, depending on BO:
 *  - condition only (BO&16 clear, BO&4 set): if the x86 flags currently
 *    mirror the right crX field, branch directly on the native flags
 *    (fast path); otherwise test the CR bit in memory.
 *  - condition + CTR (BO&16 and BO&4 both clear): emit DEC + two tests.
 *  - unconditional (both set): block ends, control leaves.
 *  - CTR only (BO&16 set, BO&4 clear): DEC and branch on Z.
 * In the conditional cases the "not taken" path falls through, so the
 * block continues (flowContinue).
 */
JITCFlow ppc_opc_gen_bcx()
{
	uint32 BO, BI, BD;
	PPC_OPC_TEMPL_B(gJITC.current_opc, BO, BI, BD);
	NativeAddress fixup = NULL;
	jitcFloatRegisterClobberAll();
	jitcFlushVectorRegister();
	if (!(BO & 16)) {
		// only branch if condition
		if (BO & 4) {
			// don't check ctr
			PPC_CRx cr = (PPC_CRx)(BI / 4);
			if (jitcFlagsMapped() && jitcGetFlagsMapping() == cr && (BI%4) != 3) {
				// x86 flags map to correct crX register
				// and not SO flag (which isnt mapped)
				NativeAddress fixup2=NULL;
				// fixup skips the branch when the condition is NOT met
				switch (BI%4) {
				case 0:
					// less than
					fixup = asmJxxFixup((BO & 8) ? X86_NS : X86_S);
					break;
				case 1:
					// greater than
					// there seems to be no equivalent instruction on the x86
					if (BO & 8) {
						fixup = asmJxxFixup(X86_S);
						fixup2 = asmJxxFixup(X86_Z);
					} else {
						NativeAddress fixup3 = asmJxxFixup(X86_S);
						NativeAddress fixup4 = asmJxxFixup(X86_Z);
						fixup = asmJMPFixup();
						asmResolveFixup(fixup3, asmHERE());
						asmResolveFixup(fixup4, asmHERE());
					}
					break;
				case 2:
					// equal
					fixup = asmJxxFixup((BO & 8) ? X86_NZ : X86_Z);
					break;
				}
				// FIXME: optimize me
				if (jitcCarryMapped()) {
					// spill the cached carry into gCPU.xer_ca
					byte modrm[6];
					asmSETMem(X86_C, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.xer_ca));
				}
				asmCALL((NativeAddress)ppc_flush_flags_asm);
				jitcFlushRegisterDirty();
				if (gJITC.current_opc & PPC_OPC_LK) {
					asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
					asmALURegImm(X86_ADD, EAX, gJITC.pc+4);
					asmMOVDMemReg((uint32)&gCPU.lr, EAX);
				}
				if (gJITC.current_opc & PPC_OPC_AA) {
					asmALURegImm(X86_MOV, EAX, BD);
					asmJMP((NativeAddress)ppc_new_pc_asm);
				} else {
					ppc_opc_gen_set_pc_rel(BD);
				}
				asmResolveFixup(fixup, asmHERE());
				if (fixup2) {
					asmResolveFixup(fixup2, asmHERE());
				}
				return flowContinue;
			} else {
				jitcClobberCarryAndFlags();
				// test specific crX bit
				asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-BI));
				fixup = asmJxxFixup((BO & 8) ? X86_Z : X86_NZ);
			}
		} else {
			// decrement and check condition
			// (this local fixup intentionally shadows the outer one;
			// both branch-not-taken exits are resolved right here)
			jitcClobberCarryAndFlags();
			NativeReg ctr = jitcGetClientRegisterDirty(PPC_CTR);
			asmDECReg(ctr);
			NativeAddress fixup = asmJxxFixup((BO & 2) ? X86_NZ : X86_Z);
			asmTESTDMemImm((uint32)(&gCPU.cr), 1<<(31-BI));
			NativeAddress fixup2 = asmJxxFixup((BO & 8) ? X86_Z : X86_NZ);
			jitcFlushRegisterDirty();
			if (gJITC.current_opc & PPC_OPC_LK) {
				asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
				asmALURegImm(X86_ADD, EAX, gJITC.pc+4);
				asmMOVDMemReg((uint32)&gCPU.lr, EAX);
			}
			if (gJITC.current_opc & PPC_OPC_AA) {
				asmALURegImm(X86_MOV, EAX, BD);
				asmJMP((NativeAddress)ppc_new_pc_asm);
			} else {
				ppc_opc_gen_set_pc_rel(BD);
			}
			asmResolveFixup(fixup, asmHERE());
			asmResolveFixup(fixup2, asmHERE());
			return flowContinue;
		}
	} else {
		// don't check condition
		if (BO & 4) {
			// always branch
			jitcClobberCarryAndFlags();
			jitcFlushRegister();
			if (gJITC.current_opc & PPC_OPC_LK) {
				asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
				asmALURegImm(X86_ADD, EAX, gJITC.pc+4);
				asmMOVDMemReg((uint32)&gCPU.lr, EAX);
			}
			if (gJITC.current_opc & PPC_OPC_AA) {
				asmALURegImm(X86_MOV, EAX, BD);
				asmJMP((NativeAddress)ppc_new_pc_asm);
			} else {
				ppc_opc_gen_set_pc_rel(BD);
			}
			return flowEndBlockUnreachable;
		} else {
			// decrement ctr and branch on ctr
			jitcClobberCarryAndFlags();
			NativeReg ctr = jitcGetClientRegisterDirty(PPC_CTR);
			asmDECReg(ctr);
			fixup = asmJxxFixup((BO & 2) ? X86_NZ : X86_Z);
		}
	}
	// common tail for the paths that set the outer `fixup` above
	jitcFlushRegisterDirty();
	if (gJITC.current_opc & PPC_OPC_LK) {
		asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
		asmALURegImm(X86_ADD, EAX, gJITC.pc+4);
		asmMOVDMemReg((uint32)&gCPU.lr, EAX);
	}
	if (gJITC.current_opc & PPC_OPC_AA) {
		asmALURegImm(X86_MOV, EAX, BD);
		asmJMP((NativeAddress)ppc_new_pc_asm);
	} else {
		ppc_opc_gen_set_pc_rel(BD);
	}
	asmResolveFixup(fixup, asmHERE());
	return flowContinue;
}
353    
354     /*
355     * bcctrx Branch Conditional to Count Register
356     * .438
357     */
358     void ppc_opc_bcctrx()
359     {
360     uint32 BO, BI, BD;
361     PPC_OPC_TEMPL_XL(gCPU.current_opc, BO, BI, BD);
362     PPC_OPC_ASSERT(BD==0);
363     PPC_OPC_ASSERT(!(BO & 2));
364     bool bo8 = (BO & 8);
365     bool cr = (gCPU.cr & (1<<(31-BI)));
366     if ((BO & 16) || (!(cr ^ bo8))) {
367     if (gCPU.current_opc & PPC_OPC_LK) {
368     gCPU.lr = gCPU.pc + 4;
369     }
370     gCPU.npc = gCPU.ctr & 0xfffffffc;
371     }
372     }
/*
 * JIT translation of bcctrx (branch conditional to CTR).
 * Unconditional form ends the block; conditional form falls through
 * past the fixup when the CR bit does not match.
 */
JITCFlow ppc_opc_gen_bcctrx()
{
	uint32 BO, BI, BD;
	PPC_OPC_TEMPL_XL(gJITC.current_opc, BO, BI, BD);
	jitcFloatRegisterClobberAll();
	jitcFlushVectorRegister();
	if (BO & 16) {
		// branch always
		jitcClobberCarryAndFlags();
		jitcFlushRegister();
		// EAX = CTR (target), loaded before LK may be stored
		jitcGetClientRegister(PPC_CTR, NATIVE_REG | EAX);
		if (gJITC.current_opc & PPC_OPC_LK) {
			asmMOVRegDMem(ECX, (uint32)&gCPU.current_code_base);
			asmALURegImm(X86_ADD, ECX, gJITC.pc+4);
			asmMOVDMemReg((uint32)&gCPU.lr, ECX);
		}
		// targets are word-aligned: mask the low two bits
		asmALURegImm(X86_AND, EAX, 0xfffffffc);
		asmJMP((NativeAddress)ppc_new_pc_asm);
		return flowEndBlockUnreachable;
	} else {
		// test specific crX bit
		jitcClobberCarryAndFlags();
		asmTESTDMemImm((uint32)(&gCPU.cr), 1<<(31-BI));
		jitcGetClientRegister(PPC_CTR, NATIVE_REG | EAX);
		NativeAddress fixup = asmJxxFixup((BO & 8) ? X86_Z : X86_NZ);
		jitcFlushRegisterDirty();
		if (gJITC.current_opc & PPC_OPC_LK) {
			asmMOVRegDMem(ECX, (uint32)&gCPU.current_code_base);
			asmALURegImm(X86_ADD, ECX, gJITC.pc+4);
			asmMOVDMemReg((uint32)&gCPU.lr, ECX);
		}
		asmALURegImm(X86_AND, EAX, 0xfffffffc);
		asmJMP((NativeAddress)ppc_new_pc_asm);
		asmResolveFixup(fixup, asmHERE());
		return flowContinue;
	}
}
410     /*
411     * bclrx Branch Conditional to Link Register
412     * .440
413     */
414     void ppc_opc_bclrx()
415     {
416     uint32 BO, BI, BD;
417     PPC_OPC_TEMPL_XL(gCPU.current_opc, BO, BI, BD);
418     PPC_OPC_ASSERT(BD==0);
419     if (!(BO & 4)) {
420     gCPU.ctr--;
421     }
422     bool bo2 = (BO & 2);
423     bool bo8 = (BO & 8);
424     bool cr = (gCPU.cr & (1<<(31-BI)));
425     if (((BO & 4) || ((gCPU.ctr!=0) ^ bo2))
426     && ((BO & 16) || (!(cr ^ bo8)))) {
427     BD = gCPU.lr & 0xfffffffc;
428     if (gCPU.current_opc & PPC_OPC_LK) {
429     gCPU.lr = gCPU.pc + 4;
430     }
431     gCPU.npc = BD;
432     }
433     }
/*
 * JIT translation of bclrx (branch conditional to LR).
 *
 * NOTE(review): CTR-decrementing forms are reported as unimplemented;
 * the message text "bclrx + BO&4" is misleading — the error fires when
 * BO&4 is *clear*. Verify against PPC_OPC_ERR semantics.
 */
JITCFlow ppc_opc_gen_bclrx()
{
	uint32 BO, BI, BD;
	PPC_OPC_TEMPL_XL(gJITC.current_opc, BO, BI, BD);
	if (!(BO & 4)) {
		PPC_OPC_ERR("not impl.: bclrx + BO&4\n");
	}
	jitcFloatRegisterClobberAll();
	jitcFlushVectorRegister();
	if (BO & 16) {
		// branch always
		jitcClobberCarryAndFlags();
		jitcFlushRegister();
		// EAX = LR (target), loaded before LK may overwrite LR
		jitcGetClientRegister(PPC_LR, NATIVE_REG | EAX);
		if (gJITC.current_opc & PPC_OPC_LK) {
			asmMOVRegDMem(ECX, (uint32)&gCPU.current_code_base);
			asmALURegImm(X86_ADD, ECX, gJITC.pc+4);
			asmMOVDMemReg((uint32)&gCPU.lr, ECX);
		}
		asmALURegImm(X86_AND, EAX, 0xfffffffc);
		asmJMP((NativeAddress)ppc_new_pc_asm);
		return flowEndBlockUnreachable;
	} else {
		jitcClobberCarryAndFlags();
		// test specific crX bit
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-BI));
		jitcGetClientRegister(PPC_LR, NATIVE_REG | EAX);
		NativeAddress fixup = asmJxxFixup((BO & 8) ? X86_Z : X86_NZ);
		jitcFlushRegisterDirty();
		if (gJITC.current_opc & PPC_OPC_LK) {
			asmMOVRegDMem(ECX, (uint32)&gCPU.current_code_base);
			asmALURegImm(X86_ADD, ECX, gJITC.pc+4);
			asmMOVDMemReg((uint32)&gCPU.lr, ECX);
		}
		asmALURegImm(X86_AND, EAX, 0xfffffffc);
		asmJMP((NativeAddress)ppc_new_pc_asm);
		asmResolveFixup(fixup, asmHERE());
		return flowContinue;
	}
}
474    
/*
 * dcbf		Data Cache Block Flush
 * .458
 */
void ppc_opc_dcbf()
{
	// NO-OP: no data cache is emulated
}
JITCFlow ppc_opc_gen_dcbf()
{
	// NO-OP
	return flowContinue;
}
488    
/*
 * dcbi		Data Cache Block Invalidate
 * .460
 */
void ppc_opc_dcbi()
{
	// supervisor-only: raise a privileged-instruction exception in user mode
	if (gCPU.msr & MSR_PR) {
		ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
		return;
	}
	// FIXME: check addr
}
JITCFlow ppc_opc_gen_dcbi()
{
	// only the privilege check is observable; the cache op itself is a no-op
	ppc_opc_gen_check_privilege();
	return flowContinue;
}
/*
 * dcbst	Data Cache Block Store
 * .461
 */
void ppc_opc_dcbst()
{
	// NO-OP: no data cache is emulated
}
JITCFlow ppc_opc_gen_dcbst()
{
	// NO-OP
	return flowContinue;
}
/*
 * dcbt		Data Cache Block Touch
 * .462
 */
void ppc_opc_dcbt()
{
	// NO-OP: cache-touch hint, nothing to do
}
JITCFlow ppc_opc_gen_dcbt()
{
	// NO-OP
	return flowContinue;
}
/*
 * dcbtst	Data Cache Block Touch for Store
 * .463
 */
void ppc_opc_dcbtst()
{
	// NO-OP: cache-touch hint, nothing to do
}
JITCFlow ppc_opc_gen_dcbtst()
{
	// NO-OP
	return flowContinue;
}
/*
 * eciwx	External Control In Word Indexed
 * .474
 */
void ppc_opc_eciwx()
{
	PPC_OPC_ERR("eciwx unimplemented.\n");
}
JITCFlow ppc_opc_gen_eciwx()
{
	PPC_OPC_ERR("eciwx unimplemented.\n");
	return flowContinue;
}
/*
 * ecowx	External Control Out Word Indexed
 * .476
 */
void ppc_opc_ecowx()
{
	PPC_OPC_ERR("ecowx unimplemented.\n");
}
JITCFlow ppc_opc_gen_ecowx()
{
	PPC_OPC_ERR("ecowx unimplemented.\n");
	return flowContinue;
}
/*
 * eieio	Enforce In-Order Execution of I/O
 * .478
 */
void ppc_opc_eieio()
{
	// NO-OP: the emulator executes memory accesses in order anyway
}
JITCFlow ppc_opc_gen_eieio()
{
	// NO-OP
	return flowContinue;
}
584    
/*
 * icbi		Instruction Cache Block Invalidate
 * .519
 */
void ppc_opc_icbi()
{
	// FIXME: not a NOP with jitc
}
JITCFlow ppc_opc_gen_icbi()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, rA, rB);
	jitcClobberAll();
	// EAX = effective address: (rA|0) + rB
	if (rA) {
		byte modrm[6];
		asmMOVRegDMem(EAX, (uint32)&gCPU.gpr[rA]);
		asmALURegMem(X86_ADD, EAX, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.gpr[rB]));
	} else {
		asmMOVRegDMem(EAX, (uint32)&gCPU.gpr[rB]);
	}
	asmALURegImm(X86_MOV, ESI, gJITC.pc);
	// drop any translated code for that address range ...
	asmCALL((NativeAddress)ppc_opc_icbi_asm);
	// ... then re-enter translation at the next instruction, since the
	// currently running translated block may itself have been invalidated
	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
	return flowEndBlockUnreachable;
}
611    
/*
 * isync	Instruction Synchronize
 * .520
 */
void ppc_opc_isync()
{
	// NO-OP: instruction fetch is always coherent here
}
JITCFlow ppc_opc_gen_isync()
{
	// NO-OP
	return flowContinue;
}
625    
// AND-masks that clear one 4-bit condition field of gCPU.cr:
// index i clears bits 4*i .. 4*i+3.  Callers convert a PPC crN field
// number to an index via (7 - crN), see ppc_opc_mcrf/ppc_opc_mcrxr.
static uint32 ppc_cmp_and_mask[8] = {
	0xfffffff0,
	0xffffff0f,
	0xfffff0ff,
	0xffff0fff,
	0xfff0ffff,
	0xff0fffff,
	0xf0ffffff,
	0x0fffffff,
};
636     /*
637     * mcrf Move Condition Register Field
638     * .561
639     */
640     void ppc_opc_mcrf()
641     {
642     uint32 crD, crS, bla;
643     PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crS, bla);
644     // FIXME: bla == 0
645     crD>>=2;
646     crS>>=2;
647     crD = 7-crD;
648     crS = 7-crS;
649     uint32 c = (gCPU.cr>>(crS*4)) & 0xf;
650     gCPU.cr &= ppc_cmp_and_mask[crD];
651     gCPU.cr |= c<<(crD*4);
652     }
// JIT translation of mcrf: no fast path, fall back to the interpreter.
JITCFlow ppc_opc_gen_mcrf()
{
	ppc_opc_gen_interpret(ppc_opc_mcrf);
	return flowEndBlock;
}
/*
 * mcrfs	Move to Condition Register from FPSCR
 * .562
 */
void ppc_opc_mcrfs()
{
	PPC_OPC_ERR("mcrfs unimplemented.\n");
}
666     JITCFlow ppc_opc_gen_mcrfs()
667     {
668     PPC_OPC_ERR("mcrfs unimplemented.\n");
669     }
670     /*
671     * mcrxr Move to Condition Register from XER
672     * .563
673     */
674     void ppc_opc_mcrxr()
675     {
676     int crD, a, b;
677     PPC_OPC_TEMPL_X(gCPU.current_opc, crD, a, b);
678     crD >>= 2;
679     crD = 7-crD;
680     gCPU.cr &= ppc_cmp_and_mask[crD];
681     gCPU.cr |= (((gCPU.xer & 0xf0000000) | (gCPU.xer_ca ? XER_CA : 0))>>28)<<(crD*4);
682     gCPU.xer = ~0xf0000000;
683     gCPU.xer_ca = 0;
684     }
// JIT translation of mcrxr: no fast path, fall back to the interpreter.
JITCFlow ppc_opc_gen_mcrxr()
{
	ppc_opc_gen_interpret(ppc_opc_mcrxr);
	return flowEndBlock;
}
690    
// Emit code copying client register creg2 into creg1 (creg1 = creg2);
// creg1's native mapping is marked dirty so it gets written back.
static void inline move_reg(PPC_Register creg1, PPC_Register creg2)
{
	NativeReg reg2 = jitcGetClientRegister(creg2);
	NativeReg reg1 = jitcMapClientRegisterDirty(creg1);
	asmALURegReg(X86_MOV, reg1, reg2);
}
697    
// Emit code setting client register creg1 to 0 (without touching x86 flags).
static void inline move_reg0(PPC_Register creg1)
{
	NativeReg reg1 = jitcMapClientRegisterDirty(creg1);
	asmMOVRegImm_NoFlags(reg1, 0);
}
703    
704     /*
705     * mfcr Move from Condition Register
706     * .564
707     */
708     void ppc_opc_mfcr()
709     {
710     int rD, rA, rB;
711     PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
712     PPC_OPC_ASSERT(rA==0 && rB==0);
713     gCPU.gpr[rD] = gCPU.cr;
714     }
// JIT translation of mfcr: load gCPU.cr straight into rD's native register.
JITCFlow ppc_opc_gen_mfcr()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, rA, rB);
	jitcClobberFlags();
	NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
	asmMOVRegDMem(d, (uint32)&gCPU.cr);
	return flowContinue;
}
/*
 * mffs		Move from FPSCR
 * .565
 */
void ppc_opc_mffsx()
{
	int frD, rA, rB;
	PPC_OPC_TEMPL_X(gCPU.current_opc, frD, rA, rB);
	PPC_OPC_ASSERT(rA==0 && rB==0);
	// FPSCR is copied into the low word of frD (zero-extended to 64 bit)
	gCPU.fpr[frD] = gCPU.fpscr;
	if (gCPU.current_opc & PPC_OPC_Rc) {
		// update cr1 flags
		PPC_OPC_ERR("mffs. unimplemented.\n");
	}
}
// JIT translation of mffs: no fast path, fall back to the interpreter.
JITCFlow ppc_opc_gen_mffsx()
{
	ppc_opc_gen_interpret(ppc_opc_mffsx);
	return flowEndBlock;
}
744    
745     /*
746     * mfmsr Move from Machine State Register
747     * .566
748     */
749     void ppc_opc_mfmsr()
750     {
751     if (gCPU.msr & MSR_PR) {
752     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
753     return;
754     }
755     int rD, rA, rB;
756     PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
757     PPC_OPC_ASSERT((rA == 0) && (rB == 0));
758     gCPU.gpr[rD] = gCPU.msr;
759     }
// JIT translation of mfmsr: privilege check, then rD = MSR.
JITCFlow ppc_opc_gen_mfmsr()
{
	ppc_opc_gen_check_privilege();
	int rD, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, rA, rB);
	move_reg(PPC_GPR(rD), PPC_MSR);
	return flowContinue;
}
768    
// Warn about a read of an unknown time-base register.
// a = client pc, spr1/spr2 = the split register number.
// FASTCALL: presumably invoked from generated code — TODO confirm caller.
void FASTCALL unknown_tbr_warning(uint32 a, uint32 spr1, uint32 spr2)
{
	PPC_OPC_WARN("invalid tbr %d:%d @%08x\n", spr1, spr2, a);
}
773    
// Warn about an access to an unknown SPR; called from code emitted by
// ppc_opc_gen_mfspr().  a = client pc, spr1/spr2 = split SPR number.
void FASTCALL unknown_spr_warning(uint32 a, uint32 spr1, uint32 spr2)
{
	PPC_OPC_WARN("invalid spr %d:%d @%08x\n", spr1, spr2, a);
}
778    
/*
 * mfspr	Move from Special-Purpose Register
 * .567
 *
 * User-accessible SPRs (XER, LR, CTR, VRSAVE) are served first; all
 * remaining SPRs require supervisor mode, otherwise a privileged
 * program exception is raised.  Unknown SPRs fall through to a
 * diagnostic at the bottom.
 */
void ppc_opc_mfspr()
{
	int rD, spr1, spr2;
	PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, spr1, spr2);
	switch (spr2) {
	case 0:
		switch (spr1) {
		// XER[CA] is cached separately in xer_ca and folded back in here
		case 1: gCPU.gpr[rD] = gCPU.xer | (gCPU.xer_ca ? XER_CA : 0); return;
		case 8: gCPU.gpr[rD] = gCPU.lr; return;
		case 9: gCPU.gpr[rD] = gCPU.ctr; return;
		}
		// NOTE(review): no break — spr2==0 with spr1 not in {1,8,9}
		// falls into the case 8 check below (so spr 0:0 would read
		// VRSAVE); verify this fallthrough is intentional.
	case 8: // altivec makes this user visible
		if (spr1 == 0) {
			gCPU.gpr[rD] = gCPU.vrsave;
			return;
		}
	}
	// everything below is supervisor-only
	if (gCPU.msr & MSR_PR) {
		ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
		return;
	}
	switch (spr2) {
	case 0:
		switch (spr1) {
		case 18: gCPU.gpr[rD] = gCPU.dsisr; return;
		case 19: gCPU.gpr[rD] = gCPU.dar; return;
		case 22: gCPU.gpr[rD] = gCPU.dec; return;
		case 25: gCPU.gpr[rD] = gCPU.sdr1; return;
		case 26: gCPU.gpr[rD] = gCPU.srr[0]; return;
		case 27: gCPU.gpr[rD] = gCPU.srr[1]; return;
		}
		break;
	case 8:
		switch (spr1) {
		case 12: gCPU.gpr[rD] = ppc_get_cpu_timebase(); return;
		case 13: gCPU.gpr[rD] = ppc_get_cpu_timebase() >> 32; return;
		case 16: gCPU.gpr[rD] = gCPU.sprg[0]; return;
		case 17: gCPU.gpr[rD] = gCPU.sprg[1]; return;
		case 18: gCPU.gpr[rD] = gCPU.sprg[2]; return;
		case 19: gCPU.gpr[rD] = gCPU.sprg[3]; return;
		case 26: gCPU.gpr[rD] = gCPU.ear; return;
		case 31: gCPU.gpr[rD] = gCPU.pvr; return;
		}
		break;
	case 16:
		// BAT registers
		switch (spr1) {
		case 16: gCPU.gpr[rD] = gCPU.ibatu[0]; return;
		case 17: gCPU.gpr[rD] = gCPU.ibatl[0]; return;
		case 18: gCPU.gpr[rD] = gCPU.ibatu[1]; return;
		case 19: gCPU.gpr[rD] = gCPU.ibatl[1]; return;
		case 20: gCPU.gpr[rD] = gCPU.ibatu[2]; return;
		case 21: gCPU.gpr[rD] = gCPU.ibatl[2]; return;
		case 22: gCPU.gpr[rD] = gCPU.ibatu[3]; return;
		case 23: gCPU.gpr[rD] = gCPU.ibatl[3]; return;
		case 24: gCPU.gpr[rD] = gCPU.dbatu[0]; return;
		case 25: gCPU.gpr[rD] = gCPU.dbatl[0]; return;
		case 26: gCPU.gpr[rD] = gCPU.dbatu[1]; return;
		case 27: gCPU.gpr[rD] = gCPU.dbatl[1]; return;
		case 28: gCPU.gpr[rD] = gCPU.dbatu[2]; return;
		case 29: gCPU.gpr[rD] = gCPU.dbatl[2]; return;
		case 30: gCPU.gpr[rD] = gCPU.dbatu[3]; return;
		case 31: gCPU.gpr[rD] = gCPU.dbatl[3]; return;
		}
		break;
	case 29:
		// performance-monitor SPRs: not emulated, read as zero
		switch (spr1) {
		case 16:
			gCPU.gpr[rD] = 0;
			return;
		case 17:
			gCPU.gpr[rD] = 0;
			return;
		case 18:
			gCPU.gpr[rD] = 0;
			return;
		case 24:
			gCPU.gpr[rD] = 0;
			return;
		case 25:
			gCPU.gpr[rD] = 0;
			return;
		case 26:
			gCPU.gpr[rD] = 0;
			return;
		case 28:
			gCPU.gpr[rD] = 0;
			return;
		case 29:
			gCPU.gpr[rD] = 0;
			return;
		case 30:
			gCPU.gpr[rD] = 0;
			return;
		}
		// NOTE(review): missing break — unmatched spr1 values for
		// spr2==29 fall into case 31 (the gen_ version below *does*
		// break here); verify intent.
	case 31:
		switch (spr1) {
		case 16:
			// PPC_OPC_WARN("read from spr %d:%d (HID0) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = gCPU.hid[0];
			return;
		case 17:
			PPC_OPC_WARN("read from spr %d:%d (HID1) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = gCPU.hid[1];
			return;
		case 22:
			gCPU.gpr[rD] = 0;
			return;
		case 23:
			gCPU.gpr[rD] = 0;
			return;
		case 25:
			PPC_OPC_WARN("read from spr %d:%d (L2CR) not supported! (from %08x)\n", spr1, spr2, gCPU.pc);
			gCPU.gpr[rD] = 0;
			return;
		case 27:
			PPC_OPC_WARN("read from spr %d:%d (ICTC) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = 0;
			return;
		case 28:
			// PPC_OPC_WARN("read from spr %d:%d (THRM1) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = 0;
			return;
		case 29:
			// PPC_OPC_WARN("read from spr %d:%d (THRM2) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = 0;
			return;
		case 30:
			// PPC_OPC_WARN("read from spr %d:%d (THRM3) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = 0;
			return;
		case 31:
			// PPC_OPC_WARN("read from spr %d:%d (???) not supported!\n", spr1, spr2);
			gCPU.gpr[rD] = 0;
			return;
		}
	}
	fprintf(stderr, "unknown mfspr: %i:%i\n", spr1, spr2);
	SINGLESTEP("invalid mfspr\n");
}
922    
/*
 * JIT translation of mfspr.
 *
 * User-visible SPRs (XER, LR, CTR, VRSAVE) are handled without a
 * privilege check; everything else first emits an MSR[PR] test via
 * ppc_opc_gen_check_privilege().  Unknown SPRs read as 0 and emit a
 * call to unknown_spr_warning().
 *
 * NOTE(review): this decodes with PPC_OPC_TEMPL_X while the interpreter
 * version uses PPC_OPC_TEMPL_XO — confirm both produce the same
 * rD/spr1/spr2 split.
 */
JITCFlow ppc_opc_gen_mfspr()
{
	int rD, spr1, spr2;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, spr1, spr2);
	switch (spr2) {
	case 0:
		switch (spr1) {
		case 1: {
			// rD = XER | (xer_ca ? XER_CA : 0); the cached carry is
			// materialized via SBB (reg = CA ? -1 : 0) then masked
			jitcClobberFlags();
			jitcGetClientCarry();
			NativeReg reg2 = jitcGetClientRegister(PPC_XER);
			NativeReg reg1 = jitcMapClientRegisterDirty(PPC_GPR(rD));
			asmALURegReg(X86_SBB, reg1, reg1); // reg1 = CA ? -1 : 0
			asmALURegImm(X86_AND, reg1, XER_CA); // reg1 = CA ? XER_CA : 0
			asmALURegReg(X86_OR, reg1, reg2);
			jitcClobberCarry();
			return flowContinue;
		}
		case 8: move_reg(PPC_GPR(rD), PPC_LR); return flowContinue;
		case 9: move_reg(PPC_GPR(rD), PPC_CTR); return flowContinue;
		}
		// NOTE(review): no break — mirrors the interpreter's fallthrough
		// into the VRSAVE check; verify intent.
	case 8:
		if (spr1 == 0) {
			move_reg(PPC_GPR(rD), PPC_VRSAVE);
			return flowContinue;
		}
	}
	// everything below is supervisor-only
	ppc_opc_gen_check_privilege();
	switch (spr2) {
	case 0:
		switch (spr1) {
		case 18: move_reg(PPC_GPR(rD), PPC_DSISR); return flowContinue;
		case 19: move_reg(PPC_GPR(rD), PPC_DAR); return flowContinue;
		case 22: {
			// refresh gCPU.dec from the timebase before reading it
			jitcClobberAll();
			asmCALL((NativeAddress)readDEC);
			move_reg(PPC_GPR(rD), PPC_DEC);
			return flowContinue;
		}
		case 25: move_reg(PPC_GPR(rD), PPC_SDR1); return flowContinue;
		case 26: move_reg(PPC_GPR(rD), PPC_SRR0); return flowContinue;
		case 27: move_reg(PPC_GPR(rD), PPC_SRR1); return flowContinue;
		}
		break;
	case 8:
		switch (spr1) {
		case 12: {
			// TBL: low word of the 64-bit timebase (returned in EAX)
			jitcClobberAll();
			asmCALL((NativeAddress)ppc_get_cpu_timebase);
			jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EAX);
			return flowContinue;
		}
		case 13: {
			// TBU: high word of the 64-bit timebase (returned in EDX)
			jitcClobberAll();
			asmCALL((NativeAddress)ppc_get_cpu_timebase);
			jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EDX);
			return flowContinue;
		}
		case 16: move_reg(PPC_GPR(rD), PPC_SPRG(0)); return flowContinue;
		case 17: move_reg(PPC_GPR(rD), PPC_SPRG(1)); return flowContinue;
		case 18: move_reg(PPC_GPR(rD), PPC_SPRG(2)); return flowContinue;
		case 19: move_reg(PPC_GPR(rD), PPC_SPRG(3)); return flowContinue;
		case 26: move_reg(PPC_GPR(rD), PPC_EAR); return flowContinue;
		case 31: move_reg(PPC_GPR(rD), PPC_PVR); return flowContinue;
		}
		break;
	case 16:
		// BAT registers
		switch (spr1) {
		case 16: move_reg(PPC_GPR(rD), PPC_IBATU(0)); return flowContinue;
		case 17: move_reg(PPC_GPR(rD), PPC_IBATL(0)); return flowContinue;
		case 18: move_reg(PPC_GPR(rD), PPC_IBATU(1)); return flowContinue;
		case 19: move_reg(PPC_GPR(rD), PPC_IBATL(1)); return flowContinue;
		case 20: move_reg(PPC_GPR(rD), PPC_IBATU(2)); return flowContinue;
		case 21: move_reg(PPC_GPR(rD), PPC_IBATL(2)); return flowContinue;
		case 22: move_reg(PPC_GPR(rD), PPC_IBATU(3)); return flowContinue;
		case 23: move_reg(PPC_GPR(rD), PPC_IBATL(3)); return flowContinue;
		case 24: move_reg(PPC_GPR(rD), PPC_DBATU(0)); return flowContinue;
		case 25: move_reg(PPC_GPR(rD), PPC_DBATL(0)); return flowContinue;
		case 26: move_reg(PPC_GPR(rD), PPC_DBATU(1)); return flowContinue;
		case 27: move_reg(PPC_GPR(rD), PPC_DBATL(1)); return flowContinue;
		case 28: move_reg(PPC_GPR(rD), PPC_DBATU(2)); return flowContinue;
		case 29: move_reg(PPC_GPR(rD), PPC_DBATL(2)); return flowContinue;
		case 30: move_reg(PPC_GPR(rD), PPC_DBATU(3)); return flowContinue;
		case 31: move_reg(PPC_GPR(rD), PPC_DBATL(3)); return flowContinue;
		}
		break;
	case 29:
		// not emulated: read as zero
		switch (spr1) {
		case 16: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 17: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 18: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 24: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 25: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 26: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 28: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 29: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 30: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		}
		break;
	case 31:
		switch (spr1) {
		case 16: move_reg(PPC_GPR(rD), PPC_HID0); return flowContinue;
		case 17: move_reg(PPC_GPR(rD), PPC_HID1); return flowContinue;
		case 22: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 23: move_reg0(PPC_GPR(rD)); return flowContinue; //g4
		case 25: move_reg0(PPC_GPR(rD)); return flowContinue;
		case 27: move_reg0(PPC_GPR(rD)); return flowContinue;
		case 28: move_reg0(PPC_GPR(rD)); return flowContinue;
		case 29: move_reg0(PPC_GPR(rD)); return flowContinue;
		case 30: move_reg0(PPC_GPR(rD)); return flowContinue;
		case 31: move_reg0(PPC_GPR(rD)); return flowContinue;
		}
	}
	// unknown SPR: rD = 0, then warn at runtime with the client pc
	move_reg0(PPC_GPR(rD));
	jitcClobberAll();
	asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
	asmALURegImm(X86_ADD, EAX, gJITC.pc);
	asmALURegImm(X86_MOV, EDX, spr1);
	asmALURegImm(X86_MOV, ECX, spr2);
	asmCALL((NativeAddress)unknown_spr_warning);
	return flowEndBlock;
}
1045     /*
1046     * mfsr Move from Segment Register
1047     * .570
1048     */
1049     void ppc_opc_mfsr()
1050     {
1051     if (gCPU.msr & MSR_PR) {
1052     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1053     return;
1054     }
1055     int rD, SR, rB;
1056     PPC_OPC_TEMPL_X(gCPU.current_opc, rD, SR, rB);
1057     // FIXME: check insn
1058     gCPU.gpr[rD] = gCPU.sr[SR & 0xf];
1059     }
// JIT translation of mfsr: emit a privilege check, then a register-to-register
// move of the selected segment register into rD.
JITCFlow ppc_opc_gen_mfsr()
{
	ppc_opc_gen_check_privilege();
	int rD, SR, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, SR, rB);
	// SR index is static here (unlike mfsrin), so a plain move suffices
	move_reg(PPC_GPR(rD), PPC_SR(SR & 0xf));
	return flowContinue;
}
1068     /*
1069     * mfsrin Move from Segment Register Indirect
1070     * .572
1071     */
1072     void ppc_opc_mfsrin()
1073     {
1074     if (gCPU.msr & MSR_PR) {
1075     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1076     return;
1077     }
1078     int rD, rA, rB;
1079     PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1080     // FIXME: check insn
1081     gCPU.gpr[rD] = gCPU.sr[gCPU.gpr[rB] >> 28];
1082     }
// JIT translation of mfsrin: load sr[gpr[rB] >> 28] into rD.
JITCFlow ppc_opc_gen_mfsrin()
{
	ppc_opc_gen_check_privilege();
	int rD, SR, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rD, SR, rB);
	jitcClobberCarryAndFlags();
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
	// b is modified (shifted) below, so drop its client mapping
	if (b != d) jitcClobberRegister(NATIVE_REG | b);
	// no problem here if b==d
	asmShiftRegImm(X86_SHR, b, 28);
	// mov d, [4*b+sr]
	byte modrm[6];
	asmALURegMem(X86_MOV, d, modrm, x86_mem_sib(modrm, REG_NO, 4, b, (uint32)(&gCPU.sr[0])));
	return flowContinue;
}
1099     /*
1100     * mftb Move from Time Base
1101     * .574
1102     */
1103     void ppc_opc_mftb()
1104     {
1105     int rD, spr1, spr2;
1106     PPC_OPC_TEMPL_X(gCPU.current_opc, rD, spr1, spr2);
1107     switch (spr2) {
1108     case 8:
1109     switch (spr1) {
1110     case 12: gCPU.gpr[rD] = ppc_get_cpu_timebase(); return;
1111     case 13: gCPU.gpr[rD] = ppc_get_cpu_timebase() >> 32; return;
1112     /* case 12: gCPU.gpr[rD] = gCPU.tb; return;
1113     case 13: gCPU.gpr[rD] = gCPU.tb >> 32; return;*/
1114     }
1115     break;
1116     }
1117     SINGLESTEP("unknown mftb\n");
1118     }
1119     JITCFlow ppc_opc_gen_mftb()
1120     {
1121     int rD, spr1, spr2;
1122     PPC_OPC_TEMPL_X(gJITC.current_opc, rD, spr1, spr2);
1123     switch (spr2) {
1124     case 8:
1125     switch (spr1) {
1126     case 12:
1127     jitcClobberAll();
1128     asmCALL((NativeAddress)ppc_get_cpu_timebase);
1129     jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EAX);
1130     return flowContinue;
1131    
1132     case 13:
1133     jitcClobberAll();
1134     asmCALL((NativeAddress)ppc_get_cpu_timebase);
1135     jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EDX);
1136     return flowContinue;
1137     }
1138     break;
1139     }
1140     move_reg0(PPC_GPR(rD));
1141     jitcClobberAll();
1142     asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
1143     asmALURegImm(X86_MOV, EDX, spr1);
1144     asmALURegImm(X86_MOV, ECX, spr2);
1145     asmCALL((NativeAddress)unknown_tbr_warning);
1146     return flowEndBlock;
1147     }
1148     /*
1149     * mtcrf Move to Condition Register Fields
1150     * .576
1151     */
1152     void ppc_opc_mtcrf()
1153     {
1154     int rS;
1155     uint32 crm;
1156     uint32 CRM;
1157     PPC_OPC_TEMPL_XFX(gCPU.current_opc, rS, crm);
1158     CRM = ((crm&0x80)?0xf0000000:0)|((crm&0x40)?0x0f000000:0)|((crm&0x20)?0x00f00000:0)|((crm&0x10)?0x000f0000:0)|
1159     ((crm&0x08)?0x0000f000:0)|((crm&0x04)?0x00000f00:0)|((crm&0x02)?0x000000f0:0)|((crm&0x01)?0x0000000f:0);
1160     gCPU.cr = (gCPU.gpr[rS] & CRM) | (gCPU.cr & ~CRM);
1161     }
// JIT translation of mtcrf: cr = (gpr[rS] & CRM) | (cr & ~CRM),
// with the nibble mask CRM computed at translation time.
JITCFlow ppc_opc_gen_mtcrf()
{
	int rS;
	uint32 crm;
	uint32 CRM;
	PPC_OPC_TEMPL_XFX(gJITC.current_opc, rS, crm);
	// expand the 8 bit field mask into a 32 bit nibble mask
	CRM = ((crm&0x80)?0xf0000000:0)|((crm&0x40)?0x0f000000:0)|((crm&0x20)?0x00f00000:0)|((crm&0x10)?0x000f0000:0)|
	((crm&0x08)?0x0000f000:0)|((crm&0x04)?0x00000f00:0)|((crm&0x02)?0x000000f0:0)|((crm&0x01)?0x0000000f:0);
	jitcClobberCarryAndFlags();
	NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
	// s is modified (masked) below, so drop its client mapping
	jitcClobberRegister(NATIVE_REG | s);
	byte modrm[6];
	asmALURegImm(X86_AND, s, CRM);
	asmALUMemImm(X86_AND, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.cr), ~CRM);
	asmALUMemReg(X86_OR, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.cr), s);
	return flowContinue;
}
1179     /*
1180     * mtfsb0x Move to FPSCR Bit 0
1181     * .577
1182     */
1183     void ppc_opc_mtfsb0x()
1184     {
1185     int crbD, n1, n2;
1186     PPC_OPC_TEMPL_X(gCPU.current_opc, crbD, n1, n2);
1187     if (crbD != 1 && crbD != 2) {
1188     gCPU.fpscr &= ~(1<<(31-crbD));
1189     }
1190     if (gCPU.current_opc & PPC_OPC_Rc) {
1191     // update cr1 flags
1192     PPC_OPC_ERR("mtfsb0. unimplemented.\n");
1193     }
1194     }
1195    
// Translation table from the PPC FPSCR RN field (2 bit rounding mode)
// to the corresponding x87 control word RC bits (bits 10..11).
static uint32 ppc_to_x86_roundmode[] = {
	0x0000, // round to nearest
	0x0c00, // round to zero
	0x0800, // round to pinf
	0x0400, // round to minf
};
1202    
// Emit code that reprograms the x87 rounding mode from the PPC rounding
// mode held in the low two bits of native register r (clobbers r):
// clears the RC bits of gCPU.x87cw, ORs in the translated mode, reloads CW.
static void ppc_opc_set_fpscr_roundmode(NativeReg r)
{
	byte modrm[6];
	asmALURegImm(X86_AND, r, 3); // RC
	asmALUMemImm(X86_AND, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.x87cw), ~0x0c00);
	// table lookup: r = ppc_to_x86_roundmode[r]
	asmALURegMem(X86_MOV, r, modrm, x86_mem_sib(modrm, REG_NO, 4, r, (uint32)&ppc_to_x86_roundmode));
	asmALUMemReg(X86_OR, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.x87cw), r);
	asmFLDCWMem(modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.x87cw));
}
1212    
// JIT translation of mtfsb0[.]: clear one FPSCR bit; bits 1 and 2 are
// skipped (same as the interpreter). If a rounding-mode bit (30/31)
// changes, the x87 control word is reprogrammed as well.
JITCFlow ppc_opc_gen_mtfsb0x()
{
	int crbD, n1, n2;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crbD, n1, n2);
	if (crbD != 1 && crbD != 2) {
		jitcGetClientRegister(PPC_FPSCR, NATIVE_REG | EAX);
		jitcClobberAll();
		asmALURegImm(X86_AND, EAX, ~(1<<(31-crbD)));
		asmMOVDMemReg((uint32)&gCPU.fpscr, EAX);
		if (crbD == 30 || crbD == 31) {
			ppc_opc_set_fpscr_roundmode(EAX);
		}
	}
	// NOTE(review): the Rc (record) form is not handled here — the
	// interpreter only warns about it, too
	return flowContinue;
}
1228     /*
1229     * mtfsb1x Move to FPSCR Bit 1
1230     * .578
1231     */
1232     void ppc_opc_mtfsb1x()
1233     {
1234     int crbD, n1, n2;
1235     PPC_OPC_TEMPL_X(gCPU.current_opc, crbD, n1, n2);
1236     if (crbD != 1 && crbD != 2) {
1237     gCPU.fpscr |= 1<<(31-crbD);
1238     }
1239     if (gCPU.current_opc & PPC_OPC_Rc) {
1240     // update cr1 flags
1241     PPC_OPC_ERR("mtfsb1. unimplemented.\n");
1242     }
1243     }
1244     /*JITCFlow ppc_opc_gen_mtfsb1x()
1245     {
1246     ppc_opc_gen_interpret(ppc_opc_mtfsb1x);
1247     return flowEndBlock;
1248     }*/
// JIT translation of mtfsb1[.]: set one FPSCR bit; bits 1 and 2 are
// skipped (same as the interpreter). If a rounding-mode bit (30/31)
// changes, the x87 control word is reprogrammed as well.
JITCFlow ppc_opc_gen_mtfsb1x()
{
	int crbD, n1, n2;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crbD, n1, n2);
	if (crbD != 1 && crbD != 2) {
		jitcGetClientRegister(PPC_FPSCR, NATIVE_REG | EAX);
		jitcClobberAll();
		asmALURegImm(X86_OR, EAX, 1<<(31-crbD));
		asmMOVDMemReg((uint32)&gCPU.fpscr, EAX);
		if (crbD == 30 || crbD == 31) {
			ppc_opc_set_fpscr_roundmode(EAX);
		}
	}
	// NOTE(review): the Rc (record) form is not handled here — the
	// interpreter only warns about it, too
	return flowContinue;
}
1264     /*
1265     * mtfsfx Move to FPSCR Fields
1266     * .579
1267     */
1268     void ppc_opc_mtfsfx()
1269     {
1270     int frB;
1271     uint32 fm, FM;
1272     PPC_OPC_TEMPL_XFL(gCPU.current_opc, frB, fm);
1273     FM = ((fm&0x80)?0xf0000000:0)|((fm&0x40)?0x0f000000:0)|((fm&0x20)?0x00f00000:0)|((fm&0x10)?0x000f0000:0)|
1274     ((fm&0x08)?0x0000f000:0)|((fm&0x04)?0x00000f00:0)|((fm&0x02)?0x000000f0:0)|((fm&0x01)?0x0000000f:0);
1275     gCPU.fpscr = (gCPU.fpr[frB] & FM) | (gCPU.fpscr & ~FM);
1276     if (gCPU.current_opc & PPC_OPC_Rc) {
1277     // update cr1 flags
1278     PPC_OPC_ERR("mtfsf. unimplemented.\n");
1279     }
1280     }
1281     /*JITCFlow ppc_opc_gen_mtfsfx()
1282     {
1283     ppc_opc_gen_interpret(ppc_opc_mtfsfx);
1284     return flowEndBlock;
1285     }*/
// JIT translation of mtfsf[.]: fpscr = (fpr[frB].lo & FM) | (fpscr & ~FM).
// If the mask touches field 7 (which contains the rounding mode bits),
// the x87 control word is reprogrammed as well.
JITCFlow ppc_opc_gen_mtfsfx()
{
	int frB;
	uint32 fm, FM;
	PPC_OPC_TEMPL_XFL(gJITC.current_opc, frB, fm);
	// expand the 8 bit field mask into a 32 bit nibble mask
	FM = ((fm&0x80)?0xf0000000:0)|((fm&0x40)?0x0f000000:0)|((fm&0x20)?0x00f00000:0)|((fm&0x10)?0x000f0000:0)|
	((fm&0x08)?0x0000f000:0)|((fm&0x04)?0x00000f00:0)|((fm&0x02)?0x000000f0:0)|((fm&0x01)?0x0000000f:0);

	NativeReg fpscr = jitcGetClientRegister(PPC_FPSCR);
	NativeReg b = jitcGetClientRegister(PPC_FPR_L(frB));
	jitcClobberAll();
	asmALURegImm(X86_AND, b, FM);
	asmALURegImm(X86_AND, fpscr, ~FM);
	asmALURegReg(X86_OR, fpscr, b);
	if (fm & 1) {
		// field 7 (low nibble) was written: store fpscr and reload
		// the x87 rounding mode from it
		asmMOVDMemReg((uint32)&gCPU.fpscr, fpscr);
		ppc_opc_set_fpscr_roundmode(fpscr);
	} else {
		jitcMapClientRegisterDirty(PPC_FPSCR, NATIVE_REG | fpscr);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// update cr1 flags
		PPC_OPC_ERR("mtfsf. unimplemented.\n");
	}
	return flowContinue;
}
1312     /*
1313     * mtfsfix Move to FPSCR Field Immediate
1314     * .580
1315     */
1316     void ppc_opc_mtfsfix()
1317     {
1318     int crfD, n1;
1319     uint32 imm;
1320     PPC_OPC_TEMPL_X(gCPU.current_opc, crfD, n1, imm);
1321     crfD >>= 2;
1322     imm >>= 1;
1323     crfD = 7-crfD;
1324     gCPU.fpscr &= ppc_cmp_and_mask[crfD];
1325     gCPU.fpscr |= imm<<(crfD*4);
1326     if (gCPU.current_opc & PPC_OPC_Rc) {
1327     // update cr1 flags
1328     PPC_OPC_ERR("mtfsfi. unimplemented.\n");
1329     }
1330     }
1331     /*JITCFlow ppc_opc_gen_mtfsfix()
1332     {
1333     ppc_opc_gen_interpret(ppc_opc_mtfsfix);
1334     return flowEndBlock;
1335     }*/
// JIT translation of mtfsfi[.]: insert a 4 bit immediate into one FPSCR
// field. Field index 0 (after the 7-crfD remap) holds the rounding mode,
// so the x87 control word must be reprogrammed in that case.
JITCFlow ppc_opc_gen_mtfsfix()
{
	int crfD, n1;
	uint32 imm;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crfD, n1, imm);
	crfD >>= 2;
	imm >>= 1;
	crfD = 7-crfD;	// remap to shift-friendly field index
	NativeReg fpscr = jitcGetClientRegister(PPC_FPSCR);
	jitcClobberAll();
	asmALURegImm(X86_AND, fpscr, ppc_cmp_and_mask[crfD]);
	asmALURegImm(X86_OR, fpscr, imm<<(crfD*4));
	if (crfD == 0) {
		// rounding mode field written: store fpscr and reload x87 CW
		asmMOVDMemReg((uint32)&gCPU.fpscr, fpscr);
		ppc_opc_set_fpscr_roundmode(fpscr);
	} else {
		jitcMapClientRegisterDirty(PPC_FPSCR, NATIVE_REG | fpscr);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// update cr1 flags
		PPC_OPC_ERR("mtfsfi. unimplemented.\n");
	}
	return flowContinue;
}
1360     /*
1361     * mtmsr Move to Machine State Register
1362     * .581
1363     */
1364     void ppc_opc_mtmsr()
1365     {
1366     if (gCPU.msr & MSR_PR) {
1367     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1368     return;
1369     }
1370     int rS, rA, rB;
1371     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1372     PPC_OPC_ASSERT((rA == 0) && (rB == 0));
1373     ppc_set_msr(gCPU.gpr[rS]);
1374     }
// JIT translation of mtmsr. Changing the MSR can change translation
// context, so the block is ended and execution restarts at pc+4.
JITCFlow ppc_opc_gen_mtmsr()
{
	jitcClobberCarryAndFlags();
	jitcFlushRegister();
	ppc_opc_gen_check_privilege();
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// ppc_set_msr_asm takes the new MSR value in EAX
	jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG | EAX);
	asmCALL((NativeAddress)ppc_set_msr_asm);
	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
	// return flowContinue;
	return flowEndBlockUnreachable;
}
1389    
1390    
1391     static inline void ppc_opc_batu_helper(bool dbat, int idx)
1392     {
1393     if (dbat) {
1394     gCPU.dbat_bl[idx] = ((~gCPU.dbatu[idx] << 15) & 0xfffe0000);
1395     gCPU.dbat_nbl[idx] = ~gCPU.dbat_bl[idx];
1396     gCPU.dbat_bepi[idx] = (gCPU.dbatu[idx] & gCPU.dbat_bl[idx]);
1397     } else {
1398     gCPU.ibat_bl[idx] = ((~gCPU.ibatu[idx] << 15) & 0xfffe0000);
1399     gCPU.ibat_bepi[idx] = (gCPU.ibatu[idx] & gCPU.ibat_bl[idx]);
1400     }
1401     }
1402    
1403     static inline void ppc_opc_batl_helper(bool dbat, int idx)
1404     {
1405     if (dbat) {
1406     gCPU.dbat_brpn[idx] = (gCPU.dbatl[idx] & gCPU.dbat_bl[idx]);
1407     } else {
1408     gCPU.ibat_brpn[idx] = (gCPU.ibatl[idx] & gCPU.ibat_bl[idx]);
1409     }
1410     }
1411    
1412     /*
1413     * mtspr Move to Special-Purpose Register
1414     * .584
1415     */
1416     void ppc_opc_mtspr()
1417     {
1418     int rS, spr1, spr2;
1419     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, spr1, spr2);
1420     switch (spr2) {
1421     case 0:
1422     switch (spr1) {
1423     case 1:
1424     gCPU.xer = gCPU.gpr[rS] & ~XER_CA;
1425     gCPU.xer_ca = !!(gCPU.gpr[rS] & XER_CA);
1426     return;
1427     case 8: gCPU.lr = gCPU.gpr[rS]; return;
1428     case 9: gCPU.ctr = gCPU.gpr[rS]; return;
1429     }
1430     case 8:
1431     if (spr1 == 0) {
1432     gCPU.vrsave = gCPU.gpr[rS];
1433     return;
1434     }
1435     }
1436     if (gCPU.msr & MSR_PR) {
1437     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1438     return;
1439     }
1440     switch (spr2) {
1441     case 0:
1442     switch (spr1) {
1443     /* case 18: gCPU.gpr[rD] = gCPU.dsisr; return;
1444     case 19: gCPU.gpr[rD] = gCPU.dar; return;*/
1445     case 22: {
1446     writeDEC(gCPU.gpr[rS]);
1447     return;
1448     }
1449     case 25:
1450     if (!ppc_mmu_set_sdr1(gCPU.gpr[rS], true)) {
1451     PPC_OPC_ERR("cannot set sdr1\n");
1452     }
1453     return;
1454     case 26: gCPU.srr[0] = gCPU.gpr[rS]; return;
1455     case 27: gCPU.srr[1] = gCPU.gpr[rS]; return;
1456     }
1457     break;
1458     case 8:
1459     switch (spr1) {
1460     case 16: gCPU.sprg[0] = gCPU.gpr[rS]; return;
1461     case 17: gCPU.sprg[1] = gCPU.gpr[rS]; return;
1462     case 18: gCPU.sprg[2] = gCPU.gpr[rS]; return;
1463     case 19: gCPU.sprg[3] = gCPU.gpr[rS]; return;
1464     case 28: writeTBL(gCPU.gpr[rS]); return;
1465     case 29: writeTBU(gCPU.gpr[rS]); return;
1466     /* case 26: gCPU.gpr[rD] = gCPU.ear; return;
1467     case 31: gCPU.gpr[rD] = gCPU.pvr; return;*/
1468     }
1469     break;
1470     case 16:
1471     switch (spr1) {
1472     case 16:
1473     gCPU.ibatu[0] = gCPU.gpr[rS];
1474     ppc_opc_batu_helper(false, 0);
1475     return;
1476     case 17:
1477     gCPU.ibatl[0] = gCPU.gpr[rS];
1478     ppc_opc_batl_helper(false, 0);
1479     return;
1480     case 18:
1481     gCPU.ibatu[1] = gCPU.gpr[rS];
1482     ppc_opc_batu_helper(false, 1);
1483     return;
1484     case 19:
1485     gCPU.ibatl[1] = gCPU.gpr[rS];
1486     ppc_opc_batl_helper(false, 1);
1487     return;
1488     case 20:
1489     gCPU.ibatu[2] = gCPU.gpr[rS];
1490     ppc_opc_batu_helper(false, 2);
1491     return;
1492     case 21:
1493     gCPU.ibatl[2] = gCPU.gpr[rS];
1494     ppc_opc_batl_helper(false, 2);
1495     return;
1496     case 22:
1497     gCPU.ibatu[3] = gCPU.gpr[rS];
1498     ppc_opc_batu_helper(false, 3);
1499     return;
1500     case 23:
1501     gCPU.ibatl[3] = gCPU.gpr[rS];
1502     ppc_opc_batl_helper(false, 3);
1503     return;
1504     case 24:
1505     gCPU.dbatu[0] = gCPU.gpr[rS];
1506     ppc_opc_batu_helper(true, 0);
1507     return;
1508     case 25:
1509     gCPU.dbatl[0] = gCPU.gpr[rS];
1510     ppc_opc_batl_helper(true, 0);
1511     return;
1512     case 26:
1513     gCPU.dbatu[1] = gCPU.gpr[rS];
1514     ppc_opc_batu_helper(true, 1);
1515     return;
1516     case 27:
1517     gCPU.dbatl[1] = gCPU.gpr[rS];
1518     ppc_opc_batl_helper(true, 1);
1519     return;
1520     case 28:
1521     gCPU.dbatu[2] = gCPU.gpr[rS];
1522     ppc_opc_batu_helper(true, 2);
1523     return;
1524     case 29:
1525     gCPU.dbatl[2] = gCPU.gpr[rS];
1526     ppc_opc_batl_helper(true, 2);
1527     return;
1528     case 30:
1529     gCPU.dbatu[3] = gCPU.gpr[rS];
1530     ppc_opc_batu_helper(true, 3);
1531     return;
1532     case 31:
1533     gCPU.dbatl[3] = gCPU.gpr[rS];
1534     ppc_opc_batl_helper(true, 3);
1535     return;
1536     }
1537     break;
1538     case 29:
1539     switch(spr1) {
1540     case 17: return;
1541     case 24: return;
1542     case 25: return;
1543     case 26: return;
1544     }
1545     case 31:
1546     switch (spr1) {
1547     case 16:
1548     // PPC_OPC_WARN("write(%08x) to spr %d:%d (HID0) not supported! @%08x\n", gCPU.gpr[rS], spr1, spr2, gCPU.pc);
1549     gCPU.hid[0] = gCPU.gpr[rS];
1550     return;
1551     case 17:
1552     PPC_OPC_WARN("write(%08x) to spr %d:%d (HID1) not supported! @%08x\n", gCPU.gpr[rS], spr1, spr2, gCPU.pc);
1553     gCPU.hid[1] = gCPU.gpr[rS];
1554     return;
1555     case 18:
1556     PPC_OPC_ERR("write(%08x) to spr %d:%d (IABR) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1557     return;
1558     case 21:
1559     PPC_OPC_ERR("write(%08x) to spr %d:%d (DABR) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1560     return;
1561     case 22:
1562     PPC_OPC_ERR("write(%08x) to spr %d:%d (?) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1563     return;
1564     case 23:
1565     PPC_OPC_ERR("write(%08x) to spr %d:%d (?) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1566     return;
1567     case 27:
1568     PPC_OPC_WARN("write(%08x) to spr %d:%d (ICTC) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1569     return;
1570     case 28:
1571     // PPC_OPC_WARN("write(%08x) to spr %d:%d (THRM1) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1572     return;
1573     case 29:
1574     // PPC_OPC_WARN("write(%08x) to spr %d:%d (THRM2) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1575     return;
1576     case 30:
1577     // PPC_OPC_WARN("write(%08x) to spr %d:%d (THRM3) not supported!\n", gCPU.gpr[rS], spr1, spr2);
1578     return;
1579     case 31:
1580     return;
1581     }
1582     }
1583     fprintf(stderr, "unknown mtspr: %i:%i\n", spr1, spr2);
1584     SINGLESTEP("unknown mtspr\n");
1585     }
1586    
1587     static void FASTCALL ppc_mmu_set_sdr1_check_error(uint32 newsdr1)
1588     {
1589     if (!ppc_mmu_set_sdr1(newsdr1, true)) {
1590     PPC_OPC_ERR("cannot set sdr1\n");
1591     }
1592     }
1593    
// Emit code that recomputes the cached BAT lookup values (bl, bepi, brpn,
// nbl) after a BATU register write — JIT counterpart of ppc_opc_batu_helper.
static inline void ppc_opc_gen_batu_helper(bool dbat, int idx)
{
	NativeReg tmp = jitcAllocRegister();
	NativeReg reg = jitcGetClientRegister(dbat ? PPC_DBATU(idx) : PPC_IBATU(idx));

	jitcClobberCarryAndFlags();
	jitcClobberRegister(NATIVE_REG | reg);

	// tmp = batu
	asmALURegReg(X86_MOV, tmp, reg);

	// reg = bl = (~batu << 15) & 0xfffe0000
	asmALUReg(X86_NOT, reg);
	asmShiftRegImm(X86_SHL, reg, 15);
	asmALURegImm(X86_AND, reg, 0xfffe0000);
	asmMOVDMemReg(dbat ? (uint32)&gCPU.dbat_bl[idx] : (uint32)&gCPU.ibat_bl[idx], reg);

	// bepi = batu & bl
	asmALURegReg(X86_AND, tmp, reg);
	asmMOVDMemReg(dbat ? (uint32)&gCPU.dbat_bepi[idx] : (uint32)&gCPU.ibat_bepi[idx], tmp);

	// brpn = batl & bl
	asmMOVRegDMem(tmp, dbat ? (uint32)&gCPU.dbatl[idx] : (uint32)&gCPU.ibatl[idx]);
	asmALURegReg(X86_AND, tmp, reg);
	asmMOVDMemReg(dbat ? (uint32)&gCPU.dbat_brpn[idx] : (uint32)&gCPU.ibat_brpn[idx], tmp);

	// nbl = ~bl
	asmALUReg(X86_NOT, reg);
	asmMOVDMemReg(dbat ? (uint32)&gCPU.dbat_nbl[idx] : (uint32)&gCPU.ibat_nbl[idx], reg);
}
1619    
// Emit code that refreshes the cached BRPN (brpn = batl & bl) after a
// BATL register write — JIT counterpart of ppc_opc_batl_helper.
static inline void ppc_opc_gen_batl_helper(bool dbat, int idx)
{
	byte modrm[6];

	NativeReg reg = jitcGetClientRegister(dbat ? PPC_DBATL(idx) : PPC_IBATL(idx));

	jitcClobberCarryAndFlags();
	// reg is modified below, so drop its client mapping
	jitcClobberRegister(NATIVE_REG | reg);

	asmALURegMem(X86_AND, reg, modrm, x86_mem(modrm, REG_NO, dbat ? (uint32)&gCPU.dbat_bl[idx] : (uint32)&gCPU.ibat_bl[idx]));

	asmMOVDMemReg(dbat ? (uint32)&gCPU.dbat_brpn[idx] : (uint32)&gCPU.ibat_brpn[idx], reg);
}
1633    
1634    
1635     JITCFlow ppc_opc_gen_mtspr()
1636     {
1637     int rS, spr1, spr2;
1638     PPC_OPC_TEMPL_X(gJITC.current_opc, rS, spr1, spr2);
1639     switch (spr2) {
1640     case 0:
1641     switch (spr1) {
1642     case 1: {
1643     jitcClobberFlags();
1644     NativeReg reg2 = jitcGetClientRegister(PPC_GPR(rS));
1645     NativeReg reg1 = jitcMapClientRegisterDirty(PPC_XER);
1646     asmALURegReg(X86_MOV, reg1, reg2);
1647     asmALURegImm(X86_AND, reg1, ~XER_CA);
1648     asmBTxRegImm(X86_BT, reg2, 29);
1649     jitcMapCarryDirty();
1650     return flowContinue;
1651     }
1652     case 8: move_reg(PPC_LR, PPC_GPR(rS)); return flowContinue;
1653     case 9: move_reg(PPC_CTR, PPC_GPR(rS)); return flowContinue;
1654     }
1655     case 8:
1656     if (spr1 == 0) {
1657     move_reg(PPC_VRSAVE, PPC_GPR(rS));
1658     return flowContinue;
1659     }
1660     }
1661     ppc_opc_gen_check_privilege();
1662     switch (spr2) {
1663     case 0:
1664     switch (spr1) {
1665     case 22: {
1666     jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG | EAX);
1667     jitcClobberAll();
1668     asmCALL((NativeAddress)writeDEC);
1669     return flowContinue;
1670     }
1671     case 25: {
1672     jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG | EAX);
1673     jitcClobberAll();
1674     asmCALL((NativeAddress)ppc_mmu_set_sdr1_check_error);
1675     asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
1676     asmJMP((NativeAddress)ppc_new_pc_rel_asm);
1677     return flowEndBlockUnreachable;
1678     }
1679     case 26: move_reg(PPC_SRR0, PPC_GPR(rS)); return flowContinue;
1680     case 27: move_reg(PPC_SRR1, PPC_GPR(rS)); return flowContinue;
1681     }
1682     break;
1683     case 8:
1684     switch (spr1) {
1685     case 16: move_reg(PPC_SPRG(0), PPC_GPR(rS)); return flowContinue;
1686     case 17: move_reg(PPC_SPRG(1), PPC_GPR(rS)); return flowContinue;
1687     case 18: move_reg(PPC_SPRG(2), PPC_GPR(rS)); return flowContinue;
1688     case 19: move_reg(PPC_SPRG(3), PPC_GPR(rS)); return flowContinue;
1689     case 28:
1690     jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG | EAX);
1691     jitcClobberAll();
1692     asmCALL((NativeAddress)writeTBL);
1693     return flowContinue;
1694     case 29:
1695     jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG | EAX);
1696     jitcClobberAll();
1697     asmCALL((NativeAddress)writeTBU);
1698     return flowContinue;
1699     }
1700     break;
1701     case 16: {
1702     switch (spr1) {
1703     case 16:
1704     move_reg(PPC_IBATU(0), PPC_GPR(rS));
1705     ppc_opc_gen_batu_helper(false, 0);
1706     break;
1707     case 17:
1708     move_reg(PPC_IBATL(0), PPC_GPR(rS));
1709     ppc_opc_gen_batl_helper(false, 0);
1710     break;
1711     case 18:
1712     move_reg(PPC_IBATU(1), PPC_GPR(rS));
1713     ppc_opc_gen_batu_helper(false, 1);
1714     break;
1715     case 19:
1716     move_reg(PPC_IBATL(1), PPC_GPR(rS));
1717     ppc_opc_gen_batl_helper(false, 1);
1718     break;
1719     case 20:
1720     move_reg(PPC_IBATU(2), PPC_GPR(rS));
1721     ppc_opc_gen_batu_helper(false, 2);
1722     break;
1723     case 21:
1724     move_reg(PPC_IBATL(2), PPC_GPR(rS));
1725     ppc_opc_gen_batl_helper(false, 2);
1726     break;
1727     case 22:
1728     move_reg(PPC_IBATU(3), PPC_GPR(rS));
1729     ppc_opc_gen_batu_helper(false, 3);
1730     break;
1731     case 23:
1732     move_reg(PPC_IBATL(3), PPC_GPR(rS));
1733     ppc_opc_gen_batl_helper(false, 3);
1734     break;
1735     case 24:
1736     move_reg(PPC_DBATU(0), PPC_GPR(rS));
1737     ppc_opc_gen_batu_helper(true, 0);
1738     break;
1739     case 25:
1740     move_reg(PPC_DBATL(0), PPC_GPR(rS));
1741     ppc_opc_gen_batl_helper(true, 0);
1742     break;
1743     case 26:
1744     move_reg(PPC_DBATU(1), PPC_GPR(rS));
1745     ppc_opc_gen_batu_helper(true, 1);
1746     break;
1747     case 27:
1748     move_reg(PPC_DBATL(1), PPC_GPR(rS));
1749     ppc_opc_gen_batl_helper(true, 1);
1750     break;
1751     case 28:
1752     move_reg(PPC_DBATU(2), PPC_GPR(rS));
1753     ppc_opc_gen_batu_helper(true, 2);
1754     break;
1755     case 29:
1756     move_reg(PPC_DBATL(2), PPC_GPR(rS));
1757     ppc_opc_gen_batl_helper(true, 2);
1758     break;
1759     case 30:
1760     move_reg(PPC_DBATU(3), PPC_GPR(rS));
1761     ppc_opc_gen_batu_helper(true, 3);
1762     break;
1763     case 31:
1764     move_reg(PPC_DBATL(3), PPC_GPR(rS));
1765     ppc_opc_gen_batl_helper(true, 3);
1766     break;
1767     default: goto invalid;
1768     }
1769     jitcClobberAll();
1770     asmCALL((NativeAddress)ppc_mmu_tlb_invalidate_all_asm);
1771     asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
1772     asmJMP((NativeAddress)ppc_new_pc_rel_asm);
1773     return flowEndBlockUnreachable;
1774     }
1775     case 29:
1776     switch (spr1) {
1777     case 17: return flowContinue; //g4
1778     case 24: return flowContinue; //g4
1779     case 25: return flowContinue; //g4
1780     case 26: return flowContinue; //g4
1781     }
1782     case 31:
1783     switch (spr1) {
1784     case 16: move_reg(PPC_HID0, PPC_GPR(rS)); return flowContinue;
1785     case 17: return flowContinue; //g4
1786     case 18: return flowContinue;
1787     case 21: return flowContinue; //g4
1788     case 22: return flowContinue;
1789     case 23: return flowContinue;
1790     case 27: return flowContinue;
1791     case 28: return flowContinue;
1792     case 29: return flowContinue;
1793     case 30: return flowContinue;
1794     case 31: return flowContinue; //g4
1795     }
1796     }
1797     invalid:
1798     jitcClobberAll();
1799     asmMOVRegDMem(EAX, (uint32)&gCPU.current_code_base);
1800     asmALURegImm(X86_ADD, EAX, gJITC.pc);
1801     asmALURegImm(X86_MOV, EDX, spr1);
1802     asmALURegImm(X86_MOV, ECX, spr2);
1803     asmCALL((NativeAddress)unknown_spr_warning);
1804     return flowEndBlock;
1805     }
1806     /*
1807     * mtsr Move to Segment Register
1808     * .587
1809     */
1810     void ppc_opc_mtsr()
1811     {
1812     if (gCPU.msr & MSR_PR) {
1813     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1814     return;
1815     }
1816     int rS, SR, rB;
1817     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, SR, rB);
1818     // FIXME: check insn
1819     gCPU.sr[SR & 0xf] = gCPU.gpr[rS];
1820     }
// JIT translation of mtsr: write the segment register, then flush the TLB
// since the segment mapping changed.
JITCFlow ppc_opc_gen_mtsr()
{
	jitcFlushRegister();
	ppc_opc_gen_check_privilege();
	int rS, SR, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, SR, rB);
	// FIXME: check insn
	move_reg(PPC_SR(SR & 0xf), PPC_GPR(rS));
	jitcClobberAll();
	asmCALL((NativeAddress)ppc_mmu_tlb_invalidate_all_asm);
	// sync
//	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
//	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
//	return flowEndBlockUnreachable;
	return flowContinue;
}
1837     /*
1838     * mtsrin Move to Segment Register Indirect
1839     * .591
1840     */
1841     void ppc_opc_mtsrin()
1842     {
1843     if (gCPU.msr & MSR_PR) {
1844     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1845     return;
1846     }
1847     int rS, rA, rB;
1848     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1849     // FIXME: check insn
1850     gCPU.sr[gCPU.gpr[rB] >> 28] = gCPU.gpr[rS];
1851     }
// JIT translation of mtsrin: sr[gpr[rB] >> 28] = gpr[rS], then flush
// the TLB since the segment mapping changed.
JITCFlow ppc_opc_gen_mtsrin()
{
	jitcFlushRegister();
	ppc_opc_gen_check_privilege();
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// FIXME: check insn
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
	if (b == s) {
		// need a separate copy of s because b is shifted below
		s = jitcAllocRegister();
		asmALURegReg(X86_MOV, s, b);
	}
	jitcClobberAll();
	asmShiftRegImm(X86_SHR, b, 28);
	// mov [4*b+sr], s
	byte modrm[6];
	asmALUMemReg(X86_MOV, modrm, x86_mem_sib(modrm, REG_NO, 4, b, (uint32)(&gCPU.sr[0])), s);
	asmCALL((NativeAddress)ppc_mmu_tlb_invalidate_all_asm);
	// sync
//	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
//	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
//	return flowEndBlockUnreachable;
	return flowContinue;
}
1877    
1878     /*
1879     * rfi Return from Interrupt
1880     * .607
1881     */
1882     void ppc_opc_rfi()
1883     {
1884     if (gCPU.msr & MSR_PR) {
1885     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1886     return;
1887     }
1888     ppc_set_msr(gCPU.srr[1] & MSR_RFI_SAVE_MASK);
1889     gCPU.npc = gCPU.srr[0] & 0xfffffffc;
1890     }
// JIT translation of rfi: restore the MSR from SRR1 (masked) and jump
// to SRR0 with the low two bits cleared.
JITCFlow ppc_opc_gen_rfi()
{
	jitcClobberCarryAndFlags();
	jitcFlushRegister();
	ppc_opc_gen_check_privilege();
	// ppc_set_msr_asm takes the new MSR value in EAX
	jitcGetClientRegister(PPC_SRR1, NATIVE_REG | EAX);
	asmALURegImm(X86_AND, EAX, MSR_RFI_SAVE_MASK);
	asmCALL((NativeAddress)ppc_set_msr_asm);
	byte modrm[6];
	// continue at srr[0] & ~3
	asmALURegMem(X86_MOV, EAX, modrm, x86_mem(modrm, REG_NO, (uint32)(&gCPU.srr[0])));
	asmALURegImm(X86_AND, EAX, 0xfffffffc);
	asmJMP((NativeAddress)ppc_new_pc_asm);
	return flowEndBlockUnreachable;
}
1905    
1906     /*
1907     * sc System Call
1908     * .621
1909     */
1910     #include "io/graphic/gcard.h"
1911     void ppc_opc_sc()
1912     {
1913     if (gCPU.gpr[3] == 0x113724fa && gCPU.gpr[4] == 0x77810f9b) {
1914     gcard_osi(0);
1915     return;
1916     }
1917     ppc_exception(PPC_EXC_SC);
1918     }
// JIT translation of sc: compare r3/r4 against the OSI magic values;
// on mismatch jump to the system call exception handler, otherwise
// call the OSI (gcard) backdoor.
JITCFlow ppc_opc_gen_sc()
{
	jitcClobberCarryAndFlags();
	jitcFlushRegister();

	NativeReg r1 = jitcGetClientRegister(PPC_GPR(3));
	asmALURegImm(X86_CMP, r1, 0x113724fa);
	// ESI carries the return pc for the exception path; MOV does not
	// alter the flags set by the CMP above
	asmALURegImm(X86_MOV, ESI, gJITC.pc+4);
	asmJxx(X86_NE, (NativeAddress)ppc_sc_exception_asm);

	jitcClobberRegister(NATIVE_REG | ESI);

	NativeReg r2 = jitcGetClientRegister(PPC_GPR(4));
	asmALURegImm(X86_CMP, r2, 0x77810f9b);
	if (r2 == ESI) {
		// the allocator may have handed out ESI; restore the pc
		asmALURegImm(X86_MOV, ESI, gJITC.pc+4);
	}
	asmJxx(X86_NE, (NativeAddress)ppc_sc_exception_asm);

	asmCALL((NativeAddress)gcard_osi);

	jitcClobberRegister();
	return flowEndBlock;
}
1943    
1944     /*
1945     * sync Synchronize
1946     * .672
1947     */
// sync is implemented as a no-op in this emulator: there is no work to do
// to order memory accesses here.
void ppc_opc_sync()
{
	// NO-OP
}
// Translated sync: emits no native code; execution simply continues.
JITCFlow ppc_opc_gen_sync()
{
	// NO-OP
	return flowContinue;
}
1957    
1958     /*
1959     * tlbia Translation Lookaside Buffer Invalidate All
1960     * .676
1961     */
1962     void ppc_opc_tlbia()
1963     {
1964     if (gCPU.msr & MSR_PR) {
1965     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1966     return;
1967     }
1968     int rS, rA, rB;
1969     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1970     // FIXME: check rS.. for 0
1971     ppc_mmu_tlb_invalidate();
1972     }
JITCFlow ppc_opc_gen_tlbia()
{
	// Translated tlbia: after the privilege check, drop all cached
	// translations and restart client execution at the next instruction.
	jitcClobberAll();
	ppc_opc_gen_check_privilege();
	asmCALL((NativeAddress)ppc_mmu_tlb_invalidate_all_asm);
	// EAX := address of the following instruction; jump out through the
	// new-pc helper since the flush may affect translated code.
	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
	return flowEndBlockUnreachable;
}
1982    
1983     /*
1984     * tlbie Translation Lookaside Buffer Invalidate Entry
1985     * .676
1986     */
1987     void ppc_opc_tlbie()
1988     {
1989     if (gCPU.msr & MSR_PR) {
1990     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
1991     return;
1992     }
1993     int rS, rA, rB;
1994     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1995     // FIXME: check rS.. for 0
1996     ppc_mmu_tlb_invalidate();
1997     }
JITCFlow ppc_opc_gen_tlbie()
{
	// Translated tlbie: invalidate the translation for the effective
	// address in rB, then restart at the next instruction.
	jitcFlushRegister();
	ppc_opc_gen_check_privilege();
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// EAX := rB -- presumably the invalidate-entry helper takes the
	// effective address in EAX; verify against ppc_mmu_tlb_invalidate_entry_asm
	jitcGetClientRegister(PPC_GPR(rB), NATIVE_REG | EAX);
	jitcClobberAll();
	asmCALL((NativeAddress)ppc_mmu_tlb_invalidate_entry_asm);
	asmALURegImm(X86_MOV, EAX, gJITC.pc+4);
	asmJMP((NativeAddress)ppc_new_pc_rel_asm);
	return flowEndBlockUnreachable;
}
2011    
2012     /*
2013     * tlbsync Translation Lookaside Buffer Syncronize
2014     * .677
2015     */
2016     void ppc_opc_tlbsync()
2017     {
2018     if (gCPU.msr & MSR_PR) {
2019     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_PRIV);
2020     return;
2021     }
2022     int rS, rA, rB;
2023     PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2024     // FIXME: check rS.. for 0
2025     }
JITCFlow ppc_opc_gen_tlbsync()
{
	// tlbsync only needs the privilege check; no native code is emitted
	// beyond it (matches the interpreter, which does nothing else either).
	ppc_opc_gen_check_privilege();
	return flowContinue;
}
2031    
2032     /*
2033     * tw Trap Word
2034     * .678
2035     */
2036     void ppc_opc_tw()
2037     {
2038     int TO, rA, rB;
2039     PPC_OPC_TEMPL_X(gCPU.current_opc, TO, rA, rB);
2040     uint32 a = gCPU.gpr[rA];
2041     uint32 b = gCPU.gpr[rB];
2042     if (((TO & 16) && ((sint32)a < (sint32)b))
2043     || ((TO & 8) && ((sint32)a > (sint32)b))
2044     || ((TO & 4) && (a == b))
2045     || ((TO & 2) && (a < b))
2046     || ((TO & 1) && (a > b))) {
2047     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_TRAP);
2048     }
2049     }
JITCFlow ppc_opc_gen_tw()
{
	int TO, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, TO, rA, rB);
	if (TO == 0x1f) {
		// All five condition bits set: the trap is unconditional.
		jitcClobberAll();
		// ESI := address of the trapping instruction, ECX := exception flag
		asmALURegImm(X86_MOV, ESI, gJITC.pc);
		asmALURegImm(X86_MOV, ECX, PPC_EXC_PROGRAM_TRAP);
		asmJMP((NativeAddress)ppc_program_exception_asm);
		return flowEndBlockUnreachable;
	} else if (TO) {
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		// NOTE(review): the clobber releases the register mappings but a and b
		// must still hold the client values for the CMP below -- relies on
		// jitcClobberAll() not emitting code that overwrites them; confirm.
		jitcClobberAll();
		asmALURegReg(X86_CMP, a, b);
		// One conditional jump per enabled TO bit:
		// 16 = signed < (JL), 8 = signed > (JG), 4 = equal (JE),
		// 2 = unsigned < (JB), 1 = unsigned > (JA)
		NativeAddress fixup1=NULL, fixup2=NULL, fixup3=NULL, fixup4=NULL, fixup5=NULL;
		if (TO & 16) fixup1 = asmJxxFixup(X86_L);
		if (TO & 8) fixup2 = asmJxxFixup(X86_G);
		if (TO & 4) fixup3 = asmJxxFixup(X86_E);
		if (TO & 2) fixup4 = asmJxxFixup(X86_B);
		if (TO & 1) fixup5 = asmJxxFixup(X86_A);
		// No condition matched: jump over the trap code.
		NativeAddress fixup6 = asmJMPFixup();
		// All matching conditions land here, at the trap code.
		if (fixup1) asmResolveFixup(fixup1, asmHERE());
		if (fixup2) asmResolveFixup(fixup2, asmHERE());
		if (fixup3) asmResolveFixup(fixup3, asmHERE());
		if (fixup4) asmResolveFixup(fixup4, asmHERE());
		if (fixup5) asmResolveFixup(fixup5, asmHERE());
		asmALURegImm(X86_MOV, ESI, gJITC.pc);
		asmALURegImm(X86_MOV, ECX, PPC_EXC_PROGRAM_TRAP);
		asmJMP((NativeAddress)ppc_program_exception_asm);
		asmResolveFixup(fixup6, asmHERE());
		return flowEndBlock;
	} else {
		// TO == 0: trap never; emit nothing.
		return flowContinue;
	}
}
2088    
2089     /*
2090     * twi Trap Word Immediate
2091     * .679
2092     */
2093     void ppc_opc_twi()
2094     {
2095     int TO, rA;
2096     uint32 imm;
2097     PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, TO, rA, imm);
2098     uint32 a = gCPU.gpr[rA];
2099     if (((TO & 16) && ((sint32)a < (sint32)imm))
2100     || ((TO & 8) && ((sint32)a > (sint32)imm))
2101     || ((TO & 4) && (a == imm))
2102     || ((TO & 2) && (a < imm))
2103     || ((TO & 1) && (a > imm))) {
2104     ppc_exception(PPC_EXC_PROGRAM, PPC_EXC_PROGRAM_TRAP);
2105     }
2106     }
JITCFlow ppc_opc_gen_twi()
{
	int TO, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, TO, rA, imm);
	if (TO == 0x1f) {
		// All five condition bits set: the trap is unconditional.
		jitcClobberAll();
		// ESI := address of the trapping instruction, ECX := exception flag
		asmALURegImm(X86_MOV, ESI, gJITC.pc);
		asmALURegImm(X86_MOV, ECX, PPC_EXC_PROGRAM_TRAP);
		asmJMP((NativeAddress)ppc_program_exception_asm);
		return flowEndBlockUnreachable;
	} else if (TO) {
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		// NOTE(review): the clobber releases the register mapping but a must
		// still hold the client value for the CMP below -- relies on
		// jitcClobberAll() not emitting code that overwrites it; confirm.
		jitcClobberAll();
		asmALURegImm(X86_CMP, a, imm);
		// One conditional jump per enabled TO bit:
		// 16 = signed < (JL), 8 = signed > (JG), 4 = equal (JE),
		// 2 = unsigned < (JB), 1 = unsigned > (JA)
		NativeAddress fixup1=NULL, fixup2=NULL, fixup3=NULL, fixup4=NULL, fixup5=NULL;
		if (TO & 16) fixup1 = asmJxxFixup(X86_L);
		if (TO & 8) fixup2 = asmJxxFixup(X86_G);
		if (TO & 4) fixup3 = asmJxxFixup(X86_E);
		if (TO & 2) fixup4 = asmJxxFixup(X86_B);
		if (TO & 1) fixup5 = asmJxxFixup(X86_A);
		// No condition matched: jump over the trap code.
		NativeAddress fixup6 = asmJMPFixup();
		// All matching conditions land here, at the trap code.
		if (fixup1) asmResolveFixup(fixup1, asmHERE());
		if (fixup2) asmResolveFixup(fixup2, asmHERE());
		if (fixup3) asmResolveFixup(fixup3, asmHERE());
		if (fixup4) asmResolveFixup(fixup4, asmHERE());
		if (fixup5) asmResolveFixup(fixup5, asmHERE());
		asmALURegImm(X86_MOV, ESI, gJITC.pc);
		asmALURegImm(X86_MOV, ECX, PPC_EXC_PROGRAM_TRAP);
		asmJMP((NativeAddress)ppc_program_exception_asm);
		asmResolveFixup(fixup6, asmHERE());
		return flowEndBlock;
	} else {
		// TO == 0: trap never; emit nothing.
		return flowContinue;
	}
}
2145    
2146     /* dcba Data Cache Block Allocate
2147     * .???
2148     */
// dcba is treated as a no-op here; the effective address is never validated
// (see FIXME), so no alignment/DSI exception can be raised for it.
void ppc_opc_dcba()
{
	// FIXME: check addr
}
// Translated dcba: emits no native code (matches the interpreter no-op).
JITCFlow ppc_opc_gen_dcba()
{
	// FIXME: check addr
	return flowContinue;
}

  ViewVC Help
Powered by ViewVC 1.1.26