1 |
/* |
2 |
* PearPC |
3 |
* x86asm.h |
4 |
* |
5 |
* Copyright (C) 2004 Sebastian Biallas (sb@biallas.net) |
6 |
* |
7 |
* This program is free software; you can redistribute it and/or modify |
8 |
* it under the terms of the GNU General Public License version 2 as |
9 |
* published by the Free Software Foundation. |
10 |
* |
11 |
* This program is distributed in the hope that it will be useful, |
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 |
* GNU General Public License for more details. |
15 |
* |
16 |
* You should have received a copy of the GNU General Public License |
17 |
* along with this program; if not, write to the Free Software |
18 |
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 |
*/ |
20 |
|
21 |
#ifndef __X86ASM_H__ |
22 |
#define __X86ASM_H__ |
23 |
|
24 |
#include "system/types.h" |
25 |
#include "ppc_cpu.h" |
26 |
#include "jitc_types.h" |
27 |
|
28 |
/* FSCALE is also defined in FreeBSD's sys/param.h */ |
29 |
#ifdef FSCALE |
30 |
#undef FSCALE |
31 |
#endif /* FSCALE */ |
32 |
|
33 |
// Buffers for pre-encoded mod/r/m (+ SIB + displacement) operand bytes.
// In the v2.0 helpers (x86_mem2*) byte 0 holds the number of encoded bytes
// that follow; modrm_p is the pointer form passed to the asm* v2.0 API.
typedef byte modrm_o[8];
typedef byte *modrm_p;
35 |
|
36 |
// x86 32-bit general purpose registers, numbered with their hardware
// encoding (the value placed in a mod/r/m "reg" or "r/m" field).
enum NativeReg {
	EAX = 0,
	ECX = 1,
	EDX = 2,
	EBX = 3,
	ESP = 4, // don't mess with me, buddy
	EBP = 5,
	ESI = 6,
	EDI = 7,
	REG_NO = 0xffffffff, // sentinel: no register assigned
};
47 |
|
48 |
// x86 16-bit register names; same hardware encodings as NativeReg.
enum NativeReg16 {
	AX = 0,
	CX = 1,
	DX = 2,
	BX = 3,
	SP = 4, // don't mess with me, buddy
	BP = 5,
	SI = 6,
	DI = 7,
	REG16_NO = 0xffffffff, // sentinel: no register assigned
};
59 |
|
60 |
// x86 8-bit register names; hardware encodings (note AH..BH are the
// high-byte forms of EAX..EBX, encodings 4..7).
enum NativeReg8 {
	AL = 0,
	CL = 1,
	DL = 2,
	BL = 3,
	AH = 4,
	CH = 5,
	DH = 6,
	BH = 7,
	REG8_NO = 0xffffffff, // sentinel: no register assigned
};
71 |
|
72 |
// Option bits for the jitc*Register() allocation routines below.
// The low 8 bits carry a register number; these flags qualify it.
#define NATIVE_REG_8 (1<<8) // eax,ecx,edx,ebx -> al,cl,dl,bl
#define NATIVE_REG (2<<8) // used as a bitmask to specify register
#define NATIVE_REG_PREFER (4<<8) // used as a bitmask to specify register

#define NATIVE_REGS_ALL 0
77 |
|
78 |
// Detected capabilities of the host x86 CPU.
struct X86CPUCaps {
	char vendor[13];  // CPUID vendor string + NUL terminator
	bool rdtsc;       // time-stamp counter available
	bool cmov;        // conditional-move instructions
	bool mmx;
	bool _3dnow;      // AMD 3DNow!
	bool _3dnow2;     // AMD 3DNow! extensions
	bool sse;
	bool sse2;
	bool sse3;
	bool ssse3;
	bool sse4;
	int loop_align;   // preferred alignment for loop heads, in bytes
};

// Fills caps with the host CPU's capabilities (via CPUID, presumably --
// implementation not visible here).
void x86GetCaps(X86CPUCaps &caps);
94 |
|
95 |
// Client (PPC) -> native (x86) register mapping.
// options takes the NATIVE_REG* bits declared above.
// NOTE(review): the "Dirty" variants presumably mark the native register
// as modified so it is written back on flush -- confirm in the .cc file.
NativeReg FASTCALL jitcAllocRegister(int options = 0);
NativeReg FASTCALL jitcDirtyRegister(NativeReg reg);
NativeReg FASTCALL jitcMapClientRegisterDirty(PPC_Register creg, int options = 0);
NativeReg FASTCALL jitcGetClientRegister(PPC_Register creg, int options = 0);
NativeReg FASTCALL jitcGetClientRegisterDirty(PPC_Register reg, int options = 0);
NativeReg FASTCALL jitcGetClientRegisterMapping(PPC_Register creg);
101 |
|
102 |
// Whole-register-file and flags/carry management for the JIT translator.
void FASTCALL jitcFlushAll();
void FASTCALL jitcClobberAll();
void FASTCALL jitcInvalidateAll();
void FASTCALL jitcTouchRegister(NativeReg reg);
void FASTCALL jitcFlushRegister(int options = NATIVE_REGS_ALL);
void FASTCALL jitcFlushRegisterDirty(int options = NATIVE_REGS_ALL);
void FASTCALL jitcClobberRegister(int options = NATIVE_REGS_ALL);
void FASTCALL jitcGetClientCarry();
// Map the native flags/carry to a client condition register.
void FASTCALL jitcMapFlagsDirty(PPC_CRx cr = PPC_CR0);
void FASTCALL jitcMapCarryDirty();
void FASTCALL jitcClobberFlags();
void FASTCALL jitcClobberCarry();
void FASTCALL jitcClobberCarryAndFlags();
void FASTCALL jitcFlushCarryAndFlagsDirty(); // ONLY FOR DEBUG! DON'T CALL!

PPC_CRx FASTCALL jitcGetFlagsMapping();

bool FASTCALL jitcFlagsMapped();
bool FASTCALL jitcCarryMapped();

// Flush flags after a signed/unsigned CMP(L) at the given displacement.
void FASTCALL jitcFlushFlagsAfterCMPL_L(int disp);
void FASTCALL jitcFlushFlagsAfterCMPL_U(int disp);
void FASTCALL jitcFlushFlagsAfterCMP_L(int disp);
void FASTCALL jitcFlushFlagsAfterCMP_U(int disp);
126 |
|
127 |
// Two-operand ALU operations. Values 0..7 are the x86 ALU-group encodings
// (opcode /digit); MOV/TEST/XCHG (8..10) are internal pseudo-values handled
// specially by the emitters.
enum X86ALUopc {
	X86_ADC = 2,
	X86_ADD = 0,
	X86_AND = 4,
	X86_MOV = 8,
	X86_CMP = 7,
	X86_OR  = 1,
	X86_SBB = 3,
	X86_SUB = 5,
	X86_TEST = 9,
	X86_XCHG = 10,
	X86_XOR = 6,
};
140 |
|
141 |
// One-operand ALU operations (the F6/F7 opcode group).
enum X86ALUopc1 {
	X86_NOT,
	X86_NEG,
	X86_MUL,
	X86_IMUL,
	X86_DIV,
	X86_IDIV,
};

// Zero/sign extending moves; values are the second opcode byte after 0F
// (MOVSX = 0F BE/BF, MOVZX = 0F B6/B7).
enum X86MOVxx {
	X86_MOVSX = 0xbe,
	X86_MOVZX = 0xb6,
};
153 |
|
154 |
// Operand-less instructions, emitted verbatim.
// NOTE(review): multi-byte values appear to be stored little-endian, so
// e.g. 0x9866 emits 66 98 (operand-size prefix + CBW) -- confirm against
// asmSimple()'s implementation.
enum X86SimpleOpc {
	X86_CBW = 0x9866,
	X86_CWDE = 0x98,
	X86_CWD = 0x9966,
	X86_CDQ = 0x99,
	X86_CMC = 0xf5,
	X86_LAHF = 0x9f,
	X86_PUSHA = 0x60,
	X86_POPA = 0x61,
	X86_PUSHF = 0x9c,
	X86_POPF = 0x9d,
	X86_RET = 0xc3,
	X86_STC = 0xf9,
};
168 |
|
169 |
// Condition codes for Jcc / SETcc / CMOVcc; the value is the standard x86
// "tttn" condition encoding (low nibble of the opcode). Multiple mnemonics
// aliasing one encoding share a value (e.g. B/C/NAE are all 2).
enum X86FlagTest {
	X86_O   = 0,
	X86_NO  = 1,
	X86_B   = 2,
	X86_C   = 2,
	X86_NAE = 2,
	X86_NB  = 3,
	X86_NC  = 3,
	X86_AE  = 3,
	X86_E   = 4,
	X86_Z   = 4,
	X86_NE  = 5,
	X86_NZ  = 5,
	X86_NA  = 6,
	X86_BE  = 6,
	X86_A   = 7,
	X86_NBE = 7,
	X86_S   = 8,
	X86_NS  = 9,
	X86_PE  = 10,
	X86_PO  = 11,
	X86_L   = 12,
	X86_NGE = 12,
	X86_NL  = 13,
	X86_GE  = 13,
	X86_NG  = 14,
	X86_LE  = 14,
	X86_G   = 15,
	X86_NLE = 15,
};
199 |
|
200 |
// Current emit position in the translation cache.
NativeAddress FASTCALL asmHERE();

#ifndef X86ASM_V2_ONLY
/* Begin: X86Asm v1.0 */
// Legacy emitter API: memory operands are passed as a raw (modrm, len)
// pair produced by the x86_mem* helpers below. Superseded by the
// overloaded v2.0 API further down.
void FASTCALL asmALURegReg(X86ALUopc opc, NativeReg reg1, NativeReg reg2);
void FASTCALL asmALURegImm(X86ALUopc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmALUMemReg(X86ALUopc opc, byte *modrm, int len, NativeReg reg2);
void FASTCALL asmALUMemImm(X86ALUopc opc, byte *modrm, int len, uint32 imm);
void FASTCALL asmALURegMem(X86ALUopc opc, NativeReg reg1, byte *modrm, int len);
void FASTCALL asmALUReg(X86ALUopc1 opc, NativeReg reg1);
void FASTCALL asmALURegReg16(X86ALUopc opc, NativeReg reg1, NativeReg reg2);
void FASTCALL asmALURegImm16(X86ALUopc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmALUMemReg16(X86ALUopc opc, byte *modrm, int len, NativeReg reg2);
void FASTCALL asmALUMemImm16(X86ALUopc opc, byte *modrm, int len, uint32 imm);
void FASTCALL asmALURegMem16(X86ALUopc opc, NativeReg reg1, byte *modrm, int len);
void FASTCALL asmALUReg16(X86ALUopc1 opc, NativeReg reg1);
// _NoFlags: load an immediate without disturbing the flags register.
void FASTCALL asmMOVRegImm_NoFlags(NativeReg reg1, uint32 imm);
void FASTCALL asmMOVRegImm16_NoFlags(NativeReg reg1, uint16 imm);
void FASTCALL asmCMOVRegReg(X86FlagTest flags, NativeReg reg1, NativeReg reg2);
void FASTCALL asmCMOVRegMem(X86FlagTest flags, NativeReg reg1, byte *modrm, int len);
void FASTCALL asmSETReg8(X86FlagTest flags, NativeReg8 reg1);
void FASTCALL asmSETMem(X86FlagTest flags, byte *modrm, int len);
void FASTCALL asmALURegReg8(X86ALUopc opc, NativeReg8 reg1, NativeReg8 reg2);
void FASTCALL asmALURegImm8(X86ALUopc opc, NativeReg8 reg1, uint8 imm);
void FASTCALL asmALURegMem8(X86ALUopc opc, NativeReg8 reg1, byte *modrm, int len);
void FASTCALL asmALUMemReg8(X86ALUopc opc, byte *modrm, int len, NativeReg8 reg2);
void FASTCALL asmALUMemImm8(X86ALUopc opc, byte *modrm, int len, uint8 imm);
// DMem variants address memory via a plain 32-bit displacement.
void FASTCALL asmMOVDMemReg(uint32 disp, NativeReg reg1);
void FASTCALL asmMOVDMemReg16(uint32 disp, NativeReg reg1);
void FASTCALL asmMOVRegDMem(NativeReg reg1, uint32 disp);
void FASTCALL asmMOVRegDMem16(NativeReg reg1, uint32 disp);
void FASTCALL asmTESTDMemImm(uint32 disp, uint32 imm);
void FASTCALL asmANDDMemImm(uint32 disp, uint32 imm);
void FASTCALL asmORDMemImm(uint32 disp, uint32 imm);
void FASTCALL asmMOVxxRegReg8(X86MOVxx opc, NativeReg reg1, NativeReg8 reg2);
void FASTCALL asmMOVxxRegReg16(X86MOVxx opc, NativeReg reg1, NativeReg reg2);
void FASTCALL asmMOVxxRegMem8(X86MOVxx opc, NativeReg reg1, byte *modrm, int len);
void FASTCALL asmMOVxxRegMem16(X86MOVxx opc, NativeReg reg1, byte *modrm, int len);
/* END: X86Asm v1.0 */
#endif // X86ASM_V2_ONLY
240 |
|
241 |
/* BEGIN: X86Asm v2.0 */
// v2.0 emitter API: overload-based; memory operands are self-describing
// modrm_p buffers built by the x86_mem2* helpers below. The _B/_W/_D
// suffixes disambiguate the immediate's operand size (byte/word/dword)
// where overloading alone cannot.
void FASTCALL asmNOP(int n); // v2.0 also
void FASTCALL asmALU(X86ALUopc opc, NativeReg reg1, NativeReg reg2);
void FASTCALL asmALU(X86ALUopc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmALU(X86ALUopc opc, modrm_p modrm, NativeReg reg2);
void FASTCALL asmALU_D(X86ALUopc opc, modrm_p modrm, uint32 imm);
void FASTCALL asmALU(X86ALUopc opc, NativeReg reg1, modrm_p modrm);
void FASTCALL asmALU(X86ALUopc1 opc, NativeReg reg1);

void FASTCALL asmALU(X86ALUopc opc, NativeReg16 reg1, NativeReg16 reg2);
void FASTCALL asmALU(X86ALUopc opc, NativeReg16 reg1, uint16 imm);
void FASTCALL asmALU(X86ALUopc opc, modrm_p modrm, NativeReg16 reg2);
void FASTCALL asmALU_W(X86ALUopc opc, modrm_p modrm, uint16 imm);
void FASTCALL asmALU(X86ALUopc opc, NativeReg16 reg1, modrm_p modrm);
void FASTCALL asmALU(X86ALUopc1 opc, NativeReg16 reg1);

// Load an immediate without disturbing the flags register.
void FASTCALL asmMOV_NoFlags(NativeReg reg1, uint32 imm);
void FASTCALL asmMOV_NoFlags(NativeReg16 reg1, uint16 imm);
void FASTCALL asmCMOV(X86FlagTest flags, NativeReg reg1, NativeReg reg2);
void FASTCALL asmCMOV(X86FlagTest flags, NativeReg reg1, modrm_p modrm);

void FASTCALL asmSET(X86FlagTest flags, NativeReg8 reg1);
void FASTCALL asmSET(X86FlagTest flags, modrm_p modrm);

void FASTCALL asmALU(X86ALUopc opc, NativeReg8 reg1, NativeReg8 reg2);
void FASTCALL asmALU(X86ALUopc opc, NativeReg8 reg1, uint8 imm);
void FASTCALL asmALU(X86ALUopc opc, NativeReg8 reg1, modrm_p modrm);
void FASTCALL asmALU(X86ALUopc opc, modrm_p modrm, NativeReg8 reg2);
void FASTCALL asmALU_B(X86ALUopc opc, modrm_p modrm, uint8 imm);

// Absolute-address loads/stores; disp is the host address of the operand.
void FASTCALL asmMOV(const void *disp, NativeReg reg1);
void FASTCALL asmMOV(const void *disp, NativeReg16 reg1);
void FASTCALL asmMOV(NativeReg reg1, const void *disp);
void FASTCALL asmMOV(NativeReg16 reg1, const void *disp);

void FASTCALL asmTEST(const void *disp, uint32 imm);
void FASTCALL asmAND(const void *disp, uint32 imm);
void FASTCALL asmOR(const void *disp, uint32 imm);

void FASTCALL asmMOVxx(X86MOVxx opc, NativeReg reg1, NativeReg8 reg2);
void FASTCALL asmMOVxx(X86MOVxx opc, NativeReg reg1, NativeReg16 reg2);
void FASTCALL asmMOVxx_B(X86MOVxx opc, NativeReg reg1, modrm_p modrm);
void FASTCALL asmMOVxx_W(X86MOVxx opc, NativeReg reg1, modrm_p modrm);
void FASTCALL asmSimple(X86SimpleOpc simple);
/* End: X86Asm v2.0 */
286 |
|
287 |
// Shift/rotate group (opcodes C0/C1/D0-D3): the value is the /digit
// encoding shifted into bits 3..5. SHL and SAL are the same instruction,
// hence the same value.
enum X86ShiftOpc {
	X86_ROL = 0x00,
	X86_ROR = 0x08,
	X86_RCL = 0x10,
	X86_RCR = 0x18,
	X86_SHL = 0x20,
	X86_SHR = 0x28,
	X86_SAL = 0x20,
	X86_SAR = 0x38,
};

// Bit-test instructions: value is the /digit of the 0F BA group.
enum X86BitTest {
	X86_BT  = 4,
	X86_BTC = 7,
	X86_BTR = 6,
	X86_BTS = 5,
};

// Bit-scan instructions: value is the second opcode byte after 0F.
enum X86BitSearch {
	X86_BSF = 0xbc,
	X86_BSR = 0xbd,
};
309 |
|
310 |
#ifndef X86ASM_V2_ONLY
/* Begin: X86Asm v1.0 */
// Legacy shift/inc/dec/mul/lea/bit-op emitters ((modrm, len) memory form).
void FASTCALL asmShiftRegImm(X86ShiftOpc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmShiftRegCL(X86ShiftOpc opc, NativeReg reg1);
void FASTCALL asmShiftReg16Imm(X86ShiftOpc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmShiftReg16CL(X86ShiftOpc opc, NativeReg reg1);
void FASTCALL asmShiftReg8Imm(X86ShiftOpc opc, NativeReg8 reg1, uint32 imm);
void FASTCALL asmShiftReg8CL(X86ShiftOpc opc, NativeReg8 reg1);
void FASTCALL asmINCReg(NativeReg reg1);
void FASTCALL asmDECReg(NativeReg reg1);

void FASTCALL asmIMULRegRegImm(NativeReg reg1, NativeReg reg2, uint32 imm);
void FASTCALL asmIMULRegReg(NativeReg reg1, NativeReg reg2);

void FASTCALL asmLEA(NativeReg reg1, byte *modrm, int len);
void FASTCALL asmBTxRegImm(X86BitTest opc, NativeReg reg1, int value);
void FASTCALL asmBTxMemImm(X86BitTest opc, byte *modrm, int len, int value);
void FASTCALL asmBSxRegReg(X86BitSearch opc, NativeReg reg1, NativeReg reg2);
/* End: X86Asm v1.0 */
#endif // X86ASM_V2_ONLY
330 |
|
331 |
/* Begin: X86Asm v2.0 */
// v2.0 shift/inc/dec/mul/lea/bit-op emitters (overloaded, modrm_p form).
void FASTCALL asmShift(X86ShiftOpc opc, NativeReg reg1, uint32 imm);
void FASTCALL asmShift_CL(X86ShiftOpc opc, NativeReg reg1);
void FASTCALL asmShift(X86ShiftOpc opc, NativeReg16 reg1, uint32 imm);
void FASTCALL asmShift_CL(X86ShiftOpc opc, NativeReg16 reg1);
void FASTCALL asmShift(X86ShiftOpc opc, NativeReg8 reg1, uint32 imm);
void FASTCALL asmShift_CL(X86ShiftOpc opc, NativeReg8 reg1);
void FASTCALL asmINC(NativeReg reg1);
void FASTCALL asmDEC(NativeReg reg1);

void FASTCALL asmIMUL(NativeReg reg1, NativeReg reg2, uint32 imm);
void FASTCALL asmIMUL(NativeReg reg1, NativeReg reg2);

void FASTCALL asmLEA(NativeReg reg1, modrm_p modrm);
void FASTCALL asmBTx(X86BitTest opc, NativeReg reg1, int value);
void FASTCALL asmBTx(X86BitTest opc, modrm_p modrm, int value);
void FASTCALL asmBSx(X86BitSearch opc, NativeReg reg1, NativeReg reg2);
/* End: X86Asm v2.0 */

void FASTCALL asmBSWAP(NativeReg reg);

// Control flow: the *Fixup variants emit a jump with an unresolved target
// and return the address of the patch site; fill it in later with
// asmResolveFixup (to=0 apparently means "resolve to asmHERE()" -- verify).
void FASTCALL asmJMP(NativeAddress to);
void FASTCALL asmJxx(X86FlagTest flags, NativeAddress to);
NativeAddress FASTCALL asmJMPFixup();
NativeAddress FASTCALL asmJxxFixup(X86FlagTest flags);
void FASTCALL asmCALL(NativeAddress to);

void FASTCALL asmResolveFixup(NativeAddress at, NativeAddress to=0);
359 |
|
360 |
// x87 FPU stack registers st(0)..st(7).
enum NativeFloatReg {
	Float_ST0=0,
	Float_ST1=1,
	Float_ST2=2,
	Float_ST3=3,
	Float_ST4=4,
	Float_ST5=5,
	Float_ST6=6,
	Float_ST7=7,
};

// Convert a plain index into a NativeFloatReg.
#define X86_FLOAT_ST(i) ((NativeFloatReg)(i))

// JIT-internal handle for a float register; 0 means "none".
typedef int JitcFloatReg;
#define JITC_FLOAT_REG_NONE 0
375 |
|
376 |
NativeFloatReg FASTCALL jitcFloatRegisterToNative(JitcFloatReg r); |
377 |
bool FASTCALL jitcFloatRegisterIsTOP(JitcFloatReg r); |
378 |
JitcFloatReg FASTCALL jitcFloatRegisterXCHGToFront(JitcFloatReg r); |
379 |
JitcFloatReg FASTCALL jitcFloatRegisterDirty(JitcFloatReg r); |
380 |
void FASTCALL jitcFloatRegisterInvalidate(JitcFloatReg r); |
381 |
JitcFloatReg FASTCALL jitcFloatRegisterDup(JitcFloatReg r, JitcFloatReg hint=JITC_FLOAT_REG_NONE); |
382 |
void FASTCALL jitcFloatRegisterClobberAll(); |
383 |
void FASTCALL jitcFloatRegisterStoreAndPopTOP(JitcFloatReg r); |
384 |
|
385 |
void FASTCALL jitcPopFloatStack(JitcFloatReg hint1, JitcFloatReg hint2); |
386 |
void FASTCALL jitcClobberClientRegisterForFloat(int creg); |
387 |
void FASTCALL jitcInvalidateClientRegisterForFloat(int creg); |
388 |
JitcFloatReg FASTCALL jitcGetClientFloatRegisterMapping(int creg); |
389 |
JitcFloatReg FASTCALL jitcGetClientFloatRegister(int creg, JitcFloatReg hint1=JITC_FLOAT_REG_NONE, JitcFloatReg hint1=JITC_FLOAT_REG_NONE); |
390 |
JitcFloatReg FASTCALL jitcGetClientFloatRegisterUnmapped(int creg, JitcFloatReg hint1=JITC_FLOAT_REG_NONE, JitcFloatReg hint1=JITC_FLOAT_REG_NONE); |
391 |
JitcFloatReg FASTCALL jitcMapClientFloatRegisterDirty(int creg, JitcFloatReg freg=JITC_FLOAT_REG_NONE); |
392 |
|
393 |
// x87 condition tests used by FCMOVcc (value = index into the FCMOV
// opcode table).
enum X86FloatFlagTest {
	X86_FB=0,
	X86_FE=1,
	X86_FBE=2,
	X86_FU=3,
	X86_FNB=4,
	X86_FNE=5,
	X86_FNBE=6,
	X86_FNU=7,
};

// x87 arithmetic ops: value is the base of the "+i" opcode range; the
// numeric comments (.238 etc.) look like Intel manual page references.
enum X86FloatArithOp {
	X86_FADD = 0xc0, // .238

	// st(i)/st(0)
	X86_FDIV = 0xf8, // .261

	// st(0)/st(i)
	X86_FDIVR = 0xf0, // .265

	X86_FMUL = 0xc8, // .288

	// st(i) - st(0)
	X86_FSUB = 0xe8, // .327

	// st(0) - st(i)
	X86_FSUBR = 0xe0, // .330
};

// x87 compares; two-byte opcodes stored with the second byte in the low
// half (e.g. 0xf0db -> DB F0+i when emitted little-endian).
enum X86FloatCompOp {
	//dbf0+i
	X86_FCOMI = 0xf0db, // .255

	//dff0+i
	X86_FCOMIP = 0xf0df, // .255

	//dbe8+i
	X86_FUCOMI = 0xe8db, // .255
	//dfe8+i
	X86_FUCOMIP = 0xe8df, // .255
};

// Integer compare: value is the opcode byte (16 bit vs. 32 bit operand).
enum X86FloatICompOp {
	X86_FICOM16 = 0xde,
	X86_FICOM32 = 0xda,
};

// Operand-less x87 instructions; stored with the D9/DE prefix byte in the
// low half (e.g. FABS 0xe1d9 -> D9 E1 when emitted little-endian).
enum X86FloatOp {
	FABS = 0xe1d9,
	FCOMPP = 0xd9de, // .252
	FCHS = 0xe0d9, // .246
	FLD1 = 0xe8d9, // .282
	FLDL2T = 0xe9d9, // .282
	FLDL2E = 0xead9, // .282
	FLDPI = 0xebd9, // .282
	FLDLG2 = 0xecd9, // .282
	FLDLN2 = 0xedd9, // .282
	FLDZ = 0xeed9, // .282
	FRNDINT = 0xfcd9,
	FSQRT = 0xfad9, // .314
	F2XM1 = 0xf0d9, // .236
	FYL2X = 0xf1d9, // .353
	FYL2XP1 = 0xf9d9, // .355
	FSCALE = 0xfdd9, // .308
	FTST = 0xe4d9, // .333
};
459 |
|
460 |
// .250 FCMOVcc |
461 |
// .277 FISTP [mem32] 0xDB /3 |
462 |
|
463 |
#ifndef X86ASM_V2_ONLY
/* Begin: X86Asm v1.0 */
// Legacy x87 emitters ((modrm, len) memory form).
void FASTCALL asmFCompSTi(X86FloatCompOp op, NativeFloatReg sti);
void FASTCALL asmFICompMem(X86FloatICompOp op, byte *modrm, int len);
void FASTCALL asmFICompPMem(X86FloatICompOp op, byte *modrm, int len);
void FASTCALL asmFArithMem(X86FloatArithOp op, byte *modrm, int len);
void FASTCALL asmFArithST0(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFArithSTi(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFArithSTiP(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFXCHSTi(NativeFloatReg sti);
void FASTCALL asmFFREESTi(NativeFloatReg sti);
void FASTCALL asmFFREEPSTi(NativeFloatReg sti);
void FASTCALL asmFSimpleST0(X86FloatOp op);
void FASTCALL asmFLDSingleMem(byte *modrm, int len);
void FASTCALL asmFLDDoubleMem(byte *modrm, int len);
void FASTCALL asmFLDSTi(NativeFloatReg sti);
void FASTCALL asmFILD16(byte *modrm, int len);
void FASTCALL asmFILD(byte *modrm, int len);
void FASTCALL asmFSTSingleMem(byte *modrm, int len);
void FASTCALL asmFSTPSingleMem(byte *modrm, int len);
void FASTCALL asmFSTDoubleMem(byte *modrm, int len);
void FASTCALL asmFSTPDoubleMem(byte *modrm, int len);
void FASTCALL asmFSTDSTi(NativeFloatReg sti);
void FASTCALL asmFSTDPSTi(NativeFloatReg sti);
void FASTCALL asmFISTPMem(byte *modrm, int len);
void FASTCALL asmFISTPMem64(byte *modrm, int len);
void FASTCALL asmFISTTPMem(byte *modrm, int len);

void FASTCALL asmFSTSWMem(byte *modrm, int len);
void FASTCALL asmFSTSW_EAX(void);

void FASTCALL asmFLDCWMem(byte *modrm, int len);
void FASTCALL asmFSTCWMem(byte *modrm, int len);
/* End: X86Asm v1.0 */
#endif // X86ASM_V2_ONLY
498 |
|
499 |
/* Begin: X86Asm v2.0 */
// v2.0 x87 emitters (modrm_p form); _W/_D/_Q suffixes select the integer
// operand width (16/32/64 bit), _Single/_Double the float width.
void FASTCALL asmFComp(X86FloatCompOp op, NativeFloatReg sti);
void FASTCALL asmFIComp(X86FloatICompOp op, modrm_p modrm);
void FASTCALL asmFICompP(X86FloatICompOp op, modrm_p modrm);
void FASTCALL asmFArith(X86FloatArithOp op, modrm_p modrm);
void FASTCALL asmFArith_ST0(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFArith_STi(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFArithP_STi(X86FloatArithOp op, NativeFloatReg sti);
void FASTCALL asmFXCH(NativeFloatReg sti);
void FASTCALL asmFFREE(NativeFloatReg sti);
void FASTCALL asmFFREEP(NativeFloatReg sti);
void FASTCALL asmFSimple(X86FloatOp op);
void FASTCALL asmFLD_Single(modrm_p modrm);
void FASTCALL asmFLD_Double(modrm_p modrm);
void FASTCALL asmFLD(NativeFloatReg sti);
void FASTCALL asmFILD_W(modrm_p modrm);
void FASTCALL asmFILD_D(modrm_p modrm);
void FASTCALL asmFILD_Q(modrm_p modrm);
void FASTCALL asmFST_Single(modrm_p modrm);
void FASTCALL asmFSTP_Single(modrm_p modrm);
void FASTCALL asmFST_Double(modrm_p modrm);
void FASTCALL asmFSTP_Double(modrm_p modrm);
void FASTCALL asmFST(NativeFloatReg sti);
void FASTCALL asmFSTP(NativeFloatReg sti);
void FASTCALL asmFISTP_W(modrm_p modrm);
void FASTCALL asmFISTP_D(modrm_p modrm);
void FASTCALL asmFISTP_Q(modrm_p modrm);
void FASTCALL asmFISTTP(modrm_p modrm);

void FASTCALL asmFSTSW(modrm_p modrm);
void FASTCALL asmFSTSW_EAX(void);

void FASTCALL asmFLDCW(modrm_p modrm);
void FASTCALL asmFSTCW(modrm_p modrm);
/* End: X86Asm v2.0 */
534 |
|
535 |
// SSE vector registers, numbered with their hardware encoding.
enum NativeVectorReg {
	XMM0 = 0,
	XMM1 = 1,
	XMM2 = 2,
	XMM3 = 3,
	XMM4 = 4,
	XMM5 = 5,
	XMM6 = 6,
	XMM7 = 7,
	XMM_SENTINEL = 8,        // one-past-the-end marker for iteration
	VECTREG_NO = 0xffffffff, // sentinel: no register assigned
};
547 |
|
548 |
// Packed-single SSE operations: value is the opcode byte following the
// 0F escape (Intel SDM Vol. 2).
enum X86ALUPSopc {
	X86_ANDPS = 0x54,
	X86_ANDNPS = 0x55,
	X86_ORPS = 0x56,
	X86_XORPS = 0x57,
	X86_MOVAPS = 0x28,
	X86_MOVUPS = 0x10,
	X86_ADDPS = 0x58,
	// Fix: was 0x53, which is RCPPS; DIVPS is 0F 5E per the Intel SDM.
	X86_DIVPS = 0x5E,
	X86_MAXPS = 0x5F,
	X86_MINPS = 0x5D,
	X86_MULPS = 0x59,
	X86_RCPPS = 0x53,
	X86_RSQRTPS = 0x52,
	X86_SQRTPS = 0x51,
	X86_SUBPS = 0x5C,
	X86_UNPCKLPS = 0x14,
	X86_UNPCKHPS = 0x15,
};
567 |
|
568 |
// Packed-integer (MMX/SSE2) operations: value is the opcode byte after 0F.
// The second group are element-size-agnostic bases; OR in the size via the
// PALU*() macros below. The first group are complete opcodes already --
// do NOT apply the size macros to them.
enum X86PALUopc {
	X86_PACKSSWB = 0x63, // Do *NOT* use PALU*() macros on these
	X86_PACKUSWB = 0x67,
	X86_PACKSSDW = 0x6B,
	X86_PMULLW = 0xD5,
	X86_PMINUB = 0xDA,
	X86_PMAXUB = 0xDE,
	X86_PAVGB = 0xE0,
	X86_PAVGW = 0xE3,
	X86_PMULHUW = 0xE4,
	X86_PMULHW = 0xE5,
	X86_PMINSW = 0xEA,
	X86_PMAXSW = 0xEE,

	X86_PAND = 0xDB,
	X86_PANDN = 0xDF,
	X86_POR = 0xEB,
	X86_PXOR = 0xEF,

	// Size-selectable bases (combine with PALUB/W/D/Q):
	X86_PUNPCKL = 0x60,
	X86_PCMPGT = 0x64,
	X86_PUNPCKH = 0x68,
	X86_PCMPEQ = 0x74,
	X86_PSRL = 0xD0,
	X86_PSUBUS = 0xD8,
	X86_PADDUS = 0xDC,
	X86_PSRA = 0xE0,
	X86_PSUBS = 0xE8,
	X86_PADDS = 0xEC,
	X86_PSLL = 0xF0,
	X86_PSUB = 0xF8,
	X86_PADD = 0xFC,
};
601 |
|
602 |
// Select the element size of a size-selectable X86PALUopc base
// (byte/word/dword/qword occupy the low two opcode bits).
#define PALUB(op) ((X86PALUopc)((op) | 0x00))
#define PALUW(op) ((X86PALUopc)((op) | 0x01))
#define PALUD(op) ((X86PALUopc)((op) | 0x02))
#define PALUQ(op) ((X86PALUopc)((op) | 0x03))

// Convert a plain index into a NativeVectorReg.
#define X86_VECTOR_VR(i) ((NativeVectorReg)(i))
// JIT-internal handle for a client vector register.
typedef int JitcVectorReg;

#define JITC_VECTOR_REGS_ALL 0

// Reserved client-side slots beyond the 32 architectural vector registers.
#define JITC_VECTOR_TEMP 32
#define JITC_VECTOR_NEG1 33

#define PPC_VECTREG_NO 0xffffffff
616 |
|
617 |
// Vector (XMM) register allocation and client-register mapping.
NativeVectorReg FASTCALL jitcAllocVectorRegister(int hint=0);
void FASTCALL jitcDirtyVectorRegister(NativeVectorReg nreg);
void FASTCALL jitcTouchVectorRegister(NativeVectorReg nreg);

// Debug/diagnostic helpers.
int FASTCALL jitcAssertFlushedVectorRegister(JitcVectorReg creg);
int FASTCALL jitcAssertFlushedVectorRegisters();
void FASTCALL jitcShowVectorRegisterStatus(JitcVectorReg creg);

NativeVectorReg FASTCALL jitcMapClientVectorRegisterDirty(JitcVectorReg creg, int hint=0);
NativeVectorReg FASTCALL jitcGetClientVectorRegister(JitcVectorReg creg, int hint=0);
NativeVectorReg FASTCALL jitcGetClientVectorRegisterDirty(JitcVectorReg creg, int hint=0);
NativeVectorReg FASTCALL jitcGetClientVectorRegisterMapping(JitcVectorReg creg);
NativeVectorReg FASTCALL jitcRenameVectorRegisterDirty(NativeVectorReg reg, JitcVectorReg creg, int hint=0);

// Bulk operations over the native vector register file.
void FASTCALL jitcFlushVectorRegister(int options=0);
void FASTCALL jitcFlushVectorRegisterDirty(int options=0);
void FASTCALL jitcClobberVectorRegister(int options=0);
void FASTCALL jitcTrashVectorRegister(int options=0);
void FASTCALL jitcDropVectorRegister(int options=0);

// Per-client-register operations.
void FASTCALL jitcFlushClientVectorRegister(JitcVectorReg creg);
void FASTCALL jitcTrashClientVectorRegister(JitcVectorReg creg);
void FASTCALL jitcClobberClientVectorRegister(JitcVectorReg creg);
void FASTCALL jitcDropClientVectorRegister(JitcVectorReg creg);
641 |
|
642 |
// SSE emitters. disp is the host address of the memory operand; first
// parameter is the destination (register<-memory vs. memory<-register).
void asmMOVAPS(NativeVectorReg reg, const void *disp);
void asmMOVAPS(const void *disp, NativeVectorReg reg);
void asmMOVUPS(NativeVectorReg reg, const void *disp);
void asmMOVUPS(const void *disp, NativeVectorReg reg);
void asmMOVSS(NativeVectorReg reg, const void *disp);
void asmMOVSS(const void *disp, NativeVectorReg reg);

void asmALUPS(X86ALUPSopc opc, NativeVectorReg reg1, NativeVectorReg reg2);
void asmALUPS(X86ALUPSopc opc, NativeVectorReg reg1, modrm_p modrm);
void asmPALU(X86PALUopc opc, NativeVectorReg reg1, NativeVectorReg reg2);
void asmPALU(X86PALUopc opc, NativeVectorReg reg1, modrm_p modrm);

// order is the imm8 shuffle selector.
void asmSHUFPS(NativeVectorReg reg1, NativeVectorReg reg2, int order);
void asmSHUFPS(NativeVectorReg reg1, modrm_p modrm, int order);
void asmPSHUFD(NativeVectorReg reg1, NativeVectorReg reg2, int order);
void asmPSHUFD(NativeVectorReg reg1, modrm_p modrm, int order);
658 |
|
659 |
#ifndef X86ASM_V2_ONLY |
660 |
/*
 *	Encodes a [reg+disp] operand into modrm (v1.0 format) and returns the
 *	number of bytes written.
 *	reg must not be ESP (ESP requires a SIB byte; see disabled code).
 */
static inline int x86_mem_r(byte *modrm, NativeReg reg, uint32 disp)
{
	// disp outside the sign-extended 8 bit range -> mod=10, 32 bit disp
	if (((uint32)(disp) > 0x7f) && ((uint32)(disp) < 0xffffff80)) {
/*		if (reg == ESP) {
			modrm[0] = 0x84;
			modrm[1] = 0x24;
			*((uint32 *)&modrm[2]) = disp;
			return 6;
		}*/
		modrm[0] = 0x80+reg;
		*((uint32 *)&modrm[1]) = disp;
		return 5;
	} else if (reg == EBP) {
		// [ebp] has no disp-less encoding (mod=00,r/m=101 means [disp32]),
		// so always emit an 8 bit displacement
		modrm[0] = 0x45;
		modrm[1] = disp;
		return 2;
/*	} else if (reg == ESP) {
		if (disp) {
			modrm[0] = 0x44;
			modrm[1] = 0x24;
			modrm[2] = disp;
			return 3;
		} else {
			modrm[0] = 0x04;
			modrm[1] = 0x24;
			return 2;
		} */
	} else if (disp) {
		// mod=01: 8 bit sign-extended displacement
		modrm[0] = 0x40+reg;
		modrm[1] = disp;
		return 2;
	} else {
		// mod=00: register indirect, no displacement
		modrm[0] = reg;
		return 1;
	}
}
699 |
|
700 |
/*
 *	Encodes [reg+disp] (v1.0 format); reg == REG_NO encodes an absolute
 *	[disp32] operand. Returns the number of bytes written.
 */
static inline int x86_mem(byte *modrm, NativeReg reg, uint32 disp)
{
	if (reg != REG_NO) return x86_mem_r(modrm, reg, disp);
	// absolute addressing: mod=00, r/m=101 -> [disp32]
	modrm[0] = 0x05;
	*((uint32 *)&modrm[1]) = disp;
	return 5;
}
708 |
|
709 |
/*
 *	Encodes a [reg1 + reg2*factor + disp] operand with a SIB byte
 *	(v1.0 format) and returns the number of bytes written.
 *	reg1, reg2 must not be ESP.
 */
static inline int x86_mem_sib_r(byte *modrm, NativeReg reg1, int factor, NativeReg reg2, uint32 disp=0)
{
	// Normalize the scale factor to a hardware-encodable 1/2/4/8.
	switch (factor) {
	case 1:
	case 4:
	case 8: // ok
		break;
	case 2: if (reg1 == REG_NO) {
			// [eax+eax] is shorter than [eax*2+0]
			reg1 = reg2;
			factor = 1;
		}
		break;
	case 3:
	case 5:
	case 9:	// [eax*(2^n+1)] -> [eax+eax*2^n]
		if (reg1 != REG_NO) { /* internal error */ }
		reg1 = reg2;
		factor--;
		break;
	default:
		/* internal error */
		break;
	}
	// Scale bits (SIB bits 6..7) indexed by factor:
	//                               0  1     2     3  4     5  6  7  8
	static const byte factors[9] = {0, 0x00, 0x40, 0, 0x80, 0, 0, 0, 0xc0};
	if (reg1 == REG_NO) {
		// no base: [reg2*factor+disp32]; base=EBP with mod=00 means "disp32"
		modrm[0] = 0x04;
		modrm[1] = factors[factor]+(reg2<<3)+EBP;
		*((uint32 *)&modrm[2]) = disp;
		return 6;
	} else if (((uint32)(disp) > 0x7f) && ((uint32)(disp) < 0xffffff80)) {
		// disp needs 32 bits: mod=10 + SIB + disp32
		modrm[0] = 0x84;
		modrm[1] = factors[factor]+(reg2<<3)+reg1;
		*((uint32 *)&modrm[2]) = disp;
		return 6;
	} else if (disp || reg1 == EBP) {
		// mod=01 + SIB + disp8 (EBP base always needs a displacement)
		modrm[0] = 0x44;
		modrm[1] = factors[factor]+(reg2<<3)+reg1;
		modrm[2] = disp;
		return 3;
	} else {
		// mod=00 + SIB, no displacement
		modrm[0] = 0x04;
		modrm[1] = factors[factor]+(reg2<<3)+reg1;
		return 2;
	}
}
760 |
|
761 |
/* |
762 |
* reg1, reg2 must not be ESP |
763 |
*/ |
764 |
/*
 *	Encodes [reg1 + reg2*factor + disp] (v1.0 format); with no index
 *	register (reg2 == REG_NO) falls back to the plain [reg1+disp] form.
 *	reg1, reg2 must not be ESP.
 */
static inline int x86_mem_sib(byte *modrm, NativeReg reg1, int factor, NativeReg reg2, uint32 disp=0)
{
	if (reg2 != REG_NO) return x86_mem_sib_r(modrm, reg1, factor, reg2, disp);
	return x86_mem(modrm, reg1, disp);
}
769 |
|
770 |
#endif // X86ASM_V2_ONLY |
771 |
|
772 |
/*
 *	Encodes a [reg+disp] operand into modrm (v2.0 format: byte 0 holds the
 *	length of the following encoding) and returns modrm.
 *	reg must not be ESP (ESP requires a SIB byte; see disabled code).
 */
static inline modrm_p x86_mem2_r(modrm_o modrm, NativeReg reg, uint32 disp)
{
	// disp outside the sign-extended 8 bit range -> mod=10, 32 bit disp
	if (((uint32)(disp) > 0x7f) && ((uint32)(disp) < 0xffffff80)) {
/*		if (reg == ESP) {
			modrm[0] = 6;
			modrm[1] = 0x84;
			modrm[2] = 0x24;
			*((uint32 *)&modrm[3]) = disp;
			return modrm;
		}*/
		modrm[0] = 5;
		modrm[1] = 0x80+reg;
		*((uint32 *)&modrm[2]) = disp;
		return modrm;
	} else if (reg == EBP) {
		// [ebp] has no disp-less encoding; always emit an 8 bit disp
		modrm[0] = 2;
		modrm[1] = 0x45;
		modrm[2] = disp;
		return modrm;
/*	} else if (reg == ESP) {
		if (disp) {
			modrm[0] = 3;
			modrm[1] = 0x44;
			modrm[2] = 0x24;
			modrm[3] = disp;
			return modrm;
		} else {
			modrm[0] = 2;
			modrm[1] = 0x04;
			modrm[2] = 0x24;
			return modrm;
		} */
	} else if (disp) {
		// mod=01: 8 bit sign-extended displacement
		modrm[0] = 2;
		modrm[1] = 0x40+reg;
		modrm[2] = disp;
		return modrm;
	} else {
		// mod=00: register indirect, no displacement
		modrm[0] = 1;
		modrm[1] = reg;
		return modrm;
	}
}
818 |
|
819 |
/*
 *	Encodes [reg+disp] (v2.0 format); reg == REG_NO encodes an absolute
 *	[disp32] operand. Returns modrm.
 */
static inline modrm_p x86_mem2(modrm_o modrm, NativeReg reg, uint32 disp=0)
{
	if (reg != REG_NO) return x86_mem2_r(modrm, reg, disp);
	// absolute addressing: mod=00, r/m=101 -> [disp32]
	modrm[0] = 5;
	modrm[1] = 0x05;
	*((uint32 *)&modrm[2]) = disp;
	return modrm;
}
828 |
|
829 |
/*
 *	Encodes [reg+disp] (v2.0 format) with a host pointer as displacement.
 */
static inline modrm_p x86_mem2(modrm_o modrm, NativeReg reg, const void *disp)
{
	uint32 d = (uint32)disp; // 32-bit host: pointer fits a disp32
	return x86_mem2(modrm, reg, d);
}
833 |
|
834 |
/*
 *	Encodes an absolute [disp32] operand (v2.0 format) from a host pointer.
 */
static inline modrm_p x86_mem2(modrm_o modrm, const void *disp)
{
	// same encoding as the REG_NO path of the register overload:
	// mod=00, r/m=101 -> [disp32]
	return x86_mem2(modrm, REG_NO, (uint32)disp);
}
841 |
|
842 |
/*
 *	Encodes a [reg1 + reg2*factor + disp] operand with a SIB byte
 *	(v2.0 format: byte 0 holds the length) and returns modrm.
 *	reg1, reg2 must not be ESP.
 */
static inline modrm_p x86_mem2_sib_r(modrm_o modrm, NativeReg reg1, int factor, NativeReg reg2, uint32 disp=0)
{
	// Normalize the scale factor to a hardware-encodable 1/2/4/8.
	switch (factor) {
	case 1:
	case 4:
	case 8: // ok
		break;
	case 2: if (reg1 == REG_NO) {
			// [eax+eax] is shorter than [eax*2+0]
			reg1 = reg2;
			factor = 1;
		}
		break;
	case 3:
	case 5:
	case 9:	// [eax*(2^n+1)] -> [eax+eax*2^n]
		if (reg1 != REG_NO) { /* internal error */ }
		reg1 = reg2;
		factor--;
		break;
	default:
		/* internal error */
		break;
	}
	// Scale bits (SIB bits 6..7) indexed by factor:
	//                               0  1     2     3  4     5  6  7  8
	static const byte factors[9] = {0, 0x00, 0x40, 0, 0x80, 0, 0, 0, 0xc0};
	if (reg1 == REG_NO) {
		// no base: [reg2*factor+disp32]; base=EBP with mod=00 means "disp32"
		modrm[0] = 6;
		modrm[1] = 0x04;
		modrm[2] = factors[factor]+(reg2<<3)+EBP;
		*((uint32 *)&modrm[3]) = disp;
		return modrm;
	} else if (((uint32)(disp) > 0x7f) && ((uint32)(disp) < 0xffffff80)) {
		// disp needs 32 bits: mod=10 + SIB + disp32
		modrm[0] = 6;
		modrm[1] = 0x84;
		modrm[2] = factors[factor]+(reg2<<3)+reg1;
		*((uint32 *)&modrm[3]) = disp;
		return modrm;
	} else if (disp || reg1 == EBP) {
		// mod=01 + SIB + disp8 (EBP base always needs a displacement)
		modrm[0] = 3;
		modrm[1] = 0x44;
		modrm[2] = factors[factor]+(reg2<<3)+reg1;
		modrm[3] = disp;
		return modrm;
	} else {
		// mod=00 + SIB, no displacement
		modrm[0] = 2;
		modrm[1] = 0x04;
		modrm[2] = factors[factor]+(reg2<<3)+reg1;
		return modrm;
	}
}
897 |
|
898 |
/* |
899 |
* reg1, reg2 must not be ESP |
900 |
*/ |
901 |
static inline modrm_p x86_mem2(modrm_o modrm, NativeReg reg1, int factor, NativeReg reg2, uint32 disp=0) |
902 |
{ |
903 |
if (reg2 == REG_NO) return x86_mem2(modrm, reg1, disp); |
904 |
return x86_mem2_sib_r(modrm, reg1, factor, reg2, disp); |
905 |
} |
906 |
|
907 |
#endif |