/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#ifndef __MIPS64_AMD64_TRANS_H__
#define __MIPS64_AMD64_TRANS_H__

#include "utils.h"
#include "amd64-codegen.h"
#include "cpu.h"
#include "dynamips.h"
#include "mips64_exec.h"

#define JIT_SUPPORT 1

/* Manipulate bitmasks atomically */
static forced_inline void atomic_or(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; orl %1,%0":"=m"(*v):"ir"(m),"m"(*v));
}

static forced_inline void atomic_and(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; andl %1,%0":"=m"(*v):"ir"(m),"m"(*v));
}
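
/* For reference only: a sketch of the same operations using GCC-style
   __sync builtins instead of inline asm; on amd64 the compiler emits the
   same lock-prefixed orl/andl. The *_sync names are illustrative and not
   used elsewhere in the JIT. */
static forced_inline void atomic_or_sync(m_uint32_t *v,m_uint32_t m)
{
   __sync_fetch_and_or(v,m);    /* lock orl m,(v) */
}

static forced_inline void atomic_and_sync(m_uint32_t *v,m_uint32_t m)
{
   __sync_fetch_and_and(v,m);   /* lock andl m,(v) */
}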

/* Wrappers to amd64-codegen functions */
#define mips64_jit_tcb_set_patch amd64_patch
#define mips64_jit_tcb_set_jump amd64_jump_code

/* MIPS instruction array */
extern struct mips64_insn_tag mips64_insn_tags[];

/* Push epilog for an amd64 instruction block */
static forced_inline void mips64_jit_tcb_push_epilog(mips64_jit_tcb_t *block)
{
   amd64_ret(block->jit_ptr);
}
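
/* Note: the epilog is a single "ret", so a translated block is invoked
   like an ordinary C function pointer and control falls back to the
   dispatch code in mips64_jit_tcb_exec() below when the block ends. */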

/* Execute JIT code */
static forced_inline
void mips64_jit_tcb_exec(cpu_mips_t *cpu,mips64_jit_tcb_t *block)
{
   insn_tblock_fptr jit_code;
   m_uint32_t offset;

   offset = (cpu->pc & MIPS_MIN_PAGE_IMASK) >> 2;
   jit_code = (insn_tblock_fptr)block->jit_insn_ptr[offset];

   if (unlikely(!jit_code)) {
      mips64_exec_single_step(cpu,vmtoh32(block->mips_code[offset]));
      return;
   }

   asm volatile ("movq %0,%%r15"::"r"(cpu):
                 "r14","r15","rax","rbx","rcx","rdx","rdi","rsi");
   jit_code();
}
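
/* Minimal sketch of the dispatch arithmetic above, assuming 4 KB code
   pages (MIPS_MIN_PAGE_IMASK covering the low 12 bits of the PC) and
   4-byte MIPS instructions; the function name and the 0xfff constant are
   illustrative only. E.g. pc = 0xffffffffbfc00008 gives
   (0x008 & 0xfff) >> 2 = 2, i.e. the third host-code pointer of the
   page. The inline asm above pins the cpu context in %r15, which the
   generated code expects; the clobber list makes the compiler spill
   anything live in the scratch registers first. */
static forced_inline m_uint32_t jit_page_word_index(m_uint64_t pc)
{
   return((m_uint32_t)((pc & 0xfff) >> 2));
}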

static inline void amd64_patch(u_char *code,u_char *target)
{
   /* Skip REX */
   if ((code[0] >= 0x40) && (code[0] <= 0x4f))
      code += 1;

   if ((code[0] & 0xf8) == 0xb8) {
      /* amd64_set_reg_template */
      *(m_uint64_t *)(code + 1) = (m_uint64_t)target;
   }
   else if (code[0] == 0x8b) {
      /* mov 0(%rip), %dreg */
      *(m_uint32_t *)(code + 2) = (m_uint32_t)(m_uint64_t)target - 7;
   }
   else if ((code[0] == 0xff) && (code[1] == 0x15)) {
      /* call *<OFFSET>(%rip) */
      *(m_uint32_t *)(code + 2) = ((m_uint32_t)(m_uint64_t)target) - 7;
   }
   else
      x86_patch(code,target);
}
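
/* Illustrative use of amd64_patch() on the one template it patches
   absolutely: REX.W (0x48) + opcode 0xb8 + 8-byte immediate, i.e.
   "movabs $imm64,%rax". The hand-encoded buffer and the example function
   name are for this sketch only. */
static inline void amd64_patch_example(void)
{
   u_char buf[10] = { 0x48, 0xb8, 0,0,0,0,0,0,0,0 };  /* movabs $0,%rax */
   u_char *target = buf;   /* any host address would do */

   /* Skips the REX prefix, sees opcode 0xb8 and rewrites the 8-byte
      immediate in place with the target address. */
   amd64_patch(buf,target);
}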

#endif