This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/ppc32_amd64_trans.h



Revision 12
Sat Oct 6 16:45:40 2007 UTC (16 years, 5 months ago) by dpavlin
File MIME type: text/plain
File size: 2318 byte(s)
make working copy

/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#ifndef __PPC32_AMD64_TRANS_H__
#define __PPC32_AMD64_TRANS_H__

#include "utils.h"
#include "amd64-codegen.h"
#include "cpu.h"
#include "dynamips.h"
#include "ppc32_exec.h"

#define JIT_SUPPORT 1

/* Manipulate bitmasks atomically */
static forced_inline void atomic_or(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; orl %1,%0":"=m"(*v):"ir"(m),"m"(*v));
}

static forced_inline void atomic_and(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; andl %1,%0":"=m"(*v):"ir"(m),"m"(*v));
}

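For illustration, a minimal usage sketch of the two helpers above: they set and clear individual bits in a shared 32-bit word using "lock"-prefixed read-modify-write instructions, so no separate mutex is needed. The flag names and wrapper functions below are hypothetical and are not part of this header.

/* Hypothetical usage sketch (not part of the original header). */
#define EXAMPLE_FLAG_DIRTY   0x01
#define EXAMPLE_FLAG_TARGET  0x02

static void example_mark_dirty(m_uint32_t *flags)
{
   atomic_or(flags, EXAMPLE_FLAG_DIRTY);      /* set the DIRTY bit */
}

static void example_clear_target(m_uint32_t *flags)
{
   atomic_and(flags, ~EXAMPLE_FLAG_TARGET);   /* clear the TARGET bit */
}
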
/* Wrappers to amd64-codegen functions */
#define ppc32_jit_tcb_set_patch amd64_patch
#define ppc32_jit_tcb_set_jump amd64_jump_code

/* PPC instruction array */
extern struct ppc32_insn_tag ppc32_insn_tags[];

/* Push epilog for an amd64 instruction block */
static forced_inline void ppc32_jit_tcb_push_epilog(u_char **ptr)
{
   amd64_ret(*ptr);
}

/* Execute JIT code */
static forced_inline
void ppc32_jit_tcb_exec(cpu_ppc_t *cpu,ppc32_jit_tcb_t *block)
{
   insn_tblock_fptr jit_code;
   m_uint32_t offset;

   /* Index of the current instruction within the page (PPC opcodes are 4 bytes) */
   offset = (cpu->ia & PPC32_MIN_PAGE_IMASK) >> 2;
   jit_code = (insn_tblock_fptr)block->jit_insn_ptr[offset];

   if (unlikely(!jit_code)) {
      /* No native code for this entry point yet: remember it as a jump
       * target, recompile the block once this has happened 16 times,
       * otherwise fall back to the interpreter for this page. */
      ppc32_jit_tcb_set_target_bit(block,cpu->ia);

      if (++block->target_undef_cnt == 16) {
         ppc32_jit_tcb_recompile(cpu,block);
         jit_code = (insn_tblock_fptr)block->jit_insn_ptr[offset];
      } else {
         ppc32_exec_page(cpu);
         return;
      }
   }

   /* The generated code expects the CPU context pointer in %r15 */
   asm volatile ("movq %0,%%r15"::"r"(cpu):
                 "r13","r14","r15","rax","rbx","rcx","rdx","rdi","rsi");
   jit_code();
}

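As a worked example of the slot computation above: the low bits of the instruction address select the entry in jit_insn_ptr, divided by four because each PPC instruction is 4 bytes. The 0xFFF mask below assumes the usual dynamips minimum page size of 4 KB (PPC32_MIN_PAGE_IMASK is defined elsewhere, so its value here is an assumption), and the function itself is only an illustration, not part of the header.

/* Hypothetical illustration: map an instruction address to its slot in
 * jit_insn_ptr, assuming PPC32_MIN_PAGE_IMASK == 0xFFF (4 KB pages). */
static m_uint32_t example_jit_slot(m_uint32_t ia)
{
   return (ia & 0xFFF) >> 2;   /* e.g. ia = 0x00001008 -> slot 2 */
}
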
/* Patch an already-emitted amd64 instruction so that it references 'target' */
static inline void amd64_patch(u_char *code,u_char *target)
{
   /* Skip REX */
   if ((code[0] >= 0x40) && (code[0] <= 0x4f))
      code += 1;

   if ((code[0] & 0xf8) == 0xb8) {
      /* amd64_set_reg_template */
      *(m_uint64_t *)(code + 1) = (m_uint64_t)target;
   }
   else if (code[0] == 0x8b) {
      /* mov 0(%rip), %dreg */
      *(m_uint32_t *)(code + 2) = (m_uint32_t)(m_uint64_t)target - 7;
   }
   else if ((code[0] == 0xff) && (code[1] == 0x15)) {
      /* call *<OFFSET>(%rip) */
      *(m_uint32_t *)(code + 2) = ((m_uint32_t)(m_uint64_t)target) - 7;
   }
   else
      x86_patch(code,target);
}
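
For illustration, a sketch of the first case handled above: a REX.W mov of a 64-bit immediate into a register ("movabs $imm64,%rax", encoded 48 B8 followed by eight immediate bytes). The buffer, stub target and function name below are made up for the example and are not part of the file.

/* Hypothetical example (not from the original header): patch the 64-bit
 * immediate of a previously emitted "movabs $imm64,%rax" (48 B8 <imm64>). */
static u_char example_target_stub;

static void example_patch_mov_imm64(void)
{
   u_char buf[10] = { 0x48, 0xB8, 0, 0, 0, 0, 0, 0, 0, 0 };

   /* amd64_patch() skips the REX byte, recognizes the 0xB8 opcode and
    * stores the 8-byte pointer value right after it (at buf + 2). */
   amd64_patch(buf, &example_target_stub);
}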

#endif
