/*
 *  Copyright (C) 2007 Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: native_x86.c,v 1.6 2007/04/10 15:37:00 debug Exp $
 *
 *  Native code generation backend for AMD64 and i386.
 *
 *  NOTE/TODO: This is ONLY for amd64 for now, not i386!
 *
 *  NOTE/TODO: This is just a dummy so far; there is no working native
 *  code generation for GXemul's new dyntrans system.
 *
 *  Register usage:
 *
 *	rdi:	Function call arg 1, always the struct cpu *.
 *	rsi:	Function call arg 2, struct ic * at entry to the code block.
 *		Note: rdi and rsi need to be saved, if a function call is
 *		made. (They are not preserved.)
 *
 *	rax, rbx, rcx, rdx, r08, r09, r10, r11:  Temporaries.
 *		(NOTE: rbx must be saved at function entry and restored at
 *		exit. The others don't need to be saved.)
 *
 *	r12-r15:  Saved temporaries. These may be used, if their original
 *		contents is saved at entry and restored at exit.
 */

/*  NOTE: This file is included from native.c.  */


#include "native_x86.h"

/*  static char *x86_reg_names[] = X86_REG_NAMES;  */

/*
 *  x86_round_robin_temp_reg():
 *
 *  Returns the index of the next temporary register to use, alternating
 *  between 0 and 1 on successive calls.
 *
 *  TODO: Which registers on amd64 are temp registers?
 */
static int x86_round_robin_temp_reg(void)
{
	static int temp_reg = 0;
	int reg = temp_reg;

	/*  Keep the counter bounded instead of incrementing forever; an
	    unbounded signed increment would eventually overflow, which is
	    undefined behavior in C:  */
	temp_reg = (temp_reg + 1) % 2;

	return reg;
}


/* Allocate a new native_op, and add it after the previous one. */ |
69 |
static void new_op(struct native_op **native_ops_p, struct native_op **first_op) |
70 |
{ |
71 |
struct native_op *op = malloc(sizeof(struct native_op)); |
72 |
if (op == NULL) { |
73 |
fprintf(stderr, "fatal error, out of memory\n"); |
74 |
exit(1); |
75 |
} |
76 |
|
77 |
memset(op, 0, sizeof(struct native_op)); |
78 |
|
79 |
if (*native_ops_p != NULL) { |
80 |
(*native_ops_p)->next = op; |
81 |
op->prev = (*native_ops_p); |
82 |
} |
83 |
|
84 |
*native_ops_p = op; |
85 |
|
86 |
if (*first_op == NULL) |
87 |
*first_op = op; |
88 |
} |


void dump_op_list(struct native_op *list);


int native_inr_to_native_ops(struct inr *inr, |
95 |
struct native_op **native_ops_p) |
96 |
{ |
97 |
struct native_op *first_op = NULL; |
98 |
struct native_op *ops = NULL; |
99 |
int i, nr_of_inr_entries = inr->nr_inr_entries_used; |
100 |
|
101 |
*native_ops_p = NULL; |
102 |
|
103 |
for (i = 0; i < nr_of_inr_entries; ++i) { |
104 |
/* Get the INR opcode: */ |
105 |
struct inr_entry *inr_entry = &inr->inr_entries[i]; |
106 |
int inr_opcode = inr_entry->opcode; |
107 |
int r1; |
108 |
|
109 |
switch (inr_opcode) { |
110 |
|
111 |
case INR_OPCODE_NOP: |
112 |
break; |
113 |
|
114 |
case INR_OPCODE_OR_DCR32_SCR32_IS16: |
115 |
new_op(&ops, &first_op); |
116 |
r1 = x86_round_robin_temp_reg(); |
117 |
ops->opcode = NATIVE_X86_OPCODE_LOAD_CR64_R64; |
118 |
ops->arg1 = inr_entry->arg2; |
119 |
ops->arg2 = r1; |
120 |
new_op(&ops, &first_op); |
121 |
ops->opcode = NATIVE_X86_OPCODE_OR_R64_I32; |
122 |
ops->arg1 = r1; |
123 |
ops->arg2 = inr_entry->arg3; |
124 |
new_op(&ops, &first_op); |
125 |
ops->opcode = NATIVE_X86_OPCODE_STORE_CR64_R64; |
126 |
ops->arg1 = inr_entry->arg1; |
127 |
ops->arg2 = r1; |
128 |
break; |
129 |
|
130 |
default: |
131 |
/* TODO: Break out and return partial failure/ |
132 |
success in a better way */ |
133 |
fprintf(stderr, "Internal error in native_x86.c:" |
134 |
" INR opcode %i not yet implemented.\n", |
135 |
inr_opcode); |
136 |
exit(1); |
137 |
} |
138 |
} |
139 |
|
140 |
*native_ops_p = first_op; |
141 |
|
142 |
return 0; |
143 |
} |


#ifdef TEST_NATIVE_X86

void dump_op_list(struct native_op *list) |
149 |
{ |
150 |
int n = 0; |
151 |
printf("OPs:\n"); |
152 |
while (list != NULL) { |
153 |
printf("nr %i: opcode = %i (0x%"PRIx64",0x%"PRIx64 |
154 |
",0x%"PRIx64")\n", |
155 |
n++, list->opcode, list->arg1, list->arg2, list->arg3); |
156 |
|
157 |
list = list->next; |
158 |
} |
159 |
printf("-----\n"); |
160 |
} |
void dump_inr_array(struct inr *inr) |
163 |
{ |
164 |
int i; |
165 |
printf("INR:\n"); |
166 |
for (i=0; i<inr->nr_inr_entries_used; ++i) { |
167 |
struct inr_entry *e = &inr->inr_entries[i]; |
168 |
printf("nr %i: opcode = %i (0x%"PRIx64",0x%"PRIx64 |
169 |
",0x%"PRIx64")\n", |
170 |
i, e->opcode, e->arg1, e->arg2, e->arg3); |
171 |
} |
172 |
printf("-----\n"); |
173 |
} |

#define	N	3

void generate_inr_array_contents(struct inr *inr, int n) |
178 |
{ |
179 |
int i; |
180 |
|
181 |
memset(inr, 0, sizeof(inr)); |
182 |
inr->inr_entries = malloc(sizeof(struct inr_entry) * |
183 |
INR_MAX_ENTRIES); |
184 |
|
185 |
inr->nr_inr_entries_used = n; |
186 |
|
187 |
for (i=0; i<n; i++) { |
188 |
struct inr_entry inr_entry; |
189 |
memset(&inr_entry, 0, sizeof(inr_entry)); |
190 |
|
191 |
inr_entry.opcode = INR_OPCODE_OR_DCR32_SCR32_IS16; |
192 |
inr_entry.arg1 = 128; |
193 |
inr_entry.arg2 = 160; |
194 |
inr_entry.arg3 = 0x1234; |
195 |
|
196 |
inr->inr_entries[i] = inr_entry; |
197 |
} |
198 |
} |
void test_native_x86(void) |
201 |
{ |
202 |
struct inr inr; |
203 |
struct native_op *op_list; |
204 |
|
205 |
printf("TESTING x86/amd64 code generation\n"); |
206 |
|
207 |
generate_inr_array_contents(&inr, N); |
208 |
|
209 |
dump_inr_array(&inr); |
210 |
|
211 |
native_inr_to_native_ops(&inr, &op_list); |
212 |
dump_op_list(op_list); |
213 |
|
214 |
printf("DONE\n"); |
215 |
|
216 |
exit(1); |
217 |
} |

#endif	/*  TEST_NATIVE_X86  */