/[gxemul]/upstream/0.3.5/src/cpu_alpha_instr_loadstore.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.3.5/src/cpu_alpha_instr_loadstore.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 13 - (show annotations)
Mon Oct 8 16:18:43 2007 UTC (16 years, 8 months ago) by dpavlin
File MIME type: text/plain
File size: 8637 byte(s)
0.3.5
1 /*
2 * Copyright (C) 2005 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: cpu_alpha_instr_loadstore.c,v 1.9 2005/08/08 20:19:43 debug Exp $
29 *
30 * Alpha load/store instructions. (Included from cpu_alpha_instr_inc.c.)
31 *
32 *
33 * Load/store instructions have the following arguments:
34 *
35 * arg[0] = pointer to the register to load to or store from (uint64_t)
36 * arg[1] = pointer to the base register (uint64_t)
37 * arg[2] = offset (as an int32_t)
38 *
39 * NOTE:
40 * Alpha byte and word loads (8- and 16-bit) are unsigned, while
41 * 32-bit long words are sign-extended up to 64 bits during a load!
42 */
43
44
45 #ifndef LS_IGNORE_OFFSET
46 #ifndef LS_ALIGN_CHECK
47 static void LS_GENERIC_N(struct cpu *cpu, struct alpha_instr_call *ic)
48 {
49 #ifdef LS_B
50 unsigned char data[1];
51 #endif
52 #ifdef LS_W
53 unsigned char data[2];
54 #endif
55 #ifdef LS_L
56 unsigned char data[4];
57 #endif
58 #ifdef LS_Q
59 unsigned char data[8];
60 #endif
61 uint64_t addr = *((uint64_t *)ic->arg[1]);
62 uint64_t data_x;
63
64 addr += (int32_t)ic->arg[2];
65 #ifdef LS_UNALIGNED
66 addr &= ~7;
67 #endif
68
69 #ifdef LS_LOAD
70 /* Load: */
71 if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
72 MEM_READ, CACHE_DATA)) {
73 fatal("store failed: TODO\n");
74 exit(1);
75 }
76
77 data_x = data[0];
78 #ifndef LS_B
79 data_x += (data[1] << 8);
80 #ifndef LS_W
81 data_x += (data[2] << 16);
82 data_x += (data[3] << 24);
83 #ifdef LS_L
84 data_x = (int64_t)(int32_t)data_x;
85 #endif
86 #ifndef LS_L
87 data_x += ((uint64_t)data[4] << 32);
88 data_x += ((uint64_t)data[5] << 40);
89 data_x += ((uint64_t)data[6] << 48);
90 data_x += ((uint64_t)data[7] << 56);
91 #endif
92 #endif
93 #endif
94 *((uint64_t *)ic->arg[0]) = data_x;
95 #else
96 /* Store: */
97 data_x = *((uint64_t *)ic->arg[0]);
98 data[0] = data_x;
99 #ifndef LS_B
100 data[1] = data_x >> 8;
101 #ifndef LS_W
102 data[2] = data_x >> 16;
103 data[3] = data_x >> 24;
104 #ifndef LS_L
105 data[4] = data_x >> 32;
106 data[5] = data_x >> 40;
107 data[6] = data_x >> 48;
108 data[7] = data_x >> 56;
109 #endif
110 #endif
111 #endif
112
113 if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
114 MEM_WRITE, CACHE_DATA)) {
115 fatal("store failed: TODO\n");
116 exit(1);
117 }
118
119 #ifdef LS_LLSC
120 #ifndef LS_LOAD
121 *((uint64_t *)ic->arg[0]) = 1;
122 #endif
123 #endif
124
125 #endif
126 }
127 #endif
128 #endif
129
130
/*
 *  LS_N():
 *
 *  Fast-path load/store.  Computes the effective address, handles the
 *  load-linked/store-conditional bookkeeping (LS_LLSC), optionally falls
 *  back to LS_GENERIC_N() on an unaligned address (LS_ALIGN_CHECK), and
 *  then tries to access the data directly through a host page pointer
 *  found via the two-level virtual-to-host page tables (vph_table0 for
 *  userspace addresses, vph_table0_kernel for the kernel region).  Any
 *  address that does not resolve to a host page falls back to
 *  LS_GENERIC_N().
 */
static void LS_N(struct cpu *cpu, struct alpha_instr_call *ic)
{
	int first, a, b, c;
	uint64_t addr;

	/*  Effective address = base register (+ offset, unless this is
	    an LS_IGNORE_OFFSET variant):  */
	addr = (*((uint64_t *)ic->arg[1]))
#ifndef LS_IGNORE_OFFSET
	    + (int32_t)ic->arg[2]
#endif
	    ;

#ifdef LS_UNALIGNED
	/*  ldq_u/stq_u: force 8-byte alignment  */
	addr &= ~7;
#endif

#ifdef LS_LLSC
#ifdef LS_LOAD
	/*  Load-linked: remember the (cache-line-aligned) address and
	    set the lock flag.  */
	/*  TODO: cache-line size!  */
	cpu->cd.alpha.load_linked_addr = addr & ~63;
	cpu->cd.alpha.ll_flag = 1;
#else
	/*  Store-conditional: succeed only if the lock flag is still set;
	    on success, clear every cpu's flag; on failure, write 0 to the
	    target register and do not perform the store.  */
	/*  TODO: only invalidate per cache line, not everything!  */
	if (cpu->cd.alpha.ll_flag == 1) {
		int i;
		for (i=0; i<cpu->machine->ncpus; i++)
			cpu->machine->cpus[i]->cd.alpha.ll_flag = 0;
	} else {
		*((uint64_t *)ic->arg[0]) = 0;
		return;
	}
#endif
#endif

	/*  Split the address into top bits, two table indices, and the
	    offset within an 8 KB page:  */
	first = addr >> ALPHA_TOPSHIFT;
	a = (addr >> ALPHA_LEVEL0_SHIFT) & (ALPHA_LEVEL0 - 1);
	b = (addr >> ALPHA_LEVEL1_SHIFT) & (ALPHA_LEVEL1 - 1);
	c = addr & 8191;

#ifdef LS_ALIGN_CHECK
#ifndef LS_B
	/*  Unaligned access?  Take the generic slow path.  (The mask is
	    1, 3 or 7 depending on the access size.)  */
	if (c &
#ifdef LS_W
	    1
#endif
#ifdef LS_L
	    3
#endif
#ifdef LS_Q
	    7
#endif
	    ) {
		LS_GENERIC_N(cpu, ic);
		return;
	}
	else
#endif
#endif

	if (first == 0) {
		/*  Userspace (low) address range:  */
		struct alpha_vph_page *vph_p;
		unsigned char *page;
		vph_p = cpu->cd.alpha.vph_table0[a];
		page = vph_p->host_load[b];
		if (page != NULL) {
#ifdef LS_LOAD
#ifdef HOST_BIG_ENDIAN
			/*  Big-endian host: assemble little-endian bytes
			    manually.  NOTE(review): (page[c+3] << 24) is
			    promoted to signed int; a byte >= 0x80 may
			    sign-extend into the top half of an LS_Q load —
			    looks like a latent bug, confirm upstream.  */
			uint64_t data_x;
			data_x = page[c];
#ifndef LS_B
			data_x += (page[c+1] << 8);
#ifndef LS_W
			data_x += (page[c+2] << 16);
			data_x += (page[c+3] << 24);
#ifndef LS_L
			data_x += ((uint64_t)page[c+4] << 32);
			data_x += ((uint64_t)page[c+5] << 40);
			data_x += ((uint64_t)page[c+6] << 48);
			data_x += ((uint64_t)page[c+7] << 56);
#endif
#endif
#endif
#ifdef LS_L
			/*  32-bit loads sign-extend to 64 bits  */
			*((uint64_t *)ic->arg[0]) = (int64_t)(int32_t)data_x;
#else
			*((uint64_t *)ic->arg[0]) = data_x;
#endif
#else
			/*  Little-endian host: direct (aligned) access.  */
#ifdef LS_B
			*((uint64_t *)ic->arg[0]) = page[c];
#endif
#ifdef LS_W
			uint16_t d = *((uint16_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = d;
#endif
#ifdef LS_L
			int32_t d = *((int32_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = (int64_t)d;
#endif
#ifdef LS_Q
			uint64_t d = *((uint64_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = d;
#endif
#endif
#else
			/*  Store:  */
#ifdef HOST_BIG_ENDIAN
			uint64_t data_x = *((uint64_t *)ic->arg[0]);
			page[c] = data_x;
#ifndef LS_B
			page[c+1] = data_x >> 8;
#ifndef LS_W
			page[c+2] = data_x >> 16;
			page[c+3] = data_x >> 24;
#ifndef LS_L
			page[c+4] = data_x >> 32;
			page[c+5] = data_x >> 40;
			page[c+6] = data_x >> 48;
			page[c+7] = data_x >> 56;
#endif
#endif
#endif
#else
			/*  Native byte order:  */
#ifdef LS_B
			page[c] = *((uint64_t *)ic->arg[0]);
#endif
#ifdef LS_W
			uint32_t d = *((uint64_t *)ic->arg[0]);
			*((uint16_t *) (page + c)) = d;
#endif
#ifdef LS_L
			uint32_t d = *((uint64_t *)ic->arg[0]);
			*((uint32_t *) (page + c)) = d;
#endif
#ifdef LS_Q
			uint64_t d = *((uint64_t *)ic->arg[0]);
			*((uint64_t *) (page + c)) = d;
#endif
#endif

#ifdef LS_LLSC
#ifndef LS_LOAD
			/*  Store-conditional success: write 1  */
			*((uint64_t *)ic->arg[0]) = 1;
#endif
#endif

#endif	/*  !LS_LOAD  */
		} else
			LS_GENERIC_N(cpu, ic);
	} else if (first == ALPHA_TOP_KERNEL) {
		/*  Kernel address range: same fast path as above, but
		    using the kernel page table.  */
		struct alpha_vph_page *vph_p;
		unsigned char *page;
		vph_p = cpu->cd.alpha.vph_table0_kernel[a];
		page = vph_p->host_load[b];
		if (page != NULL) {
#ifdef LS_LOAD
#ifdef HOST_BIG_ENDIAN
			/*  See the NOTE(review) on the userspace branch
			    about possible sign-extension of (x << 24).  */
			uint64_t data_x;
			data_x = page[c];
#ifndef LS_B
			data_x += (page[c+1] << 8);
#ifndef LS_W
			data_x += (page[c+2] << 16);
			data_x += (page[c+3] << 24);
#ifndef LS_L
			data_x += ((uint64_t)page[c+4] << 32);
			data_x += ((uint64_t)page[c+5] << 40);
			data_x += ((uint64_t)page[c+6] << 48);
			data_x += ((uint64_t)page[c+7] << 56);
#endif
#endif
#endif
#ifdef LS_L
			*((uint64_t *)ic->arg[0]) = (int64_t)(int32_t)data_x;
#else
			*((uint64_t *)ic->arg[0]) = data_x;
#endif
#else
#ifdef LS_B
			*((uint64_t *)ic->arg[0]) = page[c];
#endif
#ifdef LS_W
			uint16_t d = *((uint16_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = d;
#endif
#ifdef LS_L
			int32_t d = *((int32_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = (int64_t)d;
#endif
#ifdef LS_Q
			uint64_t d = *((uint64_t *) (page + c));
			*((uint64_t *)ic->arg[0]) = d;
#endif
#endif
#else
			/*  Store:  */
#ifdef HOST_BIG_ENDIAN
			uint64_t data_x = *((uint64_t *)ic->arg[0]);
			page[c] = data_x;
#ifndef LS_B
			page[c+1] = data_x >> 8;
#ifndef LS_W
			page[c+2] = data_x >> 16;
			page[c+3] = data_x >> 24;
#ifndef LS_L
			page[c+4] = data_x >> 32;
			page[c+5] = data_x >> 40;
			page[c+6] = data_x >> 48;
			page[c+7] = data_x >> 56;
#endif
#endif
#endif
#else
			/*  Native byte order:  */
#ifdef LS_B
			page[c] = *((uint64_t *)ic->arg[0]);
#endif
#ifdef LS_W
			uint32_t d = *((uint64_t *)ic->arg[0]);
			*((uint16_t *) (page + c)) = d;
#endif
#ifdef LS_L
			uint32_t d = *((uint64_t *)ic->arg[0]);
			*((uint32_t *) (page + c)) = d;
#endif
#ifdef LS_Q
			uint64_t d = *((uint64_t *)ic->arg[0]);
			*((uint64_t *) (page + c)) = d;
#endif
#endif

#ifdef LS_LLSC
#ifndef LS_LOAD
			*((uint64_t *)ic->arg[0]) = 1;
#endif
#endif

#endif	/*  !LS_LOAD  */
		} else
			LS_GENERIC_N(cpu, ic);
	} else
		LS_GENERIC_N(cpu, ic);
}
374

  ViewVC Help
Powered by ViewVC 1.1.26