1 |
/* |
2 |
* Cisco 7200 (Predator) simulation platform. |
3 |
* Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr) |
4 |
*/ |
5 |
|
6 |
#define _GNU_SOURCE |
7 |
#include <stdio.h> |
8 |
#include <stdlib.h> |
9 |
#include <unistd.h> |
10 |
#include <string.h> |
11 |
#include <sys/types.h> |
12 |
#include <sys/stat.h> |
13 |
#include <sys/mman.h> |
14 |
#include <fcntl.h> |
15 |
#include <assert.h> |
16 |
|
17 |
#include "mips64.h" |
18 |
#include "dynamips.h" |
19 |
#include "memory.h" |
20 |
#include "device.h" |
21 |
#include "cpu.h" |
22 |
#include "cp0.h" |
23 |
#include "vm.h" |
24 |
|
25 |
/* Record a memory access */ |
26 |
void memlog_rec_access(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t data, |
27 |
m_uint32_t op_size,m_uint32_t op_type) |
28 |
{ |
29 |
memlog_access_t *acc; |
30 |
|
31 |
acc = &cpu->memlog_array[cpu->memlog_pos]; |
32 |
acc->pc = cpu->pc; |
33 |
acc->vaddr = vaddr; |
34 |
acc->data = data; |
35 |
acc->op_size = op_size; |
36 |
acc->op_type = op_type; |
37 |
acc->data_valid = (op_type == MTS_WRITE); |
38 |
|
39 |
cpu->memlog_pos = (cpu->memlog_pos + 1) & (MEMLOG_COUNT - 1); |
40 |
} |
41 |
|
42 |
/* Show the latest memory accesses */ |
43 |
void memlog_dump(cpu_mips_t *cpu) |
44 |
{ |
45 |
memlog_access_t *acc; |
46 |
char s_data[64]; |
47 |
u_int i,pos; |
48 |
|
49 |
for(i=0;i<MEMLOG_COUNT;i++) { |
50 |
pos = cpu->memlog_pos + i; |
51 |
pos &= (MEMLOG_COUNT-1); |
52 |
acc = &cpu->memlog_array[pos]; |
53 |
|
54 |
if (cpu->pc) { |
55 |
if (acc->data_valid) |
56 |
snprintf(s_data,sizeof(s_data),"0x%llx",acc->data); |
57 |
else |
58 |
snprintf(s_data,sizeof(s_data),"XXXXXXXX"); |
59 |
|
60 |
printf("CPU%u: pc=0x%8.8llx, vaddr=0x%8.8llx, " |
61 |
"size=%u, type=%s, data=%s\n", |
62 |
cpu->id,acc->pc,acc->vaddr,acc->op_size, |
63 |
(acc->op_type == MTS_READ) ? "read " : "write", |
64 |
s_data); |
65 |
} |
66 |
} |
67 |
} |
68 |
|
69 |
/* Update the data obtained by a read access */ |
70 |
void memlog_update_read(cpu_mips_t *cpu,m_iptr_t raddr) |
71 |
{ |
72 |
memlog_access_t *acc; |
73 |
|
74 |
acc = &cpu->memlog_array[(cpu->memlog_pos-1) & (MEMLOG_COUNT-1)]; |
75 |
|
76 |
if (acc->op_type == MTS_READ) |
77 |
{ |
78 |
switch(acc->op_size) { |
79 |
case 1: |
80 |
acc->data = *(m_uint8_t *)raddr; |
81 |
break; |
82 |
case 2: |
83 |
acc->data = vmtoh16(*(m_uint16_t *)raddr); |
84 |
break; |
85 |
case 4: |
86 |
acc->data = vmtoh32(*(m_uint32_t *)raddr); |
87 |
break; |
88 |
case 8: |
89 |
acc->data = vmtoh64(*(m_uint64_t *)raddr); |
90 |
break; |
91 |
} |
92 |
|
93 |
acc->data_valid = TRUE; |
94 |
} |
95 |
} |
96 |
|
97 |
/* MTS access with special access mask */ |
98 |
void mts_access_special(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t mask, |
99 |
u_int op_code,u_int op_type,u_int op_size, |
100 |
m_uint64_t *data,u_int *exc) |
101 |
{ |
102 |
switch(mask) { |
103 |
case MTS_ACC_U: |
104 |
#if DEBUG_MTS_ACC_U |
105 |
if (op_type == MTS_READ) |
106 |
cpu_log(cpu,"MTS","read access to undefined address 0x%llx at " |
107 |
"pc=0x%llx (size=%u)\n",vaddr,cpu->pc,op_size); |
108 |
else |
109 |
cpu_log(cpu,"MTS","write access to undefined address 0x%llx at " |
110 |
"pc=0x%llx, value=0x%8.8llx (size=%u)\n", |
111 |
vaddr,cpu->pc,*data,op_size); |
112 |
#endif |
113 |
if (op_type == MTS_READ) |
114 |
*data = 0; |
115 |
break; |
116 |
|
117 |
case MTS_ACC_T: |
118 |
if (op_code != MIPS_MEMOP_LOOKUP) { |
119 |
#if DEBUG_MTS_ACC_T |
120 |
cpu_log(cpu,"MTS","TLB exception for address 0x%llx at pc=0x%llx " |
121 |
"(%s access, size=%u)\n", |
122 |
vaddr,cpu->pc,(op_type == MTS_READ) ? |
123 |
"read":"write",op_size); |
124 |
mips64_dump_regs(cpu); |
125 |
#if MEMLOG_ENABLE |
126 |
memlog_dump(cpu); |
127 |
#endif |
128 |
#endif |
129 |
cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr; |
130 |
|
131 |
if (op_type == MTS_READ) |
132 |
mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_LOAD,0); |
133 |
else |
134 |
mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_SAVE,0); |
135 |
} |
136 |
|
137 |
*exc = 1; |
138 |
break; |
139 |
|
140 |
case MTS_ACC_AE: |
141 |
if (op_code != MIPS_MEMOP_LOOKUP) { |
142 |
#if DEBUG_MTS_ACC_AE |
143 |
cpu_log(cpu,"MTS","AE exception for address 0x%llx at pc=0x%llx " |
144 |
"(%s access)\n", |
145 |
vaddr,cpu->pc,(op_type == MTS_READ) ? "read":"write"); |
146 |
#endif |
147 |
cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr; |
148 |
|
149 |
if (op_type == MTS_READ) |
150 |
mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_LOAD,0); |
151 |
else |
152 |
mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_SAVE,0); |
153 |
} |
154 |
|
155 |
*exc = 1; |
156 |
break; |
157 |
} |
158 |
} |
159 |
|
160 |
/* Allocate an L1 array */ |
161 |
mts32_l1_array_t *mts32_alloc_l1_array(m_iptr_t val) |
162 |
{ |
163 |
mts32_l1_array_t *p; |
164 |
u_int i; |
165 |
|
166 |
if (!(p = malloc(sizeof(mts32_l1_array_t)))) |
167 |
return NULL; |
168 |
|
169 |
for(i=0;i<(1 << MTS32_LEVEL1_BITS);i++) |
170 |
p->entry[i] = val; |
171 |
|
172 |
return p; |
173 |
} |
174 |
|
175 |
/* Allocate an L2 array */ |
176 |
mts32_l2_array_t *mts32_alloc_l2_array(cpu_mips_t *cpu,m_iptr_t val) |
177 |
{ |
178 |
mts32_l2_array_t *p; |
179 |
u_int i; |
180 |
|
181 |
if (cpu->mts32_l2_free_list) { |
182 |
p = cpu->mts32_l2_free_list; |
183 |
cpu->mts32_l2_free_list = p->next; |
184 |
} else { |
185 |
if (!(p = m_memalign((1 << MTS_FLAG_BITS),sizeof(*p)))) |
186 |
return NULL; |
187 |
} |
188 |
|
189 |
for(i=0;i<(1 << MTS32_LEVEL2_BITS);i++) |
190 |
p->entry[i] = val; |
191 |
|
192 |
return p; |
193 |
} |
194 |
|
195 |
/* Free an L1 array */ |
196 |
void mts32_free_l1_array(mts32_l1_array_t *array) |
197 |
{ |
198 |
u_int i; |
199 |
|
200 |
if (array != NULL) { |
201 |
for(i=0;i<(1<<MTS32_LEVEL1_BITS);i++) |
202 |
if (array->entry[i] & MTS_CHAIN_MASK) |
203 |
free((void *)(array->entry[i] & ~MTS_CHAIN_MASK)); |
204 |
|
205 |
free(array); |
206 |
} |
207 |
} |
208 |
|
209 |
/* Free an L2 array */ |
210 |
void mts32_free_l2_array(cpu_mips_t *cpu,mts32_l2_array_t *array) |
211 |
{ |
212 |
array->next = cpu->mts32_l2_free_list; |
213 |
cpu->mts32_l2_free_list = array; |
214 |
} |
215 |
|
216 |
/* Set an L1 entry */ |
217 |
static void mts32_set_l1_data(cpu_mips_t *cpu,m_uint32_t start,m_uint32_t len, |
218 |
m_iptr_t val) |
219 |
{ |
220 |
mts32_l1_array_t *p = cpu->mts_l1_ptr; |
221 |
m_uint32_t pos; |
222 |
m_iptr_t p2; |
223 |
|
224 |
while(len > 0) { |
225 |
pos = start >> (MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS); |
226 |
|
227 |
if (pos >= (1 << MTS32_LEVEL1_BITS)) |
228 |
break; |
229 |
|
230 |
/* free a possible L2 array */ |
231 |
if (p->entry[pos] & MTS_CHAIN_MASK) { |
232 |
p2 = p->entry[pos] & ~MTS_CHAIN_MASK; |
233 |
mts32_free_l2_array(cpu,(mts32_l2_array_t *)p2); |
234 |
} |
235 |
|
236 |
p->entry[pos] = val; |
237 |
start += MTS32_LEVEL1_SIZE; |
238 |
len -= MTS32_LEVEL1_SIZE; |
239 |
} |
240 |
} |
241 |
|
242 |
/* Fork an L1 array */ |
243 |
static int mts32_fork_l1_array(cpu_mips_t *cpu,u_int l1_pos) |
244 |
{ |
245 |
mts32_l1_array_t *p1; |
246 |
mts32_l2_array_t *p2; |
247 |
m_iptr_t entry,val; |
248 |
u_int i; |
249 |
|
250 |
p1 = cpu->mts_l1_ptr; |
251 |
entry = p1->entry[l1_pos]; |
252 |
val = ((entry & MTS_ACC_MASK) != MTS_ACC_OK) ? entry : 0; |
253 |
|
254 |
if (!(p2 = mts32_alloc_l2_array(cpu,val))) |
255 |
return(-1); |
256 |
|
257 |
/* mts32_alloc_l2_array() did the job for us */ |
258 |
if (!val) { |
259 |
for(i=0;i<(1 << MTS32_LEVEL2_BITS);i++) |
260 |
p2->entry[i] = entry + (1 << MTS32_OFFSET_BITS); |
261 |
} |
262 |
|
263 |
p1->entry[l1_pos] = (m_iptr_t)p2 | MTS_CHAIN_MASK; |
264 |
return(0); |
265 |
} |
266 |
|
267 |
/* Set address error on a complete level 1 array */ |
268 |
void mts32_set_l1_ae(cpu_mips_t *cpu) |
269 |
{ |
270 |
mts32_l1_array_t *p1 = cpu->mts_l1_ptr; |
271 |
u_int i; |
272 |
|
273 |
for(i=0;i<(1<<MTS32_LEVEL1_BITS);i++) |
274 |
p1->entry[i] = MTS_ACC_AE; |
275 |
} |
276 |
|
277 |
/* Set an L2 entry */ |
278 |
static int mts32_set_l2_entry(cpu_mips_t *cpu,m_uint64_t vaddr,m_iptr_t val) |
279 |
{ |
280 |
m_uint32_t naddr = vaddr & 0xffffffff; |
281 |
m_uint32_t l1_pos,l2_pos; |
282 |
mts32_l1_array_t *p1; |
283 |
mts32_l2_array_t *p2; |
284 |
m_iptr_t entry; |
285 |
|
286 |
p1 = cpu->mts_l1_ptr; |
287 |
|
288 |
l1_pos = naddr >> (MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS); |
289 |
l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1); |
290 |
|
291 |
entry = p1->entry[l1_pos]; |
292 |
|
293 |
if (!(entry & MTS_CHAIN_MASK)) { |
294 |
if (mts32_fork_l1_array(cpu,l1_pos) == -1) { |
295 |
fprintf(stderr,"mts32_set_l2_entry: unable to fork L1 entry.\n"); |
296 |
return(-1); |
297 |
} |
298 |
|
299 |
entry = p1->entry[l1_pos]; |
300 |
} |
301 |
|
302 |
p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK); |
303 |
p2->entry[l2_pos] = val; |
304 |
return(0); |
305 |
} |
306 |
|
307 |
/* Initialize an empty MTS32 subsystem */ |
308 |
int mts32_init_empty(cpu_mips_t *cpu) |
309 |
{ |
310 |
if (cpu->state == MIPS_CPU_RUNNING) { |
311 |
cpu_log(cpu,"MTS","trying to reset MTS while the CPU is online.\n"); |
312 |
return(-1); |
313 |
} |
314 |
|
315 |
mts32_free_l1_array(cpu->mts_l1_ptr); |
316 |
|
317 |
/* Allocate a new L1 array */ |
318 |
cpu->mts_l1_ptr = mts32_alloc_l1_array(0); |
319 |
|
320 |
if (!cpu->mts_l1_ptr) |
321 |
return(-1); |
322 |
|
323 |
/* Address Error on complete address space for now */ |
324 |
mts32_set_l1_ae(cpu); |
325 |
return(0); |
326 |
} |
327 |
|
328 |
/* Free memory used by MTS32 */ |
329 |
void mts32_shutdown(cpu_mips_t *cpu) |
330 |
{ |
331 |
mts32_l2_array_t *array,*next; |
332 |
|
333 |
/* Free L1/L2 entries */ |
334 |
if (cpu->mts_l1_ptr) { |
335 |
mts32_free_l1_array(cpu->mts_l1_ptr); |
336 |
cpu->mts_l1_ptr = NULL; |
337 |
} |
338 |
|
339 |
/* Free arrays that are sitting in the free list */ |
340 |
for(array=cpu->mts32_l2_free_list;array;array=next) { |
341 |
next = array->next; |
342 |
free(array); |
343 |
} |
344 |
|
345 |
cpu->mts32_l2_free_list = NULL; |
346 |
} |
347 |
|
348 |
/* Map a device at the specified virtual address */ |
349 |
void mts32_map_device(cpu_mips_t *cpu,u_int dev_id,m_uint64_t vaddr, |
350 |
m_uint32_t offset,m_uint32_t len) |
351 |
{ |
352 |
struct vdevice *dev; |
353 |
m_iptr_t val; |
354 |
|
355 |
if (!(dev = dev_get_by_id(cpu->vm,dev_id)) || !dev->phys_len) |
356 |
return; |
357 |
|
358 |
#if DEBUG_MTS_MAP_DEV |
359 |
cpu_log(cpu,"MTS32", |
360 |
"mapping device %s (offset=0x%x,len=0x%x) at vaddr 0x%llx\n", |
361 |
dev->name,offset,len,vaddr); |
362 |
#endif |
363 |
|
364 |
if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) |
365 |
val = (dev_id << MTS_DEVID_SHIFT) | MTS_DEV_MASK | MTS_ACC_OK; |
366 |
else |
367 |
val = dev->host_addr | MTS_ACC_OK; |
368 |
|
369 |
val += offset; |
370 |
|
371 |
while(len > 0) { |
372 |
if (!(vaddr & MTS32_LEVEL1_MASK) && !(len & MTS32_LEVEL1_MASK)) { |
373 |
mts32_set_l1_data(cpu,vaddr,MTS32_LEVEL1_SIZE,val); |
374 |
vaddr += MTS32_LEVEL1_SIZE; |
375 |
val += MTS32_LEVEL1_SIZE; |
376 |
len -= MTS32_LEVEL1_SIZE; |
377 |
} else { |
378 |
mts32_set_l2_entry(cpu,vaddr,val); |
379 |
vaddr += MTS32_LEVEL2_SIZE; |
380 |
val += MTS32_LEVEL2_SIZE; |
381 |
len -= MTS32_LEVEL2_SIZE; |
382 |
} |
383 |
} |
384 |
} |
385 |
|
386 |
/* Map a physical address to the specified virtual address */ |
387 |
void mts32_map(cpu_mips_t *cpu,m_uint64_t vaddr, |
388 |
m_uint64_t paddr,m_uint32_t len, |
389 |
int cache_access) |
390 |
{ |
391 |
struct vdevice *dev,*next_dev; |
392 |
m_uint32_t dev_offset,clen; |
393 |
|
394 |
while(len > 0) |
395 |
{ |
396 |
#if DEBUG_MTS_MAP_VIRT |
397 |
cpu_log(cpu,"MTS32", |
398 |
"mts32_map: vaddr=0x%llx, paddr=0x%llx, len=0x%x, cache=%d\n", |
399 |
vaddr,paddr,len,cache_access); |
400 |
#endif |
401 |
dev = dev_lookup(cpu->vm,paddr,cache_access); |
402 |
next_dev = dev_lookup_next(cpu->vm,paddr,dev,cache_access); |
403 |
|
404 |
if (next_dev) |
405 |
clen = m_min(len,next_dev->phys_addr-paddr); |
406 |
else |
407 |
clen = len; |
408 |
|
409 |
if (!dev) { |
410 |
mts32_unmap(cpu,vaddr,clen,MTS_ACC_U); |
411 |
} else { |
412 |
dev_offset = paddr - dev->phys_addr; |
413 |
clen = m_min(clen,dev->phys_len); |
414 |
mts32_map_device(cpu,dev->id,vaddr,dev_offset,clen); |
415 |
} |
416 |
|
417 |
vaddr += clen; |
418 |
paddr += clen; |
419 |
len -= clen; |
420 |
} |
421 |
} |
422 |
|
423 |
/* Unmap a memory zone */ |
424 |
void mts32_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len, |
425 |
m_uint32_t val) |
426 |
{ |
427 |
while(len > 0) |
428 |
{ |
429 |
#if DEBUG_MTS_MAP_VIRT |
430 |
cpu_log(cpu,"MTS32","mts32_unmap: vaddr=0x%llx, len=0x%x\n",vaddr,len); |
431 |
#endif |
432 |
if (!(vaddr & MTS32_LEVEL1_MASK) && !(len & MTS32_LEVEL1_MASK)) { |
433 |
mts32_set_l1_data(cpu,vaddr,MTS32_LEVEL1_SIZE,val); |
434 |
vaddr += MTS32_LEVEL1_SIZE; |
435 |
len -= MTS32_LEVEL1_SIZE; |
436 |
} else { |
437 |
mts32_set_l2_entry(cpu,vaddr,val); |
438 |
vaddr += MTS32_LEVEL2_SIZE; |
439 |
len -= MTS32_LEVEL2_SIZE; |
440 |
} |
441 |
} |
442 |
} |
443 |
|
444 |
/* Map all devices for kernel mode */ |
445 |
void mts32_km_map_all_dev(cpu_mips_t *cpu) |
446 |
{ |
447 |
/* KSEG0: cached accesses */ |
448 |
mts32_map(cpu,MIPS_KSEG0_BASE,0,MTS_SIZE_512M,TRUE); |
449 |
|
450 |
/* KSEG1: uncached accesses */ |
451 |
mts32_map(cpu,MIPS_KSEG1_BASE,0,MTS_SIZE_512M,FALSE); |
452 |
} |
453 |
|
454 |
/* MTS32 raw lookup */ |
455 |
static forced_inline |
456 |
void *mts32_raw_lookup(cpu_mips_t *cpu,mts32_l1_array_t *p1,m_uint64_t vaddr) |
457 |
{ |
458 |
m_uint32_t naddr = vaddr & 0xffffffff; |
459 |
m_uint32_t l1_pos,l2_pos,shift; |
460 |
mts32_l2_array_t *p2; |
461 |
m_iptr_t entry,haddr; |
462 |
m_uint64_t data; |
463 |
u_int dev_id; |
464 |
|
465 |
shift = MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS; |
466 |
l1_pos = naddr >> shift; |
467 |
entry = p1->entry[l1_pos]; |
468 |
|
469 |
if (unlikely((entry & MTS_ACC_MASK) != MTS_ACC_OK)) |
470 |
return NULL; |
471 |
|
472 |
if (entry & MTS_CHAIN_MASK) { |
473 |
p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK); |
474 |
l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1); |
475 |
entry = p2->entry[l2_pos]; |
476 |
shift = MTS32_OFFSET_BITS; |
477 |
} |
478 |
|
479 |
if (unlikely((entry & MTS_ACC_MASK) != MTS_ACC_OK)) |
480 |
return NULL; |
481 |
|
482 |
/* device access */ |
483 |
if (unlikely(entry & MTS_DEV_MASK)) { |
484 |
dev_id = (entry & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT; |
485 |
haddr = (entry & MTS_DEVOFF_MASK); |
486 |
haddr += (naddr & ((1 << shift) - 1)); |
487 |
return(dev_access_fast(cpu,dev_id,haddr,4,MTS_READ,&data)); |
488 |
} |
489 |
|
490 |
haddr = entry & MTS_ADDR_MASK; |
491 |
haddr += (naddr & ((1 << shift) - 1)); |
492 |
return((void *)haddr); |
493 |
} |
494 |
|
495 |
/* MTS32 access */ |
496 |
static forced_inline void *mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr, |
497 |
u_int op_code,u_int op_size, |
498 |
u_int op_type,m_uint64_t *data, |
499 |
u_int *exc) |
500 |
{ |
501 |
m_uint32_t naddr = vaddr & 0xffffffff; |
502 |
m_uint32_t l1_pos,l2_pos,mask,shift; |
503 |
mts32_l1_array_t *p1 = cpu->mts_l1_ptr; |
504 |
mts32_l2_array_t *p2; |
505 |
m_iptr_t entry,haddr; |
506 |
u_int dev_id; |
507 |
|
508 |
#if MEMLOG_ENABLE |
509 |
/* Record the memory access */ |
510 |
memlog_rec_access(cpu,vaddr,*data,op_size,op_type); |
511 |
#endif |
512 |
|
513 |
*exc = 0; |
514 |
shift = MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS; |
515 |
l1_pos = naddr >> shift; |
516 |
entry = p1->entry[l1_pos]; |
517 |
|
518 |
if (unlikely((mask = (entry & MTS_ACC_MASK)) != MTS_ACC_OK)) { |
519 |
mts_access_special(cpu,vaddr,mask,op_code,op_type,op_size,data,exc); |
520 |
return NULL; |
521 |
} |
522 |
|
523 |
/* do we have a level 2 entry ? */ |
524 |
if (entry & MTS_CHAIN_MASK) { |
525 |
p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK); |
526 |
l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1); |
527 |
entry = p2->entry[l2_pos]; |
528 |
shift = MTS32_OFFSET_BITS; |
529 |
|
530 |
if (unlikely((mask = (entry & MTS_ACC_MASK)) != MTS_ACC_OK)) { |
531 |
mts_access_special(cpu,vaddr,mask,op_code,op_type,op_size,data,exc); |
532 |
return NULL; |
533 |
} |
534 |
} |
535 |
|
536 |
/* device access */ |
537 |
if (unlikely(entry & MTS_DEV_MASK)) { |
538 |
dev_id = (entry & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT; |
539 |
haddr = (entry & MTS_DEVOFF_MASK); |
540 |
haddr += (naddr & ((1 << shift) - 1)); |
541 |
|
542 |
#if DEBUG_MTS_DEV |
543 |
cpu_log(cpu,"MTS32", |
544 |
"device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n", |
545 |
vaddr,cpu->pc,haddr); |
546 |
#endif |
547 |
return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data)); |
548 |
} |
549 |
|
550 |
/* raw memory access */ |
551 |
haddr = entry & MTS_ADDR_MASK; |
552 |
haddr += (naddr & ((1 << shift) - 1)); |
553 |
#if MEMLOG_ENABLE |
554 |
memlog_update_read(cpu,haddr); |
555 |
#endif |
556 |
return((void *)haddr); |
557 |
} |
558 |
|
559 |
/* Memory lookup */ |
560 |
void *mts32_lookup(cpu_mips_t *cpu,m_uint64_t vaddr) |
561 |
{ |
562 |
return(mts32_raw_lookup(cpu,cpu->mts_l1_ptr,vaddr)); |
563 |
} |
564 |
|
565 |
/* Initialize the MTS64 subsystem for the specified CPU */ |
566 |
int mts64_init(cpu_mips_t *cpu) |
567 |
{ |
568 |
size_t len; |
569 |
|
570 |
/* Initialize the cache entries to 0 (empty) */ |
571 |
len = MTS64_HASH_SIZE * sizeof(mts64_entry_t *); |
572 |
if (!(cpu->mts64_cache = malloc(len))) |
573 |
return(-1); |
574 |
|
575 |
memset(cpu->mts64_cache,0,len); |
576 |
cpu->mts64_lookups = 0; |
577 |
cpu->mts64_misses = 0; |
578 |
|
579 |
/* Reset the TLB reverse map (used for selective invalidations) */ |
580 |
memset(cpu->mts64_rmap,0,(cpu->cp0.tlb_entries * sizeof(mts64_entry_t *))); |
581 |
return(0); |
582 |
} |
583 |
|
584 |
/* Free memory used by MTS64 */ |
585 |
void mts64_shutdown(cpu_mips_t *cpu) |
586 |
{ |
587 |
mts64_chunk_t *chunk,*next; |
588 |
int i; |
589 |
|
590 |
/* Reset the reverse map */ |
591 |
for(i=0;i<cpu->cp0.tlb_entries;i++) |
592 |
cpu->mts64_rmap[i] = NULL; |
593 |
|
594 |
/* Free the cache itself */ |
595 |
free(cpu->mts64_cache); |
596 |
cpu->mts64_cache = NULL; |
597 |
|
598 |
/* Free the chunks */ |
599 |
for(chunk=cpu->mts64_chunk_list;chunk;chunk=next) { |
600 |
next = chunk->next; |
601 |
free(chunk); |
602 |
} |
603 |
|
604 |
for(chunk=cpu->mts64_chunk_free_list;chunk;chunk=next) { |
605 |
next = chunk->next; |
606 |
free(chunk); |
607 |
} |
608 |
|
609 |
cpu->mts64_chunk_list = cpu->mts64_chunk_free_list = NULL; |
610 |
cpu->mts64_entry_free_list = NULL; |
611 |
} |
612 |
|
613 |
/* Show MTS64 detailed information (debugging only!) */ |
614 |
void mts64_show_stats(cpu_mips_t *cpu) |
615 |
{ |
616 |
mts64_chunk_t *chunk; |
617 |
#if DEBUG_MTS_MAP_VIRT |
618 |
mts64_entry_t *entry; |
619 |
u_int i; |
620 |
#endif |
621 |
u_int count; |
622 |
|
623 |
printf("\nCPU%u: MTS64 statistics:\n",cpu->id); |
624 |
|
625 |
printf(" Total lookups: %llu, misses: %llu, efficiency: %g%%\n", |
626 |
cpu->mts64_lookups, cpu->mts64_misses, |
627 |
100 - ((double)(cpu->mts64_misses*100)/ |
628 |
(double)cpu->mts64_lookups)); |
629 |
|
630 |
#if DEBUG_MTS_MAP_VIRT |
631 |
/* Valid hash entries */ |
632 |
for(count=0,i=0;i<MTS64_HASH_SIZE;i++) { |
633 |
if ((entry = cpu->mts64_cache[i]) != NULL) { |
634 |
printf(" %4u: entry=%p, start=0x%16.16llx, " |
635 |
"len=0x%8.8x (%6u bytes), action=0x%llx\n", |
636 |
i,entry,entry->start,entry->mask,entry->mask+1, |
637 |
(m_uint64_t)entry->action); |
638 |
count++; |
639 |
} |
640 |
} |
641 |
|
642 |
printf(" %u/%u valid hash entries.\n",count,MTS64_HASH_SIZE); |
643 |
#endif |
644 |
|
645 |
/* Number of chunks */ |
646 |
for(count=0,chunk=cpu->mts64_chunk_list;chunk;chunk=chunk->next) |
647 |
count++; |
648 |
|
649 |
printf(" Number of chunks: %u\n",count); |
650 |
|
651 |
#if DEBUG_MTS_MAP_VIRT |
652 |
/* Reverse map */ |
653 |
for(i=0;i<MIPS64_TLB_ENTRIES;i++) { |
654 |
for(count=0,entry=cpu->mts64_rmap[i];entry;entry=entry->next) |
655 |
count++; |
656 |
|
657 |
if (count > 0) |
658 |
printf(" tlb_rmap[%u]: %u entries\n",i,count); |
659 |
} |
660 |
#endif |
661 |
} |
662 |
|
663 |
/* Allocate a new chunk */ |
664 |
static int mts64_alloc_chunk(cpu_mips_t *cpu) |
665 |
{ |
666 |
mts64_chunk_t *chunk; |
667 |
|
668 |
/* Try the free list first, then use standard allocation procedure */ |
669 |
if ((chunk = cpu->mts64_chunk_free_list) != NULL) { |
670 |
cpu->mts64_chunk_free_list = chunk->next; |
671 |
} else { |
672 |
if (!(chunk = malloc(sizeof(*chunk)))) |
673 |
return(-1); |
674 |
} |
675 |
|
676 |
chunk->count = 0; |
677 |
chunk->next = cpu->mts64_chunk_list; |
678 |
cpu->mts64_chunk_list = chunk; |
679 |
return(0); |
680 |
} |
681 |
|
682 |
/* Allocate a new entry */ |
683 |
static mts64_entry_t *mts64_alloc_entry(cpu_mips_t *cpu) |
684 |
{ |
685 |
mts64_chunk_t *chunk = cpu->mts64_chunk_list; |
686 |
mts64_entry_t *entry; |
687 |
|
688 |
/* First, try to allocate the entry from the free list */ |
689 |
if ((entry = cpu->mts64_entry_free_list) != NULL) { |
690 |
cpu->mts64_entry_free_list = cpu->mts64_entry_free_list->next; |
691 |
return entry; |
692 |
} |
693 |
|
694 |
/* A new chunk is required */ |
695 |
if (!chunk || (chunk->count == MTS64_CHUNK_SIZE)) { |
696 |
if (mts64_alloc_chunk(cpu) == -1) |
697 |
return NULL; |
698 |
|
699 |
chunk = cpu->mts64_chunk_list; |
700 |
} |
701 |
|
702 |
entry = &chunk->entry[chunk->count]; |
703 |
chunk->count++; |
704 |
return entry; |
705 |
} |
706 |
|
707 |
/* Invalidate the complete MTS64 cache */ |
708 |
void mts64_invalidate_cache(cpu_mips_t *cpu) |
709 |
{ |
710 |
mts64_chunk_t *chunk; |
711 |
size_t len; |
712 |
u_int i; |
713 |
|
714 |
len = MTS64_HASH_SIZE * sizeof(mts64_entry_t *); |
715 |
memset(cpu->mts64_cache,0,len); |
716 |
|
717 |
/* Move all chunks to the free list */ |
718 |
while((chunk = cpu->mts64_chunk_list) != NULL) { |
719 |
cpu->mts64_chunk_list = chunk->next; |
720 |
chunk->next = cpu->mts64_chunk_free_list; |
721 |
cpu->mts64_chunk_free_list = chunk; |
722 |
} |
723 |
|
724 |
/* Reset the free list of entries (since they are located in chunks) */ |
725 |
cpu->mts64_entry_free_list = NULL; |
726 |
|
727 |
/* Reset the reverse map */ |
728 |
for(i=0;i<cpu->cp0.tlb_entries;i++) |
729 |
cpu->mts64_rmap[i] = NULL; |
730 |
} |
731 |
|
732 |
/* Invalidate partially the MTS64 cache, given a TLB entry index */ |
733 |
void mts64_invalidate_tlb_entry(cpu_mips_t *cpu,u_int tlb_index) |
734 |
{ |
735 |
mts64_entry_t *entry; |
736 |
|
737 |
for(entry=cpu->mts64_rmap[tlb_index];entry;entry=entry->next) { |
738 |
*(entry->pself) = NULL; |
739 |
if (!entry->next) { |
740 |
entry->next = cpu->mts64_entry_free_list; |
741 |
break; |
742 |
} |
743 |
} |
744 |
|
745 |
cpu->mts64_entry_free_list = cpu->mts64_rmap[tlb_index]; |
746 |
cpu->mts64_rmap[tlb_index] = NULL; |
747 |
} |
748 |
|
749 |
/* |
750 |
* MTS64 mapping. |
751 |
* |
752 |
* It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5) |
753 |
*/ |
754 |
static no_inline int mts64_map(cpu_mips_t *cpu,m_uint64_t vaddr,mts_map_t *map, |
755 |
mts64_entry_t *entry) |
756 |
{ |
757 |
struct vdevice *dev; |
758 |
m_uint64_t lk_addr; |
759 |
m_uint32_t poffset; |
760 |
|
761 |
lk_addr = map->paddr + (vaddr - map->vaddr); |
762 |
|
763 |
if (!(dev = dev_lookup(cpu->vm,lk_addr,map->cached))) |
764 |
return(FALSE); |
765 |
|
766 |
if (map->paddr > dev->phys_addr) { |
767 |
poffset = map->paddr - dev->phys_addr; |
768 |
entry->start = map->vaddr; |
769 |
entry->phys_page = map->paddr >> MIPS_MIN_PAGE_SHIFT; |
770 |
entry->mask = ~((m_min(map->len,dev->phys_len - poffset)) - 1); |
771 |
entry->action = poffset; |
772 |
} else { |
773 |
poffset = dev->phys_addr - map->paddr; |
774 |
entry->start = map->vaddr + poffset; |
775 |
entry->phys_page = (map->paddr + poffset) >> MIPS_MIN_PAGE_SHIFT; |
776 |
entry->mask = ~((m_min(map->len - poffset,dev->phys_len)) - 1); |
777 |
entry->action = 0; |
778 |
} |
779 |
|
780 |
if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) |
781 |
entry->action += (dev->id << MTS_DEVID_SHIFT) | MTS_DEV_MASK; |
782 |
else |
783 |
entry->action += dev->host_addr; |
784 |
|
785 |
return(TRUE); |
786 |
} |
787 |
|
788 |
/* MTS64 slow lookup */ |
789 |
static forced_inline |
790 |
mts64_entry_t *mts64_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr, |
791 |
u_int op_code,u_int op_size, |
792 |
u_int op_type,m_uint64_t *data, |
793 |
u_int *exc) |
794 |
{ |
795 |
m_uint32_t hash_bucket,zone,sub_zone,cca; |
796 |
mts64_entry_t *entry,new_entry; |
797 |
mts_map_t map; |
798 |
|
799 |
map.tlb_index = -1; |
800 |
hash_bucket = MTS64_HASH(vaddr); |
801 |
entry = cpu->mts64_cache[hash_bucket]; |
802 |
zone = vaddr >> 40; |
803 |
|
804 |
#if DEBUG_MTS_STATS |
805 |
cpu->mts64_misses++; |
806 |
#endif |
807 |
|
808 |
switch(zone) { |
809 |
case 0x000000: /* xkuseg */ |
810 |
case 0x400000: /* xksseg */ |
811 |
case 0xc00000: /* xkseg */ |
812 |
/* trigger TLB exception if no matching entry found */ |
813 |
if (!cp0_tlb_lookup(cpu,vaddr,&map)) |
814 |
goto err_tlb; |
815 |
|
816 |
if (!mts64_map(cpu,vaddr,&map,&new_entry)) |
817 |
goto err_undef; |
818 |
break; |
819 |
|
820 |
case 0xffffff: |
821 |
sub_zone = (vaddr >> 29) & 0x7FF; |
822 |
|
823 |
switch(sub_zone) { |
824 |
case 0x7fc: /* ckseg0 */ |
825 |
map.vaddr = sign_extend(MIPS_KSEG0_BASE,32); |
826 |
map.paddr = 0; |
827 |
map.len = MIPS_KSEG0_SIZE; |
828 |
map.cached = TRUE; |
829 |
if (!mts64_map(cpu,vaddr,&map,&new_entry)) |
830 |
goto err_undef; |
831 |
break; |
832 |
|
833 |
case 0x7fd: /* ckseg1 */ |
834 |
map.vaddr = sign_extend(MIPS_KSEG1_BASE,32); |
835 |
map.paddr = 0; |
836 |
map.len = MIPS_KSEG1_SIZE; |
837 |
map.cached = FALSE; |
838 |
if (!mts64_map(cpu,vaddr,&map,&new_entry)) |
839 |
goto err_undef; |
840 |
break; |
841 |
|
842 |
case 0x7fe: /* cksseg */ |
843 |
case 0x7ff: /* ckseg3 */ |
844 |
/* trigger TLB exception if no matching entry found */ |
845 |
if (!cp0_tlb_lookup(cpu,vaddr,&map)) |
846 |
goto err_tlb; |
847 |
|
848 |
if (!mts64_map(cpu,vaddr,&map,&new_entry)) |
849 |
goto err_undef; |
850 |
break; |
851 |
|
852 |
default: |
853 |
/* Invalid zone: generate Address Error (AE) exception */ |
854 |
goto err_address; |
855 |
} |
856 |
break; |
857 |
|
858 |
/* xkphys */ |
859 |
case 0x800000: |
860 |
case 0x880000: |
861 |
case 0x900000: |
862 |
case 0x980000: |
863 |
case 0xa00000: |
864 |
case 0xa80000: |
865 |
case 0xb00000: |
866 |
case 0xb80000: |
867 |
cca = (vaddr >> MIPS64_XKPHYS_CCA_SHIFT) & 0x03; |
868 |
map.cached = mips64_cca_cached(cca); |
869 |
map.vaddr = vaddr & MIPS64_XKPHYS_ZONE_MASK; |
870 |
map.paddr = 0; |
871 |
map.len = MIPS64_XKPHYS_PHYS_SIZE; |
872 |
if (!mts64_map(cpu,vaddr,&map,&new_entry)) |
873 |
goto err_undef; |
874 |
break; |
875 |
|
876 |
default: |
877 |
/* Invalid zone: generate Address Error (AE) exception */ |
878 |
goto err_address; |
879 |
} |
880 |
|
881 |
/* Get a new entry if necessary */ |
882 |
if (!entry) { |
883 |
entry = mts64_alloc_entry(cpu); |
884 |
entry->pself = entry->pprev = NULL; |
885 |
entry->next = NULL; |
886 |
|
887 |
/* Store the entry in hash table for future use */ |
888 |
cpu->mts64_cache[hash_bucket] = entry; |
889 |
} else { |
890 |
/* Remove the entry from the reverse map list */ |
891 |
if (entry->pprev) { |
892 |
if (entry->next) |
893 |
entry->next->pprev = entry->pprev; |
894 |
|
895 |
*(entry->pprev) = entry->next; |
896 |
} |
897 |
} |
898 |
|
899 |
/* Add this entry to the reverse map list */ |
900 |
if (map.tlb_index != -1) { |
901 |
entry->pself = &cpu->mts64_cache[hash_bucket]; |
902 |
entry->next = cpu->mts64_rmap[map.tlb_index]; |
903 |
entry->pprev = &cpu->mts64_rmap[map.tlb_index]; |
904 |
if (entry->next) |
905 |
entry->next->pprev = &entry->next; |
906 |
cpu->mts64_rmap[map.tlb_index] = entry; |
907 |
} |
908 |
|
909 |
/* Fill the new entry or replace the previous */ |
910 |
entry->phys_page = new_entry.phys_page; |
911 |
entry->start = new_entry.start; |
912 |
entry->mask = new_entry.mask; |
913 |
entry->action = new_entry.action; |
914 |
return entry; |
915 |
|
916 |
err_undef: |
917 |
mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc); |
918 |
return NULL; |
919 |
err_address: |
920 |
mts_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data,exc); |
921 |
return NULL; |
922 |
err_tlb: |
923 |
mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc); |
924 |
return NULL; |
925 |
} |
926 |
|
927 |
/* MTS64 access */ |
928 |
static forced_inline void *mts64_access(cpu_mips_t *cpu,m_uint64_t vaddr, |
929 |
u_int op_code,u_int op_size, |
930 |
u_int op_type,m_uint64_t *data, |
931 |
u_int *exc) |
932 |
{ |
933 |
m_uint32_t hash_bucket; |
934 |
mts64_entry_t *entry; |
935 |
m_iptr_t haddr; |
936 |
u_int dev_id; |
937 |
|
938 |
#if MEMLOG_ENABLE |
939 |
/* Record the memory access */ |
940 |
memlog_rec_access(cpu,vaddr,*data,op_size,op_type); |
941 |
#endif |
942 |
|
943 |
*exc = 0; |
944 |
hash_bucket = MTS64_HASH(vaddr); |
945 |
entry = cpu->mts64_cache[hash_bucket]; |
946 |
|
947 |
#if DEBUG_MTS_STATS |
948 |
cpu->mts64_lookups++; |
949 |
#endif |
950 |
|
951 |
/* Slow lookup if nothing found in cache */ |
952 |
if (unlikely((!entry) || |
953 |
unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start))) |
954 |
{ |
955 |
entry = mts64_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc); |
956 |
if (!entry) return NULL; |
957 |
} |
958 |
|
959 |
/* Device access */ |
960 |
if (unlikely(entry->action & MTS_DEV_MASK)) { |
961 |
dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT; |
962 |
haddr = entry->action & MTS_DEVOFF_MASK; |
963 |
haddr += vaddr - entry->start; |
964 |
|
965 |
#if DEBUG_MTS_DEV |
966 |
cpu_log(cpu,"MTS64", |
967 |
"device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n", |
968 |
vaddr,cpu->pc,haddr); |
969 |
#endif |
970 |
return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data)); |
971 |
} |
972 |
|
973 |
/* Raw memory access */ |
974 |
haddr = entry->action & MTS_ADDR_MASK; |
975 |
haddr += vaddr - entry->start; |
976 |
#if MEMLOG_ENABLE |
977 |
memlog_update_read(cpu,haddr); |
978 |
#endif |
979 |
return((void *)haddr); |
980 |
} |
981 |
|
982 |
/* MTS64 lookup */ |
983 |
static void *mts64_lookup(cpu_mips_t *cpu,m_uint64_t vaddr) |
984 |
{ |
985 |
m_uint64_t data; |
986 |
u_int exc; |
987 |
return(mts64_access(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,&data,&exc)); |
988 |
} |
989 |
|
990 |
/* MTS64 virtual address to physical page translation */ |
991 |
static fastcall int mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
992 |
m_uint32_t *phys_page) |
993 |
{ |
994 |
m_uint32_t hash_bucket,offset; |
995 |
mts64_entry_t *entry; |
996 |
m_uint64_t data = 0; |
997 |
u_int exc = 0; |
998 |
|
999 |
hash_bucket = MTS64_HASH(vaddr); |
1000 |
entry = cpu->mts64_cache[hash_bucket]; |
1001 |
|
1002 |
/* Slow lookup if nothing found in cache */ |
1003 |
if (unlikely((!entry) || |
1004 |
unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start))) |
1005 |
{ |
1006 |
entry = mts64_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ, |
1007 |
&data,&exc); |
1008 |
if (!entry) |
1009 |
return(-1); |
1010 |
} |
1011 |
|
1012 |
offset = vaddr - entry->start; |
1013 |
*phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT); |
1014 |
return(0); |
1015 |
} |
1016 |
|
1017 |
/* === MIPS Memory Operations ============================================= */ |
1018 |
|
1019 |
/* Macro for MTS access (32 or 64 bit): expands to the matching
   mts32_access / mts64_access inline function */
#define MTS_ACCESS(X) mts##X##_access

/* Instantiate a memory operation for both the MTS32 and MTS64 back-ends */
#define MTS_MEMOP(op) MTS_##op(32) MTS_##op(64)
1022 |
|
1023 |
/* LB: Load Byte (sign-extended).
   Fix: removed the stray line-continuation backslash that followed the
   closing brace — it silently absorbed the next source line into the
   macro definition. */
#define MTS_LB(X)  \
fastcall u_int mts##X##_lb(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LB,1,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr; \
   if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,8); \
   return(exc); \
}
1036 |
|
1037 |
/* LBU: Load Byte Unsigned (zero-extended) */
#define MTS_LBU(X) \
fastcall u_int mts##X##_lbu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t val; \
   void *ptr; \
   u_int err; \
   \
   ptr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LBU,1,MTS_READ,&val,&err); \
   if (likely(ptr != NULL)) val = *(m_uint8_t *)ptr; \
   if (likely(!err)) cpu->gpr[reg] = val & 0xff; \
   return(err); \
}
1050 |
|
1051 |
/* LH: Load Half-Word */ |
1052 |
#define MTS_LH(X) \ |
1053 |
fastcall u_int mts##X##_lh(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
1054 |
{ \ |
1055 |
m_uint64_t data; \ |
1056 |
void *haddr; \ |
1057 |
u_int exc; \ |
1058 |
\ |
1059 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LH,2,MTS_READ,&data,&exc); \ |
1060 |
if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr); \ |
1061 |
if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,16); \ |
1062 |
return(exc); \ |
1063 |
} |
1064 |
|
1065 |
/* LHU: Load Half-Word Unsigned */ |
1066 |
#define MTS_LHU(X) \ |
1067 |
fastcall u_int mts##X##_lhu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
1068 |
{ \ |
1069 |
m_uint64_t data; \ |
1070 |
void *haddr; \ |
1071 |
u_int exc; \ |
1072 |
\ |
1073 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LHU,2,MTS_READ,&data,&exc); \ |
1074 |
if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr); \ |
1075 |
if (likely(!exc)) cpu->gpr[reg] = data & 0xffff; \ |
1076 |
return(exc); \ |
1077 |
} |
1078 |
|
1079 |
/* LW: Load Word */ |
1080 |
#define MTS_LW(X) \ |
1081 |
fastcall u_int mts##X##_lw(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
1082 |
{ \ |
1083 |
m_uint64_t data; \ |
1084 |
void *haddr; \ |
1085 |
u_int exc; \ |
1086 |
\ |
1087 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LW,4,MTS_READ,&data,&exc); \ |
1088 |
if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \ |
1089 |
if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,32); \ |
1090 |
return(exc); \ |
1091 |
} |
1092 |
|
1093 |
/* LWU: Load Word Unsigned */ |
1094 |
#define MTS_LWU(X) \ |
1095 |
fastcall u_int mts##X##_lwu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
1096 |
{ \ |
1097 |
m_uint64_t data; \ |
1098 |
void *haddr; \ |
1099 |
u_int exc; \ |
1100 |
\ |
1101 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LWU,4,MTS_READ,&data,&exc); \ |
1102 |
if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \ |
1103 |
if (likely(!exc)) cpu->gpr[reg] = data & 0xffffffff; \ |
1104 |
return(exc); \ |
1105 |
} |
1106 |
|
1107 |
/* LD: Load Double-Word */ |
1108 |
#define MTS_LD(X) \ |
1109 |
fastcall u_int mts##X##_ld(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
1110 |
{ \ |
1111 |
m_uint64_t data; \ |
1112 |
void *haddr; \ |
1113 |
u_int exc; \ |
1114 |
\ |
1115 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LD,8,MTS_READ,&data,&exc); \ |
1116 |
if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr); \ |
1117 |
if (likely(!exc)) cpu->gpr[reg] = data; \ |
1118 |
return(exc); \ |
1119 |
} |
1120 |
|
1121 |
/* Simple store operations: mask the source GPR to the operand size,
   translate the virtual address, and write to host memory when a direct
   mapping exists (haddr != NULL). When haddr is NULL the device handler
   has already consumed the value passed through 'data'. */

/* SB: Store Byte */
#define MTS_SB(X) \
fastcall u_int mts##X##_sb(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   data = cpu->gpr[reg] & 0xff; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SB,1,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data; \
   return(exc); \
}

/* SH: Store Half-Word */
#define MTS_SH(X) \
fastcall u_int mts##X##_sh(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   data = cpu->gpr[reg] & 0xffff; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SH,2,MTS_WRITE,&data,&exc); \
   /* htovm16: convert from host byte order to VM byte order */ \
   if (likely(haddr != NULL)) *(m_uint16_t *)haddr = htovm16(data); \
   return(exc); \
}

/* SW: Store Word */
#define MTS_SW(X) \
fastcall u_int mts##X##_sw(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   data = cpu->gpr[reg] & 0xffffffff; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SW,4,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \
   return(exc); \
}

/* SD: Store Double-Word */
#define MTS_SD(X) \
fastcall u_int mts##X##_sd(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   data = cpu->gpr[reg]; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SD,8,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \
   return(exc); \
}
1176 |
|
1177 |
/* LDC1: Load Double-Word To Coprocessor 1 (FPU register file) */
#define MTS_LDC1(X) \
fastcall u_int mts##X##_ldc1(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LDC1,8,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr); \
   /* destination is an FPU register, not a GPR */ \
   if (likely(!exc)) cpu->fpu.reg[reg] = data; \
   return(exc); \
}
1190 |
|
1191 |
/* LWL: Load Word Left — unaligned load helper.
   Reads the aligned word containing vaddr and merges its left part into
   the high-order bytes of the register (big-endian semantics). */
#define MTS_LWL(X) \
fastcall u_int mts##X##_lwl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t r_mask,naddr; \
   m_uint64_t data; \
   u_int m_shift; \
   void *haddr; \
   u_int exc; \
   \
   /* align down to the enclosing word */ \
   naddr = vaddr & ~(0x03); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LWL,4,MTS_READ,&data,&exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh32(*(m_uint32_t *)haddr); \
   \
   if (likely(!exc)) { \
      /* m_shift in {0,8,16,24}: all shifts below are well-defined */ \
      m_shift = (vaddr & 0x03) << 3; \
      r_mask = (1ULL << m_shift) - 1; \
      data <<= m_shift; \
      \
      /* keep the low m_shift bits of the register, merge loaded part */ \
      cpu->gpr[reg] &= r_mask; \
      cpu->gpr[reg] |= data; \
      cpu->gpr[reg] = sign_extend(cpu->gpr[reg],32); \
   } \
   return(exc); \
}
1218 |
|
1219 |
/* LWR: Load Word Right — unaligned load helper.
   Reads the aligned word containing vaddr and merges its right part into
   the low-order bytes of the register (big-endian semantics). */
#define MTS_LWR(X) \
fastcall u_int mts##X##_lwr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t r_mask,naddr; \
   m_uint64_t data; \
   u_int m_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x03); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LWR,4,MTS_READ,&data,&exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh32(*(m_uint32_t *)haddr); \
   \
   if (likely(!exc)) { \
      /* m_shift in {8,16,24,32}: 64-bit shifts below stay well-defined */ \
      m_shift = ((vaddr & 0x03) + 1) << 3; \
      r_mask = (1ULL << m_shift) - 1; \
      \
      data = sign_extend(data >> (32 - m_shift),32); \
      r_mask = sign_extend(r_mask,32); \
      \
      /* clear the merged low bytes, then insert the loaded part */ \
      cpu->gpr[reg] &= ~r_mask; \
      cpu->gpr[reg] |= data; \
   } \
   return(exc); \
}
1247 |
|
1248 |
/* LDL: Load Double-Word Left — 64-bit analogue of LWL. */
#define MTS_LDL(X) \
fastcall u_int mts##X##_ldl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t r_mask,naddr; \
   m_uint64_t data; \
   u_int m_shift; \
   void *haddr; \
   u_int exc; \
   \
   /* align down to the enclosing double-word */ \
   naddr = vaddr & ~(0x07); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LDL,8,MTS_READ,&data,&exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh64(*(m_uint64_t *)haddr); \
   \
   if (likely(!exc)) { \
      /* m_shift in {0,8,...,56}: shifts below are well-defined */ \
      m_shift = (vaddr & 0x07) << 3; \
      r_mask = (1ULL << m_shift) - 1; \
      data <<= m_shift; \
      \
      cpu->gpr[reg] &= r_mask; \
      cpu->gpr[reg] |= data; \
   } \
   return(exc); \
}
1274 |
|
1275 |
/* LDR: Load Double-Word Right — 64-bit analogue of LWR.
   Reads the aligned double-word containing vaddr and merges its right
   part into the low-order bytes of the register. */
#define MTS_LDR(X) \
fastcall u_int mts##X##_ldr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t r_mask,naddr; \
   m_uint64_t data; \
   u_int m_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x07ULL); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LDR,8,MTS_READ,&data,&exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh64(*(m_uint64_t *)haddr); \
   \
   if (likely(!exc)) { \
      m_shift = ((vaddr & 0x07) + 1) << 3; \
      /* m_shift ranges from 8 to 64: build the mask with a two-step \
         shift, since "1ULL << 64" is undefined behavior in C */ \
      r_mask = ((1ULL << (m_shift - 1)) << 1) - 1; \
      data >>= (64 - m_shift); \
      \
      /* clear the merged low bytes, then insert the loaded part */ \
      cpu->gpr[reg] &= ~r_mask; \
      cpu->gpr[reg] |= data; \
   } \
   return(exc); \
}
1301 |
|
1302 |
/* SWL: Store Word Left — unaligned store helper.
   Read-modify-write: fetch the aligned word, merge the high-order part of
   the register into it, then write it back. */
#define MTS_SWL(X) \
fastcall u_int mts##X##_swl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t d_mask,naddr; \
   m_uint64_t data; \
   u_int r_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x03ULL); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWL,4,MTS_READ,&data,&exc); \
   /* abort before the write half if the read already faulted */ \
   if (unlikely(exc)) return(exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh32(*(m_uint32_t *)haddr); \
   \
   /* r_shift in {0,8,16,24}: the 32-bit shift below is well-defined */ \
   r_shift = (vaddr & 0x03) << 3; \
   d_mask = 0xffffffff >> r_shift; \
   \
   data &= ~d_mask; \
   data |= (cpu->gpr[reg] & 0xffffffff) >> r_shift; \
   \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWL,4,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \
   return(exc); \
}
1329 |
|
1330 |
/* SWR: Store Word Right — unaligned store helper.
   Read-modify-write: fetch the aligned word, merge the low-order part of
   the register into it, then write it back. */
#define MTS_SWR(X) \
fastcall u_int mts##X##_swr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t d_mask,naddr; \
   m_uint64_t data; \
   u_int r_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x03ULL); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWR,4,MTS_READ,&data,&exc); \
   /* abort before the write half if the read already faulted */ \
   if (unlikely(exc)) return(exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh32(*(m_uint32_t *)haddr); \
   \
   r_shift = ((vaddr & 0x03) + 1) << 3; \
   /* r_shift ranges from 8 to 32: shift a 64-bit constant, since \
      shifting a 32-bit constant by 32 is undefined behavior in C */ \
   d_mask = 0xffffffffULL >> r_shift; \
   \
   data &= d_mask; \
   data |= (cpu->gpr[reg] << (32 - r_shift)) & 0xffffffff; \
   \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWR,4,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \
   return(exc); \
}
1357 |
|
1358 |
/* SDL: Store Double-Word Left — 64-bit analogue of SWL (read-modify-write). */
#define MTS_SDL(X) \
fastcall u_int mts##X##_sdl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t d_mask,naddr; \
   m_uint64_t data; \
   u_int r_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x07); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDL,8,MTS_READ,&data,&exc); \
   /* abort before the write half if the read already faulted */ \
   if (unlikely(exc)) return(exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh64(*(m_uint64_t *)haddr); \
   \
   /* r_shift in {0,8,...,56}: the 64-bit shift below is well-defined */ \
   r_shift = (vaddr & 0x07) << 3; \
   d_mask = 0xffffffffffffffffULL >> r_shift; \
   \
   data &= ~d_mask; \
   data |= cpu->gpr[reg] >> r_shift; \
   \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDL,8,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \
   return(exc); \
}
1385 |
|
1386 |
/* SDR: Store Double-Word Right — 64-bit analogue of SWR (read-modify-write). */
#define MTS_SDR(X) \
fastcall u_int mts##X##_sdr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t d_mask,naddr; \
   m_uint64_t data; \
   u_int r_shift; \
   void *haddr; \
   u_int exc; \
   \
   naddr = vaddr & ~(0x07ULL); \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDR,8,MTS_READ,&data,&exc); \
   /* abort before the write half if the read already faulted */ \
   if (unlikely(exc)) return(exc); \
   \
   if (likely(haddr != NULL)) \
      data = vmtoh64(*(m_uint64_t *)haddr); \
   \
   r_shift = ((vaddr & 0x07) + 1) << 3; \
   /* r_shift ranges from 8 to 64: build the mask with a two-step \
      shift, since shifting a 64-bit value by 64 is undefined behavior */ \
   d_mask = (0xffffffffffffffffULL >> (r_shift - 1)) >> 1; \
   \
   data &= d_mask; \
   data |= cpu->gpr[reg] << (64 - r_shift); \
   \
   haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDR,8,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \
   return(exc); \
}
1413 |
|
1414 |
/* LL: Load Linked — load a word and set the CPU's LL bit, arming a
   subsequent SC (Store Conditional). */
#define MTS_LL(X) \
fastcall u_int mts##X##_ll(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LL,4,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \
   \
   if (likely(!exc)) { \
      cpu->gpr[reg] = sign_extend(data,32); \
      /* arm the linked access; checked by SC */ \
      cpu->ll_bit = 1; \
   } \
   \
   return(exc); \
}
1432 |
|
1433 |
/* SC: Store Conditional — perform the store only if the LL bit is set;
   the register receives the success flag (ll_bit).
   NOTE(review): ll_bit is not cleared here after the attempt — presumably
   it is reset elsewhere (e.g. on exception/context change); confirm. */
#define MTS_SC(X) \
fastcall u_int mts##X##_sc(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc = 0; \
   \
   if (cpu->ll_bit) { \
      data = cpu->gpr[reg] & 0xffffffff; \
      haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SC,4,MTS_WRITE,&data,&exc); \
      if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \
   } \
   \
   /* report success (1) or failure (0) in the source register */ \
   if (likely(!exc)) \
      cpu->gpr[reg] = cpu->ll_bit; \
   return(exc); \
}
1451 |
|
1452 |
/* SDC1: Store Double-Word from Coprocessor 1 (FPU register file) */
#define MTS_SDC1(X) \
fastcall u_int mts##X##_sdc1(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
   \
   /* source is an FPU register, not a GPR */ \
   data = cpu->fpu.reg[reg]; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SDC1,8,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \
   return(exc); \
}
1465 |
|
1466 |
/* CACHE: Cache operation.
 * Used here to invalidate JIT-compiled code: if the target physical page
 * holds a compiled instruction block and the current PC is outside that
 * block's page, the block is freed so the code gets re-translated
 * (self-modifying code support). Always returns 0. */
fastcall u_int mts_cache(cpu_mips_t *cpu,m_uint64_t vaddr,u_int op)
{
   struct insn_block *block;
   m_uint32_t phys_page;

#if DEBUG_CACHE
   cpu_log(cpu,"MTS","CACHE: PC=0x%llx, vaddr=0x%llx, cache=%u, code=%u\n",
           cpu->pc, vaddr, op & 0x3, op >> 2);
#endif

   if (!cpu->translate(cpu,vaddr,&phys_page)) {
      /* 1048576 = size of exec_phys_map in pages (presumably 4 GB of
         physical space / 4 KB pages — TODO confirm against allocation) */
      if ((phys_page < 1048576) && cpu->exec_phys_map) {
         block = cpu->exec_phys_map[phys_page];

         if (block) {
            /* never free the block the CPU is currently executing from */
            if ((cpu->pc < block->start_pc) ||
                ((cpu->pc - block->start_pc) >= MIPS_MIN_PAGE_SIZE))
            {
#if DEBUG_CACHE
               cpu_log(cpu,"MTS",
                       "CACHE: removing compiled page at 0x%llx, pc=0x%llx\n",
                       block->start_pc,cpu->pc);
#endif
               cpu->exec_phys_map[phys_page] = NULL;
               insn_block_free(cpu,block,TRUE);
            }
            else
            {
#if DEBUG_CACHE
               cpu_log(cpu,"MTS",
                       "CACHE: trying to remove page 0x%llx with pc=0x%llx\n",
                       block->start_pc,cpu->pc);
#endif
            }
         }
      }
   }

   return(0);
}
1507 |
|
1508 |
/*
 * Instantiation of MIPS Memory Operations.
 *
 * Each MTS_MEMOP(op) expands to MTS_<op>(32) MTS_<op>(64), generating the
 * concrete mts32_* and mts64_* access functions from the macros above.
 */
MTS_MEMOP(LB)
MTS_MEMOP(LBU)
MTS_MEMOP(LH)
MTS_MEMOP(LHU)
MTS_MEMOP(LW)
MTS_MEMOP(LWU)
MTS_MEMOP(LD)
MTS_MEMOP(SB)
MTS_MEMOP(SH)
MTS_MEMOP(SW)
MTS_MEMOP(SD)
MTS_MEMOP(LDC1)
MTS_MEMOP(LWL)
MTS_MEMOP(LWR)
MTS_MEMOP(LDL)
MTS_MEMOP(LDR)
MTS_MEMOP(SWL)
MTS_MEMOP(SWR)
MTS_MEMOP(SDL)
MTS_MEMOP(SDR)
MTS_MEMOP(LL)
MTS_MEMOP(SC)
MTS_MEMOP(SDC1)
1534 |
|
1535 |
/* Fast assembly routines */ |
1536 |
#ifdef FAST_ASM |
1537 |
extern fastcall u_int mts32_lw_asm(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg); |
1538 |
#endif |
1539 |
|
1540 |
/* === MTS32 Cache Management ============================================= */ |
1541 |
|
1542 |
/* MTS32 map/unmap/rebuild "API" functions */

/* Map a virtual range to a physical range; thin wrapper over mts32_map.
   cache_access is forwarded; tlb_index is unused by the 32-bit MTS. */
void mts32_api_map(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t paddr,
                   m_uint32_t len,int cache_access,int tlb_index)
{
   mts32_map(cpu,vaddr,paddr,len,cache_access);
}
1548 |
|
1549 |
/* Unmap a virtual range; thin wrapper over mts32_unmap.
   'val' is forwarded as the fill value; tlb_index is unused here. */
void mts32_api_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len,
                     m_uint32_t val,int tlb_index)
{
   mts32_unmap(cpu,vaddr,len,val);
}
1554 |
|
1555 |
/* Rebuild the whole MTS32 mapping from scratch for the current CPU mode:
   start from an empty (address-error) space, mark TLB-mapped segments,
   map kernel segments when permitted, then re-apply all TLB entries. */
void mts32_api_rebuild(cpu_mips_t *cpu)
{
   u_int cpu_mode;

   cpu_mode = cp0_get_mode(cpu);

   /* The complete address space gives AE (address error) */
   if (mts32_init_empty(cpu) == -1)
      return;   /* allocation/initialization failed: keep previous state */

   /* USEG: 0x00000000 -> 0x80000000: 2 GB mapped (TLB) */
   mts32_unmap(cpu,MIPS_KUSEG_BASE,MIPS_KUSEG_SIZE,MTS_ACC_T);

   /* If the CPU is in Kernel Mode, activate KSEG segments */
   if (cpu_mode == MIPS_CP0_STATUS_KM) {
      /* KSEG0 / KSEG1 : physical memory */
      mts32_km_map_all_dev(cpu);

      /* KSSEG: 0xc0000000 -> 0xe0000000: 0.5GB mapped (TLB) */
      mts32_unmap(cpu,MIPS_KSSEG_BASE,MIPS_KSSEG_SIZE,MTS_ACC_T);

      /* KSEG3: 0xe0000000 -> 0xffffffff: 0.5GB mapped (TLB) */
      mts32_unmap(cpu,MIPS_KSEG3_BASE,MIPS_KSEG3_SIZE,MTS_ACC_T);
   } else {
      if (cpu_mode == MIPS_CP0_STATUS_SM) {
         /* SSEG: 0xc0000000 -> 0xe0000000: 0.5GB mapped (TLB) */
         mts32_unmap(cpu,MIPS_KSSEG_BASE,MIPS_KSSEG_SIZE,MTS_ACC_T);
      }
   }

   /* Map all TLB entries */
   cp0_map_all_tlb_to_mts(cpu);
}
1588 |
|
1589 |
/* === MTS64 Cache Management ============================================= */ |
1590 |
|
1591 |
/* MTS64 map/unmap/rebuild "API" functions */

/* Map: intentionally empty for the 64-bit MTS — the hashed cache is
   filled lazily on the first access to each page. */
void mts64_api_map(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t paddr,
                   m_uint32_t len,int cache_access,int tlb_index)
{
   /* nothing to do, the cache will be filled on-the-fly */
}
1597 |
|
1598 |
/* Unmap a range: drop the cached translations so future accesses go
   through the slow lookup again. vaddr/len/val are unused; the whole
   cache (or a single TLB slot's entries) is invalidated instead. */
void mts64_api_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len,
                     m_uint32_t val,int tlb_index)
{
   /* Without a TLB index (-1), flush the complete MTS64 cache;
      otherwise invalidate only the entries of that TLB slot. */
   if (tlb_index == -1) {
      mts64_invalidate_cache(cpu);
   } else {
      mts64_invalidate_tlb_entry(cpu,tlb_index);
   }
}
1607 |
|
1608 |
/* Rebuild: simply flush the lazily-filled cache; it repopulates itself */
void mts64_api_rebuild(cpu_mips_t *cpu)
{
   mts64_invalidate_cache(cpu);
}
1612 |
|
1613 |
/* ======================================================================== */ |
1614 |
|
1615 |
/* Initialize memory 32-bit access vectors */ |
1616 |
void mts32_init_memop_vectors(cpu_mips_t *cpu) |
1617 |
{ |
1618 |
/* XXX TODO: |
1619 |
* - LD/SD forbidden in Supervisor/User modes with 32-bit addresses. |
1620 |
*/ |
1621 |
|
1622 |
/* API vectors */ |
1623 |
cpu->mts_map = mts32_api_map; |
1624 |
cpu->mts_unmap = mts32_api_unmap; |
1625 |
cpu->mts_rebuild = mts32_api_rebuild; |
1626 |
|
1627 |
/* memory lookup operation */ |
1628 |
cpu->mem_op_lookup = mts32_lookup; |
1629 |
|
1630 |
/* Load Operations */ |
1631 |
cpu->mem_op_fn[MIPS_MEMOP_LB] = mts32_lb; |
1632 |
cpu->mem_op_fn[MIPS_MEMOP_LBU] = mts32_lbu; |
1633 |
cpu->mem_op_fn[MIPS_MEMOP_LH] = mts32_lh; |
1634 |
cpu->mem_op_fn[MIPS_MEMOP_LHU] = mts32_lhu; |
1635 |
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts32_lw; |
1636 |
cpu->mem_op_fn[MIPS_MEMOP_LWU] = mts32_lwu; |
1637 |
cpu->mem_op_fn[MIPS_MEMOP_LD] = mts32_ld; |
1638 |
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts32_ldl; |
1639 |
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts32_ldr; |
1640 |
|
1641 |
/* Store Operations */ |
1642 |
cpu->mem_op_fn[MIPS_MEMOP_SB] = mts32_sb; |
1643 |
cpu->mem_op_fn[MIPS_MEMOP_SH] = mts32_sh; |
1644 |
cpu->mem_op_fn[MIPS_MEMOP_SW] = mts32_sw; |
1645 |
cpu->mem_op_fn[MIPS_MEMOP_SD] = mts32_sd; |
1646 |
|
1647 |
/* Load Left/Right operations */ |
1648 |
cpu->mem_op_fn[MIPS_MEMOP_LWL] = mts32_lwl; |
1649 |
cpu->mem_op_fn[MIPS_MEMOP_LWR] = mts32_lwr; |
1650 |
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts32_ldl; |
1651 |
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts32_ldr; |
1652 |
|
1653 |
/* Store Left/Right operations */ |
1654 |
cpu->mem_op_fn[MIPS_MEMOP_SWL] = mts32_swl; |
1655 |
cpu->mem_op_fn[MIPS_MEMOP_SWR] = mts32_swr; |
1656 |
cpu->mem_op_fn[MIPS_MEMOP_SDL] = mts32_sdl; |
1657 |
cpu->mem_op_fn[MIPS_MEMOP_SDR] = mts32_sdr; |
1658 |
|
1659 |
/* LL/SC - Load Linked / Store Conditional */ |
1660 |
cpu->mem_op_fn[MIPS_MEMOP_LL] = mts32_ll; |
1661 |
cpu->mem_op_fn[MIPS_MEMOP_SC] = mts32_sc; |
1662 |
|
1663 |
/* Coprocessor 1 memory access functions */ |
1664 |
cpu->mem_op_fn[MIPS_MEMOP_LDC1] = mts32_ldc1; |
1665 |
cpu->mem_op_fn[MIPS_MEMOP_SDC1] = mts32_sdc1; |
1666 |
|
1667 |
/* Cache Operation */ |
1668 |
cpu->mem_op_fn[MIPS_MEMOP_CACHE] = mts_cache; |
1669 |
|
1670 |
#if 0 |
1671 |
#if defined(FAST_ASM) && MTSASM_ENABLE |
1672 |
if (cpu->vm->jit_use) |
1673 |
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts32_lw_asm; |
1674 |
#endif |
1675 |
#endif |
1676 |
} |
1677 |
|
1678 |
/* Initialize memory 64-bit access vectors */ |
1679 |
void mts64_init_memop_vectors(cpu_mips_t *cpu) |
1680 |
{ |
1681 |
/* API vectors */ |
1682 |
cpu->mts_map = mts64_api_map; |
1683 |
cpu->mts_unmap = mts64_api_unmap; |
1684 |
cpu->mts_rebuild = mts64_api_rebuild; |
1685 |
|
1686 |
/* memory lookup operation */ |
1687 |
cpu->mem_op_lookup = mts64_lookup; |
1688 |
|
1689 |
cpu->translate = mts64_translate; |
1690 |
|
1691 |
/* Load Operations */ |
1692 |
cpu->mem_op_fn[MIPS_MEMOP_LB] = mts64_lb; |
1693 |
cpu->mem_op_fn[MIPS_MEMOP_LBU] = mts64_lbu; |
1694 |
cpu->mem_op_fn[MIPS_MEMOP_LH] = mts64_lh; |
1695 |
cpu->mem_op_fn[MIPS_MEMOP_LHU] = mts64_lhu; |
1696 |
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts64_lw; |
1697 |
cpu->mem_op_fn[MIPS_MEMOP_LWU] = mts64_lwu; |
1698 |
cpu->mem_op_fn[MIPS_MEMOP_LD] = mts64_ld; |
1699 |
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts64_ldl; |
1700 |
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts64_ldr; |
1701 |
|
1702 |
/* Store Operations */ |
1703 |
cpu->mem_op_fn[MIPS_MEMOP_SB] = mts64_sb; |
1704 |
cpu->mem_op_fn[MIPS_MEMOP_SH] = mts64_sh; |
1705 |
cpu->mem_op_fn[MIPS_MEMOP_SW] = mts64_sw; |
1706 |
cpu->mem_op_fn[MIPS_MEMOP_SD] = mts64_sd; |
1707 |
|
1708 |
/* Load Left/Right operations */ |
1709 |
cpu->mem_op_fn[MIPS_MEMOP_LWL] = mts64_lwl; |
1710 |
cpu->mem_op_fn[MIPS_MEMOP_LWR] = mts64_lwr; |
1711 |
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts64_ldl; |
1712 |
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts64_ldr; |
1713 |
|
1714 |
/* Store Left/Right operations */ |
1715 |
cpu->mem_op_fn[MIPS_MEMOP_SWL] = mts64_swl; |
1716 |
cpu->mem_op_fn[MIPS_MEMOP_SWR] = mts64_swr; |
1717 |
cpu->mem_op_fn[MIPS_MEMOP_SDL] = mts64_sdl; |
1718 |
cpu->mem_op_fn[MIPS_MEMOP_SDR] = mts64_sdr; |
1719 |
|
1720 |
/* LL/SC - Load Linked / Store Conditional */ |
1721 |
cpu->mem_op_fn[MIPS_MEMOP_LL] = mts64_ll; |
1722 |
cpu->mem_op_fn[MIPS_MEMOP_SC] = mts64_sc; |
1723 |
|
1724 |
/* Coprocessor 1 memory access functions */ |
1725 |
cpu->mem_op_fn[MIPS_MEMOP_LDC1] = mts64_ldc1; |
1726 |
cpu->mem_op_fn[MIPS_MEMOP_SDC1] = mts64_sdc1; |
1727 |
|
1728 |
/* Cache Operation */ |
1729 |
cpu->mem_op_fn[MIPS_MEMOP_CACHE] = mts_cache; |
1730 |
} |
1731 |
|
1732 |
/* Initialize memory access vectors */
void mts_init_memop_vectors(cpu_mips_t *cpu)
{
   /* TEST: unconditionally select the 64-bit MTS implementation for now;
      the 32-bit variant is kept but not chosen here */
   mts64_init_memop_vectors(cpu);
}
1738 |
|
1739 |
/* Shutdown MTS subsystem: release the resources of both the 32-bit and
   64-bit translation caches for this CPU */
void mts_shutdown(cpu_mips_t *cpu)
{
   mts32_shutdown(cpu);
   mts64_shutdown(cpu);
}
1745 |
|
1746 |
/* Copy a memory block from VM physical RAM to real host.
 * Silently does nothing if no device covers 'paddr'.
 * NOTE(review): the copy is not clipped to the device's length — a 'len'
 * crossing the device boundary reads past its mapping; callers must
 * stay within one device. */
void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer,
                          m_uint64_t paddr,size_t len)
{
   struct vdevice *vm_ram;
   u_char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {
      /* only direct-mapped (host_addr != 0) devices are supported */
      assert(vm_ram->host_addr != 0);
      ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      memcpy(real_buffer,ptr,len);
   }
}
1759 |
|
1760 |
/* Copy a memory block to VM physical RAM from real host.
 * Mirror of physmem_copy_from_vm; silently does nothing if no device
 * covers 'paddr'. */
void physmem_copy_to_vm(vm_instance_t *vm,void *real_buffer,
                        m_uint64_t paddr,size_t len)
{
   struct vdevice *vm_ram;
   u_char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {
      /* only direct-mapped (host_addr != 0) devices are supported */
      assert(vm_ram->host_addr != 0);
      ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      memcpy(ptr,real_buffer,len);
   }
}
1773 |
|
1774 |
/* Copy a 32-bit word from the VM physical RAM to real host.
 * Returns 0 if no device covers 'paddr'. Byte-swaps from VM order. */
m_uint32_t physmem_copy_u32_from_vm(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return(0);

   offset = paddr - dev->phys_addr;

   /* directly-mapped device: read straight from host memory */
   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      /* device handler: a NULL return means the value is in 'tmp'
         (NOTE(review): assumes the handler always writes 'tmp' on a
         read when returning NULL — confirm handler contract) */
      ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_READ,&tmp);
      if (!ptr) return(tmp);
   }

   return(vmtoh32(*(m_uint32_t *)ptr));
}
1796 |
|
1797 |
/* Copy a 32-bit word to the VM physical RAM from real host.
 * Silently does nothing if no device covers 'paddr'. Byte-swaps to
 * VM order for the direct-mapped path. */
void physmem_copy_u32_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t val)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return;

   offset = paddr - dev->phys_addr;

   /* directly-mapped device: write straight into host memory */
   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      /* device handler: a NULL return means the write was consumed */
      tmp = val;
      ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_WRITE,&tmp);
      if (!ptr) return;
   }

   *(m_uint32_t *)ptr = htovm32(val);
}
1820 |
|
1821 |
/* Copy a 16-bit word from the VM physical RAM to real host.
 * Same structure as physmem_copy_u32_from_vm with a 2-byte access.
 * Returns 0 if no device covers 'paddr'. */
m_uint16_t physmem_copy_u16_from_vm(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return(0);

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      /* device handler: a NULL return means the value is in 'tmp' */
      ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_READ,&tmp);
      if (!ptr) return(tmp);
   }

   return(vmtoh16(*(m_uint16_t *)ptr));
}
1843 |
|
1844 |
/* Copy a 16-bit word to the VM physical RAM from real host.
 * Same structure as physmem_copy_u32_to_vm with a 2-byte access. */
void physmem_copy_u16_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint16_t val)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return;

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      /* device handler: a NULL return means the write was consumed */
      tmp = val;
      ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_WRITE,&tmp);
      if (!ptr) return;
   }

   *(m_uint16_t *)ptr = htovm16(val);
}
1867 |
|
1868 |
/* DMA transfer operation: copy 'len' bytes between two VM physical
 * addresses. Both endpoints must be covered by directly-mapped devices;
 * otherwise the transfer is skipped and an error is logged.
 * NOTE(review): uses memcpy — overlapping source/destination ranges
 * would need memmove; confirm callers never overlap. */
void physmem_dma_transfer(vm_instance_t *vm,m_uint64_t src,m_uint64_t dst,
                          size_t len)
{
   struct vdevice *src_dev,*dst_dev;
   u_char *sptr,*dptr;

   src_dev = dev_lookup(vm,src,FALSE);
   dst_dev = dev_lookup(vm,dst,FALSE);

   if ((src_dev != NULL) && (dst_dev != NULL)) {
      /* only direct-mapped devices are supported */
      assert(src_dev->host_addr != 0);
      assert(dst_dev->host_addr != 0);

      sptr = (u_char *)src_dev->host_addr + (src - src_dev->phys_addr);
      dptr = (u_char *)dst_dev->host_addr + (dst - dst_dev->phys_addr);
      memcpy(dptr,sptr,len);
   } else {
      vm_log(vm,"DMA","unable to transfer from 0x%llx to 0x%llx (len=%lu)\n",
             src,dst,(u_long)len);
   }
}
1890 |
|
1891 |
/* strlen in VM physical memory.
 * Returns 0 if no cached device covers 'paddr' (dev_lookup with TRUE —
 * presumably restricts to cached/RAM devices; confirm).
 * NOTE(review): strlen runs off the device mapping if the string is not
 * NUL-terminated within it. */
size_t physmem_strlen(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *vm_ram;
   size_t len = 0;
   char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,TRUE)) != NULL) {
      ptr = (char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      len = strlen(ptr);
   }

   return(len);
}
1905 |
|
1906 |
/* Physical memory dump (32-bit words): log 'u32_count' consecutive words
 * starting at 'paddr', one "address: value" line each */
void physmem_dump_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t u32_count)
{
   m_uint32_t i;

   for(i=0;i<u32_count;i++) {
      /* i<<2: step by one 32-bit word per iteration */
      vm_log(vm,"physmem_dump","0x%8.8llx: 0x%8.8x\n",
             paddr+(i<<2),physmem_copy_u32_from_vm(vm,paddr+(i<<2)));
   }
}