This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /upstream/dynamips-0.2.7-RC1/ppc32_mem.c



Revision 7
Sat Oct 6 16:23:47 2007 UTC by dpavlin
File MIME type: text/plain
File size: 28129 byte(s)
dynamips-0.2.7-RC1

1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2006 Christophe Fillot (cf@utc.fr)
4 *
5 * PowerPC MMU.
6 */
7
8 #define _GNU_SOURCE
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <unistd.h>
12 #include <string.h>
13 #include <sys/types.h>
14 #include <sys/stat.h>
15 #include <sys/mman.h>
16 #include <fcntl.h>
17 #include <assert.h>
18
19 #include "cpu.h"
20 #include "vm.h"
21 #include "dynamips.h"
22 #include "memory.h"
23 #include "device.h"
24 #include "ppc32_jit.h"
25
26 #define DEBUG_ICBI 0
27
28 /* Memory access with special access mask */
29 void ppc32_access_special(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
30 m_uint32_t mask,u_int op_code,u_int op_type,
31 u_int op_size,m_uint64_t *data,u_int *exc)
32 {
33 switch(mask) {
34 case MTS_ACC_T:
35 if (op_code != PPC_MEMOP_LOOKUP) {
36 #if DEBUG_MTS_ACC_T
37 cpu_log(cpu->gen,
38 "MTS","MMU exception for address 0x%8.8x at ia=0x%8.8x "
39 "(%s access, size=%u)\n",
40 vaddr,cpu->ia,(op_type == MTS_READ) ?
41 "read":"write",op_size);
42 //ppc32_dump_regs(cpu->gen);
43 #if MEMLOG_ENABLE
44 memlog_dump(cpu->gen);
45 #endif
46 #endif
47
48 if (cid == PPC32_MTS_DCACHE) {
49 cpu->dsisr = PPC32_DSISR_NOTRANS;
50
51 if (op_type == MTS_WRITE)
52 cpu->dsisr |= PPC32_DSISR_STORE;
53
54 cpu->dar = vaddr;
55 ppc32_trigger_exception(cpu,PPC32_EXC_DSI);
56 }
57 }
58
59 *exc = 1;
60 break;
61
62 case MTS_ACC_U:
63 #if DEBUG_MTS_ACC_U
64 if (op_type == MTS_READ)
65 cpu_log(cpu->gen,
66 "MTS","read access to undefined address 0x%8.8x at "
67 "ia=0x%8.8x (size=%u)\n",vaddr,cpu->ia,op_size);
68 else
69 cpu_log(cpu->gen,
70 "MTS","write access to undefined address 0x%8.8x at "
71 "ia=0x%8.8x, value=0x%8.8llx (size=%u)\n",
72 vaddr,cpu->ia,*data,op_size);
73 #endif
74 if (op_type == MTS_READ)
75 *data = 0;
76 break;
77 }
78 }
79
80 /* Initialize the MTS subsystem for the specified CPU */
81 int ppc32_mem_init(cpu_ppc_t *cpu)
82 {
83 size_t len;
84
85 /* Initialize the cache entries to an empty/invalid state (0xFF fill) */
86 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
87
88 if (!(cpu->mts_cache[PPC32_MTS_ICACHE] = malloc(len)))
89 return(-1);
90
91 if (!(cpu->mts_cache[PPC32_MTS_DCACHE] = malloc(len)))
92 return(-1);
93
94 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
95 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
96
97 cpu->mts_lookups = 0;
98 cpu->mts_misses = 0;
99 return(0);
100 }
101
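The 0xFF fill above is what marks every MTS hash entry as invalid: an all-ones gvpa can never equal a page-aligned virtual address, so the first access to any page falls through to the slow lookup. A minimal stand-alone sketch of that check, using a stand-in hash and page mask instead of the real MTS32_HASH/PPC32_MIN_PAGE_MASK definitions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define HASH_SIZE  4096        /* stand-in for MTS32_HASH_SIZE */
#define PAGE_MASK  0xFFFFF000U /* stand-in for PPC32_MIN_PAGE_MASK */

/* stripped-down stand-in for mts32_entry_t */
typedef struct { uint32_t gvpa, gppa, flags; } entry_t;

int main(void)
{
   static entry_t cache[HASH_SIZE];

   /* 0xFF fill: every gvpa becomes 0xFFFFFFFF, which can never match a
      page-aligned virtual address, so every entry reads as invalid */
   memset(cache, 0xFF, sizeof(cache));

   uint32_t vaddr = 0x00402A30;
   entry_t *e = &cache[(vaddr >> 12) & (HASH_SIZE - 1)];

   if ((vaddr & PAGE_MASK) != e->gvpa)
      printf("miss: entry invalid, slow lookup needed\n");
   return 0;
}
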
102 /* Free memory used by MTS */
103 void ppc32_mem_shutdown(cpu_ppc_t *cpu)
104 {
105 if (cpu != NULL) {
106 /* Free the caches themselves */
107 free(cpu->mts_cache[PPC32_MTS_ICACHE]);
108 free(cpu->mts_cache[PPC32_MTS_DCACHE]);
109 cpu->mts_cache[PPC32_MTS_ICACHE] = NULL;
110 cpu->mts_cache[PPC32_MTS_DCACHE] = NULL;
111 }
112 }
113
114 /* Show MTS detailed information (debugging only!) */
115 void ppc32_mem_show_stats(cpu_gen_t *gen_cpu)
116 {
117 cpu_ppc_t *cpu = CPU_PPC32(gen_cpu);
118 #if DEBUG_MTS_MAP_VIRT
119 mts32_entry_t *entry;
120 u_int i,count;
121 #endif
122
123 printf("\nCPU%u: MTS statistics:\n",cpu->gen->id);
124
125 #if DEBUG_MTS_MAP_VIRT
126 printf("Instruction cache:\n");
127
128 /* Valid hash entries for Instruction Cache */
129 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
130 entry = &cpu->mts_cache[PPC32_MTS_ICACHE][i];
131
132 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
133 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
134 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
135 count++;
136 }
137 }
138
139 printf(" %u/%u valid hash entries for icache.\n",count,MTS32_HASH_SIZE);
140
141
142 printf("Data cache:\n");
143
144 /* Valid hash entries for Data Cache */
145 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
146 entry = &cpu->mts_cache[PPC32_MTS_DCACHE][i];
147
148 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
149 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
150 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
151 count++;
152 }
153 }
154
155 printf(" %u/%u valid hash entries for dcache.\n",count,MTS32_HASH_SIZE);
156 #endif
157
158 printf("\n Total lookups: %llu, misses: %llu, efficiency: %g%%\n",
159 cpu->mts_lookups, cpu->mts_misses,
160 100 - ((double)(cpu->mts_misses*100)/
161 (double)cpu->mts_lookups));
162 }
163
164 /* Invalidate the MTS caches (instruction and data) */
165 void ppc32_mem_invalidate_cache(cpu_ppc_t *cpu)
166 {
167 size_t len;
168
169 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
170 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
171 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
172 }
173
174 /*
175 * MTS mapping.
176 *
177 * It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5)
178 */
179 static no_inline struct mts32_entry *
180 ppc32_mem_map(cpu_ppc_t *cpu,u_int op_type,mts_map_t *map,
181 mts32_entry_t *entry,mts32_entry_t *alt_entry)
182 {
183 struct vdevice *dev;
184 m_uint32_t offset;
185 m_iptr_t host_ptr;
186 int cow;
187
188 if (!(dev = dev_lookup(cpu->vm,map->paddr,map->cached)))
189 return NULL;
190
191 if (dev->flags & VDEVICE_FLAG_SPARSE) {
192 host_ptr = dev_sparse_get_host_addr(cpu->vm,dev,map->paddr,op_type,&cow);
193
194 entry->gvpa = map->vaddr;
195 entry->gppa = map->paddr;
196 entry->hpa = host_ptr;
197 entry->flags = (cow) ? MTS_FLAG_COW : 0;
198 return entry;
199 }
200
201 if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) {
202 offset = map->paddr - dev->phys_addr;
203
204 alt_entry->gvpa = map->vaddr;
205 alt_entry->gppa = map->paddr;
206 alt_entry->hpa = (dev->id << MTS_DEVID_SHIFT) + offset;
207 alt_entry->flags = MTS_FLAG_DEV;
208 return alt_entry;
209 }
210
211 entry->gvpa = map->vaddr;
212 entry->gppa = map->paddr;
213 entry->hpa = dev->host_addr + (map->paddr - dev->phys_addr);
214 entry->flags = 0;
215 return entry;
216 }
217
218 /* BAT lookup */
219 static forced_inline int ppc32_bat_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
220 u_int cid,mts_map_t *map)
221 {
222 m_uint32_t bepi,mask,bl,pr,ubat;
223 int i;
224
225 pr = (cpu->msr & PPC32_MSR_PR) >> PPC32_MSR_PR_SHIFT;
226 pr = ((~pr << 1) | pr) & 0x03;
227
228 for(i=0;i<PPC32_BAT_NR;i++) {
229 ubat = cpu->bat[cid][i].reg[0];
230
231 if (!(ubat & pr))
232 continue;
233
234 //bl = (ubat & PPC32_UBAT_BL_MASK) >> PPC32_UBAT_BL_SHIFT;
235 bl = (ubat & PPC32_UBAT_XBL_MASK) >> PPC32_UBAT_XBL_SHIFT;
236
237 mask = ~bl << PPC32_BAT_ADDR_SHIFT;
238 bepi = ubat & PPC32_UBAT_BEPI_MASK;
239
240 if (bepi == (vaddr & mask)) {
241 map->vaddr = vaddr & PPC32_MIN_PAGE_MASK;
242 map->paddr = cpu->bat[cid][i].reg[1] & PPC32_LBAT_BRPN_MASK;
243 map->paddr += map->vaddr - bepi;
244 map->cached = FALSE;
245 return(TRUE);
246 }
247 }
248
249 return(FALSE);
250 }
251
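For reference, the core of the BAT match above is: derive an address mask from the block-length field, compare the masked effective address against BEPI, and on a hit add the in-block offset to BRPN. A self-contained sketch under assumed field layouts (the UBAT_*/LBAT_* masks below are illustrative placeholders, not the dynamips definitions):

#include <stdint.h>
#include <stdio.h>

#define UBAT_BEPI_MASK  0xFFFE0000U  /* block effective page index */
#define UBAT_BL_MASK    0x00001FFCU  /* block length field */
#define UBAT_BL_SHIFT   2
#define BAT_ADDR_SHIFT  17
#define LBAT_BRPN_MASK  0xFFFE0000U  /* block real page number */

/* Return 1 and fill *paddr if the (ubat,lbat) pair translates vaddr. */
static int bat_match(uint32_t ubat, uint32_t lbat,
                     uint32_t vaddr, uint32_t *paddr)
{
   uint32_t bl   = (ubat & UBAT_BL_MASK) >> UBAT_BL_SHIFT;
   uint32_t mask = ~bl << BAT_ADDR_SHIFT;   /* BL=0 -> 128KB block, etc. */
   uint32_t bepi = ubat & UBAT_BEPI_MASK;

   if ((vaddr & mask) != bepi)
      return 0;

   /* physical base + offset of vaddr inside the block */
   *paddr = (lbat & LBAT_BRPN_MASK) + (vaddr - bepi);
   return 1;
}

int main(void)
{
   uint32_t paddr;
   /* 256KB block at EA 0x40000000 -> RA 0x10000000 (BL=1 clears 18 low bits) */
   if (bat_match(0x40000004, 0x10000000, 0x40012345, &paddr))
      printf("paddr=0x%08x\n", (unsigned)paddr);   /* expected 0x10012345 */
   return 0;
}
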
252 /* Memory slow lookup */
253 static mts32_entry_t *ppc32_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
254 u_int cid,u_int op_code,u_int op_size,
255 u_int op_type,m_uint64_t *data,
256 u_int *exc,mts32_entry_t *alt_entry)
257 {
258 m_uint32_t hash_bucket,segment,vsid;
259 m_uint32_t hash,tmp,pteg_offset,pte_key,key,pte2;
260 mts32_entry_t *entry;
261 m_uint8_t *pte_haddr;
262 m_uint64_t paddr;
263 mts_map_t map;
264 int i;
265
266 #if DEBUG_MTS_STATS
267 cpu->mts_misses++;
268 #endif
269
270 hash_bucket = MTS32_HASH(vaddr);
271 entry = &cpu->mts_cache[cid][hash_bucket];
272
273 /* No translation - cover the 4GB space */
274 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
275 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
276 {
277 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
278 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
279 map.cached = FALSE;
280
281 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
282 goto err_undef;
283
284 return entry;
285 }
286
287 /* Walk through the BAT registers */
288 if (ppc32_bat_lookup(cpu,vaddr,cid,&map)) {
289 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
290 goto err_undef;
291
292 return entry;
293 }
294
295 if (unlikely(!cpu->sdr1))
296 goto no_pte;
297
298 /* Get the virtual segment identifier */
299 segment = vaddr >> 28;
300 vsid = cpu->sr[segment] & PPC32_SD_VSID_MASK;
301
302 /* Compute the first hash value */
303 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
304 hash ^= vsid;
305 hash &= 0x7FFFFF;
306
307 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
308 pteg_offset = (hash & 0x3FF) << 6;
309 pteg_offset |= tmp << 16;
310 pte_haddr = cpu->sdr1_hptr + pteg_offset;
311
312 pte_key = 0x80000000 | (vsid << 7);
313 pte_key |= (vaddr >> 22) & 0x3F;
314
315 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
316 key = vmtoh32(*(m_uint32_t *)pte_haddr);
317
318 if (key == pte_key)
319 goto pte_lookup_done;
320 }
321
322 /* Secondary hash value */
323 hash = (~hash) & 0x7FFFFF;
324
325 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
326 pteg_offset = (hash & 0x3FF) << 6;
327 pteg_offset |= tmp << 16;
328 pte_haddr = cpu->sdr1_hptr + pteg_offset;
329
330 pte_key = 0x80000040 | (vsid << 7);
331 pte_key |= (vaddr >> 22) & 0x3F;
332
333 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
334 key = vmtoh32(*(m_uint32_t *)pte_haddr);
335
336 if (key == pte_key)
337 goto pte_lookup_done;
338 }
339
340 no_pte:
341 /* No matching PTE for this virtual address */
342 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,
343 data,exc);
344 return NULL;
345
346 pte_lookup_done:
347 pte2 = vmtoh32(*(m_uint32_t *)(pte_haddr + sizeof(m_uint32_t)));
348 paddr = pte2 & PPC32_PTEL_RPN_MASK;
349 paddr |= (pte2 & PPC32_PTEL_XPN_MASK) << (33 - PPC32_PTEL_XPN_SHIFT);
350 paddr |= (pte2 & PPC32_PTEL_X_MASK) << (32 - PPC32_PTEL_X_SHIFT);
351
352 map.vaddr = vaddr & ~PPC32_MIN_PAGE_IMASK;
353 map.paddr = paddr;
354 map.cached = FALSE;
355
356 if ((entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
357 return entry;
358
359 err_undef:
360 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,
361 data,exc);
362 return NULL;
363 }
364
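The PTE search above follows the classic 32-bit PowerPC hashed page table: a primary hash of (page index XOR VSID), a secondary hash that is its complement, and a PTEG offset assembled from the low 10 hash bits (64-byte groups of 8 PTEs) plus the SDR1 mask-extension bits. A small stand-alone reproduction of that arithmetic, assuming a 9-bit HTMEXT field already extracted from SDR1:

#include <stdint.h>
#include <stdio.h>

/* sdr1_htmext is assumed to hold the low mask-extension bits of SDR1 */
static uint32_t pteg_offset(uint32_t hash, uint32_t sdr1_htmext)
{
   uint32_t tmp    = (hash >> 10) & sdr1_htmext; /* masked high hash bits    */
   uint32_t offset = (hash & 0x3FF) << 6;        /* 64-byte PTEG inside 64KB */
   return offset | (tmp << 16);                  /* select the 64KB group    */
}

int main(void)
{
   uint32_t vaddr = 0x10234567, vsid = 0x00001234, htmext = 0x1FF;

   uint32_t hash1 = ((vaddr >> 12) & 0xFFFF) ^ vsid;   /* primary hash   */
   hash1 &= 0x7FFFFF;
   uint32_t hash2 = (~hash1) & 0x7FFFFF;                /* secondary hash */

   printf("primary PTEG at 0x%06x, secondary at 0x%06x\n",
          (unsigned)pteg_offset(hash1, htmext),
          (unsigned)pteg_offset(hash2, htmext));
   return 0;
}
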
365 /* Memory access */
366 static inline void *ppc32_mem_access(cpu_ppc_t *cpu,m_uint32_t vaddr,
367 u_int cid,u_int op_code,u_int op_size,
368 u_int op_type,m_uint64_t *data,
369 u_int *exc)
370 {
371 mts32_entry_t *entry,alt_entry;
372 m_uint32_t hash_bucket;
373 m_iptr_t haddr;
374 u_int dev_id;
375 int cow;
376
377 #if MEMLOG_ENABLE
378 /* Record the memory access */
379 memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
380 #endif
381
382 *exc = 0;
383 hash_bucket = MTS32_HASH(vaddr);
384 entry = &cpu->mts_cache[cid][hash_bucket];
385
386 #if DEBUG_MTS_STATS
387 cpu->mts_lookups++;
388 #endif
389
390 /* Copy-On-Write for sparse device? */
391 cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);
392
393 /* Slow lookup if nothing found in cache */
394 if (unlikely(((vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
395 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,op_code,op_size,op_type,
396 data,exc,&alt_entry);
397 if (!entry)
398 return NULL;
399
400 if (entry->flags & MTS_FLAG_DEV) {
401 dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
402 haddr = entry->hpa & MTS_DEVOFF_MASK;
403 haddr += vaddr - entry->gvpa;
404 return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
405 }
406 }
407
408 /* Raw memory access */
409 haddr = entry->hpa + (vaddr & PPC32_MIN_PAGE_IMASK);
410 #if MEMLOG_ENABLE
411 memlog_update_read(cpu->gen,haddr);
412 #endif
413 return((void *)haddr);
414 }
415
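When a page is backed by a device rather than host RAM, the slow lookup returns an entry flagged MTS_FLAG_DEV whose hpa is not a pointer but a packed (device id, offset) pair, which the access routine above unpacks before calling dev_access_fast(). A sketch of that packing, with made-up shift/mask values standing in for MTS_DEVID_SHIFT and friends:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: device id in the top bits, byte offset below. */
#define DEVID_SHIFT  26
#define DEVID_MASK   0xFC000000U
#define DEVOFF_MASK  0x03FFFFFFU

static uint32_t pack_dev(uint32_t dev_id, uint32_t offset)
{
   return (dev_id << DEVID_SHIFT) | (offset & DEVOFF_MASK);
}

int main(void)
{
   uint32_t hpa    = pack_dev(5, 0x1000);
   uint32_t dev_id = (hpa & DEVID_MASK) >> DEVID_SHIFT;
   uint32_t offset = hpa & DEVOFF_MASK;

   printf("dev_id=%u offset=0x%x\n", (unsigned)dev_id, (unsigned)offset);
   return 0;
}
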
416 /* Memory data access */
417 #define PPC32_MEM_DACCESS(cpu,vaddr,op_code,op_size,op_type,data,exc) \
418 ppc32_mem_access((cpu),(vaddr),PPC32_MTS_DCACHE,(op_code),(op_size),\
419 (op_type),(data),(exc))
420
421 /* Virtual address to physical page translation */
422 static fastcall int ppc32_translate(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
423 m_uint32_t *phys_page)
424 {
425 mts32_entry_t *entry,alt_entry;
426 m_uint32_t hash_bucket;
427 m_uint64_t data = 0;
428 u_int exc = 0;
429
430 hash_bucket = MTS32_HASH(vaddr);
431 entry = &cpu->mts_cache[cid][hash_bucket];
432
433 /* Slow lookup if nothing found in cache */
434 if (unlikely(((m_uint32_t)vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa)) {
435 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
436 &data,&exc,&alt_entry);
437 if (!entry)
438 return(-1);
439 }
440
441 *phys_page = entry->gppa >> PPC32_MIN_PAGE_SHIFT;
442 return(0);
443 }
444
445 /* Virtual address lookup */
446 static void *ppc32_mem_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid)
447 {
448 m_uint64_t data;
449 u_int exc;
450 return(ppc32_mem_access(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
451 &data,&exc));
452 }
453
454 /* Set a BAT register */
455 int ppc32_set_bat(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
456 {
457 struct ppc32_bat_reg *bat;
458
459 if ((bp->type != PPC32_IBAT_IDX) && (bp->type != PPC32_DBAT_IDX))
460 return(-1);
461
462 if (bp->index >= PPC32_BAT_NR)
463 return(-1);
464
465 bat = &cpu->bat[bp->type][bp->index];
466 bat->reg[0] = bp->hi;
467 bat->reg[1] = bp->lo;
468 return(0);
469 }
470
471 /* Load BAT registers from a BAT array */
472 void ppc32_load_bat_array(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
473 {
474 while(bp->index != -1) {
475 ppc32_set_bat(cpu,bp);
476 bp++;
477 }
478 }
479
480 /* Get the host address for SDR1 */
481 int ppc32_set_sdr1(cpu_ppc_t *cpu,m_uint32_t sdr1)
482 {
483 struct vdevice *dev;
484 m_uint64_t pt_addr;
485
486 cpu->sdr1 = sdr1;
487 pt_addr = sdr1 & PPC32_SDR1_HTABORG_MASK;
488 pt_addr |= ((m_uint64_t)(sdr1 & PPC32_SDR1_HTABEXT_MASK) << 20);
489
490 if (!(dev = dev_lookup(cpu->vm,pt_addr,TRUE))) {
491 fprintf(stderr,"ppc32_set_sdr1: unable to find haddr for SDR1=0x%8.8x\n",
492 sdr1);
493 return(-1);
494 }
495
496 cpu->sdr1_hptr = (char *)dev->host_addr + (pt_addr - dev->phys_addr);
497 return(0);
498 }
499
500 /* Initialize the page table */
501 int ppc32_init_page_table(cpu_ppc_t *cpu)
502 {
503 m_uint32_t pt_size;
504
505 if (!cpu->sdr1_hptr)
506 return(-1);
507
508 pt_size = (1 + (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK)) << 16;
509 memset(cpu->sdr1_hptr,0,pt_size);
510 return(0);
511 }
512
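SDR1 as used here packs three things: the low 32 bits of the page-table origin, extension bits for 36-bit physical addressing, and a mask extension that also fixes the table size in 64KB steps. A stand-alone sketch of that decoding (the SDR1_* masks below are placeholders, not the dynamips PPC32_SDR1_* values):

#include <stdint.h>
#include <stdio.h>

#define SDR1_HTABORG_MASK  0xFFFF0000U  /* page table base, low 32 bits */
#define SDR1_HTABEXT_MASK  0x0000E000U  /* extra physical bits (36-bit) */
#define SDR1_HTMEXT_MASK   0x000001FFU  /* table size in 64KB units - 1 */

int main(void)
{
   uint32_t sdr1 = 0x02000003;   /* hypothetical value */

   uint64_t pt_addr = sdr1 & SDR1_HTABORG_MASK;
   pt_addr |= (uint64_t)(sdr1 & SDR1_HTABEXT_MASK) << 20;

   uint32_t pt_size = (1 + (sdr1 & SDR1_HTMEXT_MASK)) << 16;

   printf("page table at 0x%09llx, size %u bytes\n",
          (unsigned long long)pt_addr, (unsigned)pt_size);
   return 0;
}
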
513 /* Map a page */
514 int ppc32_map_page(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
515 u_int wimg,u_int pp)
516 {
517 m_uint32_t hash,tmp,pteg_offset,key;
518 m_uint8_t *pte_haddr;
519 int i;
520
521 /* Compute the first hash value */
522 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
523 hash ^= vsid;
524 hash &= 0x7FFFFF;
525
526 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
527 pteg_offset = (hash & 0x3FF) << 6;
528 pteg_offset |= tmp << 16;
529 pte_haddr = cpu->sdr1_hptr + pteg_offset;
530
531 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
532 key = vmtoh32(*(m_uint32_t *)pte_haddr);
533
534 if (!(key & PPC32_PTEU_V)) {
535 hash = 0;
536 goto free_pte_found;
537 }
538 }
539
540 /* Secondary hash value */
541 hash = (~hash) & 0x7FFFFF;
542
543 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
544 pteg_offset = (hash & 0x3FF) << 6;
545 pteg_offset |= tmp << 16;
546 pte_haddr = cpu->sdr1_hptr + pteg_offset;
547
548 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
549 key = vmtoh32(*(m_uint32_t *)pte_haddr);
550
551 if (!(key & PPC32_PTEU_V)) {
552 hash = PPC32_PTEU_H;
553 goto free_pte_found;
554 }
555 }
556
557 /* No free PTE found */
558 return(-1);
559
560 free_pte_found:
561 tmp = PPC32_PTEU_V | (vsid << PPC32_PTEU_VSID_SHIFT) | hash;
562 tmp |= (vaddr >> 22) & 0x3F;
563 *(m_uint32_t *)pte_haddr = htovm32(tmp);
564
565 tmp = paddr & PPC32_PTEL_RPN_MASK;
566 tmp |= (paddr >> (32 - PPC32_PTEL_X_SHIFT)) & PPC32_PTEL_X_MASK;
567 tmp |= (paddr >> (33 - PPC32_PTEL_XPN_SHIFT)) & PPC32_PTEL_XPN_MASK;
568
569 tmp |= (wimg << PPC32_PTEL_WIMG_SHIFT) + pp;
570 *(m_uint32_t *)(pte_haddr+sizeof(m_uint32_t)) = htovm32(tmp);
571 return(0);
572 }
573
574 /* Map a memory zone */
575 int ppc32_map_zone(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
576 m_uint32_t size,u_int wimg,u_int pp)
577 {
578 while(size > 0) {
579 if (ppc32_map_page(cpu,vsid,vaddr,paddr,wimg,pp) == -1)
580 return(-1);
581
582 size -= PPC32_MIN_PAGE_SIZE;
583 vaddr += PPC32_MIN_PAGE_SIZE;
584 paddr += PPC32_MIN_PAGE_SIZE;
585 }
586
587 return(0);
588 }
589
590 /* PowerPC 405 TLB masks */
591 static m_uint32_t ppc405_tlb_masks[8] = {
592 0xFFFFFC00, 0xFFFFF000, 0xFFFFC000, 0xFFFF0000,
593 0xFFFC0000, 0xFFF00000, 0xFFC00000, 0xFF000000,
594 };
595
596 /* PowerPC 405 slow lookup */
597 static mts32_entry_t *ppc405_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
598 u_int cid,u_int op_code,u_int op_size,
599 u_int op_type,m_uint64_t *data,
600 u_int *exc,mts32_entry_t *alt_entry)
601 {
602 struct ppc405_tlb_entry *tlb_entry;
603 m_uint32_t hash_bucket,mask;
604 m_uint32_t page_size;
605 mts32_entry_t *entry;
606 mts_map_t map;
607 int i;
608
609 #if DEBUG_MTS_STATS
610 cpu->mts_misses++;
611 #endif
612
613 hash_bucket = MTS32_HASH(vaddr);
614 entry = &cpu->mts_cache[cid][hash_bucket];
615
616 /* No translation - cover the 4GB space */
617 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
618 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
619 {
620 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
621 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
622 map.cached = FALSE;
623
624 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
625 goto err_undef;
626
627 return entry;
628 }
629
630 /* Walk through the unified TLB */
631 for(i=0;i<PPC405_TLB_ENTRIES;i++)
632 {
633 tlb_entry = &cpu->ppc405_tlb[i];
634
635 /* We want a valid entry with TID = PID */
636 if (!(tlb_entry->tlb_hi & PPC405_TLBHI_V) ||
637 (tlb_entry->tid != cpu->ppc405_pid))
638 continue;
639
640 /* Get the address mask corresponding to this entry */
641 page_size = tlb_entry->tlb_hi & PPC405_TLBHI_SIZE_MASK;
642 page_size >>= PPC405_TLBHI_SIZE_SHIFT;
643 mask = ppc405_tlb_masks[page_size];
644
645 /* Matching entry? */
646 if ((vaddr & mask) == (tlb_entry->tlb_hi & mask)) {
647 map.vaddr = vaddr & mask;
648 map.paddr = tlb_entry->tlb_lo & mask;
649 map.cached = FALSE;
650
651 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
652 goto err_undef;
653
654 return entry;
655 }
656 }
657
658 /* No matching TLB entry for this virtual address */
659 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,
660 data,exc);
661 return NULL;
662
663 err_undef:
664 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,
665 data,exc);
666 return NULL;
667 }
668
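On the 405 the page size is encoded directly in the TLBHI SIZE field, and the mask table above turns it into an address mask: each step quadruples the page size, from 1KB up to 16MB. A quick stand-alone check of that correspondence:

#include <stdint.h>
#include <stdio.h>

/* Same mask table as above: SIZE=0 -> 1KB pages, each step x4, SIZE=7 -> 16MB. */
static const uint32_t tlb_masks[8] = {
   0xFFFFFC00, 0xFFFFF000, 0xFFFFC000, 0xFFFF0000,
   0xFFFC0000, 0xFFF00000, 0xFFC00000, 0xFF000000,
};

int main(void)
{
   for (unsigned size = 0; size < 8; size++) {
      uint32_t mask  = tlb_masks[size];
      uint32_t bytes = ~mask + 1;             /* page size in bytes */
      printf("SIZE=%u -> mask 0x%08x (%u KB pages)\n",
             size, (unsigned)mask, (unsigned)(bytes / 1024));
   }
   return 0;
}
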
669 /* Dump a PowerPC 405 TLB entry */
670 static void ppc405_dump_tlb_entry(cpu_ppc_t *cpu,u_int index)
671 {
672 struct ppc405_tlb_entry *entry;
673
674 entry = &cpu->ppc405_tlb[index];
675
676 printf(" %2d: hi=0x%8.8x lo=0x%8.8x tid=0x%2.2x\n",
677 index,entry->tlb_hi,entry->tlb_lo,entry->tid);
678 }
679
680 /* Dump the PowerPC 405 TLB */
681 static void ppc405_dump_tlb(cpu_gen_t *cpu)
682 {
683 cpu_ppc_t *pcpu = CPU_PPC32(cpu);
684 u_int i;
685
686 for(i=0;i<PPC405_TLB_ENTRIES;i++)
687 ppc405_dump_tlb_entry(pcpu,i);
688 }
689
690 /* === PPC Memory Operations ============================================= */
691
692 /* LBZ: Load Byte Zero */
693 fastcall u_int ppc32_lbz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
694 {
695 m_uint64_t data;
696 void *haddr;
697 u_int exc;
698
699 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LBZ,1,MTS_READ,&data,&exc);
700 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
701 if (likely(!exc)) cpu->gpr[reg] = data & 0xFF;
702 return(exc);
703 }
704
705 /* LHZ: Load Half-Word Zero */
706 fastcall u_int ppc32_lhz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
707 {
708 m_uint64_t data;
709 void *haddr;
710 u_int exc;
711
712 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data,&exc);
713 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
714 if (likely(!exc)) cpu->gpr[reg] = data & 0xFFFF;
715 return(exc);
716 }
717
718 /* LWZ: Load Word Zero */
719 fastcall u_int ppc32_lwz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
720 {
721 m_uint64_t data;
722 void *haddr;
723 u_int exc;
724
725 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,4,MTS_READ,&data,&exc);
726 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
727 if (likely(!exc)) cpu->gpr[reg] = data;
728 return(exc);
729 }
730
731 /* LWBR: Load Word Byte Reverse */
732 fastcall u_int ppc32_lwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
733 {
734 m_uint64_t data;
735 void *haddr;
736 u_int exc;
737
738 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWBR,4,MTS_READ,&data,&exc);
739 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
740 if (likely(!exc)) cpu->gpr[reg] = swap32(data);
741 return(exc);
742 }
743
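The byte-reversed load above composes two conversions: vmtoh32() brings the big-endian guest word into host order, and swap32() then applies the byte reversal that lwbrx semantics require. For reference, a stand-alone equivalent of such a 32-bit byte swap, independent of the dynamips utility headers:

#include <stdint.h>
#include <stdio.h>

static uint32_t swap32_local(uint32_t v)
{
   return ((v & 0x000000FFU) << 24) |
          ((v & 0x0000FF00U) <<  8) |
          ((v & 0x00FF0000U) >>  8) |
          ((v & 0xFF000000U) >> 24);
}

int main(void)
{
   printf("0x%08x\n", (unsigned)swap32_local(0x11223344));  /* 0x44332211 */
   return 0;
}
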
744 /* LHA: Load Half-Word Algebraic */
745 fastcall u_int ppc32_lha(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
746 {
747 m_uint64_t data;
748 void *haddr;
749 u_int exc;
750
751 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data,&exc);
752 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
753 if (likely(!exc)) cpu->gpr[reg] = sign_extend_32(data,16);
754 return(exc);
755 }
756
757 /* STB: Store Byte */
758 fastcall u_int ppc32_stb(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
759 {
760 m_uint64_t data;
761 void *haddr;
762 u_int exc;
763
764 data = cpu->gpr[reg] & 0xff;
765 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STB,1,MTS_WRITE,&data,&exc);
766 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
767 return(exc);
768 }
769
770 /* STH: Store Half-Word */
771 fastcall u_int ppc32_sth(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
772 {
773 m_uint64_t data;
774 void *haddr;
775 u_int exc;
776
777 data = cpu->gpr[reg] & 0xffff;
778 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STH,2,MTS_WRITE,&data,&exc);
779 if (likely(haddr != NULL)) *(m_uint16_t *)haddr = htovm16(data);
780 return(exc);
781 }
782
783 /* STW: Store Word */
784 fastcall u_int ppc32_stw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
785 {
786 m_uint64_t data;
787 void *haddr;
788 u_int exc;
789
790 data = cpu->gpr[reg];
791 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STW,4,MTS_WRITE,&data,&exc);
792 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
793 return(exc);
794 }
795
796 /* STWBR: Store Word Byte Reversed */
797 fastcall u_int ppc32_stwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
798 {
799 m_uint64_t data;
800 void *haddr;
801 u_int exc;
802
803 data = swap32(cpu->gpr[reg]);
804 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STWBR,4,MTS_WRITE,&data,&exc);
805 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
806 return(exc);
807 }
808
809 /* LSW: Load String Word */
810 fastcall u_int ppc32_lsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
811 {
812 m_uint64_t data;
813 void *haddr;
814 u_int exc;
815
816 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LSW,1,MTS_READ,&data,&exc);
817 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
818 if (likely(!exc)) cpu->gpr[reg] |= (data & 0xFF) << (24 - cpu->sw_pos);
819 return(exc);
820 }
821
822 /* STSW: Store String Word */
823 fastcall u_int ppc32_stsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
824 {
825 m_uint64_t data;
826 void *haddr;
827 u_int exc;
828
829 data = (cpu->gpr[reg] >> (24 - cpu->sw_pos)) & 0xFF;
830 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STSW,1,MTS_WRITE,&data,&exc);
831 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
832 return(exc);
833 }
834
835 /* LFD: Load Floating-Point Double */
836 fastcall u_int ppc32_lfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
837 {
838 m_uint64_t data;
839 void *haddr;
840 u_int exc;
841
842 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,8,MTS_READ,&data,&exc);
843 if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr);
844 if (likely(!exc)) cpu->fpu.reg[reg] = data;
845 return(exc);
846 }
847
848 /* STFD: Store Floating-Point Double */
849 fastcall u_int ppc32_stfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
850 {
851 m_uint64_t data;
852 void *haddr;
853 u_int exc;
854
855 data = cpu->fpu.reg[reg];
856 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STW,8,MTS_WRITE,&data,&exc);
857 if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data);
858 return(exc);
859 }
860
861 /* ICBI: Instruction Cache Block Invalidate */
862 fastcall u_int ppc32_icbi(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int op)
863 {
864 ppc32_jit_tcb_t *block;
865 m_uint32_t phys_page;
866
867 #if DEBUG_ICBI
868 cpu_log(cpu->gen,"MTS","ICBI: ia=0x%8.8x, vaddr=0x%8.8x\n",cpu->ia,vaddr);
869 #endif
870
871 if (!cpu->translate(cpu,vaddr,PPC32_MTS_ICACHE,&phys_page)) {
872 if ((phys_page < 1048576) && cpu->exec_phys_map) {
873 block = cpu->exec_phys_map[phys_page];
874
875 if (block) {
876 if ((cpu->ia < block->start_ia) ||
877 ((cpu->ia - block->start_ia) >= PPC32_MIN_PAGE_SIZE))
878 {
879 #if DEBUG_ICBI
880 cpu_log(cpu->gen,"MTS",
881 "ICBI: removing compiled page at 0x%8.8x, pc=0x%8.8x\n",
882 block->start_ia,cpu->ia);
883 #endif
884 cpu->exec_phys_map[phys_page] = NULL;
885 ppc32_jit_tcb_free(cpu,block,TRUE);
886 }
887 else
888 {
889 #if DEBUG_ICBI
890 cpu_log(cpu->gen,"MTS",
891 "ICBI: trying to remove page 0x%llx with pc=0x%llx\n",
892 block->start_ia,cpu->ia);
893 #endif
894 }
895 }
896 }
897 }
898
899 return(0);
900 }
901
902 /* ======================================================================== */
903
904 /* Get a BAT register pointer given a SPR index */
905 static inline m_uint32_t *ppc32_get_bat_spr_ptr(cpu_ppc_t *cpu,u_int spr)
906 {
907 m_uint32_t spr_cat,cid,index;
908
909 spr_cat = spr >> 5;
910 if ((spr_cat != 0x10) && (spr_cat != 0x11))
911 return NULL;
912
913 cid = (spr >> 3) & 0x1;
914 index = (spr >> 1) & 0x3;
915
916 if (spr & 0x20)
917 index += 4;
918
919 //printf("GET_BAT_SPR: SPR=%u => cid=%u, index=%u\n",spr,cid,index);
920
921 return(&cpu->bat[cid][index].reg[spr & 0x1]);
922 }
923
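The decoder above slices a BAT SPR number into (instruction/data, BAT index, upper/lower): bit 3 picks IBAT vs DBAT, bits 2:1 the pair index, bit 0 upper vs lower, and bit 5 selects the extra BAT pairs present on some 74xx parts. A small stand-alone check of that bit slicing using a few well-known BAT SPR numbers:

#include <stdio.h>

int main(void)
{
   /* 0x210 (IBAT0U), 0x217 (IBAT3L), 0x218 (DBAT0U),
      0x23F (DBAT7L on CPUs with eight BAT pairs) */
   unsigned sprs[] = { 0x210, 0x217, 0x218, 0x23F };

   for (unsigned i = 0; i < sizeof(sprs)/sizeof(sprs[0]); i++) {
      unsigned spr   = sprs[i];
      unsigned cid   = (spr >> 3) & 0x1;                     /* 0=IBAT, 1=DBAT */
      unsigned index = ((spr >> 1) & 0x3) + ((spr & 0x20) ? 4 : 0);
      unsigned reg   = spr & 0x1;                            /* 0=upper, 1=lower */

      printf("SPR 0x%03x -> %cBAT%u%c\n",
             spr, cid ? 'D' : 'I', index, reg ? 'L' : 'U');
   }
   return 0;
}
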
924 /* Get a BAT SPR */
925 m_uint32_t ppc32_get_bat_spr(cpu_ppc_t *cpu,u_int spr)
926 {
927 m_uint32_t *p;
928
929 if (!(p = ppc32_get_bat_spr_ptr(cpu,spr)))
930 return(0);
931
932 return(*p);
933 }
934
935 /* Set a BAT SPR */
936 void ppc32_set_bat_spr(cpu_ppc_t *cpu,u_int spr,m_uint32_t val)
937 {
938 m_uint32_t *p;
939
940 if ((p = ppc32_get_bat_spr_ptr(cpu,spr))) {
941 *p = val;
942 ppc32_mem_invalidate_cache(cpu);
943 }
944 }
945
946 /* ======================================================================== */
947
948 /* Rebuild MTS data structures */
949 static void ppc32_mem_rebuild_mts(cpu_gen_t *gen_cpu)
950 {
951 ppc32_mem_invalidate_cache(CPU_PPC32(gen_cpu));
952 }
953
954 /* Initialize memory access vectors */
955 void ppc32_init_memop_vectors(cpu_ppc_t *cpu)
956 {
957 /* MTS slow lookup */
958 cpu->mts_slow_lookup = ppc32_slow_lookup;
959
960 /* MTS rebuild */
961 cpu->gen->mts_rebuild = ppc32_mem_rebuild_mts;
962
963 /* MTS statistics */
964 cpu->gen->mts_show_stats = ppc32_mem_show_stats;
965
966 /* Memory lookup operation */
967 cpu->mem_op_lookup = ppc32_mem_lookup;
968
969 /* Translation operation */
970 cpu->translate = ppc32_translate;
971
972 /* Load Operations */
973 cpu->mem_op_fn[PPC_MEMOP_LBZ] = ppc32_lbz;
974 cpu->mem_op_fn[PPC_MEMOP_LHZ] = ppc32_lhz;
975 cpu->mem_op_fn[PPC_MEMOP_LWZ] = ppc32_lwz;
976
977 /* Load Operation with sign-extension */
978 cpu->mem_op_fn[PPC_MEMOP_LHA] = ppc32_lha;
979
980 /* Store Operations */
981 cpu->mem_op_fn[PPC_MEMOP_STB] = ppc32_stb;
982 cpu->mem_op_fn[PPC_MEMOP_STH] = ppc32_sth;
983 cpu->mem_op_fn[PPC_MEMOP_STW] = ppc32_stw;
984
985 /* Byte-Reversed operations */
986 cpu->mem_op_fn[PPC_MEMOP_LWBR] = ppc32_lwbr;
987 cpu->mem_op_fn[PPC_MEMOP_STWBR] = ppc32_stwbr;
988
989 /* String operations */
990 cpu->mem_op_fn[PPC_MEMOP_LSW] = ppc32_lsw;
991 cpu->mem_op_fn[PPC_MEMOP_STSW] = ppc32_stsw;
992
993 /* FPU operations */
994 cpu->mem_op_fn[PPC_MEMOP_LFD] = ppc32_lfd;
995 cpu->mem_op_fn[PPC_MEMOP_STFD] = ppc32_stfd;
996
997 /* ICBI - Instruction Cache Block Invalidate */
998 cpu->mem_op_fn[PPC_MEMOP_ICBI] = ppc32_icbi;
999 }
1000
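ppc32_init_memop_vectors() wires every PPC_MEMOP_* code to its handler so the interpreter and JIT can dispatch loads and stores through one indexed table. A minimal stand-alone sketch of that dispatch pattern (enum values and handler names here are hypothetical, not the dynamips ones):

#include <stdint.h>
#include <stdio.h>

enum { MEMOP_LBZ, MEMOP_STB, MEMOP_MAX };

typedef unsigned (*memop_fn)(uint32_t vaddr, unsigned reg);

static unsigned op_lbz(uint32_t vaddr, unsigned reg)
{ printf("lbz r%u, [0x%08x]\n", reg, (unsigned)vaddr); return 0; }

static unsigned op_stb(uint32_t vaddr, unsigned reg)
{ printf("stb r%u, [0x%08x]\n", reg, (unsigned)vaddr); return 0; }

int main(void)
{
   /* memop codes index an array of handlers sharing one signature */
   memop_fn table[MEMOP_MAX] = { [MEMOP_LBZ] = op_lbz, [MEMOP_STB] = op_stb };

   table[MEMOP_LBZ](0x1000, 3);   /* dispatch by memop code */
   table[MEMOP_STB](0x2000, 4);
   return 0;
}
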
1001 /* Restart the memory subsystem */
1002 int ppc32_mem_restart(cpu_ppc_t *cpu)
1003 {
1004 m_uint32_t family;
1005
1006 ppc32_mem_shutdown(cpu);
1007 ppc32_mem_init(cpu);
1008 ppc32_init_memop_vectors(cpu);
1009
1010 /* Override the MTS lookup vector depending on the cpu type */
1011 family = cpu->pvr & 0xFFFF0000;
1012
1013 if (family == PPC32_PVR_405) {
1014 cpu->mts_slow_lookup = ppc405_slow_lookup;
1015 cpu->gen->mmu_dump = ppc405_dump_tlb;
1016 cpu->gen->mmu_raw_dump = ppc405_dump_tlb;
1017 }
1018
1019 return(0);
1020 }
