/[dynamips]/trunk/ppc32_mem.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/ppc32_mem.c



Revision 12
Sat Oct 6 16:45:40 2007 UTC (16 years, 5 months ago) by dpavlin
File MIME type: text/plain
File size: 28543 byte(s)
Log message: make working copy

1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2006 Christophe Fillot (cf@utc.fr)
4 *
5 * PowerPC MMU.
6 */
7
8 #define _GNU_SOURCE
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <unistd.h>
12 #include <string.h>
13 #include <sys/types.h>
14 #include <sys/stat.h>
15 #include <sys/mman.h>
16 #include <fcntl.h>
17 #include <assert.h>
18
19 #include "cpu.h"
20 #include "vm.h"
21 #include "dynamips.h"
22 #include "memory.h"
23 #include "device.h"
24 #include "ppc32_jit.h"
25
26 #define DEBUG_ICBI 0
27
28 /* Memory access with special access mask */
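/*
 * MTS_ACC_T: no translation was found; for data-side accesses this raises a
 *            DSI exception (instruction-side faults are left to the caller).
 * MTS_ACC_U: the address maps to no device; reads return 0 and the optional
 *            undef_mem_handler is given a chance to handle the access.
 */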
29 void ppc32_access_special(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
30 m_uint32_t mask,u_int op_code,u_int op_type,
31 u_int op_size,m_uint64_t *data)
32 {
33 switch(mask) {
34 case MTS_ACC_T:
35 if (op_code != PPC_MEMOP_LOOKUP) {
36 #if DEBUG_MTS_ACC_T
37 cpu_log(cpu->gen,
38 "MTS","MMU exception for address 0x%8.8x at ia=0x%8.8x "
39 "(%s access, size=%u)\n",
40 vaddr,cpu->ia,(op_type == MTS_READ) ?
41 "read":"write",op_size);
42 //ppc32_dump_regs(cpu->gen);
43 #if MEMLOG_ENABLE
44 memlog_dump(cpu->gen);
45 #endif
46 #endif
47 if (cid == PPC32_MTS_DCACHE) {
48 cpu->dsisr = PPC32_DSISR_NOTRANS;
49
50 if (op_type == MTS_WRITE)
51 cpu->dsisr |= PPC32_DSISR_STORE;
52
53 cpu->dar = vaddr;
54 ppc32_trigger_exception(cpu,PPC32_EXC_DSI);
55 cpu_exec_loop_enter(cpu->gen);
56 }
57 }
58 break;
59
60 case MTS_ACC_U:
61 if (op_type == MTS_READ)
62 *data = 0;
63
64 if (cpu->gen->undef_mem_handler != NULL) {
65 if (cpu->gen->undef_mem_handler(cpu->gen,(m_uint64_t)vaddr,
66 op_size,op_type,data))
67 return;
68 }
69
70 #if DEBUG_MTS_ACC_U
71 if (op_type == MTS_READ)
72 cpu_log(cpu->gen,
73 "MTS","read access to undefined address 0x%8.8x at "
74 "ia=0x%8.8x (size=%u)\n",vaddr,cpu->ia,op_size);
75 else
76 cpu_log(cpu->gen,
77 "MTS","write access to undefined address 0x%8.8x at "
78 "ia=0x%8.8x, value=0x%8.8llx (size=%u)\n",
79 vaddr,cpu->ia,*data,op_size);
80 #endif
81 break;
82 }
83 }
84
85 /* Initialize the MTS subsystem for the specified CPU */
86 int ppc32_mem_init(cpu_ppc_t *cpu)
87 {
88 size_t len;
89
90 /* Allocate the cache entries and mark them all invalid (0xFF fill) */
91 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
92
93 if (!(cpu->mts_cache[PPC32_MTS_ICACHE] = malloc(len)))
94 return(-1);
95
96 if (!(cpu->mts_cache[PPC32_MTS_DCACHE] = malloc(len)))
97 return(-1);
98
99 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
100 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
101
102 cpu->mts_lookups = 0;
103 cpu->mts_misses = 0;
104 return(0);
105 }
106
107 /* Free memory used by MTS */
108 void ppc32_mem_shutdown(cpu_ppc_t *cpu)
109 {
110 if (cpu != NULL) {
111 /* Free the caches themselves */
112 free(cpu->mts_cache[PPC32_MTS_ICACHE]);
113 free(cpu->mts_cache[PPC32_MTS_DCACHE]);
114 cpu->mts_cache[PPC32_MTS_ICACHE] = NULL;
115 cpu->mts_cache[PPC32_MTS_DCACHE] = NULL;
116 }
117 }
118
119 /* Show MTS detailed information (debugging only!) */
120 void ppc32_mem_show_stats(cpu_gen_t *gen_cpu)
121 {
122 cpu_ppc_t *cpu = CPU_PPC32(gen_cpu);
123 #if DEBUG_MTS_MAP_VIRT
124 mts32_entry_t *entry;
125 u_int i,count;
126 #endif
127
128 printf("\nCPU%u: MTS statistics:\n",cpu->gen->id);
129
130 #if DEBUG_MTS_MAP_VIRT
131 printf("Instruction cache:\n");
132
133 /* Valid hash entries for Instruction Cache */
134 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
135 entry = &cpu->mts_cache[PPC32_MTS_ICACHE][i];
136
137 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
138 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
139 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
140 count++;
141 }
142 }
143
144 printf(" %u/%u valid hash entries for icache.\n",count,MTS32_HASH_SIZE);
145
146
147 printf("Data cache:\n");
148
149 /* Valid hash entries for Data Cache */
150 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
151 entry = &cpu->mts_cache[PPC32_MTS_DCACHE][i];
152
153 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
154 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
155 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
156 count++;
157 }
158 }
159
160 printf(" %u/%u valid hash entries for dcache.\n",count,MTS32_HASH_SIZE);
161 #endif
162
163 printf("\n Total lookups: %llu, misses: %llu, efficiency: %g%%\n",
164 cpu->mts_lookups, cpu->mts_misses,
165 100 - ((double)(cpu->mts_misses*100)/
166 (double)cpu->mts_lookups));
167 }
168
169 /* Invalidate the MTS caches (instruction and data) */
170 void ppc32_mem_invalidate_cache(cpu_ppc_t *cpu)
171 {
172 size_t len;
173
174 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
175 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
176 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
177 }
178
179 /*
180 * MTS mapping.
181 *
182 * It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5)
183 */
184 static no_inline struct mts32_entry *
185 ppc32_mem_map(cpu_ppc_t *cpu,u_int op_type,mts_map_t *map,
186 mts32_entry_t *entry,mts32_entry_t *alt_entry)
187 {
188 ppc32_jit_tcb_t *block;
189 struct vdevice *dev;
190 m_uint32_t offset;
191 m_iptr_t host_ptr;
192 m_uint32_t exec_flag = 0;
193 int cow;
194
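/*
 * Three possible mappings:
 *  - sparse device: use the host page returned by the sparse allocator and
 *    flag it COW when it cannot be written directly yet;
 *  - device without a usable host mapping: return the alternate entry, which
 *    encodes the device id and offset and is never stored in the MTS cache;
 *  - plain RAM/ROM: map the guest page straight to the host address.
 */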
195 if (!(dev = dev_lookup(cpu->vm,map->paddr+map->offset,map->cached)))
196 return NULL;
197
198 if (cpu->exec_phys_map) {
199 block = ppc32_jit_find_by_phys_page(cpu,map->paddr >> VM_PAGE_SHIFT);
200
201 if (block)
202 exec_flag = MTS_FLAG_EXEC;
203 }
204
205 if (dev->flags & VDEVICE_FLAG_SPARSE) {
206 host_ptr = dev_sparse_get_host_addr(cpu->vm,dev,map->paddr,op_type,&cow);
207
208 entry->gvpa = map->vaddr;
209 entry->gppa = map->paddr;
210 entry->hpa = host_ptr;
211 entry->flags = (cow) ? MTS_FLAG_COW : 0;
212 entry->flags |= exec_flag;
213 return entry;
214 }
215
216 if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) {
217 offset = (map->paddr + map->offset) - dev->phys_addr;
218
219 /* device entries are never stored in virtual TLB */
220 alt_entry->hpa = (dev->id << MTS_DEVID_SHIFT) + offset;
221 alt_entry->flags = MTS_FLAG_DEV;
222 return alt_entry;
223 }
224
225 entry->gvpa = map->vaddr;
226 entry->gppa = map->paddr;
227 entry->hpa = dev->host_addr + (map->paddr - dev->phys_addr);
228 entry->flags = exec_flag;
229 return entry;
230 }
231
232 /* BAT lookup */
233 static forced_inline int ppc32_bat_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
234 u_int cid,mts_map_t *map)
235 {
236 m_uint32_t bepi,mask,bl,pr,ubat;
237 int i;
238
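/* Build a 2-bit validity selector from MSR[PR]: supervisor mode checks the
   Vs bit (0x2) of the upper BAT register, user mode the Vp bit (0x1). */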
239 pr = (cpu->msr & PPC32_MSR_PR) >> PPC32_MSR_PR_SHIFT;
240 pr = ((~pr << 1) | pr) & 0x03;
241
242 for(i=0;i<PPC32_BAT_NR;i++) {
243 ubat = cpu->bat[cid][i].reg[0];
244
245 if (!(ubat & pr))
246 continue;
247
248 //bl = (ubat & PPC32_UBAT_BL_MASK) >> PPC32_UBAT_BL_SHIFT;
249 bl = (ubat & PPC32_UBAT_XBL_MASK) >> PPC32_UBAT_XBL_SHIFT;
250
251 mask = ~bl << PPC32_BAT_ADDR_SHIFT;
252 bepi = ubat & PPC32_UBAT_BEPI_MASK;
253
254 if (bepi == (vaddr & mask)) {
255 map->vaddr = vaddr & PPC32_MIN_PAGE_MASK;
256 map->paddr = cpu->bat[cid][i].reg[1] & PPC32_LBAT_BRPN_MASK;
257 map->paddr += map->vaddr - bepi;
258 map->offset = vaddr & PPC32_MIN_PAGE_IMASK;
259 map->cached = FALSE;
260 return(TRUE);
261 }
262 }
263
264 return(FALSE);
265 }
266
267 /* Memory slow lookup */
268 static mts32_entry_t *ppc32_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
269 u_int cid,u_int op_code,u_int op_size,
270 u_int op_type,m_uint64_t *data,
271 mts32_entry_t *alt_entry)
272 {
273 m_uint32_t hash_bucket,segment,vsid;
274 m_uint32_t hash,tmp,pteg_offset,pte_key,key,pte2;
275 mts32_entry_t *entry;
276 m_uint8_t *pte_haddr;
277 m_uint64_t paddr;
278 mts_map_t map;
279 int i;
280
281 #if DEBUG_MTS_STATS
282 cpu->mts_misses++;
283 #endif
284
285 hash_bucket = MTS32_HASH(vaddr);
286 entry = &cpu->mts_cache[cid][hash_bucket];
287
288 /* No translation - cover the 4GB space */
289 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
290 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
291 {
292 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
293 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
294 map.offset = vaddr & PPC32_MIN_PAGE_IMASK;
295 map.cached = FALSE;
296
297 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
298 goto err_undef;
299
300 return entry;
301 }
302
303 /* Walk through the BAT registers */
304 if (ppc32_bat_lookup(cpu,vaddr,cid,&map)) {
305 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
306 goto err_undef;
307
308 return entry;
309 }
310
311 if (unlikely(!cpu->sdr1))
312 goto no_pte;
313
314 /* Get the virtual segment identifier */
315 segment = vaddr >> 28;
316 vsid = cpu->sr[segment] & PPC32_SD_VSID_MASK;
317
318 /* Compute the first hash value */
319 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
320 hash ^= vsid;
321 hash &= 0x7FFFFF;
322
323 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
324 pteg_offset = (hash & 0x3FF) << 6;
325 pteg_offset |= tmp << 16;
326 pte_haddr = cpu->sdr1_hptr + pteg_offset;
327
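/* Upper PTE word to match: V=1, the VSID, H=0 and the 6-bit abbreviated
   page index (API) taken from the top of the page index. */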
328 pte_key = 0x80000000 | (vsid << 7);
329 pte_key |= (vaddr >> 22) & 0x3F;
330
331 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
332 key = vmtoh32(*(m_uint32_t *)pte_haddr);
333
334 if (key == pte_key)
335 goto pte_lookup_done;
336 }
337
338 /* Secondary hash value */
339 hash = (~hash) & 0x7FFFFF;
340
341 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
342 pteg_offset = (hash & 0x3FF) << 6;
343 pteg_offset |= tmp << 16;
344 pte_haddr = cpu->sdr1_hptr + pteg_offset;
345
346 pte_key = 0x80000040 | (vsid << 7);
347 pte_key |= (vaddr >> 22) & 0x3F;
348
349 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
350 key = vmtoh32(*(m_uint32_t *)pte_haddr);
351
352 if (key == pte_key)
353 goto pte_lookup_done;
354 }
355
356 no_pte:
357 /* No matching PTE for this virtual address */
358 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,data);
359 return NULL;
360
361 pte_lookup_done:
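/* A matching PTE was found: its lower word holds the real page number,
   extended above 32 bits by the XPN/X fields on CPUs with a physical
   address space larger than 4 GB. */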
362 pte2 = vmtoh32(*(m_uint32_t *)(pte_haddr + sizeof(m_uint32_t)));
363 paddr = pte2 & PPC32_PTEL_RPN_MASK;
364 paddr |= (pte2 & PPC32_PTEL_XPN_MASK) << (33 - PPC32_PTEL_XPN_SHIFT);
365 paddr |= (pte2 & PPC32_PTEL_X_MASK) << (32 - PPC32_PTEL_X_SHIFT);
366
367 map.vaddr = vaddr & ~PPC32_MIN_PAGE_IMASK;
368 map.paddr = paddr;
369 map.offset = vaddr & PPC32_MIN_PAGE_IMASK;
370 map.cached = FALSE;
371
372 if ((entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
373 return entry;
374
375 err_undef:
376 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,data);
377 return NULL;
378 }
379
380 /* Memory access */
381 static inline void *ppc32_mem_access(cpu_ppc_t *cpu,m_uint32_t vaddr,
382 u_int cid,u_int op_code,u_int op_size,
383 u_int op_type,m_uint64_t *data)
384 {
385 mts32_entry_t *entry,alt_entry;
386 ppc32_jit_tcb_t *block;
387 m_uint32_t hash_bucket;
388 m_uint32_t phys_page;
389 m_uint32_t ia_hash;
390 m_iptr_t haddr;
391 u_int dev_id;
392 int cow;
393
394 #if MEMLOG_ENABLE
395 /* Record the memory access */
396 memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
397 #endif
398
399 hash_bucket = MTS32_HASH(vaddr);
400 entry = &cpu->mts_cache[cid][hash_bucket];
401
402 #if DEBUG_MTS_STATS
403 cpu->mts_lookups++;
404 #endif
405
406 /* Copy-On-Write for sparse device ? */
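/* (a write hit on a COW page is forced through the slow path below so the
    sparse device handler is consulted again with a write access) */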
407 cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);
408
409 /* Slow lookup if nothing found in cache */
410 if (unlikely(((vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
411 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,op_code,op_size,op_type,
412 data,&alt_entry);
413 if (!entry)
414 return NULL;
415
416 if (entry->flags & MTS_FLAG_DEV) {
417 dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
418 haddr = entry->hpa & MTS_DEVOFF_MASK;
419 return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
420 }
421 }
422
423 /* Invalidate JIT code for written pages */
424 if ((op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_EXEC)) {
425 if (cpu->exec_phys_map) {
426 phys_page = entry->gppa >> VM_PAGE_SHIFT;
427
428 if (vaddr >= PPC32_EXC_SYS_RST) {
429 block = ppc32_jit_find_by_phys_page(cpu,phys_page);
430
431 if (block != NULL) {
432 //printf("Invalidation of block 0x%8.8x\n",block->start_ia);
433 ia_hash = ppc32_jit_get_ia_hash(block->start_ia);
434 ppc32_jit_tcb_free(cpu,block,TRUE);
435
436 if (cpu->exec_blk_map[ia_hash] == block)
437 cpu->exec_blk_map[ia_hash] = NULL;
438
439 entry->flags &= ~MTS_FLAG_EXEC;
440 }
441 }
442 }
443 }
444
445 /* Raw memory access */
446 haddr = entry->hpa + (vaddr & PPC32_MIN_PAGE_IMASK);
447 #if MEMLOG_ENABLE
448 memlog_update_read(cpu->gen,haddr);
449 #endif
450 return((void *)haddr);
451 }
452
453 /* Memory data access */
454 #define PPC32_MEM_DACCESS(cpu,vaddr,op_code,op_size,op_type,data) \
455 ppc32_mem_access((cpu),(vaddr),PPC32_MTS_DCACHE,(op_code),(op_size),\
456 (op_type),(data))
457
458 /* Virtual address to physical page translation */
459 static fastcall int ppc32_translate(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
460 m_uint32_t *phys_page)
461 {
462 mts32_entry_t *entry,alt_entry;
463 m_uint32_t hash_bucket;
464 m_uint64_t data = 0;
465
466 hash_bucket = MTS32_HASH(vaddr);
467 entry = &cpu->mts_cache[cid][hash_bucket];
468
469 /* Slow lookup if nothing found in cache */
470 if (unlikely(((m_uint32_t)vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa)) {
471 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
472 &data,&alt_entry);
473 if (!entry)
474 return(-1);
475 }
476
477 *phys_page = entry->gppa >> PPC32_MIN_PAGE_SHIFT;
478 return(0);
479 }
480
481 /* Virtual address lookup */
482 static void *ppc32_mem_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid)
483 {
484 m_uint64_t data;
485 return(ppc32_mem_access(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,&data));
486 }
487
488 /* Set a BAT register */
489 int ppc32_set_bat(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
490 {
491 struct ppc32_bat_reg *bat;
492
493 if ((bp->type != PPC32_IBAT_IDX) && (bp->type != PPC32_DBAT_IDX))
494 return(-1);
495
496 if (bp->index >= PPC32_BAT_NR)
497 return(-1);
498
499 bat = &cpu->bat[bp->type][bp->index];
500 bat->reg[0] = bp->hi;
501 bat->reg[1] = bp->lo;
502 return(0);
503 }
504
505 /* Load BAT registers from a BAT array */
506 void ppc32_load_bat_array(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
507 {
508 while(bp->index != -1) {
509 ppc32_set_bat(cpu,bp);
510 bp++;
511 }
512 }
513
514 /* Set SDR1 (page table base) and compute its host address */
515 int ppc32_set_sdr1(cpu_ppc_t *cpu,m_uint32_t sdr1)
516 {
517 struct vdevice *dev;
518 m_uint64_t pt_addr;
519
520 cpu->sdr1 = sdr1;
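/* Rebuild the physical address of the hashed page table: HTABORG gives the
   base within the low 4 GB, HTABEXT supplies the additional high-order bits
   used when the table sits above 4 GB. */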
521 pt_addr = sdr1 & PPC32_SDR1_HTABORG_MASK;
522 pt_addr |= ((m_uint64_t)(sdr1 & PPC32_SDR1_HTABEXT_MASK) << 20);
523
524 if (!(dev = dev_lookup(cpu->vm,pt_addr,TRUE))) {
525 fprintf(stderr,"ppc32_set_sdr1: unable to find haddr for SDR1=0x%8.8x\n",
526 sdr1);
527 return(-1);
528 }
529
530 cpu->sdr1_hptr = (char *)dev->host_addr + (pt_addr - dev->phys_addr);
531 return(0);
532 }
533
534 /* Initialize the page table */
535 int ppc32_init_page_table(cpu_ppc_t *cpu)
536 {
537 m_uint32_t pt_size;
538
539 if (!cpu->sdr1_hptr)
540 return(-1);
541
542 pt_size = (1 + (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK)) << 16;
543 memset(cpu->sdr1_hptr,0,pt_size);
544 return(0);
545 }
546
547 /* Map a page */
548 int ppc32_map_page(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
549 u_int wimg,u_int pp)
550 {
551 m_uint32_t hash,tmp,pteg_offset,key;
552 m_uint8_t *pte_haddr;
553 int i;
554
555 /* Compute the first hash value */
556 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
557 hash ^= vsid;
558 hash &= 0x7FFFFF;
559
560 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
561 pteg_offset = (hash & 0x3FF) << 6;
562 pteg_offset |= tmp << 16;
563 pte_haddr = cpu->sdr1_hptr + pteg_offset;
564
565 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
566 key = vmtoh32(*(m_uint32_t *)pte_haddr);
567
568 if (!(key & PPC32_PTEU_V)) {
569 hash = 0;
570 goto free_pte_found;
571 }
572 }
573
574 /* Secondary hash value */
575 hash = (~hash) & 0x7FFFFF;
576
577 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
578 pteg_offset = (hash & 0x3FF) << 6;
579 pteg_offset |= tmp << 16;
580 pte_haddr = cpu->sdr1_hptr + pteg_offset;
581
582 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
583 key = vmtoh32(*(m_uint32_t *)pte_haddr);
584
585 if (!(key & PPC32_PTEU_V)) {
586 hash = PPC32_PTEU_H;
587 goto free_pte_found;
588 }
589 }
590
591 /* No free PTE found */
592 return(-1);
593
594 free_pte_found:
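/* "hash" now carries the H bit (0 for the primary PTEG, PPC32_PTEU_H for the
   secondary one). The upper PTE word is built from V | VSID | H | API, the
   lower one from the real page number plus the WIMG and PP protection bits. */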
595 tmp = PPC32_PTEU_V | (vsid << PPC32_PTEU_VSID_SHIFT) | hash;
596 tmp |= (vaddr >> 22) & 0x3F;
597 *(m_uint32_t *)pte_haddr = htovm32(tmp);
598
599 tmp = paddr & PPC32_PTEL_RPN_MASK;
600 tmp |= (paddr >> (32 - PPC32_PTEL_X_SHIFT)) & PPC32_PTEL_X_MASK;
601 tmp |= (paddr >> (33 - PPC32_PTEL_XPN_SHIFT)) & PPC32_PTEL_XPN_MASK;
602
603 tmp |= (wimg << PPC32_PTEL_WIMG_SHIFT) + pp;
604 *(m_uint32_t *)(pte_haddr+sizeof(m_uint32_t)) = htovm32(tmp);
605 return(0);
606 }
607
608 /* Map a memory zone */
609 int ppc32_map_zone(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
610 m_uint32_t size,u_int wimg,u_int pp)
611 {
612 while(size > 0) {
613 if (ppc32_map_page(cpu,vsid,vaddr,paddr,wimg,pp) == -1)
614 return(-1);
615
616 size -= PPC32_MIN_PAGE_SIZE;
617 vaddr += PPC32_MIN_PAGE_SIZE;
618 paddr += PPC32_MIN_PAGE_SIZE;
619 }
620
621 return(0);
622 }
623
624 /* PowerPC 405 TLB masks */
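/* (indexed by the SIZE field of TLBHI: 1 KB, 4 KB, 16 KB, 64 KB, 256 KB,
    1 MB, 4 MB and 16 MB pages) */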
625 static m_uint32_t ppc405_tlb_masks[8] = {
626 0xFFFFFC00, 0xFFFFF000, 0xFFFFC000, 0xFFFF0000,
627 0xFFFC0000, 0xFFF00000, 0xFFC00000, 0xFF000000,
628 };
629
630 /* PowerPC 405 slow lookup */
631 static mts32_entry_t *ppc405_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
632 u_int cid,u_int op_code,u_int op_size,
633 u_int op_type,m_uint64_t *data,
634 mts32_entry_t *alt_entry)
635 {
636 struct ppc405_tlb_entry *tlb_entry;
637 m_uint32_t hash_bucket,mask;
638 m_uint32_t page_size;
639 mts32_entry_t *entry;
640 mts_map_t map;
641 int i;
642
643 #if DEBUG_MTS_STATS
644 cpu->mts_misses++;
645 #endif
646
647 hash_bucket = MTS32_HASH(vaddr);
648 entry = &cpu->mts_cache[cid][hash_bucket];
649
650 /* No translation - cover the 4GB space */
651 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
652 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
653 {
654 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
655 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
map.offset = vaddr & PPC32_MIN_PAGE_IMASK;
656 map.cached = FALSE;
657
658 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
659 goto err_undef;
660
661 return entry;
662 }
663
664 /* Walk through the unified TLB */
665 for(i=0;i<PPC405_TLB_ENTRIES;i++)
666 {
667 tlb_entry = &cpu->ppc405_tlb[i];
668
669 /* We want a valid entry with TID = PID */
670 if (!(tlb_entry->tlb_hi & PPC405_TLBHI_V) ||
671 (tlb_entry->tid != cpu->ppc405_pid))
672 continue;
673
674 /* Get the address mask corresponding to this entry */
675 page_size = tlb_entry->tlb_hi & PPC405_TLBHI_SIZE_MASK;
676 page_size >>= PPC405_TLBHI_SIZE_SHIFT;
677 mask = ppc405_tlb_masks[page_size];
678
679 /* Matching entry ? */
680 if ((vaddr & mask) == (tlb_entry->tlb_hi & mask)) {
681 map.vaddr = vaddr & mask;
682 map.paddr = tlb_entry->tlb_lo & mask;
map.offset = vaddr & ~mask;
683 map.cached = FALSE;
684
685 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
686 goto err_undef;
687
688 return entry;
689 }
690 }
691
692 /* No matching TLB entry for this virtual address */
693 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,data);
694 return NULL;
695
696 err_undef:
697 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,data);
698 return NULL;
699 }
700
701 /* Dump a PowerPC 405 TLB entry */
702 static void ppc405_dump_tlb_entry(cpu_ppc_t *cpu,u_int index)
703 {
704 struct ppc405_tlb_entry *entry;
705
706 entry = &cpu->ppc405_tlb[index];
707
708 printf(" %2d: hi=0x%8.8x lo=0x%8.8x tid=0x%2.2x\n",
709 index,entry->tlb_hi,entry->tlb_lo,entry->tid);
710 }
711
712 /* Dump the PowerPC 405 TLB */
713 static void ppc405_dump_tlb(cpu_gen_t *cpu)
714 {
715 cpu_ppc_t *pcpu = CPU_PPC32(cpu);
716 u_int i;
717
718 for(i=0;i<PPC405_TLB_ENTRIES;i++)
719 ppc405_dump_tlb_entry(pcpu,i);
720 }
721
722 /* === PPC Memory Operations ============================================= */
723
724 /* LBZ: Load Byte Zero */
725 fastcall void ppc32_lbz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
726 {
727 m_uint64_t data;
728 void *haddr;
729
730 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LBZ,1,MTS_READ,&data);
731 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
732 cpu->gpr[reg] = data & 0xFF;
733 }
734
735 /* LHZ: Load Half-Word Zero */
736 fastcall void ppc32_lhz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
737 {
738 m_uint64_t data;
739 void *haddr;
740
741 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data);
742 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
743 cpu->gpr[reg] = data & 0xFFFF;
744 }
745
746 /* LWZ: Load Word Zero */
747 fastcall void ppc32_lwz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
748 {
749 m_uint64_t data;
750 void *haddr;
751
752 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,4,MTS_READ,&data);
753 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
754 cpu->gpr[reg] = data;
755 }
756
757 /* LWBR: Load Word Byte Reverse */
758 fastcall void ppc32_lwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
759 {
760 m_uint64_t data;
761 void *haddr;
762
763 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWBR,4,MTS_READ,&data);
764 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
765 cpu->gpr[reg] = swap32(data);
766 }
767
768 /* LHA: Load Half-Word Algebraic */
769 fastcall void ppc32_lha(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
770 {
771 m_uint64_t data;
772 void *haddr;
773
774 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data);
775 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
776 cpu->gpr[reg] = sign_extend_32(data,16);
777 }
778
779 /* STB: Store Byte */
780 fastcall void ppc32_stb(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
781 {
782 m_uint64_t data;
783 void *haddr;
784
785 data = cpu->gpr[reg] & 0xff;
786 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STB,1,MTS_WRITE,&data);
787 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
788 }
789
790 /* STH: Store Half-Word */
791 fastcall void ppc32_sth(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
792 {
793 m_uint64_t data;
794 void *haddr;
795
796 data = cpu->gpr[reg] & 0xffff;
797 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STH,2,MTS_WRITE,&data);
798 if (likely(haddr != NULL)) *(m_uint16_t *)haddr = htovm16(data);
799 }
800
801 /* STW: Store Word */
802 fastcall void ppc32_stw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
803 {
804 m_uint64_t data;
805 void *haddr;
806
807 data = cpu->gpr[reg];
808 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STW,4,MTS_WRITE,&data);
809 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
810 }
811
812 /* STWBR: Store Word Byte Reversed */
813 fastcall void ppc32_stwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
814 {
815 m_uint64_t data;
816 void *haddr;
817
818 data = swap32(cpu->gpr[reg]);
819 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STWBR,4,MTS_WRITE,&data);
820 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
821 }
822
823 /* LSW: Load String Word */
824 fastcall void ppc32_lsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
825 {
826 m_uint64_t data;
827 void *haddr;
828
829 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LSW,1,MTS_READ,&data);
830 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
831 cpu->gpr[reg] |= (data & 0xFF) << (24 - cpu->sw_pos);
832 }
833
834 /* STSW: Store String Word */
835 fastcall void ppc32_stsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
836 {
837 m_uint64_t data;
838 void *haddr;
839
840 data = (cpu->gpr[reg] >> (24 - cpu->sw_pos)) & 0xFF;
841 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STSW,1,MTS_WRITE,&data);
842 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
843 }
844
845 /* LFD: Load Floating-Point Double */
846 fastcall void ppc32_lfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
847 {
848 m_uint64_t data;
849 void *haddr;
850
851 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LFD,8,MTS_READ,&data);
852 if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr);
853 cpu->fpu.reg[reg] = data;
854 }
855
856 /* STFD: Store Floating-Point Double */
857 fastcall void ppc32_stfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
858 {
859 m_uint64_t data;
860 void *haddr;
861
862 data = cpu->fpu.reg[reg];
863 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STFD,8,MTS_WRITE,&data);
864 if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data);
865 }
866
867 /* ICBI: Instruction Cache Block Invalidate */
868 fastcall void ppc32_icbi(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int op)
869 {
870 ppc32_jit_tcb_t *block;
871 m_uint32_t phys_page;
872
873 #if DEBUG_ICBI
874 cpu_log(cpu->gen,"MTS","ICBI: ia=0x%8.8x, vaddr=0x%8.8x\n",cpu->ia,vaddr);
875 #endif
876
877 if (!cpu->translate(cpu,vaddr,PPC32_MTS_ICACHE,&phys_page)) {
878 if (cpu->exec_phys_map) {
879 block = ppc32_jit_find_by_phys_page(cpu,phys_page);
880
881 if (block && (block->start_ia == (vaddr & PPC32_MIN_PAGE_MASK))) {
882 #if DEBUG_ICBI
883 cpu_log(cpu->gen,"MTS",
884 "ICBI: removing compiled page at 0x%8.8x, pc=0x%8.8x\n",
885 block->start_ia,cpu->ia);
886 #endif
887 ppc32_jit_tcb_free(cpu,block,TRUE);
888 cpu->exec_blk_map[ppc32_jit_get_ia_hash(vaddr)] = NULL;
889 }
890 else
891 {
892 #if DEBUG_ICBI
893 cpu_log(cpu->gen,"MTS",
894 "ICBI: trying to remove page 0x%llx with pc=0x%llx\n",
895 block->start_ia,cpu->ia);
896 #endif
897 }
898 }
899 }
900 }
901
902 /* ======================================================================== */
903
904 /* Get a BAT register pointer given a SPR index */
905 static inline m_uint32_t *ppc32_get_bat_spr_ptr(cpu_ppc_t *cpu,u_int spr)
906 {
907 m_uint32_t spr_cat,cid,index;
908
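/* IBAT0U-IBAT3L are SPRs 528-535 and DBAT0U-DBAT3L are SPRs 536-543
   (560-575 for the four extra BAT pairs of some CPUs). Bit 3 of the SPR
   number selects instruction vs data, bits 2-1 the BAT index, bit 0 the
   upper/lower register and bit 5 the second group of four BATs. */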
909 spr_cat = spr >> 5;
910 if ((spr_cat != 0x10) && (spr_cat != 0x11))
911 return NULL;
912
913 cid = (spr >> 3) & 0x1;
914 index = (spr >> 1) & 0x3;
915
916 if (spr & 0x20)
917 index += 4;
918
919 //printf("GET_BAT_SPR: SPR=%u => cid=%u, index=%u\n",spr,cid,index);
920
921 return(&cpu->bat[cid][index].reg[spr & 0x1]);
922 }
923
924 /* Get a BAT SPR */
925 m_uint32_t ppc32_get_bat_spr(cpu_ppc_t *cpu,u_int spr)
926 {
927 m_uint32_t *p;
928
929 if (!(p = ppc32_get_bat_spr_ptr(cpu,spr)))
930 return(0);
931
932 return(*p);
933 }
934
935 /* Set a BAT SPR */
936 void ppc32_set_bat_spr(cpu_ppc_t *cpu,u_int spr,m_uint32_t val)
937 {
938 m_uint32_t *p;
939
940 if ((p = ppc32_get_bat_spr_ptr(cpu,spr))) {
941 *p = val;
942 ppc32_mem_invalidate_cache(cpu);
943 }
944 }
945
946 /* ======================================================================== */
947
948 /* Rebuild MTS data structures */
949 static void ppc32_mem_rebuild_mts(cpu_gen_t *gen_cpu)
950 {
951 ppc32_mem_invalidate_cache(CPU_PPC32(gen_cpu));
952 }
953
954 /* Initialize memory access vectors */
955 void ppc32_init_memop_vectors(cpu_ppc_t *cpu)
956 {
957 /* MTS slow lookup */
958 cpu->mts_slow_lookup = ppc32_slow_lookup;
959
960 /* MTS rebuild */
961 cpu->gen->mts_rebuild = ppc32_mem_rebuild_mts;
962
963 /* MTS statistics */
964 cpu->gen->mts_show_stats = ppc32_mem_show_stats;
965
966 /* Memory lookup operation */
967 cpu->mem_op_lookup = ppc32_mem_lookup;
968
969 /* Translation operation */
970 cpu->translate = ppc32_translate;
971
972 /* Load Operations */
973 cpu->mem_op_fn[PPC_MEMOP_LBZ] = ppc32_lbz;
974 cpu->mem_op_fn[PPC_MEMOP_LHZ] = ppc32_lhz;
975 cpu->mem_op_fn[PPC_MEMOP_LWZ] = ppc32_lwz;
976
977 /* Load Operation with sign-extension */
978 cpu->mem_op_fn[PPC_MEMOP_LHA] = ppc32_lha;
979
980 /* Store Operations */
981 cpu->mem_op_fn[PPC_MEMOP_STB] = ppc32_stb;
982 cpu->mem_op_fn[PPC_MEMOP_STH] = ppc32_sth;
983 cpu->mem_op_fn[PPC_MEMOP_STW] = ppc32_stw;
984
985 /* Byte-Reversed operations */
986 cpu->mem_op_fn[PPC_MEMOP_LWBR] = ppc32_lwbr;
987 cpu->mem_op_fn[PPC_MEMOP_STWBR] = ppc32_stwbr;
988
989 /* String operations */
990 cpu->mem_op_fn[PPC_MEMOP_LSW] = ppc32_lsw;
991 cpu->mem_op_fn[PPC_MEMOP_STSW] = ppc32_stsw;
992
993 /* FPU operations */
994 cpu->mem_op_fn[PPC_MEMOP_LFD] = ppc32_lfd;
995 cpu->mem_op_fn[PPC_MEMOP_STFD] = ppc32_stfd;
996
997 /* ICBI - Instruction Cache Block Invalidate */
998 cpu->mem_op_fn[PPC_MEMOP_ICBI] = ppc32_icbi;
999 }
1000
1001 /* Restart the memory subsystem */
1002 int ppc32_mem_restart(cpu_ppc_t *cpu)
1003 {
1004 m_uint32_t family;
1005
1006 ppc32_mem_shutdown(cpu);
1007 ppc32_mem_init(cpu);
1008 ppc32_init_memop_vectors(cpu);
1009
1010 /* Override the MTS lookup vector depending on the cpu type */
1011 family = cpu->pvr & 0xFFFF0000;
1012
1013 if (family == PPC32_PVR_405) {
1014 cpu->mts_slow_lookup = ppc405_slow_lookup;
1015 cpu->gen->mmu_dump = ppc405_dump_tlb;
1016 cpu->gen->mmu_raw_dump = ppc405_dump_tlb;
1017 }
1018
1019 return(0);
1020 }
