157 |
} |
} |
158 |
} |
} |
159 |
|
|
160 |
/* Allocate an L1 array */ |
/* === MTS for 64-bit address space ======================================= */ |
161 |
mts32_l1_array_t *mts32_alloc_l1_array(m_iptr_t val) |
#define MTS_ADDR_SIZE 64 |
162 |
{ |
#define MTS_PROTO(name) mts64_##name |
163 |
mts32_l1_array_t *p; |
#define MTS_PROTO_UP(name) MTS64_##name |
|
u_int i; |
|
|
|
|
|
if (!(p = malloc(sizeof(mts32_l1_array_t)))) |
|
|
return NULL; |
|
|
|
|
|
for(i=0;i<(1 << MTS32_LEVEL1_BITS);i++) |
|
|
p->entry[i] = val; |
|
|
|
|
|
return p; |
|
|
} |
|
|
|
|
|
/* Allocate an L2 array */ |
|
|
mts32_l2_array_t *mts32_alloc_l2_array(cpu_mips_t *cpu,m_iptr_t val) |
|
|
{ |
|
|
mts32_l2_array_t *p; |
|
|
u_int i; |
|
|
|
|
|
if (cpu->mts32_l2_free_list) { |
|
|
p = cpu->mts32_l2_free_list; |
|
|
cpu->mts32_l2_free_list = p->next; |
|
|
} else { |
|
|
if (!(p = m_memalign((1 << MTS_FLAG_BITS),sizeof(*p)))) |
|
|
return NULL; |
|
|
} |
|
|
|
|
|
for(i=0;i<(1 << MTS32_LEVEL2_BITS);i++) |
|
|
p->entry[i] = val; |
|
|
|
|
|
return p; |
|
|
} |
|
|
|
|
|
/* Free an L1 array */ |
|
|
void mts32_free_l1_array(mts32_l1_array_t *array) |
|
|
{ |
|
|
u_int i; |
|
|
|
|
|
if (array != NULL) { |
|
|
for(i=0;i<(1<<MTS32_LEVEL1_BITS);i++) |
|
|
if (array->entry[i] & MTS_CHAIN_MASK) |
|
|
free((void *)(array->entry[i] & ~MTS_CHAIN_MASK)); |
|
|
|
|
|
free(array); |
|
|
} |
|
|
} |
|
|
|
|
|
/* Free an L2 array */ |
|
|
void mts32_free_l2_array(cpu_mips_t *cpu,mts32_l2_array_t *array) |
|
|
{ |
|
|
array->next = cpu->mts32_l2_free_list; |
|
|
cpu->mts32_l2_free_list = array; |
|
|
} |
|
|
|
|
|
/* Set an L1 entry */
/* Fill the level-1 slots covering [start, start+len) with 'val',
   releasing any L2 array previously chained from each slot.
   NOTE(review): start and len appear to be assumed multiples of
   MTS32_LEVEL1_SIZE; if len were not, the unsigned subtraction below
   would wrap and the loop would only stop at the bounds check — confirm
   against callers (mts32_map_device/mts32_unmap pass aligned blocks). */
static void mts32_set_l1_data(cpu_mips_t *cpu,m_uint32_t start,m_uint32_t len,
                              m_iptr_t val)
{
   mts32_l1_array_t *p = cpu->mts_l1_ptr;
   m_uint32_t pos;
   m_iptr_t p2;

   while(len > 0) {
      /* Index of the L1 slot covering 'start' */
      pos = start >> (MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS);

      if (pos >= (1 << MTS32_LEVEL1_BITS))
         break;

      /* free a possible L2 array */
      if (p->entry[pos] & MTS_CHAIN_MASK) {
         p2 = p->entry[pos] & ~MTS_CHAIN_MASK;
         mts32_free_l2_array(cpu,(mts32_l2_array_t *)p2);
      }

      p->entry[pos] = val;
      start += MTS32_LEVEL1_SIZE;
      len -= MTS32_LEVEL1_SIZE;
   }
}
|
|
|
|
|
/* Fork an L1 array */ |
|
|
static int mts32_fork_l1_array(cpu_mips_t *cpu,u_int l1_pos) |
|
|
{ |
|
|
mts32_l1_array_t *p1; |
|
|
mts32_l2_array_t *p2; |
|
|
m_iptr_t entry,val; |
|
|
u_int i; |
|
|
|
|
|
p1 = cpu->mts_l1_ptr; |
|
|
entry = p1->entry[l1_pos]; |
|
|
val = ((entry & MTS_ACC_MASK) != MTS_ACC_OK) ? entry : 0; |
|
|
|
|
|
if (!(p2 = mts32_alloc_l2_array(cpu,val))) |
|
|
return(-1); |
|
|
|
|
|
/* mts32_alloc_l2_array() did the job for us */ |
|
|
if (!val) { |
|
|
for(i=0;i<(1 << MTS32_LEVEL2_BITS);i++) |
|
|
p2->entry[i] = entry + (1 << MTS32_OFFSET_BITS); |
|
|
} |
|
|
|
|
|
p1->entry[l1_pos] = (m_iptr_t)p2 | MTS_CHAIN_MASK; |
|
|
return(0); |
|
|
} |
|
|
|
|
|
/* Set address error on a complete level 1 array */ |
|
|
void mts32_set_l1_ae(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts32_l1_array_t *p1 = cpu->mts_l1_ptr; |
|
|
u_int i; |
|
|
|
|
|
for(i=0;i<(1<<MTS32_LEVEL1_BITS);i++) |
|
|
p1->entry[i] = MTS_ACC_AE; |
|
|
} |
|
|
|
|
|
/* Set an L2 entry */
/* Install 'val' in the L2 slot covering 'vaddr', forking the flat L1
   entry into an L2 array first if needed.
   Returns 0 on success, -1 if the fork (allocation) failed. */
static int mts32_set_l2_entry(cpu_mips_t *cpu,m_uint64_t vaddr,m_iptr_t val)
{
   m_uint32_t naddr = vaddr & 0xffffffff;   /* MTS32 uses only the low 32 bits */
   m_uint32_t l1_pos,l2_pos;
   mts32_l1_array_t *p1;
   mts32_l2_array_t *p2;
   m_iptr_t entry;

   p1 = cpu->mts_l1_ptr;

   /* Split the address into L1 index / L2 index */
   l1_pos = naddr >> (MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS);
   l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1);

   entry = p1->entry[l1_pos];

   /* No L2 array chained yet: fork the flat L1 entry first */
   if (!(entry & MTS_CHAIN_MASK)) {
      if (mts32_fork_l1_array(cpu,l1_pos) == -1) {
         fprintf(stderr,"mts32_set_l2_entry: unable to fork L1 entry.\n");
         return(-1);
      }

      /* Reload: the slot now points to the new L2 array */
      entry = p1->entry[l1_pos];
   }

   p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK);
   p2->entry[l2_pos] = val;
   return(0);
}
|
|
|
|
|
/* Initialize an empty MTS32 subsystem */ |
|
|
int mts32_init_empty(cpu_mips_t *cpu) |
|
|
{ |
|
|
if (cpu->state == MIPS_CPU_RUNNING) { |
|
|
cpu_log(cpu,"MTS","trying to reset MTS while the CPU is online.\n"); |
|
|
return(-1); |
|
|
} |
|
|
|
|
|
mts32_free_l1_array(cpu->mts_l1_ptr); |
|
|
|
|
|
/* Allocate a new L1 array */ |
|
|
cpu->mts_l1_ptr = mts32_alloc_l1_array(0); |
|
164 |
|
|
165 |
if (!cpu->mts_l1_ptr) |
#include "mips_mts.c" |
|
return(-1); |
|
|
|
|
|
/* Address Error on complete address space for now */ |
|
|
mts32_set_l1_ae(cpu); |
|
|
return(0); |
|
|
} |
|
|
|
|
|
/* Free memory used by MTS32 */ |
|
|
void mts32_shutdown(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts32_l2_array_t *array,*next; |
|
|
|
|
|
/* Free L1/L2 entries */ |
|
|
if (cpu->mts_l1_ptr) { |
|
|
mts32_free_l1_array(cpu->mts_l1_ptr); |
|
|
cpu->mts_l1_ptr = NULL; |
|
|
} |
|
166 |
|
|
167 |
/* Free arrays that are sitting in the free list */ |
/* === MTS for 32-bit address space ======================================= */ |
168 |
for(array=cpu->mts32_l2_free_list;array;array=next) { |
#define MTS_ADDR_SIZE 32 |
169 |
next = array->next; |
#define MTS_PROTO(name) mts32_##name |
170 |
free(array); |
#define MTS_PROTO_UP(name) MTS32_##name |
|
} |
|
|
|
|
|
cpu->mts32_l2_free_list = NULL; |
|
|
} |
|
|
|
|
|
/* Map a device at the specified virtual address */
/* Builds the MTS entry value for the device — either its host address
   (direct access) or an encoded device id (access routed through
   dev_access_fast) — and installs it over [vaddr, vaddr+len), using a
   whole L1 slot when alignment permits and L2 entries otherwise. */
void mts32_map_device(cpu_mips_t *cpu,u_int dev_id,m_uint64_t vaddr,
                      m_uint32_t offset,m_uint32_t len)
{
   struct vdevice *dev;
   m_iptr_t val;

   /* Ignore unknown or zero-length devices */
   if (!(dev = dev_get_by_id(cpu->vm,dev_id)) || !dev->phys_len)
      return;

#if DEBUG_MTS_MAP_DEV
   cpu_log(cpu,"MTS32",
           "mapping device %s (offset=0x%x,len=0x%x) at vaddr 0x%llx\n",
           dev->name,offset,len,vaddr);
#endif

   /* No host mapping (or mmap refused): encode the device id so the
      access goes through the slow device path */
   if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      val = (dev_id << MTS_DEVID_SHIFT) | MTS_DEV_MASK | MTS_ACC_OK;
   else
      val = dev->host_addr | MTS_ACC_OK;

   val += offset;

   while(len > 0) {
      /* Whole aligned L1-sized block: install directly at level 1 */
      if (!(vaddr & MTS32_LEVEL1_MASK) && !(len & MTS32_LEVEL1_MASK)) {
         mts32_set_l1_data(cpu,vaddr,MTS32_LEVEL1_SIZE,val);
         vaddr += MTS32_LEVEL1_SIZE;
         val += MTS32_LEVEL1_SIZE;
         len -= MTS32_LEVEL1_SIZE;
      } else {
         /* Otherwise fall back to page-sized L2 entries */
         mts32_set_l2_entry(cpu,vaddr,val);
         vaddr += MTS32_LEVEL2_SIZE;
         val += MTS32_LEVEL2_SIZE;
         len -= MTS32_LEVEL2_SIZE;
      }
   }
}
|
171 |
|
|
172 |
/* Map a physical address to the specified virtual address */ |
#include "mips_mts.c" |
|
void mts32_map(cpu_mips_t *cpu,m_uint64_t vaddr, |
|
|
m_uint64_t paddr,m_uint32_t len, |
|
|
int cache_access) |
|
|
{ |
|
|
struct vdevice *dev,*next_dev; |
|
|
m_uint32_t dev_offset,clen; |
|
173 |
|
|
174 |
while(len > 0) |
/* === Specific operations for MTS64 ====================================== */ |
|
{ |
|
|
#if DEBUG_MTS_MAP_VIRT |
|
|
cpu_log(cpu,"MTS32", |
|
|
"mts32_map: vaddr=0x%llx, paddr=0x%llx, len=0x%x, cache=%d\n", |
|
|
vaddr,paddr,len,cache_access); |
|
|
#endif |
|
|
dev = dev_lookup(cpu->vm,paddr,cache_access); |
|
|
next_dev = dev_lookup_next(cpu->vm,paddr,dev,cache_access); |
|
|
|
|
|
if (next_dev) |
|
|
clen = m_min(len,next_dev->phys_addr-paddr); |
|
|
else |
|
|
clen = len; |
|
|
|
|
|
if (!dev) { |
|
|
mts32_unmap(cpu,vaddr,clen,MTS_ACC_U); |
|
|
} else { |
|
|
dev_offset = paddr - dev->phys_addr; |
|
|
clen = m_min(clen,dev->phys_len); |
|
|
mts32_map_device(cpu,dev->id,vaddr,dev_offset,clen); |
|
|
} |
|
|
|
|
|
vaddr += clen; |
|
|
paddr += clen; |
|
|
len -= clen; |
|
|
} |
|
|
} |
|
|
|
|
|
/* Unmap a memory zone */
/* Overwrite [vaddr, vaddr+len) with the fault code 'val' (e.g. MTS_ACC_U),
   using whole L1 slots when alignment permits and L2 entries otherwise. */
void mts32_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len,
                 m_uint32_t val)
{
   while(len > 0)
   {
#if DEBUG_MTS_MAP_VIRT
      cpu_log(cpu,"MTS32","mts32_unmap: vaddr=0x%llx, len=0x%x\n",vaddr,len);
#endif
      /* Whole aligned L1-sized block: clear directly at level 1 */
      if (!(vaddr & MTS32_LEVEL1_MASK) && !(len & MTS32_LEVEL1_MASK)) {
         mts32_set_l1_data(cpu,vaddr,MTS32_LEVEL1_SIZE,val);
         vaddr += MTS32_LEVEL1_SIZE;
         len -= MTS32_LEVEL1_SIZE;
      } else {
         mts32_set_l2_entry(cpu,vaddr,val);
         vaddr += MTS32_LEVEL2_SIZE;
         len -= MTS32_LEVEL2_SIZE;
      }
   }
}
|
|
|
|
|
/* Map all devices for kernel mode */ |
|
|
void mts32_km_map_all_dev(cpu_mips_t *cpu) |
|
|
{ |
|
|
/* KSEG0: cached accesses */ |
|
|
mts32_map(cpu,MIPS_KSEG0_BASE,0,MTS_SIZE_512M,TRUE); |
|
|
|
|
|
/* KSEG1: uncached accesses */ |
|
|
mts32_map(cpu,MIPS_KSEG1_BASE,0,MTS_SIZE_512M,FALSE); |
|
|
} |
|
|
|
|
|
/* MTS32 raw lookup */
/* Translate vaddr through the given L1 (and, if chained, L2) array and
   return a host pointer, or NULL when the entry is not MTS_ACC_OK.
   Device entries are resolved through a 4-byte read access. */
static forced_inline
void *mts32_raw_lookup(cpu_mips_t *cpu,mts32_l1_array_t *p1,m_uint64_t vaddr)
{
   m_uint32_t naddr = vaddr & 0xffffffff;   /* MTS32 uses only the low 32 bits */
   m_uint32_t l1_pos,l2_pos,shift;
   mts32_l2_array_t *p2;
   m_iptr_t entry,haddr;
   m_uint64_t data;
   u_int dev_id;

   shift = MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS;
   l1_pos = naddr >> shift;
   entry = p1->entry[l1_pos];

   if (unlikely((entry & MTS_ACC_MASK) != MTS_ACC_OK))
      return NULL;

   /* Follow the chain to the L2 array if present; the in-entry offset
      then becomes page-relative (hence the smaller shift) */
   if (entry & MTS_CHAIN_MASK) {
      p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK);
      l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1);
      entry = p2->entry[l2_pos];
      shift = MTS32_OFFSET_BITS;
   }

   if (unlikely((entry & MTS_ACC_MASK) != MTS_ACC_OK))
      return NULL;

   /* device access */
   if (unlikely(entry & MTS_DEV_MASK)) {
      dev_id = (entry & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
      haddr = (entry & MTS_DEVOFF_MASK);
      haddr += (naddr & ((1 << shift) - 1));
      return(dev_access_fast(cpu,dev_id,haddr,4,MTS_READ,&data));
   }

   /* Raw host memory: base pointer plus in-block offset */
   haddr = entry & MTS_ADDR_MASK;
   haddr += (naddr & ((1 << shift) - 1));
   return((void *)haddr);
}
|
|
|
|
|
/* MTS32 access */
/* Translate vaddr and return a host pointer for a raw memory access.
   Returns NULL both for device accesses (already serviced through
   dev_access_fast) and for faults (serviced through mts_access_special,
   which sets *exc); callers distinguish the two via *exc. */
static forced_inline void *mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr,
                                        u_int op_code,u_int op_size,
                                        u_int op_type,m_uint64_t *data,
                                        u_int *exc)
{
   m_uint32_t naddr = vaddr & 0xffffffff;
   m_uint32_t l1_pos,l2_pos,mask,shift;
   mts32_l1_array_t *p1 = cpu->mts_l1_ptr;
   mts32_l2_array_t *p2;
   m_iptr_t entry,haddr;
   u_int dev_id;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu,vaddr,*data,op_size,op_type);
#endif

   *exc = 0;
   shift = MTS32_LEVEL2_BITS + MTS32_OFFSET_BITS;
   l1_pos = naddr >> shift;
   entry = p1->entry[l1_pos];

   /* L1-level fault: delegate to the slow path (sets *exc) */
   if (unlikely((mask = (entry & MTS_ACC_MASK)) != MTS_ACC_OK)) {
      mts_access_special(cpu,vaddr,mask,op_code,op_type,op_size,data,exc);
      return NULL;
   }

   /* do we have a level 2 entry ? */
   if (entry & MTS_CHAIN_MASK) {
      p2 = (mts32_l2_array_t *)(entry & MTS_ADDR_MASK);
      l2_pos = (naddr >> MTS32_OFFSET_BITS) & ((1 << MTS32_LEVEL2_BITS) - 1);
      entry = p2->entry[l2_pos];
      shift = MTS32_OFFSET_BITS;   /* offset is now page-relative */

      /* L2-level fault: same slow path */
      if (unlikely((mask = (entry & MTS_ACC_MASK)) != MTS_ACC_OK)) {
         mts_access_special(cpu,vaddr,mask,op_code,op_type,op_size,data,exc);
         return NULL;
      }
   }

   /* device access */
   if (unlikely(entry & MTS_DEV_MASK)) {
      dev_id = (entry & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
      haddr = (entry & MTS_DEVOFF_MASK);
      haddr += (naddr & ((1 << shift) - 1));

#if DEBUG_MTS_DEV
      cpu_log(cpu,"MTS32",
              "device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n",
              vaddr,cpu->pc,haddr);
#endif
      return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data));
   }

   /* raw memory access */
   haddr = entry & MTS_ADDR_MASK;
   haddr += (naddr & ((1 << shift) - 1));
#if MEMLOG_ENABLE
   memlog_update_read(cpu,haddr);
#endif
   return((void *)haddr);
}
|
|
|
|
|
/* Memory lookup */ |
|
|
void *mts32_lookup(cpu_mips_t *cpu,m_uint64_t vaddr) |
|
|
{ |
|
|
return(mts32_raw_lookup(cpu,cpu->mts_l1_ptr,vaddr)); |
|
|
} |
|
|
|
|
|
/* Initialize the MTS64 subsystem for the specified CPU */ |
|
|
int mts64_init(cpu_mips_t *cpu) |
|
|
{ |
|
|
size_t len; |
|
|
|
|
|
/* Initialize the cache entries to 0 (empty) */ |
|
|
len = MTS64_HASH_SIZE * sizeof(mts64_entry_t *); |
|
|
if (!(cpu->mts64_cache = malloc(len))) |
|
|
return(-1); |
|
|
|
|
|
memset(cpu->mts64_cache,0,len); |
|
|
cpu->mts64_lookups = 0; |
|
|
cpu->mts64_misses = 0; |
|
|
|
|
|
/* Reset the TLB reverse map (used for selective invalidations) */ |
|
|
memset(cpu->mts64_rmap,0,(cpu->cp0.tlb_entries * sizeof(mts64_entry_t *))); |
|
|
return(0); |
|
|
} |
|
|
|
|
|
/* Free memory used by MTS64 */ |
|
|
void mts64_shutdown(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts64_chunk_t *chunk,*next; |
|
|
int i; |
|
|
|
|
|
/* Reset the reverse map */ |
|
|
for(i=0;i<cpu->cp0.tlb_entries;i++) |
|
|
cpu->mts64_rmap[i] = NULL; |
|
|
|
|
|
/* Free the cache itself */ |
|
|
free(cpu->mts64_cache); |
|
|
cpu->mts64_cache = NULL; |
|
|
|
|
|
/* Free the chunks */ |
|
|
for(chunk=cpu->mts64_chunk_list;chunk;chunk=next) { |
|
|
next = chunk->next; |
|
|
free(chunk); |
|
|
} |
|
|
|
|
|
for(chunk=cpu->mts64_chunk_free_list;chunk;chunk=next) { |
|
|
next = chunk->next; |
|
|
free(chunk); |
|
|
} |
|
|
|
|
|
cpu->mts64_chunk_list = cpu->mts64_chunk_free_list = NULL; |
|
|
cpu->mts64_entry_free_list = NULL; |
|
|
} |
|
|
|
|
|
/* Show MTS64 detailed information (debugging only!) */ |
|
|
void mts64_show_stats(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts64_chunk_t *chunk; |
|
|
#if DEBUG_MTS_MAP_VIRT |
|
|
mts64_entry_t *entry; |
|
|
u_int i; |
|
|
#endif |
|
|
u_int count; |
|
|
|
|
|
printf("\nCPU%u: MTS64 statistics:\n",cpu->id); |
|
|
|
|
|
printf(" Total lookups: %llu, misses: %llu, efficiency: %g%%\n", |
|
|
cpu->mts64_lookups, cpu->mts64_misses, |
|
|
100 - ((double)(cpu->mts64_misses*100)/ |
|
|
(double)cpu->mts64_lookups)); |
|
|
|
|
|
#if DEBUG_MTS_MAP_VIRT |
|
|
/* Valid hash entries */ |
|
|
for(count=0,i=0;i<MTS64_HASH_SIZE;i++) { |
|
|
if ((entry = cpu->mts64_cache[i]) != NULL) { |
|
|
printf(" %4u: entry=%p, start=0x%16.16llx, " |
|
|
"len=0x%8.8x (%6u bytes), action=0x%llx\n", |
|
|
i,entry,entry->start,entry->mask,entry->mask+1, |
|
|
(m_uint64_t)entry->action); |
|
|
count++; |
|
|
} |
|
|
} |
|
|
|
|
|
printf(" %u/%u valid hash entries.\n",count,MTS64_HASH_SIZE); |
|
|
#endif |
|
|
|
|
|
/* Number of chunks */ |
|
|
for(count=0,chunk=cpu->mts64_chunk_list;chunk;chunk=chunk->next) |
|
|
count++; |
|
|
|
|
|
printf(" Number of chunks: %u\n",count); |
|
|
|
|
|
#if DEBUG_MTS_MAP_VIRT |
|
|
/* Reverse map */ |
|
|
for(i=0;i<MIPS64_TLB_ENTRIES;i++) { |
|
|
for(count=0,entry=cpu->mts64_rmap[i];entry;entry=entry->next) |
|
|
count++; |
|
|
|
|
|
if (count > 0) |
|
|
printf(" tlb_rmap[%u]: %u entries\n",i,count); |
|
|
} |
|
|
#endif |
|
|
} |
|
|
|
|
|
/* Allocate a new chunk */ |
|
|
static int mts64_alloc_chunk(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts64_chunk_t *chunk; |
|
|
|
|
|
/* Try the free list first, then use standard allocation procedure */ |
|
|
if ((chunk = cpu->mts64_chunk_free_list) != NULL) { |
|
|
cpu->mts64_chunk_free_list = chunk->next; |
|
|
} else { |
|
|
if (!(chunk = malloc(sizeof(*chunk)))) |
|
|
return(-1); |
|
|
} |
|
|
|
|
|
chunk->count = 0; |
|
|
chunk->next = cpu->mts64_chunk_list; |
|
|
cpu->mts64_chunk_list = chunk; |
|
|
return(0); |
|
|
} |
|
|
|
|
|
/* Allocate a new entry */
/* Returns a recycled entry from the free list when available, otherwise
   carves one out of the current chunk (allocating a new chunk first if
   the current one is full). Returns NULL on allocation failure. */
static mts64_entry_t *mts64_alloc_entry(cpu_mips_t *cpu)
{
   mts64_chunk_t *chunk = cpu->mts64_chunk_list;
   mts64_entry_t *entry;

   /* First, try to allocate the entry from the free list */
   if ((entry = cpu->mts64_entry_free_list) != NULL) {
      cpu->mts64_entry_free_list = cpu->mts64_entry_free_list->next;
      return entry;
   }

   /* A new chunk is required */
   if (!chunk || (chunk->count == MTS64_CHUNK_SIZE)) {
      if (mts64_alloc_chunk(cpu) == -1)
         return NULL;

      /* mts64_alloc_chunk() pushed the fresh chunk at the list head */
      chunk = cpu->mts64_chunk_list;
   }

   /* Carve the next entry out of the chunk */
   entry = &chunk->entry[chunk->count];
   chunk->count++;
   return entry;
}
|
|
|
|
|
/* Invalidate the complete MTS64 cache */ |
|
|
void mts64_invalidate_cache(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts64_chunk_t *chunk; |
|
|
size_t len; |
|
|
u_int i; |
|
|
|
|
|
len = MTS64_HASH_SIZE * sizeof(mts64_entry_t *); |
|
|
memset(cpu->mts64_cache,0,len); |
|
|
|
|
|
/* Move all chunks to the free list */ |
|
|
while((chunk = cpu->mts64_chunk_list) != NULL) { |
|
|
cpu->mts64_chunk_list = chunk->next; |
|
|
chunk->next = cpu->mts64_chunk_free_list; |
|
|
cpu->mts64_chunk_free_list = chunk; |
|
|
} |
|
|
|
|
|
/* Reset the free list of entries (since they are located in chunks) */ |
|
|
cpu->mts64_entry_free_list = NULL; |
|
|
|
|
|
/* Reset the reverse map */ |
|
|
for(i=0;i<cpu->cp0.tlb_entries;i++) |
|
|
cpu->mts64_rmap[i] = NULL; |
|
|
} |
|
|
|
|
|
/* Invalidate partially the MTS64 cache, given a TLB entry index */ |
|
|
void mts64_invalidate_tlb_entry(cpu_mips_t *cpu,u_int tlb_index) |
|
|
{ |
|
|
mts64_entry_t *entry; |
|
|
|
|
|
for(entry=cpu->mts64_rmap[tlb_index];entry;entry=entry->next) { |
|
|
*(entry->pself) = NULL; |
|
|
if (!entry->next) { |
|
|
entry->next = cpu->mts64_entry_free_list; |
|
|
break; |
|
|
} |
|
|
} |
|
|
|
|
|
cpu->mts64_entry_free_list = cpu->mts64_rmap[tlb_index]; |
|
|
cpu->mts64_rmap[tlb_index] = NULL; |
|
|
} |
|
|
|
|
|
/*
 * MTS64 mapping.
 *
 * It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5)
 */
/* Fill 'entry' so that it covers the device found at the physical
   address corresponding to vaddr within 'map'. Returns FALSE when no
   device is present there, TRUE on success. */
static no_inline int mts64_map(cpu_mips_t *cpu,m_uint64_t vaddr,mts_map_t *map,
                               mts64_entry_t *entry)
{
   struct vdevice *dev;
   m_uint64_t lk_addr;
   m_uint32_t poffset;

   /* Physical address matching vaddr inside the mapped region */
   lk_addr = map->paddr + (vaddr - map->vaddr);

   if (!(dev = dev_lookup(cpu->vm,lk_addr,map->cached)))
      return(FALSE);

   if (map->paddr > dev->phys_addr) {
      /* Mapping starts inside the device: clip to its remaining length */
      poffset = map->paddr - dev->phys_addr;
      entry->start = map->vaddr;
      entry->phys_page = map->paddr >> MIPS_MIN_PAGE_SHIFT;
      entry->mask = ~((m_min(map->len,dev->phys_len - poffset)) - 1);
      entry->action = poffset;
   } else {
      /* Device starts inside the mapping: shift the start accordingly */
      poffset = dev->phys_addr - map->paddr;
      entry->start = map->vaddr + poffset;
      entry->phys_page = (map->paddr + poffset) >> MIPS_MIN_PAGE_SHIFT;
      entry->mask = ~((m_min(map->len - poffset,dev->phys_len)) - 1);
      entry->action = 0;
   }

   /* Encode either the device id (slow device path) or the direct host
      address into the action field */
   if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      entry->action += (dev->id << MTS_DEVID_SHIFT) | MTS_DEV_MASK;
   else
      entry->action += dev->host_addr;

   return(TRUE);
}
|
175 |
|
|
176 |
/* MTS64 slow lookup */ |
/* MTS64 slow lookup */ |
177 |
static forced_inline |
static forced_inline |
186 |
|
|
187 |
map.tlb_index = -1; |
map.tlb_index = -1; |
188 |
hash_bucket = MTS64_HASH(vaddr); |
hash_bucket = MTS64_HASH(vaddr); |
189 |
entry = cpu->mts64_cache[hash_bucket]; |
entry = cpu->mts_cache[hash_bucket]; |
190 |
zone = vaddr >> 40; |
zone = vaddr >> 40; |
191 |
|
|
192 |
#if DEBUG_MTS_STATS |
#if DEBUG_MTS_STATS |
193 |
cpu->mts64_misses++; |
cpu->mts_misses++; |
194 |
#endif |
#endif |
195 |
|
|
196 |
switch(zone) { |
switch(zone) { |
273 |
entry->next = NULL; |
entry->next = NULL; |
274 |
|
|
275 |
/* Store the entry in hash table for future use */ |
/* Store the entry in hash table for future use */ |
276 |
cpu->mts64_cache[hash_bucket] = entry; |
cpu->mts_cache[hash_bucket] = entry; |
277 |
} else { |
} else { |
278 |
/* Remove the entry from the reverse map list */ |
/* Remove the entry from the reverse map list */ |
279 |
if (entry->pprev) { |
if (entry->pprev) { |
286 |
|
|
287 |
/* Add this entry to the reverse map list */ |
/* Add this entry to the reverse map list */ |
288 |
if (map.tlb_index != -1) { |
if (map.tlb_index != -1) { |
289 |
entry->pself = &cpu->mts64_cache[hash_bucket]; |
entry->pself = (mts64_entry_t **)&cpu->mts_cache[hash_bucket]; |
290 |
entry->next = cpu->mts64_rmap[map.tlb_index]; |
entry->next = cpu->mts_rmap[map.tlb_index]; |
291 |
entry->pprev = &cpu->mts64_rmap[map.tlb_index]; |
entry->pprev = (mts64_entry_t **)&cpu->mts_rmap[map.tlb_index]; |
292 |
if (entry->next) |
if (entry->next) |
293 |
entry->next->pprev = &entry->next; |
entry->next->pprev = &entry->next; |
294 |
cpu->mts64_rmap[map.tlb_index] = entry; |
cpu->mts_rmap[map.tlb_index] = entry; |
295 |
} |
} |
296 |
|
|
297 |
/* Fill the new entry or replace the previous */ |
/* Fill the new entry or replace the previous */ |
330 |
|
|
331 |
*exc = 0; |
*exc = 0; |
332 |
hash_bucket = MTS64_HASH(vaddr); |
hash_bucket = MTS64_HASH(vaddr); |
333 |
entry = cpu->mts64_cache[hash_bucket]; |
entry = cpu->mts_cache[hash_bucket]; |
334 |
|
|
335 |
#if DEBUG_MTS_STATS |
#if DEBUG_MTS_STATS |
336 |
cpu->mts64_lookups++; |
cpu->mts_lookups++; |
337 |
#endif |
#endif |
338 |
|
|
339 |
/* Slow lookup if nothing found in cache */ |
/* Slow lookup if nothing found in cache */ |
367 |
return((void *)haddr); |
return((void *)haddr); |
368 |
} |
} |
369 |
|
|
|
/* MTS64 lookup */ |
|
|
static void *mts64_lookup(cpu_mips_t *cpu,m_uint64_t vaddr) |
|
|
{ |
|
|
m_uint64_t data; |
|
|
u_int exc; |
|
|
return(mts64_access(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,&data,&exc)); |
|
|
} |
|
|
|
|
370 |
/* MTS64 virtual address to physical page translation */ |
/* MTS64 virtual address to physical page translation */ |
371 |
static fastcall int mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
static fastcall int mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
372 |
m_uint32_t *phys_page) |
m_uint32_t *phys_page) |
377 |
u_int exc = 0; |
u_int exc = 0; |
378 |
|
|
379 |
hash_bucket = MTS64_HASH(vaddr); |
hash_bucket = MTS64_HASH(vaddr); |
380 |
entry = cpu->mts64_cache[hash_bucket]; |
entry = cpu->mts_cache[hash_bucket]; |
381 |
|
|
382 |
/* Slow lookup if nothing found in cache */ |
/* Slow lookup if nothing found in cache */ |
383 |
if (unlikely((!entry) || |
if (unlikely((!entry) || |
394 |
return(0); |
return(0); |
395 |
} |
} |
396 |
|
|
397 |
/* === MIPS Memory Operations ============================================= */ |
/* === Specific operations for MTS32 ====================================== */ |
|
|
|
|
/* Macro for MTS access (32 or 64 bit) */
/* MTS_ACCESS(X) expands to mts32_access / mts64_access;
   MTS_MEMOP(op) instantiates both the 32-bit and 64-bit handlers. */
#define MTS_ACCESS(X) mts##X##_access

#define MTS_MEMOP(op) MTS_##op(32) MTS_##op(64)
|
|
|
|
|
/* LB: Load Byte.
   Loads one byte at vaddr and sign-extends it into gpr[reg]; returns
   the exception status reported by the MTS access.
   FIX: removed the stray line-continuation backslash after the closing
   brace (inconsistent with the sibling macros and fragile should a
   non-blank line ever follow). */
#define MTS_LB(X) \
fastcall u_int mts##X##_lb(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LB,1,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr; \
   if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,8); \
   return(exc); \
}
|
|
|
|
|
/* LBU: Load Byte Unsigned */
/* Loads one byte at vaddr and zero-extends it into gpr[reg]; returns
   the exception status reported by the MTS access. */
#define MTS_LBU(X) \
fastcall u_int mts##X##_lbu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LBU,1,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr; \
   if (likely(!exc)) cpu->gpr[reg] = data & 0xff; \
   return(exc); \
}
|
|
|
|
|
/* LH: Load Half-Word */
/* Loads 16 bits at vaddr (byte-swapped to host order via vmtoh16) and
   sign-extends them into gpr[reg]; returns the exception status. */
#define MTS_LH(X) \
fastcall u_int mts##X##_lh(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LH,2,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr); \
   if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,16); \
   return(exc); \
}
|
|
|
|
|
/* LHU: Load Half-Word Unsigned */
/* Loads 16 bits at vaddr (host order via vmtoh16) and zero-extends
   them into gpr[reg]; returns the exception status. */
#define MTS_LHU(X) \
fastcall u_int mts##X##_lhu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LHU,2,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr); \
   if (likely(!exc)) cpu->gpr[reg] = data & 0xffff; \
   return(exc); \
}
|
398 |
|
|
399 |
/* LW: Load Word */ |
/* MTS32 slow lookup */ |
400 |
#define MTS_LW(X) \ |
static forced_inline |
401 |
fastcall u_int mts##X##_lw(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
mts32_entry_t *mts32_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr, |
402 |
{ \ |
u_int op_code,u_int op_size, |
403 |
m_uint64_t data; \ |
u_int op_type,m_uint64_t *data, |
404 |
void *haddr; \ |
u_int *exc) |
405 |
u_int exc; \ |
{ |
406 |
\ |
m_uint32_t hash_bucket,zone; |
407 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LW,4,MTS_READ,&data,&exc); \ |
mts32_entry_t *entry,new_entry; |
408 |
if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \ |
mts_map_t map; |
|
if (likely(!exc)) cpu->gpr[reg] = sign_extend(data,32); \ |
|
|
return(exc); \ |
|
|
} |
|
|
|
|
|
/* LWU: Load Word Unsigned */
/* Loads 32 bits at vaddr (host order via vmtoh32) and zero-extends
   them into gpr[reg]; returns the exception status. */
#define MTS_LWU(X) \
fastcall u_int mts##X##_lwu(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LWU,4,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \
   if (likely(!exc)) cpu->gpr[reg] = data & 0xffffffff; \
   return(exc); \
}
|
|
|
|
|
/* LD: Load Double-Word */
/* Loads 64 bits at vaddr (host order via vmtoh64) into gpr[reg];
   returns the exception status. */
#define MTS_LD(X) \
fastcall u_int mts##X##_ld(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LD,8,MTS_READ,&data,&exc); \
   if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr); \
   if (likely(!exc)) cpu->gpr[reg] = data; \
   return(exc); \
}
|
|
|
|
|
/* SB: Store Byte */
/* Stores the low byte of gpr[reg] at vaddr; for device targets the MTS
   access consumes 'data' directly and haddr stays NULL.
   Returns the exception status. */
#define MTS_SB(X) \
fastcall u_int mts##X##_sb(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   data = cpu->gpr[reg] & 0xff; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SB,1,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data; \
   return(exc); \
}
|
|
|
|
|
/* SH: Store Half-Word */
/* Stores the low 16 bits of gpr[reg] at vaddr (converted to VM byte
   order via htovm16); returns the exception status. */
#define MTS_SH(X) \
fastcall u_int mts##X##_sh(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \
{ \
   m_uint64_t data; \
   void *haddr; \
   u_int exc; \
 \
   data = cpu->gpr[reg] & 0xffff; \
   haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SH,2,MTS_WRITE,&data,&exc); \
   if (likely(haddr != NULL)) *(m_uint16_t *)haddr = htovm16(data); \
   return(exc); \
}
|
409 |
|
|
410 |
/* SW: Store Word */ |
map.tlb_index = -1; |
411 |
#define MTS_SW(X) \ |
hash_bucket = MTS32_HASH(vaddr); |
412 |
fastcall u_int mts##X##_sw(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
entry = cpu->mts_cache[hash_bucket]; |
413 |
{ \ |
zone = (vaddr >> 29) & 0x7; |
|
m_uint64_t data; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
data = cpu->gpr[reg] & 0xffffffff; \ |
|
|
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SW,4,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \ |
|
|
return(exc); \ |
|
|
} |
|
414 |
|
|
415 |
/* SD: Store Double-Word */ |
#if DEBUG_MTS_STATS |
416 |
#define MTS_SD(X) \ |
cpu->mts_misses++; |
417 |
fastcall u_int mts##X##_sd(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
#endif |
|
{ \ |
|
|
m_uint64_t data; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
data = cpu->gpr[reg]; \ |
|
|
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SD,8,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \ |
|
|
return(exc); \ |
|
|
} |
|
418 |
|
|
419 |
/* LDC1: Load Double-Word To Coprocessor 1 */ |
switch(zone) { |
420 |
#define MTS_LDC1(X) \ |
case 0x00 ... 0x03: /* kuseg */ |
421 |
fastcall u_int mts##X##_ldc1(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
/* trigger TLB exception if no matching entry found */ |
422 |
{ \ |
if (!cp0_tlb_lookup(cpu,vaddr,&map)) |
423 |
m_uint64_t data; \ |
goto err_tlb; |
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LDC1,8,MTS_READ,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr); \ |
|
|
if (likely(!exc)) cpu->fpu.reg[reg] = data; \ |
|
|
return(exc); \ |
|
|
} |
|
424 |
|
|
425 |
/* LWL: Load Word Left */ |
if (!mts32_map(cpu,vaddr,&map,&new_entry)) |
426 |
#define MTS_LWL(X) \ |
goto err_undef; |
427 |
fastcall u_int mts##X##_lwl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
break; |
|
{ \ |
|
|
m_uint64_t r_mask,naddr; \ |
|
|
m_uint64_t data; \ |
|
|
u_int m_shift; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x03); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LWL,4,MTS_READ,&data,&exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh32(*(m_uint32_t *)haddr); \ |
|
|
\ |
|
|
if (likely(!exc)) { \ |
|
|
m_shift = (vaddr & 0x03) << 3; \ |
|
|
r_mask = (1ULL << m_shift) - 1; \ |
|
|
data <<= m_shift; \ |
|
|
\ |
|
|
cpu->gpr[reg] &= r_mask; \ |
|
|
cpu->gpr[reg] |= data; \ |
|
|
cpu->gpr[reg] = sign_extend(cpu->gpr[reg],32); \ |
|
|
} \ |
|
|
return(exc); \ |
|
|
} |
|
428 |
|
|
429 |
/* LWR: Load Word Right */ |
case 0x04: /* kseg0 */ |
430 |
#define MTS_LWR(X) \ |
map.vaddr = sign_extend(MIPS_KSEG0_BASE,32); |
431 |
fastcall u_int mts##X##_lwr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
map.paddr = 0; |
432 |
{ \ |
map.len = MIPS_KSEG0_SIZE; |
433 |
m_uint64_t r_mask,naddr; \ |
map.cached = TRUE; |
434 |
m_uint64_t data; \ |
if (!mts32_map(cpu,vaddr,&map,&new_entry)) |
435 |
u_int m_shift; \ |
goto err_undef; |
436 |
void *haddr; \ |
break; |
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x03); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LWR,4,MTS_READ,&data,&exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh32(*(m_uint32_t *)haddr); \ |
|
|
\ |
|
|
if (likely(!exc)) { \ |
|
|
m_shift = ((vaddr & 0x03) + 1) << 3; \ |
|
|
r_mask = (1ULL << m_shift) - 1; \ |
|
|
\ |
|
|
data = sign_extend(data >> (32 - m_shift),32); \ |
|
|
r_mask = sign_extend(r_mask,32); \ |
|
|
\ |
|
|
cpu->gpr[reg] &= ~r_mask; \ |
|
|
cpu->gpr[reg] |= data; \ |
|
|
} \ |
|
|
return(exc); \ |
|
|
} |
|
437 |
|
|
438 |
/* LDL: Load Double-Word Left */ |
case 0x05: /* kseg1 */ |
439 |
#define MTS_LDL(X) \ |
map.vaddr = sign_extend(MIPS_KSEG1_BASE,32); |
440 |
fastcall u_int mts##X##_ldl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
map.paddr = 0; |
441 |
{ \ |
map.len = MIPS_KSEG1_SIZE; |
442 |
m_uint64_t r_mask,naddr; \ |
map.cached = FALSE; |
443 |
m_uint64_t data; \ |
if (!mts32_map(cpu,vaddr,&map,&new_entry)) |
444 |
u_int m_shift; \ |
goto err_undef; |
445 |
void *haddr; \ |
break; |
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x07); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LDL,8,MTS_READ,&data,&exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh64(*(m_uint64_t *)haddr); \ |
|
|
\ |
|
|
if (likely(!exc)) { \ |
|
|
m_shift = (vaddr & 0x07) << 3; \ |
|
|
r_mask = (1ULL << m_shift) - 1; \ |
|
|
data <<= m_shift; \ |
|
|
\ |
|
|
cpu->gpr[reg] &= r_mask; \ |
|
|
cpu->gpr[reg] |= data; \ |
|
|
} \ |
|
|
return(exc); \ |
|
|
} |
|
446 |
|
|
447 |
/* LDR: Load Double-Word Right */ |
case 0x06: /* ksseg */ |
448 |
#define MTS_LDR(X) \ |
case 0x07: /* kseg3 */ |
449 |
fastcall u_int mts##X##_ldr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
/* trigger TLB exception if no matching entry found */ |
450 |
{ \ |
if (!cp0_tlb_lookup(cpu,vaddr,&map)) |
451 |
m_uint64_t r_mask,naddr; \ |
goto err_tlb; |
|
m_uint64_t data; \ |
|
|
u_int m_shift; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x07); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_LDR,8,MTS_READ,&data,&exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh64(*(m_uint64_t *)haddr); \ |
|
|
\ |
|
|
if (likely(!exc)) { \ |
|
|
m_shift = ((vaddr & 0x07) + 1) << 3; \ |
|
|
r_mask = (1ULL << m_shift) - 1; \ |
|
|
data >>= (64 - m_shift); \ |
|
|
\ |
|
|
cpu->gpr[reg] &= ~r_mask; \ |
|
|
cpu->gpr[reg] |= data; \ |
|
|
} \ |
|
|
return(exc); \ |
|
|
} |
|
452 |
|
|
453 |
/* SWL: Store Word Left */ |
if (!mts32_map(cpu,vaddr,&map,&new_entry)) |
454 |
#define MTS_SWL(X) \ |
goto err_undef; |
455 |
fastcall u_int mts##X##_swl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
break; |
456 |
{ \ |
} |
|
m_uint64_t d_mask,naddr; \ |
|
|
m_uint64_t data; \ |
|
|
u_int r_shift; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x03ULL); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWL,4,MTS_READ,&data,&exc); \ |
|
|
if (unlikely(exc)) return(exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh32(*(m_uint32_t *)haddr); \ |
|
|
\ |
|
|
r_shift = (vaddr & 0x03) << 3; \ |
|
|
d_mask = 0xffffffff >> r_shift; \ |
|
|
\ |
|
|
data &= ~d_mask; \ |
|
|
data |= (cpu->gpr[reg] & 0xffffffff) >> r_shift; \ |
|
|
\ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWL,4,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \ |
|
|
return(exc); \ |
|
|
} |
|
457 |
|
|
458 |
/* SWR: Store Word Right */ |
/* Get a new entry if necessary */ |
459 |
#define MTS_SWR(X) \ |
if (!entry) { |
460 |
fastcall u_int mts##X##_swr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
entry = mts32_alloc_entry(cpu); |
461 |
{ \ |
entry->pself = entry->pprev = NULL; |
462 |
m_uint64_t d_mask,naddr; \ |
entry->next = NULL; |
|
m_uint64_t data; \ |
|
|
u_int r_shift; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x03); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWR,4,MTS_READ,&data,&exc); \ |
|
|
if (unlikely(exc)) return(exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh32(*(m_uint32_t *)haddr); \ |
|
|
\ |
|
|
r_shift = ((vaddr & 0x03) + 1) << 3; \ |
|
|
d_mask = 0xffffffff >> r_shift; \ |
|
|
\ |
|
|
data &= d_mask; \ |
|
|
data |= (cpu->gpr[reg] << (32 - r_shift)) & 0xffffffff; \ |
|
|
\ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SWR,4,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \ |
|
|
return(exc); \ |
|
|
} |
|
463 |
|
|
464 |
/* SDL: Store Double-Word Left */ |
/* Store the entry in hash table for future use */ |
465 |
#define MTS_SDL(X) \ |
cpu->mts_cache[hash_bucket] = entry; |
466 |
fastcall u_int mts##X##_sdl(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
} else { |
467 |
{ \ |
/* Remove the entry from the reverse map list */ |
468 |
m_uint64_t d_mask,naddr; \ |
if (entry->pprev) { |
469 |
m_uint64_t data; \ |
if (entry->next) |
470 |
u_int r_shift; \ |
entry->next->pprev = entry->pprev; |
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x07); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDL,8,MTS_READ,&data,&exc); \ |
|
|
if (unlikely(exc)) return(exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh64(*(m_uint64_t *)haddr); \ |
|
|
\ |
|
|
r_shift = (vaddr & 0x07) << 3; \ |
|
|
d_mask = 0xffffffffffffffffULL >> r_shift; \ |
|
|
\ |
|
|
data &= ~d_mask; \ |
|
|
data |= cpu->gpr[reg] >> r_shift; \ |
|
|
\ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDL,8,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \ |
|
|
return(exc); \ |
|
|
} |
|
471 |
|
|
472 |
/* SDR: Store Double-Word Right */ |
*(entry->pprev) = entry->next; |
473 |
#define MTS_SDR(X) \ |
} |
474 |
fastcall u_int mts##X##_sdr(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
} |
|
{ \ |
|
|
m_uint64_t d_mask,naddr; \ |
|
|
m_uint64_t data; \ |
|
|
u_int r_shift; \ |
|
|
void *haddr; \ |
|
|
u_int exc; \ |
|
|
\ |
|
|
naddr = vaddr & ~(0x07); \ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDR,8,MTS_READ,&data,&exc); \ |
|
|
if (unlikely(exc)) return(exc); \ |
|
|
\ |
|
|
if (likely(haddr != NULL)) \ |
|
|
data = vmtoh64(*(m_uint64_t *)haddr); \ |
|
|
\ |
|
|
r_shift = ((vaddr & 0x07) + 1) << 3; \ |
|
|
d_mask = 0xffffffffffffffffULL >> r_shift; \ |
|
|
\ |
|
|
data &= d_mask; \ |
|
|
data |= cpu->gpr[reg] << (64 - r_shift); \ |
|
|
\ |
|
|
haddr = MTS_ACCESS(X)(cpu,naddr,MIPS_MEMOP_SDR,8,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \ |
|
|
return(exc); \ |
|
|
} |
|
475 |
|
|
476 |
/* LL: Load Linked */ |
/* Add this entry to the reverse map list */ |
477 |
#define MTS_LL(X) \ |
if (map.tlb_index != -1) { |
478 |
fastcall u_int mts##X##_ll(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
entry->pself = (mts32_entry_t **)&cpu->mts_cache[hash_bucket]; |
479 |
{ \ |
entry->next = cpu->mts_rmap[map.tlb_index]; |
480 |
m_uint64_t data; \ |
entry->pprev = (mts32_entry_t **)&cpu->mts_rmap[map.tlb_index]; |
481 |
void *haddr; \ |
if (entry->next) |
482 |
u_int exc; \ |
entry->next->pprev = &entry->next; |
483 |
\ |
cpu->mts_rmap[map.tlb_index] = entry; |
484 |
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_LL,4,MTS_READ,&data,&exc); \ |
} |
|
if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr); \ |
|
|
\ |
|
|
if (likely(!exc)) { \ |
|
|
cpu->gpr[reg] = sign_extend(data,32); \ |
|
|
cpu->ll_bit = 1; \ |
|
|
} \ |
|
|
\ |
|
|
return(exc); \ |
|
|
} |
|
485 |
|
|
486 |
/* SC: Store Conditional */ |
/* Fill the new entry or replace the previous */ |
487 |
#define MTS_SC(X) \ |
entry->phys_page = new_entry.phys_page; |
488 |
fastcall u_int mts##X##_sc(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
entry->start = new_entry.start; |
489 |
{ \ |
entry->mask = new_entry.mask; |
490 |
m_uint64_t data; \ |
entry->action = new_entry.action; |
491 |
void *haddr; \ |
return entry; |
|
u_int exc = 0; \ |
|
|
\ |
|
|
if (cpu->ll_bit) { \ |
|
|
data = cpu->gpr[reg] & 0xffffffff; \ |
|
|
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SC,4,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data); \ |
|
|
} \ |
|
|
\ |
|
|
if (likely(!exc)) \ |
|
|
cpu->gpr[reg] = cpu->ll_bit; \ |
|
|
return(exc); \ |
|
|
} |
|
492 |
|
|
493 |
/* SDC1: Store Double-Word from Coprocessor 1 */ |
err_undef: |
494 |
#define MTS_SDC1(X) \ |
mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc); |
495 |
fastcall u_int mts##X##_sdc1(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg) \ |
return NULL; |
496 |
{ \ |
err_address: |
497 |
m_uint64_t data; \ |
mts_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data,exc); |
498 |
void *haddr; \ |
return NULL; |
499 |
u_int exc; \ |
err_tlb: |
500 |
\ |
mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc); |
501 |
data = cpu->fpu.reg[reg]; \ |
return NULL; |
|
haddr = MTS_ACCESS(X)(cpu,vaddr,MIPS_MEMOP_SDC1,8,MTS_WRITE,&data,&exc); \ |
|
|
if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data); \ |
|
|
return(exc); \ |
|
502 |
} |
} |
503 |
|
|
504 |
/* CACHE: Cache operation */ |
/* MTS32 access */ |
505 |
fastcall u_int mts_cache(cpu_mips_t *cpu,m_uint64_t vaddr,u_int op) |
static forced_inline void *mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr, |
506 |
|
u_int op_code,u_int op_size, |
507 |
|
u_int op_type,m_uint64_t *data, |
508 |
|
u_int *exc) |
509 |
{ |
{ |
510 |
struct insn_block *block; |
m_uint32_t hash_bucket; |
511 |
m_uint32_t phys_page; |
mts32_entry_t *entry; |
512 |
|
m_iptr_t haddr; |
513 |
|
u_int dev_id; |
514 |
|
|
515 |
#if DEBUG_CACHE |
#if MEMLOG_ENABLE |
516 |
cpu_log(cpu,"MTS","CACHE: PC=0x%llx, vaddr=0x%llx, cache=%u, code=%u\n", |
/* Record the memory access */ |
517 |
cpu->pc, vaddr, op & 0x3, op >> 2); |
memlog_rec_access(cpu,vaddr,*data,op_size,op_type); |
518 |
#endif |
#endif |
519 |
|
|
520 |
if (!cpu->translate(cpu,vaddr,&phys_page)) { |
*exc = 0; |
521 |
if ((phys_page < 1048576) && cpu->exec_phys_map) { |
hash_bucket = MTS32_HASH(vaddr); |
522 |
block = cpu->exec_phys_map[phys_page]; |
entry = cpu->mts_cache[hash_bucket]; |
|
|
|
|
if (block) { |
|
|
if ((cpu->pc < block->start_pc) || |
|
|
((cpu->pc - block->start_pc) >= MIPS_MIN_PAGE_SIZE)) |
|
|
{ |
|
|
#if DEBUG_CACHE |
|
|
cpu_log(cpu,"MTS", |
|
|
"CACHE: removing compiled page at 0x%llx, pc=0x%llx\n", |
|
|
block->start_pc,cpu->pc); |
|
|
#endif |
|
|
cpu->exec_phys_map[phys_page] = NULL; |
|
|
insn_block_free(cpu,block,TRUE); |
|
|
} |
|
|
else |
|
|
{ |
|
|
#if DEBUG_CACHE |
|
|
cpu_log(cpu,"MTS", |
|
|
"CACHE: trying to remove page 0x%llx with pc=0x%llx\n", |
|
|
block->start_pc,cpu->pc); |
|
|
#endif |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
return(0); |
|
|
} |
|
523 |
|
|
524 |
/* |
#if DEBUG_MTS_STATS |
525 |
* "Instanciation" of MIPS Memory Operations. |
cpu->mts_lookups++; |
|
*/ |
|
|
MTS_MEMOP(LB) |
|
|
MTS_MEMOP(LBU) |
|
|
MTS_MEMOP(LH) |
|
|
MTS_MEMOP(LHU) |
|
|
MTS_MEMOP(LW) |
|
|
MTS_MEMOP(LWU) |
|
|
MTS_MEMOP(LD) |
|
|
MTS_MEMOP(SB) |
|
|
MTS_MEMOP(SH) |
|
|
MTS_MEMOP(SW) |
|
|
MTS_MEMOP(SD) |
|
|
MTS_MEMOP(LDC1) |
|
|
MTS_MEMOP(LWL) |
|
|
MTS_MEMOP(LWR) |
|
|
MTS_MEMOP(LDL) |
|
|
MTS_MEMOP(LDR) |
|
|
MTS_MEMOP(SWL) |
|
|
MTS_MEMOP(SWR) |
|
|
MTS_MEMOP(SDL) |
|
|
MTS_MEMOP(SDR) |
|
|
MTS_MEMOP(LL) |
|
|
MTS_MEMOP(SC) |
|
|
MTS_MEMOP(SDC1) |
|
|
|
|
|
/* Fast assembly routines */ |
|
|
#ifdef FAST_ASM |
|
|
extern fastcall u_int mts32_lw_asm(cpu_mips_t *cpu,m_uint64_t vaddr,u_int reg); |
|
526 |
#endif |
#endif |
527 |
|
|
528 |
/* === MTS32 Cache Management ============================================= */ |
/* Slow lookup if nothing found in cache */ |
529 |
|
if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) { |
530 |
/* MTS32 map/unmap/rebuild "API" functions */ |
entry = mts32_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc); |
531 |
void mts32_api_map(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t paddr, |
if (!entry) return NULL; |
532 |
m_uint32_t len,int cache_access,int tlb_index) |
} |
|
{ |
|
|
mts32_map(cpu,vaddr,paddr,len,cache_access); |
|
|
} |
|
|
|
|
|
void mts32_api_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len, |
|
|
m_uint32_t val,int tlb_index) |
|
|
{ |
|
|
mts32_unmap(cpu,vaddr,len,val); |
|
|
} |
|
|
|
|
|
void mts32_api_rebuild(cpu_mips_t *cpu) |
|
|
{ |
|
|
u_int cpu_mode; |
|
|
|
|
|
cpu_mode = cp0_get_mode(cpu); |
|
533 |
|
|
534 |
/* The complete address space gives AE (address error) */ |
/* Device access */ |
535 |
if (mts32_init_empty(cpu) == -1) |
if (unlikely(entry->action & MTS_DEV_MASK)) { |
536 |
return; |
dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT; |
537 |
|
haddr = entry->action & MTS_DEVOFF_MASK; |
538 |
/* USEG: 0x00000000 -> 0x80000000: 2 GB mapped (TLB) */ |
haddr += (m_uint32_t)vaddr - entry->start; |
|
mts32_unmap(cpu,MIPS_KUSEG_BASE,MIPS_KUSEG_SIZE,MTS_ACC_T); |
|
|
|
|
|
/* If the CPU is in Kernel Mode, activate KSEG segments */ |
|
|
if (cpu_mode == MIPS_CP0_STATUS_KM) { |
|
|
/* KSEG0 / KSEG1 : physical memory */ |
|
|
mts32_km_map_all_dev(cpu); |
|
|
|
|
|
/* KSSEG: 0xc0000000 -> 0xe0000000: 0.5GB mapped (TLB) */ |
|
|
mts32_unmap(cpu,MIPS_KSSEG_BASE,MIPS_KSSEG_SIZE,MTS_ACC_T); |
|
539 |
|
|
540 |
/* KSEG3: 0xe0000000 -> 0xffffffff: 0.5GB mapped (TLB) */ |
#if DEBUG_MTS_DEV |
541 |
mts32_unmap(cpu,MIPS_KSEG3_BASE,MIPS_KSEG3_SIZE,MTS_ACC_T); |
cpu_log(cpu,"MTS32", |
542 |
} else { |
"device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n", |
543 |
if (cpu_mode == MIPS_CP0_STATUS_SM) { |
vaddr,cpu->pc,haddr); |
544 |
/* SSEG: 0xc0000000 -> 0xe0000000: 0.5GB mapped (TLB) */ |
#endif |
545 |
mts32_unmap(cpu,MIPS_KSSEG_BASE,MIPS_KSSEG_SIZE,MTS_ACC_T); |
return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data)); |
|
} |
|
546 |
} |
} |
547 |
|
|
548 |
/* Map all TLB entries */ |
/* Raw memory access */ |
549 |
cp0_map_all_tlb_to_mts(cpu); |
haddr = entry->action & MTS_ADDR_MASK; |
550 |
|
haddr += (m_uint32_t)vaddr - entry->start; |
551 |
|
#if MEMLOG_ENABLE |
552 |
|
memlog_update_read(cpu,haddr); |
553 |
|
#endif |
554 |
|
return((void *)haddr); |
555 |
} |
} |
556 |
|
|
557 |
/* === MTS64 Cache Management ============================================= */ |
/* MTS32 virtual address to physical page translation */ |
558 |
|
static fastcall int mts32_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
559 |
/* MTS64 map/unmap/rebuild "API" functions */ |
m_uint32_t *phys_page) |
560 |
void mts64_api_map(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t paddr, |
{ |
561 |
m_uint32_t len,int cache_access,int tlb_index) |
m_uint32_t hash_bucket,offset; |
562 |
{ |
mts32_entry_t *entry; |
563 |
/* nothing to do, the cache will be filled on-the-fly */ |
m_uint64_t data = 0; |
564 |
} |
u_int exc = 0; |
565 |
|
|
566 |
|
hash_bucket = MTS32_HASH(vaddr); |
567 |
|
entry = cpu->mts_cache[hash_bucket]; |
568 |
|
|
569 |
void mts64_api_unmap(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t len, |
/* Slow lookup if nothing found in cache */ |
570 |
m_uint32_t val,int tlb_index) |
if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) { |
571 |
{ |
entry = mts32_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ, |
572 |
/* Invalidate the TLB entry or the full cache if no index is specified */ |
&data,&exc); |
573 |
if (tlb_index != -1) |
if (!entry) |
574 |
mts64_invalidate_tlb_entry(cpu,tlb_index); |
return(-1); |
575 |
else |
} |
|
mts64_invalidate_cache(cpu); |
|
|
} |
|
576 |
|
|
577 |
void mts64_api_rebuild(cpu_mips_t *cpu) |
offset = vaddr - entry->start; |
578 |
{ |
*phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT); |
579 |
mts64_invalidate_cache(cpu); |
return(0); |
580 |
} |
} |
581 |
|
|
582 |
/* ======================================================================== */ |
/* ======================================================================== */ |
583 |
|
|
584 |
/* Initialize memory 32-bit access vectors */ |
/* Shutdown MTS subsystem */ |
585 |
void mts32_init_memop_vectors(cpu_mips_t *cpu) |
void mts_shutdown(cpu_mips_t *cpu) |
586 |
{ |
{ |
587 |
/* XXX TODO: |
if (cpu->mts_shutdown != NULL) |
588 |
* - LD/SD forbidden in Supervisor/User modes with 32-bit addresses. |
cpu->mts_shutdown(cpu); |
|
*/ |
|
|
|
|
|
/* API vectors */ |
|
|
cpu->mts_map = mts32_api_map; |
|
|
cpu->mts_unmap = mts32_api_unmap; |
|
|
cpu->mts_rebuild = mts32_api_rebuild; |
|
|
|
|
|
/* memory lookup operation */ |
|
|
cpu->mem_op_lookup = mts32_lookup; |
|
|
|
|
|
/* Load Operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LB] = mts32_lb; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LBU] = mts32_lbu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LH] = mts32_lh; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LHU] = mts32_lhu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts32_lw; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWU] = mts32_lwu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LD] = mts32_ld; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts32_ldl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts32_ldr; |
|
|
|
|
|
/* Store Operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SB] = mts32_sb; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SH] = mts32_sh; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SW] = mts32_sw; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SD] = mts32_sd; |
|
|
|
|
|
/* Load Left/Right operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWL] = mts32_lwl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWR] = mts32_lwr; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts32_ldl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts32_ldr; |
|
|
|
|
|
/* Store Left/Right operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SWL] = mts32_swl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SWR] = mts32_swr; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDL] = mts32_sdl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDR] = mts32_sdr; |
|
|
|
|
|
/* LL/SC - Load Linked / Store Conditional */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LL] = mts32_ll; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SC] = mts32_sc; |
|
|
|
|
|
/* Coprocessor 1 memory access functions */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDC1] = mts32_ldc1; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDC1] = mts32_sdc1; |
|
|
|
|
|
/* Cache Operation */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_CACHE] = mts_cache; |
|
|
|
|
|
#if 0 |
|
|
#if defined(FAST_ASM) && MTSASM_ENABLE |
|
|
if (cpu->vm->jit_use) |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts32_lw_asm; |
|
|
#endif |
|
|
#endif |
|
|
} |
|
|
|
|
|
/* Initialize memory 64-bit access vectors */ |
|
|
void mts64_init_memop_vectors(cpu_mips_t *cpu) |
|
|
{ |
|
|
/* API vectors */ |
|
|
cpu->mts_map = mts64_api_map; |
|
|
cpu->mts_unmap = mts64_api_unmap; |
|
|
cpu->mts_rebuild = mts64_api_rebuild; |
|
|
|
|
|
/* memory lookup operation */ |
|
|
cpu->mem_op_lookup = mts64_lookup; |
|
|
|
|
|
cpu->translate = mts64_translate; |
|
|
|
|
|
/* Load Operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LB] = mts64_lb; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LBU] = mts64_lbu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LH] = mts64_lh; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LHU] = mts64_lhu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LW] = mts64_lw; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWU] = mts64_lwu; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LD] = mts64_ld; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts64_ldl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts64_ldr; |
|
|
|
|
|
/* Store Operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SB] = mts64_sb; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SH] = mts64_sh; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SW] = mts64_sw; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SD] = mts64_sd; |
|
|
|
|
|
/* Load Left/Right operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWL] = mts64_lwl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LWR] = mts64_lwr; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDL] = mts64_ldl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDR] = mts64_ldr; |
|
|
|
|
|
/* Store Left/Right operations */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SWL] = mts64_swl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SWR] = mts64_swr; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDL] = mts64_sdl; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDR] = mts64_sdr; |
|
|
|
|
|
/* LL/SC - Load Linked / Store Conditional */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LL] = mts64_ll; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SC] = mts64_sc; |
|
|
|
|
|
/* Coprocessor 1 memory access functions */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_LDC1] = mts64_ldc1; |
|
|
cpu->mem_op_fn[MIPS_MEMOP_SDC1] = mts64_sdc1; |
|
|
|
|
|
/* Cache Operation */ |
|
|
cpu->mem_op_fn[MIPS_MEMOP_CACHE] = mts_cache; |
|
589 |
} |
} |
590 |
|
|
591 |
/* Initialize memory access vectors */ |
/* Set the address mode */ |
592 |
void mts_init_memop_vectors(cpu_mips_t *cpu) |
int mts_set_addr_mode(cpu_mips_t *cpu,u_int addr_mode) |
593 |
{ |
{ |
594 |
/* TEST */ |
if (cpu->addr_mode != addr_mode) { |
595 |
mts64_init_memop_vectors(cpu); |
mts_shutdown(cpu); |
596 |
} |
|
597 |
|
switch(addr_mode) { |
598 |
|
case 32: |
599 |
|
mts32_init(cpu); |
600 |
|
mts32_init_memop_vectors(cpu); |
601 |
|
break; |
602 |
|
case 64: |
603 |
|
mts64_init(cpu); |
604 |
|
mts64_init_memop_vectors(cpu); |
605 |
|
break; |
606 |
|
default: |
607 |
|
fprintf(stderr, |
608 |
|
"mts_set_addr_mode: internal error (addr_mode=%u)\n", |
609 |
|
addr_mode); |
610 |
|
exit(EXIT_FAILURE); |
611 |
|
} |
612 |
|
} |
613 |
|
|
614 |
/* Shutdown MTS subsystem */ |
return(0); |
|
void mts_shutdown(cpu_mips_t *cpu) |
|
|
{ |
|
|
mts32_shutdown(cpu); |
|
|
mts64_shutdown(cpu); |
|
615 |
} |
} |
616 |
|
|
617 |
|
/* === Operations on physical memory ====================================== */ |
618 |
|
|
619 |
/* Copy a memory block from VM physical RAM to real host */ |
/* Copy a memory block from VM physical RAM to real host */ |
620 |
void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer, |
void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer, |
621 |
m_uint64_t paddr,size_t len) |
m_uint64_t paddr,size_t len) |