 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory.c,v 1.199 2006/10/24 09:32:48 debug Exp $
 *
 *  Functions for handling the memory of an emulated machine.
 */
120 |
{ |
{ |
121 |
void *p = mmap(NULL, s, PROT_READ | PROT_WRITE, |
void *p = mmap(NULL, s, PROT_READ | PROT_WRITE, |
122 |
MAP_ANON | MAP_PRIVATE, -1, 0); |
MAP_ANON | MAP_PRIVATE, -1, 0); |
123 |
|
|
124 |
if (p == NULL) { |
if (p == NULL) { |
125 |
|
#if 1 |
126 |
|
fprintf(stderr, "zeroed_alloc(): mmap() failed. This should" |
127 |
|
" not usually happen. If you can reproduce this, then" |
128 |
|
" please contact me with details about your run-time" |
129 |
|
" environment.\n"); |
130 |
|
exit(1); |
131 |
|
#else |
132 |
p = malloc(s); |
p = malloc(s); |
133 |
if (p == NULL) { |
if (p == NULL) { |
134 |
fprintf(stderr, "out of memory\n"); |
fprintf(stderr, "out of memory\n"); |
135 |
exit(1); |
exit(1); |
136 |
} |
} |
137 |
memset(p, 0, s); |
memset(p, 0, s); |
138 |
|
#endif |
139 |
} |
} |
140 |
|
|
141 |
return p; |
return p; |
142 |
} |
} |
143 |
|
|
280 |
void memory_device_dyntrans_access(struct cpu *cpu, struct memory *mem, |
void memory_device_dyntrans_access(struct cpu *cpu, struct memory *mem, |
281 |
void *extra, uint64_t *low, uint64_t *high) |
void *extra, uint64_t *low, uint64_t *high) |
282 |
{ |
{ |
|
int i, j; |
|
283 |
size_t s; |
size_t s; |
284 |
int need_inval = 0; |
int i, need_inval = 0; |
285 |
|
|
286 |
/* TODO: This is O(n), so it might be good to rewrite it some day. |
/* TODO: This is O(n), so it might be good to rewrite it some day. |
287 |
For now, it will be enough, as long as this function is not |
For now, it will be enough, as long as this function is not |
288 |
called too often. */ |
called too often. */ |
289 |
|
|
290 |
for (i=0; i<mem->n_mmapped_devices; i++) { |
for (i=0; i<mem->n_mmapped_devices; i++) { |
291 |
if (mem->dev_extra[i] == extra && |
if (mem->devices[i].extra == extra && |
292 |
mem->dev_flags[i] & DM_DYNTRANS_WRITE_OK && |
mem->devices[i].flags & DM_DYNTRANS_WRITE_OK && |
293 |
mem->dev_dyntrans_data[i] != NULL) { |
mem->devices[i].dyntrans_data != NULL) { |
294 |
if (mem->dev_dyntrans_write_low[i] != (uint64_t) -1) |
if (mem->devices[i].dyntrans_write_low != (uint64_t) -1) |
295 |
need_inval = 1; |
need_inval = 1; |
296 |
if (low != NULL) |
if (low != NULL) |
297 |
*low = mem->dev_dyntrans_write_low[i]; |
*low = mem->devices[i].dyntrans_write_low; |
298 |
mem->dev_dyntrans_write_low[i] = (uint64_t) -1; |
mem->devices[i].dyntrans_write_low = (uint64_t) -1; |
299 |
|
|
300 |
if (high != NULL) |
if (high != NULL) |
301 |
*high = mem->dev_dyntrans_write_high[i]; |
*high = mem->devices[i].dyntrans_write_high; |
302 |
mem->dev_dyntrans_write_high[i] = 0; |
mem->devices[i].dyntrans_write_high = 0; |
303 |
|
|
304 |
if (!need_inval) |
if (!need_inval) |
305 |
return; |
return; |
308 |
be in the dyntrans load/store cache, by marking |
be in the dyntrans load/store cache, by marking |
309 |
the pages read-only. */ |
the pages read-only. */ |
310 |
if (cpu->invalidate_translation_caches != NULL) { |
if (cpu->invalidate_translation_caches != NULL) { |
311 |
for (s=0; s<mem->dev_length[i]; |
for (s = *low; s <= *high; |
312 |
s+=cpu->machine->arch_pagesize) |
s += cpu->machine->arch_pagesize) |
313 |
cpu->invalidate_translation_caches |
cpu->invalidate_translation_caches |
314 |
(cpu, mem->dev_baseaddr[i] + s, |
(cpu, mem->devices[i].baseaddr + s, |
315 |
JUST_MARK_AS_NON_WRITABLE |
JUST_MARK_AS_NON_WRITABLE |
316 |
| INVALIDATE_PADDR); |
| INVALIDATE_PADDR); |
317 |
} |
} |
318 |
|
|
|
if (cpu->machine->arch == ARCH_MIPS) { |
|
|
/* |
|
|
* ... and invalidate the "fast_vaddr_to_ |
|
|
* hostaddr" cache entries that contain |
|
|
* pointers to this device: (NOTE: Device i, |
|
|
* cache entry j) |
|
|
*/ |
|
|
for (j=0; j<N_BINTRANS_VADDR_TO_HOST; j++) { |
|
|
if (cpu->cd. |
|
|
mips.bintrans_data_hostpage[j] >= |
|
|
mem->dev_dyntrans_data[i] && |
|
|
cpu->cd.mips. |
|
|
bintrans_data_hostpage[j] < |
|
|
mem->dev_dyntrans_data[i] + |
|
|
mem->dev_length[i]) |
|
|
cpu->cd.mips. |
|
|
bintrans_data_hostpage[j] |
|
|
= NULL; |
|
|
} |
|
|
} |
|
319 |
return; |
return; |
320 |
} |
} |
321 |
} |
} |
323 |
|
|
324 |
|
|
325 |
/* |
/* |
326 |
|
* memory_device_update_data(): |
327 |
|
* |
328 |
|
* Update a device' dyntrans data pointer. |
329 |
|
* |
330 |
|
* SUPER-IMPORTANT NOTE: Anyone who changes a dyntrans data pointer while |
331 |
|
* things are running also needs to invalidate all CPUs' address translation |
332 |
|
* caches! Otherwise, these may contain old pointers to the old data. |
333 |
|
*/ |
334 |
|
void memory_device_update_data(struct memory *mem, void *extra, |
335 |
|
unsigned char *data) |
336 |
|
{ |
337 |
|
int i; |
338 |
|
|
339 |
|
for (i=0; i<mem->n_mmapped_devices; i++) { |
340 |
|
if (mem->devices[i].extra != extra) |
341 |
|
continue; |
342 |
|
|
343 |
|
mem->devices[i].dyntrans_data = data; |
344 |
|
mem->devices[i].dyntrans_write_low = (uint64_t)-1; |
345 |
|
mem->devices[i].dyntrans_write_high = 0; |
346 |
|
} |
347 |
|
} |
348 |
|
|
349 |
|
|
350 |
|
/* |
351 |
* memory_device_register(): |
* memory_device_register(): |
352 |
* |
* |
353 |
* Register a (memory mapped) device by adding it to the dev_* fields of a |
* Register a memory mapped device. |
|
* memory struct. |
|
354 |
*/ |
*/ |
355 |
void memory_device_register(struct memory *mem, const char *device_name, |
void memory_device_register(struct memory *mem, const char *device_name, |
356 |
uint64_t baseaddr, uint64_t len, |
uint64_t baseaddr, uint64_t len, |
360 |
{ |
{ |
361 |
int i, newi = 0; |
int i, newi = 0; |
362 |
|
|
|
if (mem->n_mmapped_devices >= MAX_DEVICES) { |
|
|
fprintf(stderr, "memory_device_register(): too many " |
|
|
"devices registered, cannot register '%s'\n", device_name); |
|
|
exit(1); |
|
|
} |
|
|
|
|
363 |
/* |
/* |
364 |
* Figure out at which index to insert this device, and simultaneously |
* Figure out at which index to insert this device, and simultaneously |
365 |
* check for collisions: |
* check for collisions: |
366 |
*/ |
*/ |
367 |
newi = -1; |
newi = -1; |
368 |
for (i=0; i<mem->n_mmapped_devices; i++) { |
for (i=0; i<mem->n_mmapped_devices; i++) { |
369 |
if (i == 0 && baseaddr + len <= mem->dev_baseaddr[i]) |
if (i == 0 && baseaddr + len <= mem->devices[i].baseaddr) |
370 |
newi = i; |
newi = i; |
371 |
if (i > 0 && baseaddr + len <= mem->dev_baseaddr[i] && |
if (i > 0 && baseaddr + len <= mem->devices[i].baseaddr && |
372 |
baseaddr >= mem->dev_endaddr[i-1]) |
baseaddr >= mem->devices[i-1].endaddr) |
373 |
newi = i; |
newi = i; |
374 |
if (i == mem->n_mmapped_devices - 1 && |
if (i == mem->n_mmapped_devices - 1 && |
375 |
baseaddr >= mem->dev_endaddr[i]) |
baseaddr >= mem->devices[i].endaddr) |
376 |
newi = i + 1; |
newi = i + 1; |
377 |
|
|
378 |
/* If we are not colliding with device i, then continue: */ |
/* If this is not colliding with device i, then continue: */ |
379 |
if (baseaddr + len <= mem->dev_baseaddr[i]) |
if (baseaddr + len <= mem->devices[i].baseaddr) |
380 |
continue; |
continue; |
381 |
if (baseaddr >= mem->dev_endaddr[i]) |
if (baseaddr >= mem->devices[i].endaddr) |
382 |
continue; |
continue; |
383 |
|
|
384 |
fatal("\nERROR! \"%s\" collides with device %i (\"%s\")!\n", |
fatal("\nERROR! \"%s\" collides with device %i (\"%s\")!\n", |
385 |
device_name, i, mem->dev_name[i]); |
device_name, i, mem->devices[i].name); |
386 |
exit(1); |
exit(1); |
387 |
} |
} |
388 |
if (mem->n_mmapped_devices == 0) |
if (mem->n_mmapped_devices == 0) |
394 |
|
|
395 |
if (verbose >= 2) { |
if (verbose >= 2) { |
396 |
/* (40 bits of physical address is displayed) */ |
/* (40 bits of physical address is displayed) */ |
397 |
debug("device at 0x%010llx: %s", (long long)baseaddr, |
debug("device at 0x%010"PRIx64": %s", (uint64_t) baseaddr, |
398 |
device_name); |
device_name); |
399 |
|
|
400 |
if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK) |
if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK) |
401 |
&& (baseaddr & mem->dev_dyntrans_alignment) != 0) { |
&& (baseaddr & mem->dev_dyntrans_alignment) != 0) { |
402 |
fatal("\nWARNING: Device dyntrans access, but unaligned" |
fatal("\nWARNING: Device dyntrans access, but unaligned" |
403 |
" baseaddr 0x%llx.\n", (long long)baseaddr); |
" baseaddr 0x%"PRIx64".\n", (uint64_t) baseaddr); |
404 |
} |
} |
405 |
|
|
406 |
if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) { |
if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) { |
411 |
} |
} |
412 |
|
|
413 |
for (i=0; i<mem->n_mmapped_devices; i++) { |
for (i=0; i<mem->n_mmapped_devices; i++) { |
414 |
if (dyntrans_data == mem->dev_dyntrans_data[i] && |
if (dyntrans_data == mem->devices[i].dyntrans_data && |
415 |
mem->dev_flags[i] & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK) |
mem->devices[i].flags&(DM_DYNTRANS_OK|DM_DYNTRANS_WRITE_OK) |
416 |
&& flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) { |
&& flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) { |
417 |
fatal("ERROR: the data pointer used for dyntrans " |
fatal("ERROR: the data pointer used for dyntrans " |
418 |
"accesses must only be used once!\n"); |
"accesses must only be used once!\n"); |
419 |
fatal("(%p cannot be used by '%s'; already in use by '" |
fatal("(%p cannot be used by '%s'; already in use by '" |
420 |
"%s')\n", dyntrans_data, device_name, |
"%s')\n", dyntrans_data, device_name, |
421 |
mem->dev_name[i]); |
mem->devices[i].name); |
422 |
exit(1); |
exit(1); |
423 |
} |
} |
424 |
} |
} |
425 |
|
|
426 |
mem->n_mmapped_devices++; |
mem->n_mmapped_devices++; |
427 |
|
|
428 |
/* |
mem->devices = realloc(mem->devices, sizeof(struct memory_device) |
429 |
* YUCK! This is ugly. TODO: fix |
* mem->n_mmapped_devices); |
430 |
*/ |
if (mem->devices == NULL) { |
431 |
|
fprintf(stderr, "out of memory\n"); |
432 |
|
exit(1); |
433 |
|
} |
434 |
|
|
435 |
/* Make space for the new entry: */ |
/* Make space for the new entry: */ |
436 |
memmove(&mem->dev_name[newi+1], &mem->dev_name[newi], sizeof(char *) * |
if (newi + 1 != mem->n_mmapped_devices) |
437 |
(MAX_DEVICES - newi - 1)); |
memmove(&mem->devices[newi+1], &mem->devices[newi], |
438 |
memmove(&mem->dev_baseaddr[newi+1], &mem->dev_baseaddr[newi], |
sizeof(struct memory_device) |
439 |
sizeof(uint64_t) * (MAX_DEVICES - newi - 1)); |
* (mem->n_mmapped_devices - newi - 1)); |
440 |
memmove(&mem->dev_endaddr[newi+1], &mem->dev_endaddr[newi], |
|
441 |
sizeof(uint64_t) * (MAX_DEVICES - newi - 1)); |
mem->devices[newi].name = strdup(device_name); |
442 |
memmove(&mem->dev_length[newi+1], &mem->dev_length[newi], |
mem->devices[newi].baseaddr = baseaddr; |
443 |
sizeof(uint64_t) * (MAX_DEVICES - newi - 1)); |
mem->devices[newi].endaddr = baseaddr + len; |
444 |
memmove(&mem->dev_flags[newi+1], &mem->dev_flags[newi], sizeof(int) * |
mem->devices[newi].length = len; |
445 |
(MAX_DEVICES - newi - 1)); |
mem->devices[newi].flags = flags; |
446 |
memmove(&mem->dev_extra[newi+1], &mem->dev_extra[newi], sizeof(void *) * |
mem->devices[newi].dyntrans_data = dyntrans_data; |
|
(MAX_DEVICES - newi - 1)); |
|
|
memmove(&mem->dev_f[newi+1], &mem->dev_f[newi], sizeof(void *) * |
|
|
(MAX_DEVICES - newi - 1)); |
|
|
memmove(&mem->dev_dyntrans_data[newi+1], &mem->dev_dyntrans_data[newi], |
|
|
sizeof(void *) * (MAX_DEVICES - newi - 1)); |
|
|
memmove(&mem->dev_dyntrans_write_low[newi+1], |
|
|
&mem->dev_dyntrans_write_low[newi], |
|
|
sizeof(uint64_t) * (MAX_DEVICES - newi - 1)); |
|
|
memmove(&mem->dev_dyntrans_write_high[newi+1], |
|
|
&mem->dev_dyntrans_write_high[newi], |
|
|
sizeof(uint64_t) * (MAX_DEVICES - newi - 1)); |
|
|
|
|
|
|
|
|
mem->dev_name[newi] = strdup(device_name); |
|
|
mem->dev_baseaddr[newi] = baseaddr; |
|
|
mem->dev_endaddr[newi] = baseaddr + len; |
|
|
mem->dev_length[newi] = len; |
|
|
mem->dev_flags[newi] = flags; |
|
|
mem->dev_dyntrans_data[newi] = dyntrans_data; |
|
447 |
|
|
448 |
if (mem->dev_name[newi] == NULL) { |
if (mem->devices[newi].name == NULL) { |
449 |
fprintf(stderr, "out of memory\n"); |
fprintf(stderr, "out of memory\n"); |
450 |
exit(1); |
exit(1); |
451 |
} |
} |
464 |
exit(1); |
exit(1); |
465 |
} |
} |
466 |
|
|
467 |
mem->dev_dyntrans_write_low[newi] = (uint64_t)-1; |
mem->devices[newi].dyntrans_write_low = (uint64_t)-1; |
468 |
mem->dev_dyntrans_write_high[newi] = 0; |
mem->devices[newi].dyntrans_write_high = 0; |
469 |
mem->dev_f[newi] = f; |
mem->devices[newi].f = f; |
470 |
mem->dev_extra[newi] = extra; |
mem->devices[newi].extra = extra; |
471 |
|
|
472 |
if (baseaddr < mem->mmap_dev_minaddr) |
if (baseaddr < mem->mmap_dev_minaddr) |
473 |
mem->mmap_dev_minaddr = baseaddr & ~mem->dev_dyntrans_alignment; |
mem->mmap_dev_minaddr = baseaddr & ~mem->dev_dyntrans_alignment; |
474 |
if (baseaddr + len > mem->mmap_dev_maxaddr) |
if (baseaddr + len > mem->mmap_dev_maxaddr) |
475 |
mem->mmap_dev_maxaddr = (((baseaddr + len) - 1) | |
mem->mmap_dev_maxaddr = (((baseaddr + len) - 1) | |
476 |
mem->dev_dyntrans_alignment) + 1; |
mem->dev_dyntrans_alignment) + 1; |
477 |
|
|
478 |
|
if (newi < mem->last_accessed_device) |
479 |
|
mem->last_accessed_device ++; |
480 |
} |
} |
481 |
|
|
482 |
|
|
483 |
/* |
/* |
484 |
* memory_device_remove(): |
* memory_device_remove(): |
485 |
* |
* |
486 |
* Unregister a (memory mapped) device from a memory struct. |
* Unregister a memory mapped device from a memory object. |
487 |
*/ |
*/ |
488 |
void memory_device_remove(struct memory *mem, int i) |
void memory_device_remove(struct memory *mem, int i) |
489 |
{ |
{ |
490 |
if (i < 0 || i >= mem->n_mmapped_devices) { |
if (i < 0 || i >= mem->n_mmapped_devices) { |
491 |
fatal("memory_device_remove(): invalid device number %i\n", i); |
fatal("memory_device_remove(): invalid device number %i\n", i); |
492 |
return; |
exit(1); |
493 |
} |
} |
494 |
|
|
495 |
mem->n_mmapped_devices --; |
mem->n_mmapped_devices --; |
497 |
if (i == mem->n_mmapped_devices) |
if (i == mem->n_mmapped_devices) |
498 |
return; |
return; |
499 |
|
|
500 |
/* |
memmove(&mem->devices[i], &mem->devices[i+1], |
501 |
* YUCK! This is ugly. TODO: fix |
sizeof(struct memory_device) * (mem->n_mmapped_devices - i)); |
|
*/ |
|
502 |
|
|
503 |
memmove(&mem->dev_name[i], &mem->dev_name[i+1], sizeof(char *) * |
if (i <= mem->last_accessed_device) |
504 |
(MAX_DEVICES - i - 1)); |
mem->last_accessed_device --; |
505 |
memmove(&mem->dev_baseaddr[i], &mem->dev_baseaddr[i+1], |
if (mem->last_accessed_device < 0) |
506 |
sizeof(uint64_t) * (MAX_DEVICES - i - 1)); |
mem->last_accessed_device = 0; |
|
memmove(&mem->dev_endaddr[i], &mem->dev_endaddr[i+1], |
|
|
sizeof(uint64_t) * (MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_length[i], &mem->dev_length[i+1], sizeof(uint64_t) * |
|
|
(MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_flags[i], &mem->dev_flags[i+1], sizeof(int) * |
|
|
(MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_extra[i], &mem->dev_extra[i+1], sizeof(void *) * |
|
|
(MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_f[i], &mem->dev_f[i+1], sizeof(void *) * |
|
|
(MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_dyntrans_data[i], &mem->dev_dyntrans_data[i+1], |
|
|
sizeof(void *) * (MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_dyntrans_write_low[i], &mem->dev_dyntrans_write_low |
|
|
[i+1], sizeof(uint64_t) * (MAX_DEVICES - i - 1)); |
|
|
memmove(&mem->dev_dyntrans_write_high[i], &mem->dev_dyntrans_write_high |
|
|
[i+1], sizeof(uint64_t) * (MAX_DEVICES - i - 1)); |
|
507 |
} |
} |
508 |
|
|
509 |
|
|
517 |
/* |
/* |
518 |
* memory_paddr_to_hostaddr(): |
* memory_paddr_to_hostaddr(): |
519 |
* |
* |
520 |
* Translate a physical address into a host address. |
* Translate a physical address into a host address. The usual way to call |
521 |
|
* this function is to make sure that paddr is page aligned, which will result |
522 |
|
* in the host _page_ corresponding to that address. |
523 |
* |
* |
524 |
* Return value is a pointer to a host memblock, or NULL on failure. |
* Return value is a pointer to the address in the host, or NULL on failure. |
525 |
* On reads, a NULL return value should be interpreted as reading all zeroes. |
* On reads, a NULL return value should be interpreted as reading all zeroes. |
526 |
*/ |
*/ |
527 |
unsigned char *memory_paddr_to_hostaddr(struct memory *mem, |
unsigned char *memory_paddr_to_hostaddr(struct memory *mem, |
531 |
int entry; |
int entry; |
532 |
const int mask = (1 << BITS_PER_PAGETABLE) - 1; |
const int mask = (1 << BITS_PER_PAGETABLE) - 1; |
533 |
const int shrcount = MAX_BITS - BITS_PER_PAGETABLE; |
const int shrcount = MAX_BITS - BITS_PER_PAGETABLE; |
534 |
|
unsigned char *hostptr; |
535 |
|
|
536 |
table = mem->pagetable; |
table = mem->pagetable; |
537 |
entry = (paddr >> shrcount) & mask; |
entry = (paddr >> shrcount) & mask; |
538 |
|
|
539 |
/* printf("memory_paddr_to_hostaddr(): p=%16llx w=%i => entry=0x%x\n", |
/* printf("memory_paddr_to_hostaddr(): p=%16"PRIx64 |
540 |
(long long)paddr, writeflag, entry); */ |
" w=%i => entry=0x%x\n", (uint64_t) paddr, writeflag, entry); */ |
541 |
|
|
542 |
if (table[entry] == NULL) { |
if (table[entry] == NULL) { |
543 |
size_t alloclen; |
size_t alloclen; |
571 |
} |
} |
572 |
} |
} |
573 |
|
|
574 |
return (unsigned char *) table[entry]; |
hostptr = (unsigned char *) table[entry]; |
575 |
|
|
576 |
|
if (hostptr != NULL) |
577 |
|
hostptr += (paddr & ((1 << BITS_PER_MEMBLOCK) - 1)); |
578 |
|
|
579 |
|
return hostptr; |
580 |
|
} |
581 |
|
|
582 |
|
|
583 |
|
#define UPDATE_CHECKSUM(value) { \ |
584 |
|
internal_state -= 0x118c7771c0c0a77fULL; \ |
585 |
|
internal_state = ((internal_state + (value)) << 7) ^ \ |
586 |
|
(checksum >> 11) ^ ((checksum - (value)) << 3) ^ \ |
587 |
|
(internal_state - checksum) ^ ((value) - internal_state); \ |
588 |
|
checksum ^= internal_state; \ |
589 |
|
} |
590 |
|
|
591 |
|
|
592 |
|
/* |
593 |
|
* memory_checksum(): |
594 |
|
* |
595 |
|
* Calculate a 64-bit checksum of everything in a struct memory. This is |
596 |
|
* useful for tracking down bugs; an old (presumably working) version of |
597 |
|
* the emulator can be compared to a newer (buggy) version. |
598 |
|
*/ |
599 |
|
uint64_t memory_checksum(struct memory *mem) |
600 |
|
{ |
601 |
|
uint64_t internal_state = 0x80624185376feff2ULL; |
602 |
|
uint64_t checksum = 0xcb9a87d5c010072cULL; |
603 |
|
const int n_entries = (1 << BITS_PER_PAGETABLE) - 1; |
604 |
|
const size_t len = (1 << BITS_PER_MEMBLOCK) / sizeof(uint64_t); |
605 |
|
size_t entry, i; |
606 |
|
|
607 |
|
for (entry=0; entry<=n_entries; entry++) { |
608 |
|
uint64_t **table = mem->pagetable; |
609 |
|
uint64_t *memblock = table[entry]; |
610 |
|
|
611 |
|
if (memblock == NULL) { |
612 |
|
UPDATE_CHECKSUM(0x1198ab7c8174a76fULL); |
613 |
|
continue; |
614 |
|
} |
615 |
|
|
616 |
|
for (i=0; i<len; i++) |
617 |
|
UPDATE_CHECKSUM(memblock[i]); |
618 |
|
} |
619 |
|
|
620 |
|
return checksum; |
621 |
} |
} |
622 |
|
|