Contents of /upstream/0.4.4/src/memory.c

Revision 35 - Mon Oct 8 16:21:26 2007 UTC by dpavlin
File MIME type: text/plain
File size: 18055 byte(s)
0.4.4

/*
 * Copyright (C) 2003-2007 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory.c,v 1.201 2006/12/30 13:30:52 debug Exp $
 *
 * Functions for handling the memory of an emulated machine.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"


extern int verbose;
extern int quiet_mode;


/*
 * memory_readmax64():
 *
 * Read at most 64 bits of data from a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
uint64_t memory_readmax64(struct cpu *cpu, unsigned char *buf, int len)
{
        int i, byte_order = cpu->byte_order;
        uint64_t x = 0;

        if (len & MEM_PCI_LITTLE_ENDIAN) {
                len &= ~MEM_PCI_LITTLE_ENDIAN;
                byte_order = EMUL_LITTLE_ENDIAN;
        }

        /* Switch byte order for incoming data, if necessary: */
        if (byte_order == EMUL_BIG_ENDIAN)
                for (i=0; i<len; i++) {
                        x <<= 8;
                        x |= buf[i];
                }
        else
                for (i=len-1; i>=0; i--) {
                        x <<= 8;
                        x |= buf[i];
                }

        return x;
}


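/*
 * Illustrative example (not part of the original file): how byte order
 * affects the value assembled by memory_readmax64(). For a 4-byte buffer
 *
 *     unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     uint64_t v = memory_readmax64(cpu, buf, 4);
 *
 * v becomes 0x12345678 when cpu->byte_order is EMUL_BIG_ENDIAN, and
 * 0x78563412 when it is EMUL_LITTLE_ENDIAN.
 */

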
/*
 * memory_writemax64():
 *
 * Write at most 64 bits of data to a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
void memory_writemax64(struct cpu *cpu, unsigned char *buf, int len,
        uint64_t data)
{
        int i, byte_order = cpu->byte_order;

        if (len & MEM_PCI_LITTLE_ENDIAN) {
                len &= ~MEM_PCI_LITTLE_ENDIAN;
                byte_order = EMUL_LITTLE_ENDIAN;
        }

        if (byte_order == EMUL_LITTLE_ENDIAN)
                for (i=0; i<len; i++) {
                        buf[i] = data & 255;
                        data >>= 8;
                }
        else
                for (i=0; i<len; i++) {
                        buf[len - 1 - i] = data & 255;
                        data >>= 8;
                }
}


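/*
 * Illustrative example (not part of the original file): a device that is
 * little-endian regardless of the emulated CPU (e.g. PCI configuration
 * space) can OR MEM_PCI_LITTLE_ENDIAN into len to force the byte order
 * for a single access:
 *
 *     memory_writemax64(cpu, buf, 4 | MEM_PCI_LITTLE_ENDIAN, 0x12345678);
 *
 * which stores 0x78, 0x56, 0x34, 0x12 into buf[0..3] no matter what
 * cpu->byte_order is.
 */

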
/*
 * zeroed_alloc():
 *
 * Allocates a block of memory using mmap(). The returned memory block
 * contains only zeroes. (A malloc() + memset() fallback is kept below,
 * but is currently compiled out; a failed mmap() aborts instead.)
 */
void *zeroed_alloc(size_t s)
{
        void *p = mmap(NULL, s, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);

        /* Note: mmap() signals failure with MAP_FAILED, not NULL. */
        if (p == NULL || p == MAP_FAILED) {
#if 1
                fprintf(stderr, "zeroed_alloc(): mmap() failed. This should"
                    " not usually happen. If you can reproduce this, then"
                    " please contact me with details about your run-time"
                    " environment.\n");
                exit(1);
#else
                p = malloc(s);
                if (p == NULL) {
                        fprintf(stderr, "out of memory\n");
                        exit(1);
                }
                memset(p, 0, s);
#endif
        }

        return p;
}


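/*
 * Illustrative example (not part of the original file): zeroed_alloc() is
 * intended for large zero-filled buffers, e.g. a device's framebuffer:
 *
 *     unsigned char *fb = zeroed_alloc(640 * 480 * 3);
 *
 * Anonymous mmap() lets the host provide zero pages lazily, so large
 * allocations cost little until they are actually touched.
 */

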
/*
 * memory_new():
 *
 * This function creates a new memory object. An emulated machine needs one
 * of these.
 */
struct memory *memory_new(uint64_t physical_max, int arch)
{
        struct memory *mem;
        int bits_per_pagetable = BITS_PER_PAGETABLE;
        int bits_per_memblock = BITS_PER_MEMBLOCK;
        int entries_per_pagetable = 1 << BITS_PER_PAGETABLE;
        int max_bits = MAX_BITS;
        size_t s;

        mem = malloc(sizeof(struct memory));
        if (mem == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        memset(mem, 0, sizeof(struct memory));

        /* Check bits_per_pagetable and bits_per_memblock for sanity: */
        if (bits_per_pagetable + bits_per_memblock != max_bits) {
                fprintf(stderr, "memory_new(): bits_per_pagetable and "
                    "bits_per_memblock mismatch\n");
                exit(1);
        }

        mem->physical_max = physical_max;
        mem->dev_dyntrans_alignment = 4095;
        if (arch == ARCH_ALPHA)
                mem->dev_dyntrans_alignment = 8191;

        s = entries_per_pagetable * sizeof(void *);

        mem->pagetable = (unsigned char *) mmap(NULL, s,
            PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
        /* Fall back to malloc() + memset() if mmap() failed (mmap()
           signals failure with MAP_FAILED, not NULL): */
        if (mem->pagetable == NULL ||
            mem->pagetable == (unsigned char *) MAP_FAILED) {
                mem->pagetable = malloc(s);
                if (mem->pagetable == NULL) {
                        fprintf(stderr, "out of memory\n");
                        exit(1);
                }
                memset(mem->pagetable, 0, s);
        }

        mem->mmap_dev_minaddr = 0xffffffffffffffffULL;
        mem->mmap_dev_maxaddr = 0;

        return mem;
}


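/*
 * Illustrative note (not part of the original file): the pagetable created
 * here is a flat array with one pointer per possible memblock. Assuming the
 * usual settings where BITS_PER_PAGETABLE + BITS_PER_MEMBLOCK == MAX_BITS,
 * e.g. 20 + 20 == 40 bits:
 *
 *     entries_per_pagetable = 1 << 20 = 1048576 pointers
 *     s = 1048576 * sizeof(void *)    = 8 MB on a 64-bit host
 *
 * A typical call, as a sketch (the exact arguments are hypothetical):
 *
 *     struct memory *mem = memory_new(256 * 1048576, ARCH_MIPS);
 */

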
/*
 * memory_points_to_string():
 *
 * Returns 1 if there's something string-like in emulated memory at address
 * addr, otherwise 0.
 */
int memory_points_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
        int min_string_length)
{
        int cur_length = 0;
        unsigned char c;

        for (;;) {
                c = '\0';
                cpu->memory_rw(cpu, mem, addr+cur_length,
                    &c, sizeof(c), MEM_READ, CACHE_NONE | NO_EXCEPTIONS);
                if (c=='\n' || c=='\t' || c=='\r' || (c>=' ' && c<127)) {
                        cur_length ++;
                        if (cur_length >= min_string_length)
                                return 1;
                } else {
                        if (cur_length >= min_string_length)
                                return 1;
                        else
                                return 0;
                }
        }
}


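/*
 * Illustrative example (not part of the original file): callers can use this
 * to check whether a guest address plausibly points at text before dumping
 * it. A sketch (arg is a hypothetical guest pointer, e.g. a syscall
 * argument):
 *
 *     if (memory_points_to_string(cpu, cpu->mem, arg, 6)) {
 *             char tmp[80];
 *             memory_conv_to_string(cpu, cpu->mem, arg, tmp, sizeof(tmp));
 *             debug("\"%s\"", tmp);
 *     }
 */

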
/*
 * memory_conv_to_string():
 *
 * Convert emulated memory contents to a string, placing it in a buffer
 * provided by the caller.
 */
char *memory_conv_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
        char *buf, int bufsize)
{
        int len = 0;
        int output_index = 0;
        unsigned char c, p='\0';

        while (output_index < bufsize-1) {
                c = '\0';
                cpu->memory_rw(cpu, mem, addr+len, &c, sizeof(c), MEM_READ,
                    CACHE_NONE | NO_EXCEPTIONS);
                buf[output_index] = c;
                if (c>=' ' && c<127) {
                        len ++;
                        output_index ++;
                } else if (c=='\n' || c=='\r' || c=='\t') {
                        len ++;
                        buf[output_index] = '\\';
                        output_index ++;
                        switch (c) {
                        case '\n':      p = 'n'; break;
                        case '\r':      p = 'r'; break;
                        case '\t':      p = 't'; break;
                        }
                        if (output_index < bufsize-1) {
                                buf[output_index] = p;
                                output_index ++;
                        }
                } else {
                        buf[output_index] = '\0';
                        return buf;
                }
        }

        buf[bufsize-1] = '\0';
        return buf;
}


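/*
 * Illustrative example (not part of the original file): control characters
 * are escaped in the output, so if guest memory at addr holds the bytes
 * 'h', 'i', '\n', '\0', then
 *
 *     char tmp[80];
 *     memory_conv_to_string(cpu, cpu->mem, addr, tmp, sizeof(tmp));
 *
 * leaves the four visible characters  h, i, backslash, n  in tmp, followed
 * by a terminating '\0' (i.e. the C string "hi\\n").
 */

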
/*
 * memory_device_dyntrans_access():
 *
 * Get the lowest and highest dyntrans access since last time.
 */
void memory_device_dyntrans_access(struct cpu *cpu, struct memory *mem,
        void *extra, uint64_t *low, uint64_t *high)
{
        size_t s;
        int i, need_inval = 0;

        /* TODO: This is O(n), so it might be good to rewrite it some day.
           For now, it will be enough, as long as this function is not
           called too often. */

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (mem->devices[i].extra == extra &&
                    mem->devices[i].flags & DM_DYNTRANS_WRITE_OK &&
                    mem->devices[i].dyntrans_data != NULL) {
                        if (mem->devices[i].dyntrans_write_low != (uint64_t) -1)
                                need_inval = 1;
                        if (low != NULL)
                                *low = mem->devices[i].dyntrans_write_low;
                        mem->devices[i].dyntrans_write_low = (uint64_t) -1;

                        if (high != NULL)
                                *high = mem->devices[i].dyntrans_write_high;
                        mem->devices[i].dyntrans_write_high = 0;

                        if (!need_inval)
                                return;

                        /* Invalidate any pages of this device that might
                           be in the dyntrans load/store cache, by marking
                           the pages read-only. */
                        if (cpu->invalidate_translation_caches != NULL) {
                                for (s = *low; s <= *high;
                                    s += cpu->machine->arch_pagesize)
                                        cpu->invalidate_translation_caches
                                            (cpu, mem->devices[i].baseaddr + s,
                                            JUST_MARK_AS_NON_WRITABLE
                                            | INVALIDATE_PADDR);
                        }

                        return;
                }
        }
}


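/*
 * Illustrative example (not part of the original file): a device that allows
 * direct dyntrans writes into its buffer (e.g. a framebuffer) can poll the
 * dirty range from its tick function, roughly like this (d is the device's
 * hypothetical extra pointer):
 *
 *     uint64_t lo = (uint64_t) -1, hi = 0;
 *     memory_device_dyntrans_access(cpu, cpu->mem, d, &lo, &hi);
 *     if (lo != (uint64_t) -1) {
 *             ... redraw only the bytes between offsets lo and hi ...
 *     }
 *
 * lo and hi are offsets relative to the device's base address.
 */

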
/*
 * memory_device_update_data():
 *
 * Update a device's dyntrans data pointer.
 *
 * SUPER-IMPORTANT NOTE: Anyone who changes a dyntrans data pointer while
 * things are running also needs to invalidate all CPUs' address translation
 * caches! Otherwise, these may contain old pointers to the old data.
 */
void memory_device_update_data(struct memory *mem, void *extra,
        unsigned char *data)
{
        int i;

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (mem->devices[i].extra != extra)
                        continue;

                mem->devices[i].dyntrans_data = data;
                mem->devices[i].dyntrans_write_low = (uint64_t)-1;
                mem->devices[i].dyntrans_write_high = 0;
        }
}


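/*
 * Illustrative example (not part of the original file): the call sequence
 * that the SUPER-IMPORTANT NOTE above asks for, as a sketch (new_data and
 * extra are hypothetical):
 *
 *     memory_device_update_data(machine->memory, extra, new_data);
 *     for (i=0; i<machine->ncpus; i++)
 *             if (machine->cpus[i]->invalidate_translation_caches != NULL)
 *                     machine->cpus[i]->invalidate_translation_caches(
 *                         machine->cpus[i], 0, INVALIDATE_ALL);
 */

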
/*
 * memory_device_register():
 *
 * Register a memory mapped device.
 */
void memory_device_register(struct memory *mem, const char *device_name,
        uint64_t baseaddr, uint64_t len,
        int (*f)(struct cpu *,struct memory *,uint64_t,unsigned char *,
        size_t,int,void *),
        void *extra, int flags, unsigned char *dyntrans_data)
{
        int i, newi = 0;

        /*
         * Figure out at which index to insert this device, and simultaneously
         * check for collisions:
         */
        newi = -1;
        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (i == 0 && baseaddr + len <= mem->devices[i].baseaddr)
                        newi = i;
                if (i > 0 && baseaddr + len <= mem->devices[i].baseaddr &&
                    baseaddr >= mem->devices[i-1].endaddr)
                        newi = i;
                if (i == mem->n_mmapped_devices - 1 &&
                    baseaddr >= mem->devices[i].endaddr)
                        newi = i + 1;

                /* If this is not colliding with device i, then continue: */
                if (baseaddr + len <= mem->devices[i].baseaddr)
                        continue;
                if (baseaddr >= mem->devices[i].endaddr)
                        continue;

                fatal("\nERROR! \"%s\" collides with device %i (\"%s\")!\n",
                    device_name, i, mem->devices[i].name);
                exit(1);
        }
        if (mem->n_mmapped_devices == 0)
                newi = 0;
        if (newi == -1) {
                fatal("INTERNAL ERROR\n");
                exit(1);
        }

        if (verbose >= 2) {
                /* (40 bits of physical address is displayed) */
                debug("device at 0x%010"PRIx64": %s", (uint64_t) baseaddr,
                    device_name);

                if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
                    && (baseaddr & mem->dev_dyntrans_alignment) != 0) {
                        fatal("\nWARNING: Device dyntrans access, but unaligned"
                            " baseaddr 0x%"PRIx64".\n", (uint64_t) baseaddr);
                }

                if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
                        debug(" (dyntrans %s)",
                            (flags & DM_DYNTRANS_WRITE_OK)? "R/W" : "R");
                }
                debug("\n");
        }

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (dyntrans_data == mem->devices[i].dyntrans_data &&
                    mem->devices[i].flags&(DM_DYNTRANS_OK|DM_DYNTRANS_WRITE_OK)
                    && flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
                        fatal("ERROR: the data pointer used for dyntrans "
                            "accesses must only be used once!\n");
                        fatal("(%p cannot be used by '%s'; already in use by '"
                            "%s')\n", dyntrans_data, device_name,
                            mem->devices[i].name);
                        exit(1);
                }
        }

        mem->n_mmapped_devices++;

        mem->devices = realloc(mem->devices, sizeof(struct memory_device)
            * mem->n_mmapped_devices);
        if (mem->devices == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        /* Make space for the new entry: */
        if (newi + 1 != mem->n_mmapped_devices)
                memmove(&mem->devices[newi+1], &mem->devices[newi],
                    sizeof(struct memory_device)
                    * (mem->n_mmapped_devices - newi - 1));

        mem->devices[newi].name = strdup(device_name);
        mem->devices[newi].baseaddr = baseaddr;
        mem->devices[newi].endaddr = baseaddr + len;
        mem->devices[newi].length = len;
        mem->devices[newi].flags = flags;
        mem->devices[newi].dyntrans_data = dyntrans_data;

        if (mem->devices[newi].name == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
            && !(flags & DM_EMULATED_RAM) && dyntrans_data == NULL) {
                fatal("\nERROR: Device dyntrans access, but dyntrans_data"
                    " = NULL!\n");
                exit(1);
        }

        if ((size_t)dyntrans_data & (sizeof(void *) - 1)) {
                fprintf(stderr, "memory_device_register():"
                    " dyntrans_data not aligned correctly (%p)\n",
                    dyntrans_data);
                exit(1);
        }

        mem->devices[newi].dyntrans_write_low = (uint64_t)-1;
        mem->devices[newi].dyntrans_write_high = 0;
        mem->devices[newi].f = f;
        mem->devices[newi].extra = extra;

        if (baseaddr < mem->mmap_dev_minaddr)
                mem->mmap_dev_minaddr = baseaddr & ~mem->dev_dyntrans_alignment;
        if (baseaddr + len > mem->mmap_dev_maxaddr)
                mem->mmap_dev_maxaddr = (((baseaddr + len) - 1) |
                    mem->dev_dyntrans_alignment) + 1;

        if (newi < mem->last_accessed_device)
                mem->last_accessed_device ++;
}


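/*
 * Illustrative example (not part of the original file): roughly how a device
 * registers itself from its devinit function. dev_foo_access, d and
 * DEV_FOO_LENGTH are hypothetical names:
 *
 *     memory_device_register(machine->memory, "foo", baseaddr,
 *         DEV_FOO_LENGTH, dev_foo_access, (void *) d, DM_DEFAULT, NULL);
 *
 * The access callback f is later invoked by memory_rw() for every guest
 * load/store that hits the registered [baseaddr, baseaddr+len) window.
 */

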
/*
 * memory_device_remove():
 *
 * Unregister a memory mapped device from a memory object.
 */
void memory_device_remove(struct memory *mem, int i)
{
        if (i < 0 || i >= mem->n_mmapped_devices) {
                fatal("memory_device_remove(): invalid device number %i\n", i);
                exit(1);
        }

        mem->n_mmapped_devices --;

        if (i == mem->n_mmapped_devices)
                return;

        memmove(&mem->devices[i], &mem->devices[i+1],
            sizeof(struct memory_device) * (mem->n_mmapped_devices - i));

        if (i <= mem->last_accessed_device)
                mem->last_accessed_device --;
        if (mem->last_accessed_device < 0)
                mem->last_accessed_device = 0;
}


/* Instantiate a userland-only variant of the generic memory_rw code
   (memory_rw.c is a template, included with different macro settings): */
#define MEMORY_RW userland_memory_rw
#define MEM_USERLAND
#include "memory_rw.c"
#undef MEM_USERLAND
#undef MEMORY_RW


/*
 * memory_paddr_to_hostaddr():
 *
 * Translate a physical address into a host address. The usual way to call
 * this function is to make sure that paddr is page aligned, which will result
 * in the host _page_ corresponding to that address.
 *
 * Return value is a pointer to the address in the host, or NULL on failure.
 * On reads, a NULL return value should be interpreted as reading all zeroes.
 */
unsigned char *memory_paddr_to_hostaddr(struct memory *mem,
        uint64_t paddr, int writeflag)
{
        void **table;
        int entry;
        const int mask = (1 << BITS_PER_PAGETABLE) - 1;
        const int shrcount = MAX_BITS - BITS_PER_PAGETABLE;
        unsigned char *hostptr;

        table = mem->pagetable;
        entry = (paddr >> shrcount) & mask;

        /* printf("memory_paddr_to_hostaddr(): p=%16"PRIx64
           " w=%i => entry=0x%x\n", (uint64_t) paddr, writeflag, entry); */

        if (table[entry] == NULL) {
                size_t alloclen;

                /*
                 * Special case: reading from a nonexistent memblock
                 * returns all zeroes, and doesn't allocate anything.
                 * (If any intermediate pagetable is nonexistent, then
                 * the same thing happens):
                 */
                if (writeflag == MEM_READ)
                        return NULL;

                /* Allocate a memblock: */
                alloclen = 1 << BITS_PER_MEMBLOCK;

                /* printf("  allocating for entry %i, len=%i\n",
                   entry, alloclen); */

                /* Anonymous mmap() should return zero-filled memory;
                   try malloc + memset if mmap failed. (Note: mmap()
                   signals failure with MAP_FAILED, not NULL.) */
                table[entry] = (void *) mmap(NULL, alloclen,
                    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
                if (table[entry] == NULL || table[entry] == MAP_FAILED) {
                        table[entry] = malloc(alloclen);
                        if (table[entry] == NULL) {
                                fatal("out of memory\n");
                                exit(1);
                        }
                        memset(table[entry], 0, alloclen);
                }
        }

        hostptr = (unsigned char *) table[entry];

        if (hostptr != NULL)
                hostptr += (paddr & ((1 << BITS_PER_MEMBLOCK) - 1));

        return hostptr;
}


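/*
 * Illustrative note (not part of the original file): how a physical address
 * is split, assuming the typical values BITS_PER_PAGETABLE = 20,
 * BITS_PER_MEMBLOCK = 20 and MAX_BITS = 40:
 *
 *     paddr  = 0x12345678
 *     entry  = (paddr >> 20) & 0xfffff = 0x123     which 1 MB memblock
 *     offset =  paddr        & 0xfffff = 0x45678   byte within the memblock
 *
 * so the returned host pointer is table[0x123] + 0x45678.
 */

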
#define UPDATE_CHECKSUM(value) {                                        \
                internal_state -= 0x118c7771c0c0a77fULL;                \
                internal_state = ((internal_state + (value)) << 7) ^    \
                    (checksum >> 11) ^ ((checksum - (value)) << 3) ^    \
                    (internal_state - checksum) ^ ((value) - internal_state); \
                checksum ^= internal_state;                             \
        }


/*
 * memory_checksum():
 *
 * Calculate a 64-bit checksum of everything in a struct memory. This is
 * useful for tracking down bugs; an old (presumably working) version of
 * the emulator can be compared to a newer (buggy) version.
 */
uint64_t memory_checksum(struct memory *mem)
{
        uint64_t internal_state = 0x80624185376feff2ULL;
        uint64_t checksum = 0xcb9a87d5c010072cULL;
        const int n_entries = (1 << BITS_PER_PAGETABLE) - 1;
        const size_t len = (1 << BITS_PER_MEMBLOCK) / sizeof(uint64_t);
        size_t entry, i;

        for (entry=0; entry<=n_entries; entry++) {
                uint64_t **table = mem->pagetable;
                uint64_t *memblock = table[entry];

                if (memblock == NULL) {
                        UPDATE_CHECKSUM(0x1198ab7c8174a76fULL);
                        continue;
                }

                for (i=0; i<len; i++)
                        UPDATE_CHECKSUM(memblock[i]);
        }

        return checksum;
}


/*
 * memory_warn_about_unimplemented_addr():
 *
 * Called from memory_rw whenever memory outside of the physical address space
 * is accessed (and quiet_mode isn't set).
 */
void memory_warn_about_unimplemented_addr(struct cpu *cpu, struct memory *mem,
        int writeflag, uint64_t paddr, uint8_t *data, size_t len)
{
        uint64_t offset, old_pc = cpu->pc;
        char *symbol;

        /*
         * This allows guest OS kernels to probe memory a few KBs past the
         * end of memory, without giving too many warnings.
         */
        if (paddr < mem->physical_max + 0x40000)
                return;

        if (!cpu->machine->halt_on_nonexistant_memaccess && quiet_mode)
                return;

        fatal("[ memory_rw(): %s ", writeflag? "write":"read");

        if (writeflag) {
                unsigned int i;
                debug("data={");
                if (len > 16) {
                        int start2 = len-16;
                        for (i=0; i<16; i++)
                                debug("%s%02x", i?",":"", data[i]);
                        debug(" .. ");
                        if (start2 < 16)
                                start2 = 16;
                        for (i=start2; i<len; i++)
                                debug("%s%02x", i?",":"", data[i]);
                } else
                        for (i=0; i<len; i++)
                                debug("%s%02x", i?",":"", data[i]);
                debug("} ");
        }

        fatal("paddr=0x%llx >= physical_max; pc=", (long long)paddr);
        if (cpu->is_32bit)
                fatal("0x%08"PRIx32, (uint32_t) old_pc);
        else
                fatal("0x%016"PRIx64, (uint64_t) old_pc);
        symbol = get_symbol_name(&cpu->machine->symbol_context,
            old_pc, &offset);
        fatal(" <%s> ]\n", symbol? symbol : " no symbol ");

        if (cpu->machine->halt_on_nonexistant_memaccess) {
                /* TODO: Halt in a nicer way. Not possible with the
                   current dyntrans system... */
                exit(1);
        }
}
