
Contents of /upstream/0.4.3/src/memory.c



Revision 33
Mon Oct 8 16:21:06 2007 UTC by dpavlin
File MIME type: text/plain
File size: 16481 byte(s)
0.4.3
/*
 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory.c,v 1.199 2006/10/24 09:32:48 debug Exp $
 *
 * Functions for handling the memory of an emulated machine.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"


extern int verbose;


/*
 * memory_readmax64():
 *
 * Read at most 64 bits of data from a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
uint64_t memory_readmax64(struct cpu *cpu, unsigned char *buf, int len)
{
        int i, byte_order = cpu->byte_order;
        uint64_t x = 0;

        if (len & MEM_PCI_LITTLE_ENDIAN) {
                len &= ~MEM_PCI_LITTLE_ENDIAN;
                byte_order = EMUL_LITTLE_ENDIAN;
        }

        /* Switch byte order for incoming data, if necessary: */
        if (byte_order == EMUL_BIG_ENDIAN)
                for (i=0; i<len; i++) {
                        x <<= 8;
                        x |= buf[i];
                }
        else
                for (i=len-1; i>=0; i--) {
                        x <<= 8;
                        x |= buf[i];
                }

        return x;
}


/*
 * memory_writemax64():
 *
 * Write at most 64 bits of data to a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
void memory_writemax64(struct cpu *cpu, unsigned char *buf, int len,
        uint64_t data)
{
        int i, byte_order = cpu->byte_order;

        if (len & MEM_PCI_LITTLE_ENDIAN) {
                len &= ~MEM_PCI_LITTLE_ENDIAN;
                byte_order = EMUL_LITTLE_ENDIAN;
        }

        if (byte_order == EMUL_LITTLE_ENDIAN)
                for (i=0; i<len; i++) {
                        buf[i] = data & 255;
                        data >>= 8;
                }
        else
                for (i=0; i<len; i++) {
                        buf[len - 1 - i] = data & 255;
                        data >>= 8;
                }
}
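
/*
 * Illustrative usage sketch: round-tripping a 32-bit value through
 * memory_writemax64() and memory_readmax64(). Only cpu->byte_order is
 * consulted by these helpers (as the code above shows); EMUL_BIG_ENDIAN,
 * EMUL_LITTLE_ENDIAN and MEM_PCI_LITTLE_ENDIAN are assumed to come from the
 * project headers included above. Kept inside #if 0 so it is never compiled.
 */
#if 0
static void example_readwritemax64(struct cpu *cpu)
{
        unsigned char buf[4];
        uint64_t v;

        /* Store a 32-bit value using the cpu's configured byte order: */
        memory_writemax64(cpu, buf, sizeof(buf), 0x12345678ULL);
        /* Big-endian cpu: buf = 12 34 56 78; little-endian: 78 56 34 12. */

        /* Reading it back with the same cpu restores the value: */
        v = memory_readmax64(cpu, buf, sizeof(buf));
        /* v == 0x12345678 regardless of cpu->byte_order. */

        /* ORing MEM_PCI_LITTLE_ENDIAN into the length forces a little-endian
           interpretation, whatever the cpu's byte order: */
        v = memory_readmax64(cpu, buf, sizeof(buf) | MEM_PCI_LITTLE_ENDIAN);
        (void)v;
}
#endif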


/*
 * zeroed_alloc():
 *
 * Allocates a block of memory using mmap(), and if that fails, tries
 * malloc() + memset(). The returned memory block contains only zeroes.
 */
void *zeroed_alloc(size_t s)
{
        void *p = mmap(NULL, s, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);

        /* mmap() signals failure by returning MAP_FAILED, not NULL: */
        if (p == MAP_FAILED) {
#if 1
                fprintf(stderr, "zeroed_alloc(): mmap() failed. This should"
                    " not usually happen. If you can reproduce this, then"
                    " please contact me with details about your run-time"
                    " environment.\n");
                exit(1);
#else
                p = malloc(s);
                if (p == NULL) {
                        fprintf(stderr, "out of memory\n");
                        exit(1);
                }
                memset(p, 0, s);
#endif
        }

        return p;
}


/*
 * memory_new():
 *
 * This function creates a new memory object. An emulated machine needs one
 * of these.
 */
struct memory *memory_new(uint64_t physical_max, int arch)
{
        struct memory *mem;
        int bits_per_pagetable = BITS_PER_PAGETABLE;
        int bits_per_memblock = BITS_PER_MEMBLOCK;
        int entries_per_pagetable = 1 << BITS_PER_PAGETABLE;
        int max_bits = MAX_BITS;
        size_t s;

        mem = malloc(sizeof(struct memory));
        if (mem == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        memset(mem, 0, sizeof(struct memory));

        /* Check bits_per_pagetable and bits_per_memblock for sanity: */
        if (bits_per_pagetable + bits_per_memblock != max_bits) {
                fprintf(stderr, "memory_new(): bits_per_pagetable and "
                    "bits_per_memblock mismatch\n");
                exit(1);
        }

        mem->physical_max = physical_max;
        mem->dev_dyntrans_alignment = 4095;
        if (arch == ARCH_ALPHA)
                mem->dev_dyntrans_alignment = 8191;

        s = entries_per_pagetable * sizeof(void *);

        mem->pagetable = (unsigned char *) mmap(NULL, s,
            PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
        if (mem->pagetable == (unsigned char *) MAP_FAILED) {
                mem->pagetable = malloc(s);
                if (mem->pagetable == NULL) {
                        fprintf(stderr, "out of memory\n");
                        exit(1);
                }
                memset(mem->pagetable, 0, s);
        }

        mem->mmap_dev_minaddr = 0xffffffffffffffffULL;
        mem->mmap_dev_maxaddr = 0;

        return mem;
}


/*
 * memory_points_to_string():
 *
 * Returns 1 if there's something string-like in emulated memory at address
 * addr, otherwise 0.
 */
int memory_points_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
        int min_string_length)
{
        int cur_length = 0;
        unsigned char c;

        for (;;) {
                c = '\0';
                cpu->memory_rw(cpu, mem, addr+cur_length,
                    &c, sizeof(c), MEM_READ, CACHE_NONE | NO_EXCEPTIONS);
                if (c=='\n' || c=='\t' || c=='\r' || (c>=' ' && c<127)) {
                        cur_length ++;
                        if (cur_length >= min_string_length)
                                return 1;
                } else {
                        if (cur_length >= min_string_length)
                                return 1;
                        else
                                return 0;
                }
        }
}


/*
 * memory_conv_to_string():
 *
 * Convert emulated memory contents to a string, placing it in a buffer
 * provided by the caller.
 */
char *memory_conv_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
        char *buf, int bufsize)
{
        int len = 0;
        int output_index = 0;
        unsigned char c, p='\0';

        while (output_index < bufsize-1) {
                c = '\0';
                cpu->memory_rw(cpu, mem, addr+len, &c, sizeof(c), MEM_READ,
                    CACHE_NONE | NO_EXCEPTIONS);
                buf[output_index] = c;
                if (c>=' ' && c<127) {
                        len ++;
                        output_index ++;
                } else if (c=='\n' || c=='\r' || c=='\t') {
                        len ++;
                        buf[output_index] = '\\';
                        output_index ++;
                        switch (c) {
                        case '\n': p = 'n'; break;
                        case '\r': p = 'r'; break;
                        case '\t': p = 't'; break;
                        }
                        if (output_index < bufsize-1) {
                                buf[output_index] = p;
                                output_index ++;
                        }
                } else {
                        buf[output_index] = '\0';
                        return buf;
                }
        }

        buf[bufsize-1] = '\0';
        return buf;
}
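
/*
 * Illustrative usage sketch: printing a guest string, assuming cpu and mem
 * come from elsewhere in the emulator and that addr points into emulated
 * memory. memory_points_to_string() acts as a cheap plausibility check
 * before memory_conv_to_string() copies and escapes the text. Kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static void example_dump_guest_string(struct cpu *cpu, struct memory *mem,
        uint64_t addr)
{
        char buf[80];

        /* Require at least 4 printable/whitespace characters: */
        if (!memory_points_to_string(cpu, mem, addr, 4))
                return;

        /* Control characters come back escaped as \n, \r and \t: */
        memory_conv_to_string(cpu, mem, addr, buf, sizeof(buf));
        printf("guest string at 0x%016"PRIx64": \"%s\"\n",
            (uint64_t) addr, buf);
}
#endif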


/*
 * memory_device_dyntrans_access():
 *
 * Get the lowest and highest dyntrans access since last time.
 */
void memory_device_dyntrans_access(struct cpu *cpu, struct memory *mem,
        void *extra, uint64_t *low, uint64_t *high)
{
        size_t s;
        int i, need_inval = 0;

        /* TODO: This is O(n), so it might be good to rewrite it some day.
           For now, it will be enough, as long as this function is not
           called too often. */

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (mem->devices[i].extra == extra &&
                    mem->devices[i].flags & DM_DYNTRANS_WRITE_OK &&
                    mem->devices[i].dyntrans_data != NULL) {
                        if (mem->devices[i].dyntrans_write_low != (uint64_t) -1)
                                need_inval = 1;
                        if (low != NULL)
                                *low = mem->devices[i].dyntrans_write_low;
                        mem->devices[i].dyntrans_write_low = (uint64_t) -1;

                        if (high != NULL)
                                *high = mem->devices[i].dyntrans_write_high;
                        mem->devices[i].dyntrans_write_high = 0;

                        if (!need_inval)
                                return;

                        /* Invalidate any pages of this device that might
                           be in the dyntrans load/store cache, by marking
                           the pages read-only. */
                        if (cpu->invalidate_translation_caches != NULL) {
                                for (s = *low; s <= *high;
                                    s += cpu->machine->arch_pagesize)
                                        cpu->invalidate_translation_caches
                                            (cpu, mem->devices[i].baseaddr + s,
                                            JUST_MARK_AS_NON_WRITABLE
                                            | INVALIDATE_PADDR);
                        }

                        return;
                }
        }
}


/*
 * memory_device_update_data():
 *
 * Update a device's dyntrans data pointer.
 *
 * SUPER-IMPORTANT NOTE: Anyone who changes a dyntrans data pointer while
 * things are running also needs to invalidate all CPUs' address translation
 * caches! Otherwise, these may contain old pointers to the old data.
 */
void memory_device_update_data(struct memory *mem, void *extra,
        unsigned char *data)
{
        int i;

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (mem->devices[i].extra != extra)
                        continue;

                mem->devices[i].dyntrans_data = data;
                mem->devices[i].dyntrans_write_low = (uint64_t)-1;
                mem->devices[i].dyntrans_write_high = 0;
        }
}


/*
 * memory_device_register():
 *
 * Register a memory mapped device.
 */
void memory_device_register(struct memory *mem, const char *device_name,
        uint64_t baseaddr, uint64_t len,
        int (*f)(struct cpu *,struct memory *,uint64_t,unsigned char *,
                size_t,int,void *),
        void *extra, int flags, unsigned char *dyntrans_data)
{
        int i, newi = 0;

        /*
         * Figure out at which index to insert this device, and simultaneously
         * check for collisions:
         */
        newi = -1;
        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (i == 0 && baseaddr + len <= mem->devices[i].baseaddr)
                        newi = i;
                if (i > 0 && baseaddr + len <= mem->devices[i].baseaddr &&
                    baseaddr >= mem->devices[i-1].endaddr)
                        newi = i;
                if (i == mem->n_mmapped_devices - 1 &&
                    baseaddr >= mem->devices[i].endaddr)
                        newi = i + 1;

                /* If this is not colliding with device i, then continue: */
                if (baseaddr + len <= mem->devices[i].baseaddr)
                        continue;
                if (baseaddr >= mem->devices[i].endaddr)
                        continue;

                fatal("\nERROR! \"%s\" collides with device %i (\"%s\")!\n",
                    device_name, i, mem->devices[i].name);
                exit(1);
        }
        if (mem->n_mmapped_devices == 0)
                newi = 0;
        if (newi == -1) {
                fatal("INTERNAL ERROR\n");
                exit(1);
        }

        if (verbose >= 2) {
                /* (40 bits of the physical address are displayed) */
                debug("device at 0x%010"PRIx64": %s", (uint64_t) baseaddr,
                    device_name);

                if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
                    && (baseaddr & mem->dev_dyntrans_alignment) != 0) {
                        fatal("\nWARNING: Device dyntrans access, but unaligned"
                            " baseaddr 0x%"PRIx64".\n", (uint64_t) baseaddr);
                }

                if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
                        debug(" (dyntrans %s)",
                            (flags & DM_DYNTRANS_WRITE_OK)? "R/W" : "R");
                }
                debug("\n");
        }

        for (i=0; i<mem->n_mmapped_devices; i++) {
                if (dyntrans_data == mem->devices[i].dyntrans_data &&
                    mem->devices[i].flags&(DM_DYNTRANS_OK|DM_DYNTRANS_WRITE_OK)
                    && flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
                        fatal("ERROR: the data pointer used for dyntrans "
                            "accesses must only be used once!\n");
                        fatal("(%p cannot be used by '%s'; already in use by '"
                            "%s')\n", dyntrans_data, device_name,
                            mem->devices[i].name);
                        exit(1);
                }
        }

        mem->n_mmapped_devices++;

        mem->devices = realloc(mem->devices, sizeof(struct memory_device)
            * mem->n_mmapped_devices);
        if (mem->devices == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        /* Make space for the new entry: */
        if (newi + 1 != mem->n_mmapped_devices)
                memmove(&mem->devices[newi+1], &mem->devices[newi],
                    sizeof(struct memory_device)
                    * (mem->n_mmapped_devices - newi - 1));

        mem->devices[newi].name = strdup(device_name);
        mem->devices[newi].baseaddr = baseaddr;
        mem->devices[newi].endaddr = baseaddr + len;
        mem->devices[newi].length = len;
        mem->devices[newi].flags = flags;
        mem->devices[newi].dyntrans_data = dyntrans_data;

        if (mem->devices[newi].name == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(1);
        }

        if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
            && !(flags & DM_EMULATED_RAM) && dyntrans_data == NULL) {
                fatal("\nERROR: Device dyntrans access, but dyntrans_data"
                    " = NULL!\n");
                exit(1);
        }

        if ((size_t)dyntrans_data & (sizeof(void *) - 1)) {
                fprintf(stderr, "memory_device_register():"
                    " dyntrans_data not aligned correctly (%p)\n",
                    dyntrans_data);
                exit(1);
        }

        mem->devices[newi].dyntrans_write_low = (uint64_t)-1;
        mem->devices[newi].dyntrans_write_high = 0;
        mem->devices[newi].f = f;
        mem->devices[newi].extra = extra;

        if (baseaddr < mem->mmap_dev_minaddr)
                mem->mmap_dev_minaddr = baseaddr & ~mem->dev_dyntrans_alignment;
        if (baseaddr + len > mem->mmap_dev_maxaddr)
                mem->mmap_dev_maxaddr = (((baseaddr + len) - 1) |
                    mem->dev_dyntrans_alignment) + 1;

        if (newi < mem->last_accessed_device)
                mem->last_accessed_device ++;
}
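
/*
 * Illustrative sketch of a hypothetical device (the name, base address,
 * length and register layout are invented for the example): the access
 * callback matches the function pointer type taken by
 * memory_device_register() above, and uses memory_readmax64() /
 * memory_writemax64() to convert between the raw byte buffer and a host
 * integer. MEM_WRITE, DM_DEFAULT and the "return 1 on success" convention
 * are assumed to come from the project headers and memory_rw code. Kept
 * inside #if 0 so it is never compiled.
 */
#if 0
static unsigned char example_dev_reg[0x100];

static int example_dev_access(struct cpu *cpu, struct memory *mem,
        uint64_t relative_addr, unsigned char *data, size_t len,
        int writeflag, void *extra)
{
        uint64_t idata;

        if (writeflag == MEM_WRITE) {
                idata = memory_readmax64(cpu, data, len);
                example_dev_reg[relative_addr & 0xff] = idata & 0xff;
        } else {
                idata = example_dev_reg[relative_addr & 0xff];
                memory_writemax64(cpu, data, len, idata);
        }

        return 1;
}

static void example_dev_init(struct memory *mem)
{
        memory_device_register(mem, "example_dev", 0x10001000ULL, 0x100,
            example_dev_access, NULL, DM_DEFAULT, NULL);
}
#endif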


/*
 * memory_device_remove():
 *
 * Unregister a memory mapped device from a memory object.
 */
void memory_device_remove(struct memory *mem, int i)
{
        if (i < 0 || i >= mem->n_mmapped_devices) {
                fatal("memory_device_remove(): invalid device number %i\n", i);
                exit(1);
        }

        mem->n_mmapped_devices --;

        if (i == mem->n_mmapped_devices)
                return;

        memmove(&mem->devices[i], &mem->devices[i+1],
            sizeof(struct memory_device) * (mem->n_mmapped_devices - i));

        if (i <= mem->last_accessed_device)
                mem->last_accessed_device --;
        if (mem->last_accessed_device < 0)
                mem->last_accessed_device = 0;
}


/* Generate a userland-only variant of memory_rw(), by including
   memory_rw.c with MEM_USERLAND defined: */
#define MEMORY_RW userland_memory_rw
#define MEM_USERLAND
#include "memory_rw.c"
#undef MEM_USERLAND
#undef MEMORY_RW


/*
 * memory_paddr_to_hostaddr():
 *
 * Translate a physical address into a host address. The usual way to call
 * this function is to make sure that paddr is page aligned, which will result
 * in the host _page_ corresponding to that address.
 *
 * Return value is a pointer to the address in the host, or NULL on failure.
 * On reads, a NULL return value should be interpreted as reading all zeroes.
 */
unsigned char *memory_paddr_to_hostaddr(struct memory *mem,
        uint64_t paddr, int writeflag)
{
        void **table;
        int entry;
        const int mask = (1 << BITS_PER_PAGETABLE) - 1;
        const int shrcount = MAX_BITS - BITS_PER_PAGETABLE;
        unsigned char *hostptr;

        table = mem->pagetable;
        entry = (paddr >> shrcount) & mask;

        /* printf("memory_paddr_to_hostaddr(): p=%16"PRIx64
            " w=%i => entry=0x%x\n", (uint64_t) paddr, writeflag, entry); */

        if (table[entry] == NULL) {
                size_t alloclen;

                /*
                 * Special case: reading from a nonexistent memblock
                 * returns all zeroes, and doesn't allocate anything.
                 * (If any intermediate pagetable is nonexistent, then
                 * the same thing happens):
                 */
                if (writeflag == MEM_READ)
                        return NULL;

                /* Allocate a memblock: */
                alloclen = 1 << BITS_PER_MEMBLOCK;

                /* printf(" allocating for entry %i, len=%i\n",
                    entry, alloclen); */

                /* Anonymous mmap() should return zero-filled memory;
                   try malloc() + memset() if mmap() failed. */
                table[entry] = (void *) mmap(NULL, alloclen,
                    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
                if (table[entry] == MAP_FAILED) {
                        table[entry] = malloc(alloclen);
                        if (table[entry] == NULL) {
                                fatal("out of memory\n");
                                exit(1);
                        }
                        memset(table[entry], 0, alloclen);
                }
        }

        hostptr = (unsigned char *) table[entry];

        if (hostptr != NULL)
                hostptr += (paddr & ((1 << BITS_PER_MEMBLOCK) - 1));

        return hostptr;
}
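
/*
 * Illustrative usage sketch: copying one page of emulated RAM into a host
 * buffer. A page-aligned paddr yields a pointer to the corresponding spot
 * inside a host memblock, and a NULL result on MEM_READ means the page has
 * never been written, i.e. it reads as all zeroes. pagesize is assumed to
 * be no larger than a memblock (1 << BITS_PER_MEMBLOCK). Kept inside #if 0
 * so it is never compiled.
 */
#if 0
static void example_copy_page_out(struct memory *mem, uint64_t paddr,
        unsigned char *dst, size_t pagesize)
{
        unsigned char *host = memory_paddr_to_hostaddr(mem,
            paddr & ~(uint64_t)(pagesize - 1), MEM_READ);

        if (host == NULL)
                memset(dst, 0, pagesize);       /* untouched page */
        else
                memcpy(dst, host, pagesize);
}
#endif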


#define UPDATE_CHECKSUM(value) {                                        \
        internal_state -= 0x118c7771c0c0a77fULL;                        \
        internal_state = ((internal_state + (value)) << 7) ^            \
            (checksum >> 11) ^ ((checksum - (value)) << 3) ^            \
            (internal_state - checksum) ^ ((value) - internal_state);   \
        checksum ^= internal_state;                                     \
        }


/*
 * memory_checksum():
 *
 * Calculate a 64-bit checksum of everything in a struct memory. This is
 * useful for tracking down bugs; an old (presumably working) version of
 * the emulator can be compared to a newer (buggy) version.
 */
uint64_t memory_checksum(struct memory *mem)
{
        uint64_t internal_state = 0x80624185376feff2ULL;
        uint64_t checksum = 0xcb9a87d5c010072cULL;
        const int n_entries = (1 << BITS_PER_PAGETABLE) - 1;
        const size_t len = (1 << BITS_PER_MEMBLOCK) / sizeof(uint64_t);
        size_t entry, i;

        for (entry=0; entry<=n_entries; entry++) {
                uint64_t **table = mem->pagetable;
                uint64_t *memblock = table[entry];

                if (memblock == NULL) {
                        UPDATE_CHECKSUM(0x1198ab7c8174a76fULL);
                        continue;
                }

                for (i=0; i<len; i++)
                        UPDATE_CHECKSUM(memblock[i]);
        }

        return checksum;
}

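/*
 * Illustrative usage sketch: logging the checksum at a fixed point (for
 * example once every N emulated instructions) in two different builds of
 * the emulator, then diffing the logs, is one way to see where a buggy
 * build first diverges from a known-good one. debug() is the logging
 * helper already used above in this file. Kept inside #if 0 so it is
 * never compiled.
 */
#if 0
static void example_log_memory_checksum(struct memory *mem)
{
        debug("memory checksum: 0x%016"PRIx64"\n",
            (uint64_t) memory_checksum(mem));
}
#endif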
