/[gxemul]/trunk/src/memory.c

Contents of /trunk/src/memory.c



Revision 44
Mon Oct 8 16:22:56 2007 UTC by dpavlin
File MIME type: text/plain
File size: 26584 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1632 2007/09/11 21:46:35 debug Exp $
20070616	Implementing the MIPS32/64 revision 2 "ror" instruction.
20070617	Adding a struct for each physpage which keeps track of which
		ranges within that page (base offset, length) that are
		continuously translatable. When running with native code
		generation enabled (-b), a range is added after each read-
		ahead loop.
		Experimenting with using the physical program counter sample
		data (implemented 20070608) together with the "translatable
		range" information, to figure out which physical address ranges
		would be worth translating to native code (if the number of
		samples falling within a range is above a certain threshold).
20070618	Adding automagic building of .index comment files for
		src/file/, src/promemul/, src/ and src/useremul/ as well.
		Adding a "has been translated" bit to the ranges, so that only
		not-yet-translated ranges will be sampled.
20070619	Moving src/cpu.c and src/memory_rw.c into src/cpus/,
		src/device.c into src/devices/, and src/machine.c into
		src/machines/.
		Creating a skeleton cc/ld native backend module; beginning on
		the function which will detect cc command line, etc.
20070620	Continuing on the native code generation infrastructure.
20070621	Moving src/x11.c and src/console.c into a new src/console/
		subdir (for everything that is console or framebuffer related).
		Moving src/symbol*.c into a new src/symbol/, which should
		contain anything that is symbol handling related.
20070624	Making the program counter sampling threshold a "settings
		variable" (sampling_threshold), i.e. it can now be changed
		during runtime.
		Switching the RELEASE notes format from plain text to HTML.
		If the TMPDIR environment variable is set, it is used instead
		of "/tmp" for temporary files.
		Continuing on the cc/ld backend: simple .c code is generated,
		the compiler and linker are called, etc.
		Adding detection of host architecture to the configure script
		(again), and adding icache invalidation support (only
		implemented for Alpha hosts so far).
20070625	Simplifying the program counter sampling mechanism.
20070626	Removing the cc/ld native code generation stuff, program
		counter sampling, etc; it would not have worked well in the
		general case.
20070627	Removing everything related to native code generation.
20070629	Removing the (practically unusable) support for multiple
		emulations. (The single emulation allowed now still supports
		multiple simultaneous machines, as before.)
		Beginning on PCCTWO and M88K interrupts.
20070723	Adding a dummy skeleton for emulation of M32R processors.
20070901	Fixing a warning found by "gcc version 4.3.0 20070817
		(experimental)" on amd64.
20070905	Removing some more traces of the old "multiple emulations"
		code.
		Also looking in /usr/local/include and /usr/local/lib for
		X11 libs, when running configure.
20070909	Minor updates to the guest OS install instructions, in
		preparation for the NetBSD 4.0 release.
20070918	More testing of NetBSD 4.0 RC1.

/*
 * Copyright (C) 2003-2007 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory.c,v 1.206 2007/06/19 04:04:02 debug Exp $
 *
 * Functions for handling the memory of an emulated machine.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"


extern int verbose;
extern int quiet_mode;


/*
 * memory_readmax64():
 *
 * Read at most 64 bits of data from a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
uint64_t memory_readmax64(struct cpu *cpu, unsigned char *buf, int len)
{
	int i, byte_order = cpu->byte_order;
	uint64_t x = 0;

	if (len & MEM_PCI_LITTLE_ENDIAN) {
		len &= ~MEM_PCI_LITTLE_ENDIAN;
		byte_order = EMUL_LITTLE_ENDIAN;
	}

	/* Switch byte order for incoming data, if necessary: */
	if (byte_order == EMUL_BIG_ENDIAN)
		for (i=0; i<len; i++) {
			x <<= 8;
			x |= buf[i];
		}
	else
		for (i=len-1; i>=0; i--) {
			x <<= 8;
			x |= buf[i];
		}

	return x;
}


/*
 * memory_writemax64():
 *
 * Write at most 64 bits of data to a buffer. Length is given by
 * len, and the byte order by cpu->byte_order.
 *
 * This function should not be called with cpu == NULL.
 */
void memory_writemax64(struct cpu *cpu, unsigned char *buf, int len,
	uint64_t data)
{
	int i, byte_order = cpu->byte_order;

	if (len & MEM_PCI_LITTLE_ENDIAN) {
		len &= ~MEM_PCI_LITTLE_ENDIAN;
		byte_order = EMUL_LITTLE_ENDIAN;
	}

	if (byte_order == EMUL_LITTLE_ENDIAN)
		for (i=0; i<len; i++) {
			buf[i] = data & 255;
			data >>= 8;
		}
	else
		for (i=0; i<len; i++) {
			buf[len - 1 - i] = data & 255;
			data >>= 8;
		}
}

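/*
 * Example usage (illustrative sketch; "cpu" is assumed to be a valid
 * struct cpu pointer and "buf" a buffer of at least 4 bytes, set up
 * elsewhere):
 *
 *	uint64_t val = memory_readmax64(cpu, buf, 4);
 *	val |= 0x1;
 *	memory_writemax64(cpu, buf, 4, val);
 *
 * OR-ing MEM_PCI_LITTLE_ENDIAN into len forces a little-endian
 * interpretation regardless of cpu->byte_order.
 */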

/*
 * zeroed_alloc():
 *
 * Allocates a block of memory using mmap(), and if that fails, falls back
 * to malloc() + memset(). The returned memory block contains only zeroes.
 */
void *zeroed_alloc(size_t s)
{
	void *p = mmap(NULL, s, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == NULL) {
#if 1
		fprintf(stderr, "zeroed_alloc(): mmap() failed. This should"
		    " not usually happen. If you can reproduce this, then"
		    " please contact me with details about your run-time"
		    " environment.\n");
		exit(1);
#else
		CHECK_ALLOCATION(p = malloc(s));
		memset(p, 0, s);
#endif
	}

	return p;
}


/*
 * memory_new():
 *
 * This function creates a new memory object. An emulated machine needs one
 * of these.
 */
struct memory *memory_new(uint64_t physical_max, int arch)
{
	struct memory *mem;
	int bits_per_pagetable = BITS_PER_PAGETABLE;
	int bits_per_memblock = BITS_PER_MEMBLOCK;
	int entries_per_pagetable = 1 << BITS_PER_PAGETABLE;
	int max_bits = MAX_BITS;
	size_t s;

	CHECK_ALLOCATION(mem = malloc(sizeof(struct memory)));
	memset(mem, 0, sizeof(struct memory));

	/* Check bits_per_pagetable and bits_per_memblock for sanity: */
	if (bits_per_pagetable + bits_per_memblock != max_bits) {
		fprintf(stderr, "memory_new(): bits_per_pagetable and "
		    "bits_per_memblock mismatch\n");
		exit(1);
	}

	mem->physical_max = physical_max;
	mem->dev_dyntrans_alignment = 4095;
	if (arch == ARCH_ALPHA)
		mem->dev_dyntrans_alignment = 8191;

	s = entries_per_pagetable * sizeof(void *);

	mem->pagetable = (unsigned char *) mmap(NULL, s,
	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (mem->pagetable == NULL) {
		CHECK_ALLOCATION(mem->pagetable = malloc(s));
		memset(mem->pagetable, 0, s);
	}

	mem->mmap_dev_minaddr = 0xffffffffffffffffULL;
	mem->mmap_dev_maxaddr = 0;

	return mem;
}

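/*
 * Example usage (illustrative sketch; the 256 MB size is an arbitrary
 * value chosen for illustration, and ARCH_ALPHA is used only because it
 * appears above):
 *
 *	struct memory *mem = memory_new(0x10000000ULL, ARCH_ALPHA);
 *
 * The returned object holds the top-level pagetable; actual memblocks are
 * allocated lazily by memory_paddr_to_hostaddr() on first write.
 */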

/*
 * memory_points_to_string():
 *
 * Returns 1 if there's something string-like in emulated memory at address
 * addr, otherwise 0.
 */
int memory_points_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
	int min_string_length)
{
	int cur_length = 0;
	unsigned char c;

	for (;;) {
		c = '\0';
		cpu->memory_rw(cpu, mem, addr+cur_length,
		    &c, sizeof(c), MEM_READ, CACHE_NONE | NO_EXCEPTIONS);
		if (c=='\n' || c=='\t' || c=='\r' || (c>=' ' && c<127)) {
			cur_length ++;
			if (cur_length >= min_string_length)
				return 1;
		} else {
			if (cur_length >= min_string_length)
				return 1;
			else
				return 0;
		}
	}
}


/*
 * memory_conv_to_string():
 *
 * Convert emulated memory contents to a string, placing it in a buffer
 * provided by the caller.
 */
char *memory_conv_to_string(struct cpu *cpu, struct memory *mem, uint64_t addr,
	char *buf, int bufsize)
{
	int len = 0;
	int output_index = 0;
	unsigned char c, p='\0';

	while (output_index < bufsize-1) {
		c = '\0';
		cpu->memory_rw(cpu, mem, addr+len, &c, sizeof(c), MEM_READ,
		    CACHE_NONE | NO_EXCEPTIONS);
		buf[output_index] = c;
		if (c>=' ' && c<127) {
			len ++;
			output_index ++;
		} else if (c=='\n' || c=='\r' || c=='\t') {
			len ++;
			buf[output_index] = '\\';
			output_index ++;
			switch (c) {
			case '\n':	p = 'n'; break;
			case '\r':	p = 'r'; break;
			case '\t':	p = 't'; break;
			}
			if (output_index < bufsize-1) {
				buf[output_index] = p;
				output_index ++;
			}
		} else {
			buf[output_index] = '\0';
			return buf;
		}
	}

	buf[bufsize-1] = '\0';
	return buf;
}

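/*
 * Example usage (illustrative sketch; addr is assumed to point at a
 * NUL-terminated guest string):
 *
 *	char tmp[80];
 *	if (memory_points_to_string(cpu, cpu->mem, addr, 4))
 *		debug("%s\n", memory_conv_to_string(cpu, cpu->mem,
 *		    addr, tmp, sizeof(tmp)));
 *
 * Control characters are escaped as \n, \r and \t in the output buffer,
 * and the result is always NUL-terminated.
 */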

/*
 * memory_device_dyntrans_access():
 *
 * Get the lowest and highest dyntrans access since last time.
 */
void memory_device_dyntrans_access(struct cpu *cpu, struct memory *mem,
	void *extra, uint64_t *low, uint64_t *high)
{
	size_t s;
	int i, need_inval = 0;

	/* TODO: This is O(n), so it might be good to rewrite it some day.
	   For now, it will be enough, as long as this function is not
	   called too often. */

	for (i=0; i<mem->n_mmapped_devices; i++) {
		if (mem->devices[i].extra == extra &&
		    mem->devices[i].flags & DM_DYNTRANS_WRITE_OK &&
		    mem->devices[i].dyntrans_data != NULL) {
			if (mem->devices[i].dyntrans_write_low != (uint64_t) -1)
				need_inval = 1;
			if (low != NULL)
				*low = mem->devices[i].dyntrans_write_low;
			mem->devices[i].dyntrans_write_low = (uint64_t) -1;

			if (high != NULL)
				*high = mem->devices[i].dyntrans_write_high;
			mem->devices[i].dyntrans_write_high = 0;

			if (!need_inval)
				return;

			/* Invalidate any pages of this device that might
			   be in the dyntrans load/store cache, by marking
			   the pages read-only. */
			if (cpu->invalidate_translation_caches != NULL) {
				for (s = *low; s <= *high;
				    s += cpu->machine->arch_pagesize)
					cpu->invalidate_translation_caches
					    (cpu, mem->devices[i].baseaddr + s,
					    JUST_MARK_AS_NON_WRITABLE
					    | INVALIDATE_PADDR);
			}

			return;
		}
	}
}


/*
 * memory_device_update_data():
 *
 * Update a device's dyntrans data pointer.
 *
 * SUPER-IMPORTANT NOTE: Anyone who changes a dyntrans data pointer while
 * things are running also needs to invalidate all CPUs' address translation
 * caches! Otherwise, these may contain old pointers to the old data.
 */
void memory_device_update_data(struct memory *mem, void *extra,
	unsigned char *data)
{
	int i;

	for (i=0; i<mem->n_mmapped_devices; i++) {
		if (mem->devices[i].extra != extra)
			continue;

		mem->devices[i].dyntrans_data = data;
		mem->devices[i].dyntrans_write_low = (uint64_t)-1;
		mem->devices[i].dyntrans_write_high = 0;
	}
}


/*
 * memory_device_register():
 *
 * Register a memory mapped device.
 */
void memory_device_register(struct memory *mem, const char *device_name,
	uint64_t baseaddr, uint64_t len,
	int (*f)(struct cpu *,struct memory *,uint64_t,unsigned char *,
		size_t,int,void *),
	void *extra, int flags, unsigned char *dyntrans_data)
{
	int i, newi = 0;

	/*
	 * Figure out at which index to insert this device, and simultaneously
	 * check for collisions:
	 */
	newi = -1;
	for (i=0; i<mem->n_mmapped_devices; i++) {
		if (i == 0 && baseaddr + len <= mem->devices[i].baseaddr)
			newi = i;
		if (i > 0 && baseaddr + len <= mem->devices[i].baseaddr &&
		    baseaddr >= mem->devices[i-1].endaddr)
			newi = i;
		if (i == mem->n_mmapped_devices - 1 &&
		    baseaddr >= mem->devices[i].endaddr)
			newi = i + 1;

		/* If this is not colliding with device i, then continue: */
		if (baseaddr + len <= mem->devices[i].baseaddr)
			continue;
		if (baseaddr >= mem->devices[i].endaddr)
			continue;

		fatal("\nERROR! \"%s\" collides with device %i (\"%s\")!\n",
		    device_name, i, mem->devices[i].name);
		exit(1);
	}
	if (mem->n_mmapped_devices == 0)
		newi = 0;
	if (newi == -1) {
		fatal("INTERNAL ERROR\n");
		exit(1);
	}

	if (verbose >= 2) {
		/* (40 bits of physical address are displayed) */
		debug("device at 0x%010"PRIx64": %s", (uint64_t) baseaddr,
		    device_name);

		if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
		    && (baseaddr & mem->dev_dyntrans_alignment) != 0) {
			fatal("\nWARNING: Device dyntrans access, but unaligned"
			    " baseaddr 0x%"PRIx64".\n", (uint64_t) baseaddr);
		}

		if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
			debug(" (dyntrans %s)",
			    (flags & DM_DYNTRANS_WRITE_OK)? "R/W" : "R");
		}
		debug("\n");
	}

	for (i=0; i<mem->n_mmapped_devices; i++) {
		if (dyntrans_data == mem->devices[i].dyntrans_data &&
		    mem->devices[i].flags&(DM_DYNTRANS_OK|DM_DYNTRANS_WRITE_OK)
		    && flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)) {
			fatal("ERROR: the data pointer used for dyntrans "
			    "accesses must only be used once!\n");
			fatal("(%p cannot be used by '%s'; already in use by '"
			    "%s')\n", dyntrans_data, device_name,
			    mem->devices[i].name);
			exit(1);
		}
	}

	mem->n_mmapped_devices++;

	CHECK_ALLOCATION(mem->devices = realloc(mem->devices,
	    sizeof(struct memory_device) * mem->n_mmapped_devices));

	/* Make space for the new entry: */
	if (newi + 1 != mem->n_mmapped_devices)
		memmove(&mem->devices[newi+1], &mem->devices[newi],
		    sizeof(struct memory_device)
		    * (mem->n_mmapped_devices - newi - 1));

	CHECK_ALLOCATION(mem->devices[newi].name = strdup(device_name));
	mem->devices[newi].baseaddr = baseaddr;
	mem->devices[newi].endaddr = baseaddr + len;
	mem->devices[newi].length = len;
	mem->devices[newi].flags = flags;
	mem->devices[newi].dyntrans_data = dyntrans_data;

	if (flags & (DM_DYNTRANS_OK | DM_DYNTRANS_WRITE_OK)
	    && !(flags & DM_EMULATED_RAM) && dyntrans_data == NULL) {
		fatal("\nERROR: Device dyntrans access, but dyntrans_data"
		    " = NULL!\n");
		exit(1);
	}

	if ((size_t)dyntrans_data & (sizeof(void *) - 1)) {
		fprintf(stderr, "memory_device_register():"
		    " dyntrans_data not aligned correctly (%p)\n",
		    dyntrans_data);
		exit(1);
	}

	mem->devices[newi].dyntrans_write_low = (uint64_t)-1;
	mem->devices[newi].dyntrans_write_high = 0;
	mem->devices[newi].f = f;
	mem->devices[newi].extra = extra;

	if (baseaddr < mem->mmap_dev_minaddr)
		mem->mmap_dev_minaddr = baseaddr & ~mem->dev_dyntrans_alignment;
	if (baseaddr + len > mem->mmap_dev_maxaddr)
		mem->mmap_dev_maxaddr = (((baseaddr + len) - 1) |
		    mem->dev_dyntrans_alignment) + 1;

	if (newi < mem->last_accessed_device)
		mem->last_accessed_device ++;
}

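/*
 * Example usage (illustrative sketch; the handler name, base address,
 * length and DM_DEFAULT flag are made-up values chosen only to show the
 * signature expected by memory_device_register()):
 *
 *	int dev_myram_access(struct cpu *cpu, struct memory *mem,
 *	    uint64_t relative_addr, unsigned char *data, size_t len,
 *	    int writeflag, void *extra)
 *	{
 *		... handle the read or write here ...
 *		return 1;
 *	}
 *
 *	memory_device_register(machine->memory, "myram", 0x10000000,
 *	    0x1000, dev_myram_access, extra_ptr, DM_DEFAULT, NULL);
 *
 * Devices that ask for DM_DYNTRANS_OK or DM_DYNTRANS_WRITE_OK must supply
 * a correctly aligned, unique dyntrans_data pointer (see the checks above).
 */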

/*
 * memory_device_remove():
 *
 * Unregister a memory mapped device from a memory object.
 */
void memory_device_remove(struct memory *mem, int i)
{
	if (i < 0 || i >= mem->n_mmapped_devices) {
		fatal("memory_device_remove(): invalid device number %i\n", i);
		exit(1);
	}

	mem->n_mmapped_devices --;

	if (i == mem->n_mmapped_devices)
		return;

	memmove(&mem->devices[i], &mem->devices[i+1],
	    sizeof(struct memory_device) * (mem->n_mmapped_devices - i));

	if (i <= mem->last_accessed_device)
		mem->last_accessed_device --;
	if (mem->last_accessed_device < 0)
		mem->last_accessed_device = 0;
}


#define MEMORY_RW	userland_memory_rw
#define MEM_USERLAND
#include "cpus/memory_rw.c"
#undef MEM_USERLAND
#undef MEMORY_RW


/*
 * memory_paddr_to_hostaddr():
 *
 * Translate a physical address into a host address. The usual way to call
 * this function is to make sure that paddr is page aligned, which will result
 * in the host _page_ corresponding to that address.
 *
 * Return value is a pointer to the address in the host, or NULL on failure.
 * On reads, a NULL return value should be interpreted as reading all zeroes.
 */
unsigned char *memory_paddr_to_hostaddr(struct memory *mem,
	uint64_t paddr, int writeflag)
{
	void **table;
	int entry;
	const int mask = (1 << BITS_PER_PAGETABLE) - 1;
	const int shrcount = MAX_BITS - BITS_PER_PAGETABLE;
	unsigned char *hostptr;

	table = mem->pagetable;
	entry = (paddr >> shrcount) & mask;

	/* printf("memory_paddr_to_hostaddr(): p=%16"PRIx64
	   " w=%i => entry=0x%x\n", (uint64_t) paddr, writeflag, entry); */

	if (table[entry] == NULL) {
		size_t alloclen;

		/*
		 * Special case: reading from a nonexistent memblock
		 * returns all zeroes, and doesn't allocate anything.
		 * (If any intermediate pagetable is nonexistent, then
		 * the same thing happens):
		 */
		if (writeflag == MEM_READ)
			return NULL;

		/* Allocate a memblock: */
		alloclen = 1 << BITS_PER_MEMBLOCK;

		/* printf("  allocating for entry %i, len=%i\n",
		   entry, alloclen); */

		/* Anonymous mmap() should return zero-filled memory;
		   fall back to malloc() + memset() if mmap() fails. */
		table[entry] = (void *) mmap(NULL, alloclen,
		    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
		if (table[entry] == NULL) {
			CHECK_ALLOCATION(table[entry] = malloc(alloclen));
			memset(table[entry], 0, alloclen);
		}
	}

	hostptr = (unsigned char *) table[entry];

	if (hostptr != NULL)
		hostptr += (paddr & ((1 << BITS_PER_MEMBLOCK) - 1));

	return hostptr;
}

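/*
 * Sketch of the address split done above (memory_new() guarantees that
 * MAX_BITS == BITS_PER_PAGETABLE + BITS_PER_MEMBLOCK, so the shift count
 * equals BITS_PER_MEMBLOCK; the actual values come from memory.h):
 *
 *	entry = (paddr >> (MAX_BITS - BITS_PER_PAGETABLE))
 *	    & ((1 << BITS_PER_PAGETABLE) - 1);
 *	host  = table[entry] + (paddr & ((1 << BITS_PER_MEMBLOCK) - 1));
 *
 * A read from a never-written memblock returns NULL, which callers are
 * expected to treat as all-zero memory.
 */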

#define UPDATE_CHECKSUM(value) {				\
	internal_state -= 0x118c7771c0c0a77fULL;		\
	internal_state = ((internal_state + (value)) << 7) ^	\
	    (checksum >> 11) ^ ((checksum - (value)) << 3) ^	\
	    (internal_state - checksum) ^ ((value) - internal_state); \
	checksum ^= internal_state;				\
	}


/*
 * memory_checksum():
 *
 * Calculate a 64-bit checksum of everything in a struct memory. This is
 * useful for tracking down bugs; an old (presumably working) version of
 * the emulator can be compared to a newer (buggy) version.
 */
uint64_t memory_checksum(struct memory *mem)
{
	uint64_t internal_state = 0x80624185376feff2ULL;
	uint64_t checksum = 0xcb9a87d5c010072cULL;
	const size_t n_entries = (1 << BITS_PER_PAGETABLE) - 1;
	const size_t len = (1 << BITS_PER_MEMBLOCK) / sizeof(uint64_t);
	size_t entry, i;

	for (entry=0; entry<=n_entries; entry++) {
		uint64_t **table = mem->pagetable;
		uint64_t *memblock = table[entry];

		if (memblock == NULL) {
			UPDATE_CHECKSUM(0x1198ab7c8174a76fULL);
			continue;
		}

		for (i=0; i<len; i++)
			UPDATE_CHECKSUM(memblock[i]);
	}

	return checksum;
}

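/*
 * Example usage (illustrative sketch): print the checksum at some point of
 * interest in two emulator builds, and diff the output to find where their
 * emulated memory contents start to diverge.
 *
 *	debug("mem checksum = 0x%016"PRIx64"\n", memory_checksum(cpu->mem));
 */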

/*
 * memory_warn_about_unimplemented_addr():
 *
 * Called from memory_rw whenever memory outside of the physical address space
 * is accessed (and quiet_mode isn't set).
 */
void memory_warn_about_unimplemented_addr(struct cpu *cpu, struct memory *mem,
	int writeflag, uint64_t paddr, uint8_t *data, size_t len)
{
	uint64_t offset, old_pc = cpu->pc;
	char *symbol;

	/*
	 * This allows guest OS kernels to probe memory a few KBs past the
	 * end of memory, without giving too many warnings.
	 */
	if (paddr < mem->physical_max + 0x40000)
		return;

	if (!cpu->machine->halt_on_nonexistant_memaccess && quiet_mode)
		return;

	fatal("[ memory_rw(): %s ", writeflag? "write":"read");

	if (writeflag) {
		unsigned int i;
		debug("data={");
		if (len > 16) {
			int start2 = len-16;
			for (i=0; i<16; i++)
				debug("%s%02x", i?",":"", data[i]);
			debug(" .. ");
			if (start2 < 16)
				start2 = 16;
			for (i=start2; i<len; i++)
				debug("%s%02x", i?",":"", data[i]);
		} else
			for (i=0; i<len; i++)
				debug("%s%02x", i?",":"", data[i]);
		debug("} ");
	}

	fatal("paddr=0x%"PRIx64" >= physical_max; pc=", paddr);
	if (cpu->is_32bit)
		fatal("0x%08"PRIx32, (uint32_t) old_pc);
	else
		fatal("0x%016"PRIx64, (uint64_t) old_pc);
	symbol = get_symbol_name(&cpu->machine->symbol_context,
	    old_pc, &offset);
	fatal(" <%s> ]\n", symbol? symbol : " no symbol ");

	if (cpu->machine->halt_on_nonexistant_memaccess) {
		/* TODO: Halt in a nicer way. Not possible with the
		   current dyntrans system... */
		exit(1);
	}
}


/*
 * dump_mem_string():
 *
 * Dump the contents of emulated RAM as readable text. Bytes that aren't
 * readable are dumped in [xx] notation, where xx is in hexadecimal.
 * Dumping ends after DUMP_MEM_STRING_MAX bytes, or when a terminating
 * zero byte is found.
 */
#define DUMP_MEM_STRING_MAX	45
void dump_mem_string(struct cpu *cpu, uint64_t addr)
{
	int i;
	for (i=0; i<DUMP_MEM_STRING_MAX; i++) {
		unsigned char ch = '\0';

		cpu->memory_rw(cpu, cpu->mem, addr + i, &ch, sizeof(ch),
		    MEM_READ, CACHE_DATA | NO_EXCEPTIONS);
		if (ch == '\0')
			return;
		if (ch >= ' ' && ch < 126)
			debug("%c", ch);
		else
			debug("[%02x]", ch);
	}
}


/*
 * store_byte():
 *
 * Stores a byte in emulated RAM. (Helper function.)
 */
void store_byte(struct cpu *cpu, uint64_t addr, uint8_t data)
{
	if ((addr >> 32) == 0)
		addr = (int64_t)(int32_t)addr;
	cpu->memory_rw(cpu, cpu->mem,
	    addr, &data, sizeof(data), MEM_WRITE, CACHE_DATA);
}


/*
 * store_string():
 *
 * Stores chars into emulated RAM until a zero byte (string terminating
 * character) is found. The zero byte is also copied.
 * (strcpy()-like helper function, host-RAM-to-emulated-RAM.)
 */
void store_string(struct cpu *cpu, uint64_t addr, char *s)
{
	do {
		store_byte(cpu, addr++, *s);
	} while (*s++);
}


/*
 * add_environment_string():
 *
 * Like store_string(), but advances the pointer afterwards. The most
 * obvious use is to place a number of strings (such as environment variable
 * strings) after one another in emulated memory.
 */
void add_environment_string(struct cpu *cpu, char *s, uint64_t *addr)
{
	store_string(cpu, *addr, s);
	(*addr) += strlen(s) + 1;
}

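/*
 * Example usage (illustrative sketch; 0x1000 and the two strings are
 * arbitrary values chosen for illustration):
 *
 *	uint64_t addr = 0x1000;
 *	add_environment_string(cpu, "ConsoleIn=keyboard()", &addr);
 *	add_environment_string(cpu, "ConsoleOut=video()", &addr);
 *
 * After the calls, addr points just past the terminating zero byte of the
 * second string, so further strings can be appended back-to-back.
 */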

/*
 * add_environment_string_dual():
 *
 * Add "dual" environment strings, one for the variable name and one for the
 * value, and update pointers afterwards.
 */
void add_environment_string_dual(struct cpu *cpu,
	uint64_t *ptrp, uint64_t *addrp, char *s1, char *s2)
{
	uint64_t ptr = *ptrp, addr = *addrp;

	store_32bit_word(cpu, ptr, addr);
	ptr += sizeof(uint32_t);
	if (addr != 0) {
		store_string(cpu, addr, s1);
		addr += strlen(s1) + 1;
	}
	store_32bit_word(cpu, ptr, addr);
	ptr += sizeof(uint32_t);
	if (addr != 0) {
		store_string(cpu, addr, s2);
		addr += strlen(s2) + 1;
	}

	*ptrp = ptr;
	*addrp = addr;
}


/*
 * store_64bit_word():
 *
 * Stores a 64-bit word in emulated RAM. Byte order is taken into account.
 * Helper function.
 */
int store_64bit_word(struct cpu *cpu, uint64_t addr, uint64_t data64)
{
	unsigned char data[8];
	if ((addr >> 32) == 0)
		addr = (int64_t)(int32_t)addr;
	data[0] = (data64 >> 56) & 255;
	data[1] = (data64 >> 48) & 255;
	data[2] = (data64 >> 40) & 255;
	data[3] = (data64 >> 32) & 255;
	data[4] = (data64 >> 24) & 255;
	data[5] = (data64 >> 16) & 255;
	data[6] = (data64 >> 8) & 255;
	data[7] = (data64) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[7]; data[7] = tmp;
		tmp = data[1]; data[1] = data[6]; data[6] = tmp;
		tmp = data[2]; data[2] = data[5]; data[5] = tmp;
		tmp = data[3]; data[3] = data[4]; data[4] = tmp;
	}
	return cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_WRITE, CACHE_DATA);
}


/*
 * store_32bit_word():
 *
 * Stores a 32-bit word in emulated RAM. Byte order is taken into account.
 * (This function takes a 64-bit word as argument, to suppress some
 * warnings, but only the lowest 32 bits are used.)
 */
int store_32bit_word(struct cpu *cpu, uint64_t addr, uint64_t data32)
{
	unsigned char data[4];

	data[0] = (data32 >> 24) & 255;
	data[1] = (data32 >> 16) & 255;
	data[2] = (data32 >> 8) & 255;
	data[3] = (data32) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[3]; data[3] = tmp;
		tmp = data[1]; data[1] = data[2]; data[2] = tmp;
	}
	return cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_WRITE, CACHE_DATA);
}


/*
 * store_16bit_word():
 *
 * Stores a 16-bit word in emulated RAM. Byte order is taken into account.
 * (This function takes a 64-bit word as argument, to suppress some
 * warnings, but only the lowest 16 bits are used.)
 */
int store_16bit_word(struct cpu *cpu, uint64_t addr, uint64_t data16)
{
	unsigned char data[2];

	data[0] = (data16 >> 8) & 255;
	data[1] = (data16) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[1]; data[1] = tmp;
	}
	return cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_WRITE, CACHE_DATA);
}


/*
 * store_buf():
 *
 * memcpy()-like helper function, from host RAM to emulated RAM.
 */
void store_buf(struct cpu *cpu, uint64_t addr, char *s, size_t len)
{
	size_t psize = 1024;	/* 1024 256 64 16 4 1 */

	while (len != 0) {
		if ((addr & (psize-1)) == 0) {
			while (len >= psize) {
				cpu->memory_rw(cpu, cpu->mem, addr,
				    (unsigned char *)s, psize, MEM_WRITE,
				    CACHE_DATA);
				addr += psize;
				s += psize;
				len -= psize;
			}
		}
		psize >>= 2;
	}

	while (len-- != 0)
		store_byte(cpu, addr++, *s++);
}


/*
 * store_pointer_and_advance():
 *
 * Stores a 32-bit or 64-bit pointer in emulated RAM, and advances the
 * target address. (Useful for e.g. ARCBIOS environment initialization.)
 */
void store_pointer_and_advance(struct cpu *cpu, uint64_t *addrp,
	uint64_t data, int flag64)
{
	uint64_t addr = *addrp;
	if (flag64) {
		store_64bit_word(cpu, addr, data);
		addr += 8;
	} else {
		store_32bit_word(cpu, addr, data);
		addr += 4;
	}
	*addrp = addr;
}


/*
 * load_64bit_word():
 *
 * Helper function. Emulated byte order is taken into account.
 */
uint64_t load_64bit_word(struct cpu *cpu, uint64_t addr)
{
	unsigned char data[8];

	cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_READ, CACHE_DATA);

	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[7]; data[7] = tmp;
		tmp = data[1]; data[1] = data[6]; data[6] = tmp;
		tmp = data[2]; data[2] = data[5]; data[5] = tmp;
		tmp = data[3]; data[3] = data[4]; data[4] = tmp;
	}

	return
	    ((uint64_t)data[0] << 56) + ((uint64_t)data[1] << 48) +
	    ((uint64_t)data[2] << 40) + ((uint64_t)data[3] << 32) +
	    ((uint64_t)data[4] << 24) + ((uint64_t)data[5] << 16) +
	    ((uint64_t)data[6] << 8) + (uint64_t)data[7];
}


/*
 * load_32bit_word():
 *
 * Helper function. Emulated byte order is taken into account.
 */
uint32_t load_32bit_word(struct cpu *cpu, uint64_t addr)
{
	unsigned char data[4];

	cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_READ, CACHE_DATA);

	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[3]; data[3] = tmp;
		tmp = data[1]; data[1] = data[2]; data[2] = tmp;
	}

	return (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3];
}


/*
 * load_16bit_word():
 *
 * Helper function. Emulated byte order is taken into account.
 */
uint16_t load_16bit_word(struct cpu *cpu, uint64_t addr)
{
	unsigned char data[2];

	cpu->memory_rw(cpu, cpu->mem,
	    addr, data, sizeof(data), MEM_READ, CACHE_DATA);

	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[1]; data[1] = tmp;
	}

	return (data[0] << 8) + data[1];
}


/*
 * store_64bit_word_in_host():
 *
 * Stores a 64-bit word in the _host's_ RAM. Emulated byte order is taken
 * into account. This is useful when building structs in the host's RAM
 * which will later be copied into emulated RAM.
 */
void store_64bit_word_in_host(struct cpu *cpu,
	unsigned char *data, uint64_t data64)
{
	data[0] = (data64 >> 56) & 255;
	data[1] = (data64 >> 48) & 255;
	data[2] = (data64 >> 40) & 255;
	data[3] = (data64 >> 32) & 255;
	data[4] = (data64 >> 24) & 255;
	data[5] = (data64 >> 16) & 255;
	data[6] = (data64 >> 8) & 255;
	data[7] = (data64) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[7]; data[7] = tmp;
		tmp = data[1]; data[1] = data[6]; data[6] = tmp;
		tmp = data[2]; data[2] = data[5]; data[5] = tmp;
		tmp = data[3]; data[3] = data[4]; data[4] = tmp;
	}
}


/*
 * store_32bit_word_in_host():
 *
 * See comment for store_64bit_word_in_host().
 *
 * (Note: The data32 parameter is a uint64_t. This is done to suppress
 * some warnings.)
 */
void store_32bit_word_in_host(struct cpu *cpu,
	unsigned char *data, uint64_t data32)
{
	data[0] = (data32 >> 24) & 255;
	data[1] = (data32 >> 16) & 255;
	data[2] = (data32 >> 8) & 255;
	data[3] = (data32) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[3]; data[3] = tmp;
		tmp = data[1]; data[1] = data[2]; data[2] = tmp;
	}
}


/*
 * store_16bit_word_in_host():
 *
 * See comment for store_64bit_word_in_host().
 */
void store_16bit_word_in_host(struct cpu *cpu,
	unsigned char *data, uint16_t data16)
{
	data[0] = (data16 >> 8) & 255;
	data[1] = (data16) & 255;
	if (cpu->byte_order == EMUL_LITTLE_ENDIAN) {
		int tmp = data[0]; data[0] = data[1]; data[1] = tmp;
	}
}

