/[gxemul]/upstream/0.3.6/src/cpu.c

Revision 15 - Mon Oct 8 16:18:56 2007 UTC by dpavlin
File MIME type: text/plain
File size: 17035 byte(s)
0.3.6
/*
 *  Copyright (C) 2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: cpu.c,v 1.321 2005/10/03 01:07:40 debug Exp $
 *
 *  Common routines for CPU emulation. (Not specific to any CPU type.)
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <string.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"


extern int quiet_mode;
extern int show_opcode_statistics;


static struct cpu_family *first_cpu_family = NULL;


/*
 *  cpu_new():
 *
 *  Create a new cpu object. Each family is tried in sequence until a
 *  CPU family recognizes the cpu_type_name.
 */
struct cpu *cpu_new(struct memory *mem, struct machine *machine,
    int cpu_id, char *name)
{
        struct cpu *cpu;
        struct cpu_family *fp;
        char *cpu_type_name;

        if (name == NULL) {
                fprintf(stderr, "cpu_new(): cpu name = NULL?\n");
                exit(1);
        }

        cpu_type_name = strdup(name);
        if (cpu_type_name == NULL) {
                fprintf(stderr, "cpu_new(): out of memory\n");
                exit(1);
        }

        cpu = zeroed_alloc(sizeof(struct cpu));

        cpu->memory_rw = NULL;
        cpu->name = cpu_type_name;
        cpu->mem = mem;
        cpu->machine = machine;
        cpu->cpu_id = cpu_id;
        cpu->byte_order = EMUL_LITTLE_ENDIAN;
        cpu->bootstrap_cpu_flag = 0;
        cpu->running = 0;

        cpu_create_or_reset_tc(cpu);

        fp = first_cpu_family;

        while (fp != NULL) {
                if (fp->cpu_new != NULL) {
                        if (fp->cpu_new(cpu, mem, machine, cpu_id,
                            cpu_type_name)) {
                                /*  Sanity check:  */
                                if (cpu->memory_rw == NULL) {
                                        fatal("\ncpu_new(): memory_rw == "
                                            "NULL\n");
                                        exit(1);
                                }
                                return cpu;
                        }
                }

                fp = fp->next;
        }

        fatal("\ncpu_new(): unknown cpu type '%s'\n", cpu_type_name);
        exit(1);
}
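
/*
 *  (Added usage note, not part of the original file:)  Machine setup code
 *  would typically call something like
 *
 *      struct cpu *c = cpu_new(mem, machine, 0, "R4400");
 *
 *  where mem is a struct memory created elsewhere and "R4400" is only a
 *  hypothetical cpu_type_name. On success the matching family has filled in
 *  cpu->memory_rw; for an unknown type, cpu_new() never returns (it calls
 *  fatal() and exit(1)), so callers never see a NULL result.
 */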


/*
 *  cpu_show_full_statistics():
 *
 *  Show detailed statistics on opcode usage on each cpu.
 */
void cpu_show_full_statistics(struct machine *m)
{
        if (m->cpu_family == NULL ||
            m->cpu_family->show_full_statistics == NULL)
                fatal("cpu_show_full_statistics(): NULL\n");
        else
                m->cpu_family->show_full_statistics(m);
}


/*
 *  cpu_tlbdump():
 *
 *  Called from the debugger to dump the TLB in a readable format.
 *  x is the cpu number to dump, or -1 to dump all CPUs.
 *
 *  If rawflag is nonzero, then the TLB contents aren't formatted nicely,
 *  just dumped.
 */
void cpu_tlbdump(struct machine *m, int x, int rawflag)
{
        if (m->cpu_family == NULL || m->cpu_family->tlbdump == NULL)
                fatal("cpu_tlbdump(): NULL\n");
        else
                m->cpu_family->tlbdump(m, x, rawflag);
}


/*
 *  cpu_register_match():
 *
 *  Used by the debugger.
 */
void cpu_register_match(struct machine *m, char *name,
    int writeflag, uint64_t *valuep, int *match_register)
{
        if (m->cpu_family == NULL || m->cpu_family->register_match == NULL)
                fatal("cpu_register_match(): NULL\n");
        else
                m->cpu_family->register_match(m, name, writeflag,
                    valuep, match_register);
}


/*
 *  cpu_disassemble_instr():
 *
 *  Convert an instruction word into human-readable format, for instruction
 *  tracing.
 */
int cpu_disassemble_instr(struct machine *m, struct cpu *cpu,
    unsigned char *instr, int running, uint64_t addr, int bintrans)
{
        if (m->cpu_family == NULL || m->cpu_family->disassemble_instr == NULL) {
                fatal("cpu_disassemble_instr(): NULL\n");
                return 0;
        } else
                return m->cpu_family->disassemble_instr(cpu, instr,
                    running, addr, bintrans);
}


/*
 *  cpu_register_dump():
 *
 *  Dump cpu registers in a relatively readable format.
 *
 *  gprs: set to non-zero to dump GPRs. (CPU dependent.)
 *  coprocs: set bit 0..x to dump registers in coproc 0..x. (CPU dependent.)
 */
void cpu_register_dump(struct machine *m, struct cpu *cpu,
    int gprs, int coprocs)
{
        if (m->cpu_family == NULL || m->cpu_family->register_dump == NULL)
                fatal("cpu_register_dump(): NULL\n");
        else
                m->cpu_family->register_dump(cpu, gprs, coprocs);
}


/*
 *  cpu_interrupt():
 *
 *  Assert an interrupt.
 *  Return value is 1 if the interrupt was asserted, 0 otherwise.
 */
int cpu_interrupt(struct cpu *cpu, uint64_t irq_nr)
{
        if (cpu->machine->cpu_family == NULL ||
            cpu->machine->cpu_family->interrupt == NULL) {
                fatal("cpu_interrupt(): NULL\n");
                return 0;
        } else
                return cpu->machine->cpu_family->interrupt(cpu, irq_nr);
}


/*
 *  cpu_interrupt_ack():
 *
 *  Acknowledge an interrupt.
 *  Return value is 1 if the interrupt was deasserted, 0 otherwise.
 */
int cpu_interrupt_ack(struct cpu *cpu, uint64_t irq_nr)
{
        if (cpu->machine->cpu_family == NULL ||
            cpu->machine->cpu_family->interrupt_ack == NULL) {
                /*  debug("cpu_interrupt_ack(): NULL\n");  */
                return 0;
        } else
                return cpu->machine->cpu_family->interrupt_ack(cpu, irq_nr);
}
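
/*
 *  (Added note, not in the original file:)  Device emulation code generally
 *  calls these two in pairs: cpu_interrupt(cpu, irq_nr) to raise an interrupt
 *  line and cpu_interrupt_ack(cpu, irq_nr) to lower it again. Both are thin
 *  wrappers that dispatch to the handlers registered by the machine's
 *  cpu_family, so the meaning of irq_nr is architecture specific.
 */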


/*
 *  cpu_functioncall_trace():
 *
 *  This function should be called if machine->show_trace_tree is enabled, and
 *  a function call is being made. f contains the address of the function.
 */
void cpu_functioncall_trace(struct cpu *cpu, uint64_t f)
{
        int i, n_args = -1;
        char *symbol;
        uint64_t offset;

        if (cpu->machine->ncpus > 1)
                fatal("cpu%i:\t", cpu->cpu_id);

        cpu->trace_tree_depth ++;
        if (cpu->trace_tree_depth > 100)
                cpu->trace_tree_depth = 100;
        for (i=0; i<cpu->trace_tree_depth; i++)
                fatal(" ");

        fatal("<");
        symbol = get_symbol_name_and_n_args(&cpu->machine->symbol_context,
            f, &offset, &n_args);
        if (symbol != NULL)
                fatal("%s", symbol);
        else {
                if (cpu->is_32bit)
                        fatal("0x%08x", (int)f);
                else
                        fatal("0x%llx", (long long)f);
        }
        fatal("(");

        if (cpu->machine->cpu_family->functioncall_trace != NULL)
                cpu->machine->cpu_family->functioncall_trace(cpu, f, n_args);

        fatal(")>\n");
}


/*
 *  cpu_functioncall_trace_return():
 *
 *  This function should be called if machine->show_trace_tree is enabled, and
 *  a function is being returned from.
 *
 *  TODO: Print return value? This could be implemented similarly to the
 *  cpu->functioncall_trace function call above.
 */
void cpu_functioncall_trace_return(struct cpu *cpu)
{
        cpu->trace_tree_depth --;
        if (cpu->trace_tree_depth < 0)
                cpu->trace_tree_depth = 0;
}
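
/*
 *  (Added note, not in the original file:)  Together these two produce the
 *  indented call-tree output seen when machine->show_trace_tree is enabled,
 *  roughly of the form
 *
 *      <main(...)>
 *       <some_function(...)>
 *        <0x80001234(...)>
 *
 *  where the argument list between the parentheses is printed by the CPU
 *  family's own functioncall_trace handler, and addresses with no symbol are
 *  shown as raw hex values. The sample names above are illustrative only.
 */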


/*
 *  cpu_create_or_reset_tc():
 *
 *  Create the translation cache in memory (i.e., allocate memory for it), if
 *  necessary, and then reset it to an initial state.
 */
void cpu_create_or_reset_tc(struct cpu *cpu)
{
        if (cpu->translation_cache == NULL)
                cpu->translation_cache = zeroed_alloc(DYNTRANS_CACHE_SIZE +
                    DYNTRANS_CACHE_MARGIN);

        /*  Create an empty table at the beginning of the translation cache:  */
        memset(cpu->translation_cache, 0, sizeof(uint32_t)
            * N_BASE_TABLE_ENTRIES);

        cpu->translation_cache_cur_ofs =
            N_BASE_TABLE_ENTRIES * sizeof(uint32_t);

        /*
         *  There might be other translation pointers that still point to
         *  within the translation_cache region. Let's invalidate those too:
         */
        if (cpu->invalidate_code_translation != NULL)
                cpu->invalidate_code_translation(cpu, 0, INVALIDATE_ALL);
}
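
/*
 *  (Added note, not in the original file:)  The layout this sets up is simply
 *  a base table of N_BASE_TABLE_ENTRIES uint32_t entries at the start of
 *  translation_cache, with translation_cache_cur_ofs pointing at the first
 *  free byte just past that table. A "reset" therefore only zeroes the base
 *  table and rewinds the allocation offset; the extra DYNTRANS_CACHE_MARGIN
 *  bytes give headroom at the end of the cache.
 */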


/*
 *  cpu_run():
 *
 *  Run instructions on all CPUs in this machine, for a "medium duration"
 *  (or until all CPUs have halted).
 *
 *  Return value is 1 if anything happened, 0 if all CPUs are stopped.
 */
int cpu_run(struct emul *emul, struct machine *m)
{
        if (m->cpu_family == NULL || m->cpu_family->run == NULL) {
                fatal("cpu_run(): NULL\n");
                return 0;
        } else
                return m->cpu_family->run(emul, m);
}


/*
 *  cpu_dumpinfo():
 *
 *  Dumps info about a CPU using debug(). "cpu0: CPUNAME, running" (or similar)
 *  is output, and it is up to CPU-dependent code to complete the line.
 */
void cpu_dumpinfo(struct machine *m, struct cpu *cpu)
{
        debug("cpu%i: %s, %s", cpu->cpu_id, cpu->name,
            cpu->running? "running" : "stopped");

        if (m->cpu_family == NULL || m->cpu_family->dumpinfo == NULL)
                fatal("cpu_dumpinfo(): NULL\n");
        else
                m->cpu_family->dumpinfo(cpu);
}


/*
 *  cpu_list_available_types():
 *
 *  Print a list of available CPU types for each cpu family.
 */
void cpu_list_available_types(void)
{
        struct cpu_family *fp;
        int iadd = 4;

        fp = first_cpu_family;

        if (fp == NULL) {
                debug("No CPUs defined!\n");
                return;
        }

        while (fp != NULL) {
                debug("%s:\n", fp->name);
                debug_indentation(iadd);
                if (fp->list_available_types != NULL)
                        fp->list_available_types();
                else
                        debug("(internal error: list_available_types"
                            " = NULL)\n");
                debug_indentation(-iadd);

                fp = fp->next;
        }
}


/*
 *  cpu_run_deinit():
 *
 *  Shuts down all CPUs in a machine when ending a simulation. (This function
 *  should only need to be called once for each machine.)
 */
void cpu_run_deinit(struct machine *machine)
{
        int te;

        /*
         *  Run the last two ticks of every hardware device. This allows
         *  framebuffers to draw their final updates to the screen before
         *  halting.
         */
        for (te=0; te<machine->n_tick_entries; te++) {
                machine->tick_func[te](machine->cpus[0],
                    machine->tick_extra[te]);
                machine->tick_func[te](machine->cpus[0],
                    machine->tick_extra[te]);
        }

        debug("cpu_run_deinit(): All CPUs halted.\n");

        if (machine->show_nr_of_instructions || !quiet_mode)
                cpu_show_cycles(machine, 1);

        if (show_opcode_statistics)
                cpu_show_full_statistics(machine);

        fflush(stdout);
}


/*
 *  cpu_show_cycles():
 *
 *  If automatic adjustment of clock interrupts is turned on, then recalculate
 *  emulated_hz. Also, if show_nr_of_instructions is on, then print a
 *  line to stdout about how many instructions/cycles have been executed so
 *  far.
 */
void cpu_show_cycles(struct machine *machine, int forced)
{
        uint64_t offset, pc;
        char *symbol;
        int64_t mseconds, ninstrs, is, avg;
        struct timeval tv;
        int h, m, s, ms, d, instrs_per_cycle = 1;

        static int64_t mseconds_last = 0;
        static int64_t ninstrs_last = -1;

        switch (machine->arch) {
        case ARCH_MIPS:
                instrs_per_cycle = machine->cpus[machine->bootstrap_cpu]->
                    cd.mips.cpu_type.instrs_per_cycle;
                break;
        }

        pc = machine->cpus[machine->bootstrap_cpu]->pc;

        gettimeofday(&tv, NULL);
        mseconds = (tv.tv_sec - machine->starttime.tv_sec) * 1000
            + (tv.tv_usec - machine->starttime.tv_usec) / 1000;

        if (mseconds == 0)
                mseconds = 1;

        if (mseconds - mseconds_last == 0)
                mseconds ++;

        ninstrs = machine->ncycles_since_gettimeofday * instrs_per_cycle;

        if (machine->automatic_clock_adjustment) {
                static int first_adjustment = 1;

                /*  Current nr of cycles per second:  */
                int64_t cur_cycles_per_second = 1000 *
                    (ninstrs-ninstrs_last) / (mseconds-mseconds_last)
                    / instrs_per_cycle;

                if (cur_cycles_per_second < 1000000)
                        cur_cycles_per_second = 1000000;

                if (first_adjustment) {
                        machine->emulated_hz = cur_cycles_per_second;
                        first_adjustment = 0;
                } else {
                        machine->emulated_hz = (15 * machine->emulated_hz +
                            cur_cycles_per_second) / 16;
                }

                /*  debug("[ updating emulated_hz to %lli Hz ]\n",
                    (long long)machine->emulated_hz);  */
        }


        /*  RETURN here, unless show_nr_of_instructions (-N) is turned on:  */
        if (!machine->show_nr_of_instructions && !forced)
                goto do_return;

        printf("[ %lli instrs",
            (long long)(machine->ncycles * instrs_per_cycle));

        if (!machine->automatic_clock_adjustment) {
                d = machine->emulated_hz / 1000;
                if (d < 1)
                        d = 1;
                ms = machine->ncycles / d;
                h = ms / 3600000;
                ms -= 3600000 * h;
                m = ms / 60000;
                ms -= 60000 * m;
                s = ms / 1000;
                ms -= 1000 * s;

                printf("emulated time = %02i:%02i:%02i.%03i; ", h, m, s, ms);
        }

        /*  Instructions per second, and average so far:  */
        is = 1000 * (ninstrs-ninstrs_last) / (mseconds-mseconds_last);
        avg = (long long)1000 * ninstrs / mseconds;
        if (is < 0)
                is = 0;
        if (avg < 0)
                avg = 0;
        printf("; i/s=%lli avg=%lli", (long long)is, (long long)avg);

        symbol = get_symbol_name(&machine->symbol_context, pc, &offset);

        if (machine->ncpus == 1) {
                if (machine->cpus[machine->bootstrap_cpu]->is_32bit)
                        printf("; pc=0x%08x", (int)pc);
                else
                        printf("; pc=0x%016llx", (long long)pc);
        }

        if (symbol != NULL)
                printf(" <%s>", symbol);
        printf(" ]\n");

do_return:
        ninstrs_last = ninstrs;
        mseconds_last = mseconds;
}
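
/*
 *  (Added note, not in the original file:)  The automatic clock adjustment
 *  above is a simple exponential moving average:
 *
 *      emulated_hz_new = (15 * emulated_hz_old + cur_cycles_per_second) / 16
 *
 *  so each new measurement contributes 1/16 of its value, smoothing out
 *  jitter in the host's timing. Each measured value is also floored at
 *  1,000,000 cycles per second before being averaged in.
 */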


/*
 *  cpu_run_init():
 *
 *  Prepare to run instructions on all CPUs in this machine. (This function
 *  should only need to be called once for each machine.)
 */
void cpu_run_init(struct machine *machine)
{
        int ncpus = machine->ncpus;
        int te;

        machine->a_few_cycles = 1048576;
        machine->ncycles_flush = 0;
        machine->ncycles = 0;
        machine->ncycles_show = 0;

        /*
         *  Instead of doing { one cycle, check hardware ticks }, we
         *  can do { n cycles, check hardware ticks }, as long as
         *  n is at most as much as the lowest number of cycles/tick
         *  for any hardware device.
         */
        for (te=0; te<machine->n_tick_entries; te++) {
                if (machine->ticks_reset_value[te] < machine->a_few_cycles)
                        machine->a_few_cycles = machine->ticks_reset_value[te];
        }

        machine->a_few_cycles >>= 1;
        if (machine->a_few_cycles < 1)
                machine->a_few_cycles = 1;

        if (ncpus > 1 && machine->max_random_cycles_per_chunk == 0)
                machine->a_few_cycles = 1;

        /*  debug("cpu_run_init(): a_few_cycles = %i\n",
            machine->a_few_cycles);  */

        /*  For performance measurement:  */
        gettimeofday(&machine->starttime, NULL);
        machine->ncycles_since_gettimeofday = 0;
}
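
/*
 *  (Added worked example, not in the original file; the numbers are made up:)
 *  If the smallest ticks_reset_value among the registered tick functions is
 *  16384, a_few_cycles becomes 16384 >> 1 = 8192, i.e. up to 8192 cycles are
 *  run between hardware tick checks. With more than one CPU and
 *  max_random_cycles_per_chunk == 0 it is forced down to 1, presumably so
 *  that the CPUs stay tightly interleaved.
 */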


/*
 *  add_cpu_family():
 *
 *  Allocates a cpu_family struct and calls an init function for the
 *  family to fill in reasonable data and pointers.
 */
static void add_cpu_family(int (*family_init)(struct cpu_family *), int arch)
{
        struct cpu_family *fp, *tmp;
        int res;

        fp = malloc(sizeof(struct cpu_family));
        if (fp == NULL) {
                fprintf(stderr, "add_cpu_family(): out of memory\n");
                exit(1);
        }
        memset(fp, 0, sizeof(struct cpu_family));

        /*
         *  family_init() returns 1 if the struct has been filled with
         *  valid data, 0 if support for the cpu family isn't compiled
         *  into the emulator.
         */
        res = family_init(fp);
        if (!res) {
                free(fp);
                return;
        }
        fp->arch = arch;
        fp->next = NULL;

        /*  Add last in family chain:  */
        tmp = first_cpu_family;
        if (tmp == NULL) {
                first_cpu_family = fp;
        } else {
                while (tmp->next != NULL)
                        tmp = tmp->next;
                tmp->next = fp;
        }
}


/*
 *  cpu_family_ptr_by_number():
 *
 *  Returns a pointer to a CPU family based on the ARCH_* integers.
 */
struct cpu_family *cpu_family_ptr_by_number(int arch)
{
        struct cpu_family *fp;
        fp = first_cpu_family;

        /*  YUCK! This is too hardcoded! TODO  */

        while (fp != NULL) {
                if (arch == fp->arch)
                        return fp;
                fp = fp->next;
        }

        return NULL;
}


/*
 *  cpu_init():
 *
 *  Should be called before any other cpu_*() function.
 */
void cpu_init(void)
{
        /*  Note: These are registered in alphabetic order.  */

#ifdef ENABLE_ALPHA
        add_cpu_family(alpha_cpu_family_init, ARCH_ALPHA);
#endif

#ifdef ENABLE_ARM
        add_cpu_family(arm_cpu_family_init, ARCH_ARM);
#endif

#ifdef ENABLE_AVR
        add_cpu_family(avr_cpu_family_init, ARCH_AVR);
#endif

#ifdef ENABLE_HPPA
        add_cpu_family(hppa_cpu_family_init, ARCH_HPPA);
#endif

#ifdef ENABLE_I960
        add_cpu_family(i960_cpu_family_init, ARCH_I960);
#endif

#ifdef ENABLE_IA64
        add_cpu_family(ia64_cpu_family_init, ARCH_IA64);
#endif

#ifdef ENABLE_M68K
        add_cpu_family(m68k_cpu_family_init, ARCH_M68K);
#endif

#ifdef ENABLE_MIPS
        add_cpu_family(mips_cpu_family_init, ARCH_MIPS);
#endif

#ifdef ENABLE_PPC
        add_cpu_family(ppc_cpu_family_init, ARCH_PPC);
#endif

#ifdef ENABLE_SH
        add_cpu_family(sh_cpu_family_init, ARCH_SH);
#endif

#ifdef ENABLE_SPARC
        add_cpu_family(sparc_cpu_family_init, ARCH_SPARC);
#endif

#ifdef ENABLE_X86
        add_cpu_family(x86_cpu_family_init, ARCH_X86);
#endif
}

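/*
 *  (Added note, not in the original file:)  The expected call order, as far
 *  as this file is concerned, is: cpu_init() once at startup to register the
 *  compiled-in families, cpu_new() for every CPU of every machine, then
 *  cpu_run_init() before the main emulation loop, cpu_run() repeatedly from
 *  that loop, and finally cpu_run_deinit() when the simulation ends.
 */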
