1 |
/* |
/* |
2 |
* Copyright (C) 2005 Anders Gavare. All rights reserved. |
* Copyright (C) 2005-2006 Anders Gavare. All rights reserved. |
3 |
* |
* |
4 |
* Redistribution and use in source and binary forms, with or without |
* Redistribution and use in source and binary forms, with or without |
5 |
* modification, are permitted provided that the following conditions are met: |
* modification, are permitted provided that the following conditions are met: |
25 |
* SUCH DAMAGE. |
* SUCH DAMAGE. |
26 |
* |
* |
27 |
* |
* |
28 |
* $Id: cpu_run.c,v 1.6 2005/12/26 12:32:10 debug Exp $ |
* $Id: cpu_run.c,v 1.9 2006/06/22 13:22:41 debug Exp $ |
29 |
* |
* |
30 |
* Included from cpu_mips.c, cpu_ppc.c etc. (The reason for this is that |
* Included from cpu_mips.c, cpu_ppc.c etc. (The reason for this is that |
31 |
* the call to a specific cpu's routine that runs one instruction will |
* the call to a specific cpu's routine that runs one instruction will |
40 |
#include "debugger.h" |
#include "debugger.h" |
41 |
|
|
42 |
|
|
|
/*
 *  instrs_per_cycle():
 *
 *  Return how many instructions the given cpu executes during one
 *  emulated cycle.  Only MIPS cpus (when this file is included from
 *  cpu_mips.c, i.e. CPU_RUN_MIPS is defined) can execute more than one
 *  instruction per cycle; every other cpu family runs exactly one.
 */
static int instrs_per_cycle(struct cpu *cpu)
{
#ifdef CPU_RUN_MIPS
	return cpu->cd.mips.cpu_type.instrs_per_cycle;
#else
	/*  Non-MIPS: always one instruction per cycle; the cpu argument
	    is intentionally unused.  */
	(void) cpu;
	return 1;
#endif
}
|
|
|
|
|
|
|
43 |
/* |
/* |
44 |
* CPU_RUN(): |
* CPU_RUN(): |
45 |
* |
* |
52 |
{ |
{ |
53 |
struct cpu **cpus = machine->cpus; |
struct cpu **cpus = machine->cpus; |
54 |
int ncpus = machine->ncpus; |
int ncpus = machine->ncpus; |
|
int64_t max_instructions_cached = machine->max_instructions; |
|
|
int64_t max_random_cycles_per_chunk_cached = |
|
|
machine->max_random_cycles_per_chunk; |
|
55 |
int64_t ncycles_chunk_end; |
int64_t ncycles_chunk_end; |
56 |
int running, rounds; |
int running, rounds; |
57 |
|
|
61 |
while (running || single_step) { |
while (running || single_step) { |
62 |
ncycles_chunk_end = machine->ncycles + (1 << 17); |
ncycles_chunk_end = machine->ncycles + (1 << 17); |
63 |
|
|
64 |
machine->a_few_instrs = machine->a_few_cycles * |
machine->a_few_instrs = machine->a_few_cycles; |
|
instrs_per_cycle(cpus[0]); |
|
65 |
|
|
66 |
/* Do a chunk of cycles: */ |
/* Do a chunk of cycles: */ |
67 |
do { |
do { |
100 |
single_step = 2; |
single_step = 2; |
101 |
} |
} |
102 |
|
|
103 |
for (j=0; j<instrs_per_cycle(cpus[0]); j++) { |
if (single_step) |
104 |
if (single_step) |
debugger(); |
|
debugger(); |
|
|
for (i=0; i<ncpus; i++) |
|
|
if (cpus[i]->running) { |
|
|
int instrs_run = |
|
|
CPU_RINSTR(emul, |
|
|
cpus[i]); |
|
|
if (i == 0) |
|
|
cpu0instrs += |
|
|
instrs_run; |
|
|
} |
|
|
} |
|
|
} else if (max_random_cycles_per_chunk_cached > 0) { |
|
105 |
for (i=0; i<ncpus; i++) |
for (i=0; i<ncpus; i++) |
106 |
if (cpus[i]->running && !single_step) { |
if (cpus[i]->running) { |
107 |
a_few_instrs2 = machine-> |
int instrs_run = |
108 |
a_few_cycles; |
CPU_RINSTR(emul, |
109 |
if (a_few_instrs2 >= |
cpus[i]); |
110 |
max_random_cycles_per_chunk_cached) |
if (i == 0) |
111 |
a_few_instrs2 = max_random_cycles_per_chunk_cached; |
cpu0instrs += |
112 |
j = (random() % a_few_instrs2) + 1; |
instrs_run; |
|
j *= instrs_per_cycle(cpus[i]); |
|
|
while (j-- >= 1 && cpus[i]->running) { |
|
|
int instrs_run = CPU_RINSTR(emul, cpus[i]); |
|
|
if (i == 0) |
|
|
cpu0instrs += instrs_run; |
|
|
if (single_step) |
|
|
break; |
|
|
} |
|
113 |
} |
} |
114 |
} else { |
} else { |
115 |
/* CPU 0 is special, cpu0instr must be updated. */ |
/* CPU 0 is special, cpu0instr must be updated. */ |
132 |
|
|
133 |
/* CPU 1 and up: */ |
/* CPU 1 and up: */ |
134 |
for (i=1; i<ncpus; i++) { |
for (i=1; i<ncpus; i++) { |
135 |
a_few_instrs2 = machine->a_few_cycles * |
a_few_instrs2 = machine->a_few_cycles; |
|
instrs_per_cycle(cpus[i]); |
|
136 |
for (j=0; j<a_few_instrs2; ) |
for (j=0; j<a_few_instrs2; ) |
137 |
if (cpus[i]->running) { |
if (cpus[i]->running) { |
138 |
int instrs_run = 0; |
int instrs_run = 0; |
155 |
* |
* |
156 |
* Here, cpu0instrs is the number of instructions |
* Here, cpu0instrs is the number of instructions |
157 |
* executed on cpu0. (TODO: don't use cpu 0 for this, |
* executed on cpu0. (TODO: don't use cpu 0 for this, |
158 |
* use some kind of "mainbus" instead.) Hardware |
* use some kind of "mainbus" instead.) |
|
* ticks are not per instruction, but per cycle, |
|
|
* so we divide by the number of |
|
|
* instructions_per_cycle for cpu0. |
|
|
* |
|
|
* TODO: This doesn't work in a machine with, say, |
|
|
* a mixture of R3000, R4000, and R10000 CPUs, if |
|
|
* there ever was such a thing. |
|
|
* |
|
|
* TODO 2: A small bug occurs if cpu0instrs isn't |
|
|
* evenly divisible by instrs_per_cycle. We then |
|
|
* cause hardware ticks a fraction of a cycle too |
|
|
* often. |
|
159 |
*/ |
*/ |
|
i = instrs_per_cycle(cpus[0]); |
|
|
switch (i) { |
|
|
case 1: break; |
|
|
case 2: cpu0instrs >>= 1; break; |
|
|
case 4: cpu0instrs >>= 2; break; |
|
|
default: |
|
|
cpu0instrs /= i; |
|
|
} |
|
160 |
|
|
161 |
for (te=0; te<machine->n_tick_entries; te++) { |
for (te=0; te<machine->n_tick_entries; te++) { |
162 |
machine->ticks_till_next[te] -= cpu0instrs; |
machine->ticks_till_next[te] -= cpu0instrs; |
|
|
|
163 |
if (machine->ticks_till_next[te] <= 0) { |
if (machine->ticks_till_next[te] <= 0) { |
164 |
while (machine->ticks_till_next[te] |
while (machine->ticks_till_next[te] |
165 |
<= 0) |
<= 0) |
195 |
machine->ncycles_show = machine->ncycles; |
machine->ncycles_show = machine->ncycles; |
196 |
} |
} |
197 |
|
|
|
if (max_instructions_cached != 0 && |
|
|
machine->ncycles >= max_instructions_cached) |
|
|
running = 0; |
|
|
|
|
198 |
/* Let's allow other machines to run. */ |
/* Let's allow other machines to run. */ |
199 |
rounds ++; |
rounds ++; |
200 |
if (rounds > 2) |
if (rounds > 2) |