qemu: refactor main_loop (Marcelo Tosatti)

Break main loop into 3 main functions.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7241 c046a42c-6fe2-441c-8c8c-71466251a162
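For orientation before reading the diff: after this change the outer loop delegates to separate helpers, with qemu_cpu_exec() running one vCPU slice, qemu_calculate_timeout() deciding how long main_loop_wait() may block, and vm_can_run() checking for a pending shutdown/reset/powerdown. The toy program below is only a sketch of that control flow; every type and function body in it (CPUStateStub, the fake shutdown counter, the printf stubs) is invented for illustration and is not QEMU code.

/* Toy model of the refactored loop.  The helper names mirror the new vl.c
 * functions, but every type and body here is a stand-in for illustration,
 * not the real QEMU code. */
#include <stdio.h>

enum { EXCP_OK, EXCP_DEBUG };

typedef struct CPUStateStub {             /* stand-in for QEMU's CPUState */
    int id;
    struct CPUStateStub *next_cpu;
} CPUStateStub;

static CPUStateStub cpus[2] = { { 0, &cpus[1] }, { 1, NULL } };
static CPUStateStub *first_cpu = &cpus[0];
static CPUStateStub *next_cpu;
static int vm_running = 1;
static int timer_alarm_pending;
static int shutdown_requested;
static int slices_run;

static int qemu_cpu_exec(CPUStateStub *env)    /* run one vCPU slice */
{
    printf("exec cpu %d\n", env->id);
    if (++slices_run >= 6)                     /* fake a shutdown request */
        shutdown_requested = 1;
    return EXCP_OK;
}

static int qemu_calculate_timeout(void)        /* how long the I/O wait may block */
{
    return vm_running ? 0 : 5000;
}

static int vm_can_run(void)                    /* nothing pending that ends the loop? */
{
    return !shutdown_requested;
}

static void main_loop_wait(int timeout)        /* stand-in for QEMU's I/O wait */
{
    printf("wait up to %d ms\n", timeout);
}

int main(void)
{
    int ret = EXCP_OK;

    do {
        /* round-robin over the vCPU list, resuming where the last pass stopped */
        if (next_cpu == NULL)
            next_cpu = first_cpu;
        for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
            CPUStateStub *env = next_cpu;
            if (!vm_running)
                break;
            if (timer_alarm_pending) {         /* a timer fired: go service I/O first */
                timer_alarm_pending = 0;
                break;
            }
            ret = qemu_cpu_exec(env);
            if (ret == EXCP_DEBUG)
                break;
        }
        main_loop_wait(qemu_calculate_timeout());
    } while (ret != EXCP_DEBUG && vm_can_run());

    return 0;
}

The actual patch keeps the profiling hooks, the icount bookkeeping inside qemu_cpu_exec(), and the icount-aware timeout calculation; the sketch drops those details to show only the loop shape.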
aliguori committed 2009-04-24 18:03:33 +00:00
parent 50317c7fa7, commit 43b9685807

vl.c: 298 changed lines

@@ -273,7 +273,7 @@ uint64_t node_cpumask[MAX_NODES];
 
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
 /* Conversion factor from emulated instructions to virtual clock ticks. */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
@@ -1360,7 +1360,7 @@ static void host_alarm_handler(int host_signum)
             }
 #endif
         }
-        event_pending = 1;
+        timer_alarm_pending = 1;
         qemu_notify_event();
     }
 }
@@ -3879,153 +3879,175 @@ void main_loop_wait(int timeout)
 
 }
 
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
 {
-    int ret, timeout;
+    int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
+
+#ifdef CONFIG_PROFILER
+    ti = profile_getclock();
+#endif
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+        env->icount_decr.u16.low = 0;
+        env->icount_extra = 0;
+        count = qemu_next_deadline();
+        count = (count + (1 << icount_time_shift) - 1)
+                >> icount_time_shift;
+        qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        env->icount_decr.u16.low = decr;
+        env->icount_extra = count;
+    }
+    ret = cpu_exec(env);
+#ifdef CONFIG_PROFILER
+    qemu_time += profile_getclock() - ti;
+#endif
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag. */
+        qemu_icount -= (env->icount_decr.u16.low
+                        + env->icount_extra);
+        env->icount_decr.u32 = 0;
+        env->icount_extra = 0;
+    }
+    return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(void)
+{
     CPUState *env;
 
-    cur_cpu = first_cpu;
-    next_cpu = cur_cpu->next_cpu ?: first_cpu;
-    for(;;) {
-        if (vm_running) {
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
 
-            for(;;) {
-                /* get next cpu */
-                env = next_cpu;
-#ifdef CONFIG_PROFILER
-                ti = profile_getclock();
-#endif
-                if (use_icount) {
-                    int64_t count;
-                    int decr;
-                    qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-                    env->icount_decr.u16.low = 0;
-                    env->icount_extra = 0;
-                    count = qemu_next_deadline();
-                    count = (count + (1 << icount_time_shift) - 1)
-                            >> icount_time_shift;
-                    qemu_icount += count;
-                    decr = (count > 0xffff) ? 0xffff : count;
-                    count -= decr;
-                    env->icount_decr.u16.low = decr;
-                    env->icount_extra = count;
-                }
-                ret = cpu_exec(env);
-#ifdef CONFIG_PROFILER
-                qemu_time += profile_getclock() - ti;
-#endif
-                if (use_icount) {
-                    /* Fold pending instructions back into the
-                       instruction counter, and clear the interrupt flag. */
-                    qemu_icount -= (env->icount_decr.u16.low
-                                    + env->icount_extra);
-                    env->icount_decr.u32 = 0;
-                    env->icount_extra = 0;
-                }
-                next_cpu = env->next_cpu ?: first_cpu;
-                if (event_pending && likely(ret != EXCP_DEBUG)) {
-                    ret = EXCP_INTERRUPT;
-                    event_pending = 0;
-                    break;
-                }
-                if (ret == EXCP_HLT) {
-                    /* Give the next CPU a chance to run. */
-                    cur_cpu = env;
-                    continue;
-                }
-                if (ret != EXCP_HALTED)
-                    break;
-                /* all CPUs are halted ? */
-                if (env == cur_cpu)
-                    break;
-            }
-            cur_cpu = env;
+static int qemu_calculate_timeout(void)
+{
+    int timeout;
 
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
-                }
-                else
-                    break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-                qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
-            if (unlikely(ret == EXCP_DEBUG)) {
-                gdb_set_stop_cpu(cur_cpu);
-                vm_stop(EXCP_DEBUG);
-            }
-            /* If all cpus are halted then wait until the next IRQ */
-            /* XXX: use timeout computed from timers */
-            if (ret == EXCP_HALTED) {
-                if (use_icount) {
-                    int64_t add;
-                    int64_t delta;
-                    /* Advance virtual time to the next event. */
-                    if (use_icount == 1) {
-                        /* When not using an adaptive execution frequency
-                           we tend to get badly out of sync with real time,
-                           so just delay for a reasonable amount of time. */
-                        delta = 0;
-                    } else {
-                        delta = cpu_get_icount() - cpu_get_clock();
-                    }
-                    if (delta > 0) {
-                        /* If virtual time is ahead of real time then just
-                           wait for IO. */
-                        timeout = (delta / 1000000) + 1;
-                    } else {
-                        /* Wait for either IO to occur or the next
-                           timer event. */
-                        add = qemu_next_deadline();
-                        /* We advance the timer before checking for IO.
-                           Limit the amount we advance so that early IO
-                           activity won't get the guest too far ahead. */
-                        if (add > 10000000)
-                            add = 10000000;
-                        delta += add;
-                        add = (add + (1 << icount_time_shift) - 1)
-                              >> icount_time_shift;
-                        qemu_icount += add;
-                        timeout = delta / 1000000;
-                        if (timeout < 0)
-                            timeout = 0;
-                    }
-                } else {
-                    timeout = 5000;
-                }
-            } else {
-                timeout = 0;
-            }
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount)
+        timeout = 5000;
+    else {
+        /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event. */
+        if (use_icount == 1) {
+            /* When not using an adaptive execution frequency
+               we tend to get badly out of sync with real time,
+               so just delay for a reasonable amount of time. */
+            delta = 0;
         } else {
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
-            timeout = 5000;
+            delta = cpu_get_icount() - cpu_get_clock();
         }
-#ifdef CONFIG_PROFILER
-        ti = profile_getclock();
-#endif
-        main_loop_wait(timeout);
-#ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
-#endif
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO. */
+            timeout = (delta / 1000000) + 1;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event. */
+            add = qemu_next_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead. */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            add = (add + (1 << icount_time_shift) - 1)
+                  >> icount_time_shift;
+            qemu_icount += add;
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
+        }
     }
-    cpu_disable_ticks();
-    return ret;
+
+    return timeout;
 }
+
+static int vm_can_run(void)
+{
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    return 1;
+}
+
+static void main_loop(void)
+{
+    int ret = 0;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+    for (;;) {
+        do {
+            if (next_cpu == NULL)
+                next_cpu = first_cpu;
+            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+                CPUState *env = cur_cpu = next_cpu;
+
+                if (!vm_running)
+                    break;
+                if (timer_alarm_pending) {
+                    timer_alarm_pending = 0;
+                    break;
+                }
+                ret = qemu_cpu_exec(env);
+                if (ret == EXCP_DEBUG) {
+                    gdb_set_stop_cpu(env);
+                    break;
+                }
+            }
+#ifdef CONFIG_PROFILER
+            ti = profile_getclock();
+#endif
+            main_loop_wait(qemu_calculate_timeout());
+#ifdef CONFIG_PROFILER
+            dev_time += profile_getclock() - ti;
+#endif
+        } while (ret != EXCP_DEBUG && vm_can_run());
+
+        if (ret == EXCP_DEBUG)
+            vm_stop(EXCP_DEBUG);
+
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else
+                break;
+        }
+        if (qemu_reset_requested())
+            qemu_system_reset();
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
+    }
+}
 
 static void version(void)