1
0
mirror of https://gitlab.com/qemu-project/qemu synced 2024-06-29 06:14:38 +00:00

maintainer updates (plugins, gdbstub):

  - add missing include guard comment to gdbstub.h
  - move gdbstub enums into separate header
  - move qtest_[get|set]_virtual_clock functions
  - allow plugins to manipulate the virtual clock
  - introduce an Instructions Per Second plugin
  - fix inject_mem_cb rw mask tests
  - allow qemu_plugin_vcpu_mem_cb to shortcut when no memory cbs
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmZ5OjoACgkQ+9DbCVqe
 KkQPlwf/VK673BAjYktuCLnf3DgWvIkkiHWwzBREP5MmseUloLjK2CQPLY/xWZED
 pbA/1OSzHViD/mvG5wTxwef36b9PIleWj5/YwBxGlrb/rh6hCd9004pZK4EMI3qU
 53SK8Qron8TIXjey6XfmAY8rcl030GsHr0Zqf5i2pZKE5g0iaGlM3Cwkpo0SxQsu
 kMNqiSs9NzX7LxB+YeuAauIvC1YA2F/MGTXeFCTtO9Beyp5oV7oOI+2zIvLjlG5M
 Z5hKjG/STkNOteoIBGZpe1+QNpoGHSBoGE3nQnGpXb82iLx1KVBcKuQ6GoWGv1Wo
 hqiSh9kJX479l0mLML+IzaDsgSglbg==
 =pvWx
 -----END PGP SIGNATURE-----

Merge tag 'pull-maintainer-june24-240624-1' of https://gitlab.com/stsquad/qemu into staging

maintainer updates (plugins, gdbstub):

  - add missing include guard comment to gdbstub.h
  - move gdbstub enums into separate header
  - move qtest_[get|set]_virtual_clock functions
  - allow plugins to manipulate the virtual clock
  - introduce an Instructions Per Second plugin
  - fix inject_mem_cb rw mask tests
  - allow qemu_plugin_vcpu_mem_cb to shortcut when no memory cbs

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmZ5OjoACgkQ+9DbCVqe
# KkQPlwf/VK673BAjYktuCLnf3DgWvIkkiHWwzBREP5MmseUloLjK2CQPLY/xWZED
# pbA/1OSzHViD/mvG5wTxwef36b9PIleWj5/YwBxGlrb/rh6hCd9004pZK4EMI3qU
# 53SK8Qron8TIXjey6XfmAY8rcl030GsHr0Zqf5i2pZKE5g0iaGlM3Cwkpo0SxQsu
# kMNqiSs9NzX7LxB+YeuAauIvC1YA2F/MGTXeFCTtO9Beyp5oV7oOI+2zIvLjlG5M
# Z5hKjG/STkNOteoIBGZpe1+QNpoGHSBoGE3nQnGpXb82iLx1KVBcKuQ6GoWGv1Wo
# hqiSh9kJX479l0mLML+IzaDsgSglbg==
# =pvWx
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 24 Jun 2024 02:19:54 AM PDT
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]

* tag 'pull-maintainer-june24-240624-1' of https://gitlab.com/stsquad/qemu:
  accel/tcg: Avoid unnecessary call overhead from qemu_plugin_vcpu_mem_cb
  plugins: fix inject_mem_cb rw masking
  contrib/plugins: add Instructions Per Second (IPS) example for cost modeling
  plugins: add migration blocker
  plugins: add time control API
  qtest: move qtest_{get, set}_virtual_clock to accel/qtest/qtest.c
  sysemu: generalise qtest_warp_clock as qemu_clock_advance_virtual_time
  qtest: use cpu interface in qtest_clock_warp
  sysemu: add set_virtual_time to accel ops
  plugins: Ensure register handles are not NULL
  gdbstub: move enums into separate header
  include/exec: add missing include guard comment

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2024-06-24 13:51:11 -07:00
commit e2bc7787c8
32 changed files with 379 additions and 67 deletions

View File

@ -52,7 +52,7 @@
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"

View File

@ -27,7 +27,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"

View File

@ -24,6 +24,18 @@
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
static int64_t qtest_clock_counter;
/* Read the qtest virtual clock counter atomically. */
static int64_t qtest_get_virtual_clock(void)
{
return qatomic_read_i64(&qtest_clock_counter);
}
/* Set the qtest virtual clock counter atomically to an absolute @count. */
static void qtest_set_virtual_clock(int64_t count)
{
qatomic_set_i64(&qtest_clock_counter, count);
}
static int qtest_init_accel(MachineState *ms)
{
return 0;
@ -52,6 +64,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
ops->set_virtual_clock = qtest_set_virtual_clock;
};
static const TypeInfo qtest_accel_ops_type = {

View File

@ -125,7 +125,9 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
@ -188,7 +190,9 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
}
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,

View File

@ -240,13 +240,13 @@ static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
{
switch (cb->type) {
case PLUGIN_CB_MEM_REGULAR:
if (rw && cb->regular.rw) {
if (rw & cb->regular.rw) {
gen_mem_cb(&cb->regular, meminfo, addr);
}
break;
case PLUGIN_CB_INLINE_ADD_U64:
case PLUGIN_CB_INLINE_STORE_U64:
if (rw && cb->inline_insn.rw) {
if (rw & cb->inline_insn.rw) {
inject_cb(cb);
}
break;

View File

@ -35,7 +35,7 @@
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"

View File

@ -27,6 +27,7 @@ endif
NAMES += hwprofile
NAMES += cache
NAMES += drcov
NAMES += ips
ifeq ($(CONFIG_WIN32),y)
SO_SUFFIX := .dll

164
contrib/plugins/ips.c Normal file
View File

@ -0,0 +1,164 @@
/*
* Instructions Per Second (IPS) rate limiting plugin.
*
* This plugin can be used to restrict the execution of a system to a
* particular number of Instructions Per Second (IPS). This controls
* time as seen by the guest, so while wall-clock time may be longer,
* from the guest's point of view time will pass at the normal rate.
*
* This uses the new plugin API which allows the plugin to control
* system time.
*
* Copyright (c) 2023 Linaro Ltd
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <errno.h>
#include <stdio.h>
#include <glib.h>
#include <qemu-plugin.h>
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
/* how many times do we update time per sec */
#define NUM_TIME_UPDATE_PER_SEC 10
#define NSEC_IN_ONE_SEC (1000 * 1000 * 1000)
static GMutex global_state_lock;
static uint64_t max_insn_per_second = 1000 * 1000 * 1000; /* ips per core, per second */
static uint64_t max_insn_per_quantum; /* trap every N instructions */
static int64_t virtual_time_ns; /* last set virtual time */
static const void *time_handle;
typedef struct {
uint64_t total_insn;
uint64_t quantum_insn; /* insn in last quantum */
int64_t last_quantum_time; /* time when last quantum started */
} vCPUTime;
struct qemu_plugin_scoreboard *vcpus;
/* Return host wall-clock (epoch) time in ns; g_get_real_time() is in µs. */
static int64_t now_ns(void)
{
return g_get_real_time() * 1000;
}
/*
 * How many instructions the configured rate (max_insn_per_second)
 * allows over @elapsed_ns of real time.
 */
static uint64_t num_insn_during(int64_t elapsed_ns)
{
    double seconds = (double) elapsed_ns / (double) NSEC_IN_ONE_SEC;
    double budget = seconds * (double) max_insn_per_second;
    return budget;
}
/*
 * Time in ns that executing @num_insn instructions should take at the
 * configured rate (max_insn_per_second).
 */
static int64_t time_for_insn(uint64_t num_insn)
{
    double seconds = (double) num_insn / (double) max_insn_per_second;
    double duration_ns = seconds * (double) NSEC_IN_ONE_SEC;
    return duration_ns;
}
/*
 * Close the current quantum for @vcpu: if it executed more instructions
 * than the elapsed real time allows, sleep off the difference; then fold
 * the quantum count into the running total and, under the global lock,
 * push virtual time forward if our estimate is ahead of the last value
 * published by any vcpu.
 */
static void update_system_time(vCPUTime *vcpu)
{
int64_t elapsed_ns = now_ns() - vcpu->last_quantum_time;
uint64_t max_insn = num_insn_during(elapsed_ns);
if (vcpu->quantum_insn >= max_insn) {
/* this vcpu ran faster than expected, so it has to sleep */
uint64_t insn_advance = vcpu->quantum_insn - max_insn;
uint64_t time_advance_ns = time_for_insn(insn_advance);
int64_t sleep_us = time_advance_ns / 1000;
g_usleep(sleep_us);
}
vcpu->total_insn += vcpu->quantum_insn;
vcpu->quantum_insn = 0;
vcpu->last_quantum_time = now_ns();
/* based on total number of instructions, what should be the new time? */
int64_t new_virtual_time = time_for_insn(vcpu->total_insn);
g_mutex_lock(&global_state_lock);
/* Time only moves forward. Another vcpu might have updated it already. */
if (new_virtual_time > virtual_time_ns) {
qemu_plugin_update_ns(time_handle, new_virtual_time);
virtual_time_ns = new_virtual_time;
}
g_mutex_unlock(&global_state_lock);
}
/* Reset per-vCPU counters and start the first quantum at the current time. */
static void vcpu_init(qemu_plugin_id_t id, unsigned int cpu_index)
{
vCPUTime *vcpu = qemu_plugin_scoreboard_find(vcpus, cpu_index);
vcpu->total_insn = 0;
vcpu->quantum_insn = 0;
vcpu->last_quantum_time = now_ns();
}
/* On vCPU exit, account for whatever instructions ran in the open quantum. */
static void vcpu_exit(qemu_plugin_id_t id, unsigned int cpu_index)
{
vCPUTime *vcpu = qemu_plugin_scoreboard_find(vcpus, cpu_index);
update_system_time(vcpu);
}
/*
 * Conditional TB callback, armed in vcpu_tb_trans: runs once the vCPU's
 * quantum_insn counter has reached max_insn_per_quantum.
 */
static void every_quantum_insn(unsigned int cpu_index, void *udata)
{
vCPUTime *vcpu = qemu_plugin_scoreboard_find(vcpus, cpu_index);
g_assert(vcpu->quantum_insn >= max_insn_per_quantum);
update_system_time(vcpu);
}
/*
 * Translation hook: for each TB, inline-add its instruction count to the
 * per-vCPU quantum counter and arm a conditional callback that fires when
 * the counter reaches max_insn_per_quantum.
 */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
size_t n_insns = qemu_plugin_tb_n_insns(tb);
qemu_plugin_u64 quantum_insn =
qemu_plugin_scoreboard_u64_in_struct(vcpus, vCPUTime, quantum_insn);
/* count (and eventually trap) once per tb */
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
tb, QEMU_PLUGIN_INLINE_ADD_U64, quantum_insn, n_insns);
qemu_plugin_register_vcpu_tb_exec_cond_cb(
tb, every_quantum_insn,
QEMU_PLUGIN_CB_NO_REGS, QEMU_PLUGIN_COND_GE,
quantum_insn, max_insn_per_quantum, NULL);
}
/* atexit callback: release the per-vCPU scoreboard. */
static void plugin_exit(qemu_plugin_id_t id, void *udata)
{
qemu_plugin_scoreboard_free(vcpus);
}
/*
 * Plugin entry point: parse the "ips=<n>" option, allocate the per-vCPU
 * scoreboard, claim time control (only one plugin may hold it) and
 * register the translation/init/exit callbacks.
 *
 * Returns 0 on success, -1 on bad options or parse failure.
 */
QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info, int argc,
                                           char **argv)
{
    for (int i = 0; i < argc; i++) {
        char *opt = argv[i];
        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
        if (g_strcmp0(tokens[0], "ips") == 0) {
            /*
             * g_ascii_strtoull only sets errno on failure; clear any
             * stale value first so we don't report a spurious error.
             */
            errno = 0;
            max_insn_per_second = g_ascii_strtoull(tokens[1], NULL, 10);
            if (!max_insn_per_second && errno) {
                fprintf(stderr, "%s: couldn't parse %s (%s)\n",
                        __func__, tokens[1], g_strerror(errno));
                return -1;
            }
        } else {
            fprintf(stderr, "option parsing failed: %s\n", opt);
            return -1;
        }
    }

    vcpus = qemu_plugin_scoreboard_new(sizeof(vCPUTime));
    max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC;

    /* Only one plugin may control time; fail loudly if we can't get it. */
    time_handle = qemu_plugin_request_time_control();
    g_assert(time_handle);

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
    qemu_plugin_register_vcpu_exit_cb(id, vcpu_exit);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);

    return 0;
}

View File

@ -18,6 +18,7 @@
#include "exec/gdbstub.h"
#include "gdbstub/syscalls.h"
#include "gdbstub/user.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"
#include "trace.h"
#include "internals.h"

View File

@ -1,15 +1,6 @@
#ifndef GDBSTUB_H
#define GDBSTUB_H
#define DEFAULT_GDBSTUB_PORT "1234"
/* GDB breakpoint/watchpoint types */
#define GDB_BREAKPOINT_SW 0
#define GDB_BREAKPOINT_HW 1
#define GDB_WATCHPOINT_WRITE 2
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
typedef struct GDBFeature {
const char *xmlname;
const char *xml;
@ -144,4 +135,4 @@ void gdb_set_stop_cpu(CPUState *cpu);
/* in gdbstub-xml.c, generated by scripts/feature_to_c.py */
extern const GDBFeature gdb_static_features[];
#endif
#endif /* GDBSTUB_H */

21
include/gdbstub/enums.h Normal file
View File

@ -0,0 +1,21 @@
/*
* gdbstub enums
*
* Copyright (c) 2024 Linaro Ltd
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef GDBSTUB_ENUMS_H
#define GDBSTUB_ENUMS_H
#define DEFAULT_GDBSTUB_PORT "1234"
/* GDB breakpoint/watchpoint types */
#define GDB_BREAKPOINT_SW 0
#define GDB_BREAKPOINT_HW 1
#define GDB_WATCHPOINT_WRITE 2
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
#endif /* GDBSTUB_ENUMS_H */

View File

@ -661,6 +661,33 @@ void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
qemu_plugin_u64 entry,
uint64_t imm);
/**
* qemu_plugin_request_time_control() - request the ability to control time
*
* This grants the plugin the ability to control system time. Only one
* plugin can control time so if multiple plugins request the ability
* all but the first will fail.
*
* Returns an opaque handle or NULL if fails
*/
QEMU_PLUGIN_API
const void *qemu_plugin_request_time_control(void);
/**
* qemu_plugin_update_ns() - update system emulation time
* @handle: opaque handle returned by qemu_plugin_request_time_control()
* @time: time in nanoseconds
*
* This allows an appropriately authorised plugin (i.e. holding the
* time control handle) to move system time forward to @time. For
* user-mode emulation the time is not changed by this as all reported
* time comes from the host kernel.
*
* Start time is 0.
*/
QEMU_PLUGIN_API
void qemu_plugin_update_ns(const void *handle, int64_t time);
typedef void
(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index,
int64_t num, uint64_t a1, uint64_t a2,

View File

@ -245,6 +245,21 @@ bool qemu_clock_run_timers(QEMUClockType type);
*/
bool qemu_clock_run_all_timers(void);
/**
* qemu_clock_advance_virtual_time(): advance the virtual time tick
* @target_ns: target time in nanoseconds
*
* This function is used where the control of the flow of time has
* been delegated to outside the clock subsystem (be it qtest, icount
* or some other external source). You can ask the clock system to
* return @early at the first expired timer.
*
* Time can only move forward, attempts to reverse time would lead to
* an error.
*
* Returns: new virtual time.
*/
int64_t qemu_clock_advance_virtual_time(int64_t target_ns);
/*
* QEMUTimerList

View File

@ -20,7 +20,12 @@
typedef struct AccelOpsClass AccelOpsClass;
DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
/* cpus.c operations interface */
/**
* struct AccelOpsClass - accelerator interfaces
*
* This structure is used to abstract accelerator differences from the
* core CPU code. Not all have to be implemented.
*/
struct AccelOpsClass {
/*< private >*/
ObjectClass parent_class;
@ -44,7 +49,18 @@ struct AccelOpsClass {
void (*handle_interrupt)(CPUState *cpu, int mask);
/**
* @get_virtual_clock: fetch virtual clock
* @set_virtual_clock: set virtual clock
*
* These allow the timer subsystem to defer to the accelerator to
* fetch time. The set function is needed if the accelerator wants
* to track the changes to time as the timer is warped through
* various timer events.
*/
int64_t (*get_virtual_clock)(void);
void (*set_virtual_clock)(int64_t time);
int64_t (*get_elapsed_ticks)(void);
/* gdbstub hooks */

View File

@ -96,8 +96,9 @@ int64_t cpu_get_clock(void);
void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
/* get the VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
/* get/set VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
int64_t cpus_get_virtual_clock(void);
void cpus_set_virtual_clock(int64_t new_time);
int64_t cpus_get_elapsed_ticks(void);
#endif /* SYSEMU_CPU_TIMERS_H */

View File

@ -34,8 +34,6 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **
void qtest_server_set_send_handler(void (*send)(void *, const char *),
void *opaque);
void qtest_server_inproc_recv(void *opaque, const char *buf);
int64_t qtest_get_virtual_clock(void);
#endif
#endif

View File

@ -15,8 +15,9 @@
#include "qemu/osdep.h"
#include "exec/address-spaces.h"
#include "exec/gdbstub.h"
#include "exec/ioport.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "monitor/hmp.h"
#include "qemu/help_option.h"
#include "monitor/monitor-internal.h"

View File

@ -39,6 +39,7 @@
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "tcg/tcg.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
@ -46,6 +47,8 @@
#include "disas/disas.h"
#include "plugin.h"
#ifndef CONFIG_USER_ONLY
#include "qapi/error.h"
#include "migration/blocker.h"
#include "exec/ram_addr.h"
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
@ -507,7 +510,7 @@ static GArray *create_register_handles(GArray *gdbstub_regs)
}
/* Create a record for the plugin */
desc.handle = GINT_TO_POINTER(grd->gdb_reg);
desc.handle = GINT_TO_POINTER(grd->gdb_reg + 1);
desc.name = g_intern_string(grd->name);
desc.feature = g_intern_string(grd->feature_name);
g_array_append_val(find_data, desc);
@ -528,7 +531,7 @@ int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
g_assert(current_cpu);
return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg));
return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1);
}
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
@ -583,3 +586,45 @@ uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
}
return total;
}
/*
* Time control
*/
static bool has_control;
#ifdef CONFIG_SOFTMMU
static Error *migration_blocker;
#endif
/*
 * Hand out the single time-control token: only the first caller gets a
 * non-NULL handle (the address of the flag); later callers get NULL.
 * In system-mode builds this also installs a migration blocker, since
 * plugin-driven time does not support migration.
 */
const void *qemu_plugin_request_time_control(void)
{
if (!has_control) {
has_control = true;
#ifdef CONFIG_SOFTMMU
error_setg(&migration_blocker,
"TCG plugin time control does not support migration");
migrate_add_blocker(&migration_blocker, NULL);
#endif
return &has_control;
}
return NULL;
}
#ifdef CONFIG_SOFTMMU
/* run_on_cpu trampoline: unpack the target time and apply the warp. */
static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
{
int64_t new_time = data.host_ulong;
qemu_clock_advance_virtual_time(new_time);
}
#endif
/*
 * Advance system virtual time to @new_time, but only for the holder of
 * the time-control handle; requests with any other handle are ignored.
 * User-mode builds (no CONFIG_SOFTMMU) compile this to a no-op.
 */
void qemu_plugin_update_ns(const void *handle, int64_t new_time)
{
#ifdef CONFIG_SOFTMMU
if (handle == &has_control) {
/* Need to execute out of cpu_exec, so bql can be locked. */
async_run_on_cpu(current_cpu,
advance_virtual_time__async,
RUN_ON_CPU_HOST_ULONG(new_time));
}
#endif
}

View File

@ -589,7 +589,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
switch (cb->type) {
case PLUGIN_CB_MEM_REGULAR:
if (rw && cb->regular.rw) {
if (rw & cb->regular.rw) {
cb->regular.f.vcpu_mem(cpu->cpu_index,
make_plugin_meminfo(oi, rw),
vaddr, cb->regular.userp);
@ -597,7 +597,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
break;
case PLUGIN_CB_INLINE_ADD_U64:
case PLUGIN_CB_INLINE_STORE_U64:
if (rw && cb->inline_insn.rw) {
if (rw & cb->inline_insn.rw) {
exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
}
break;

View File

@ -38,6 +38,7 @@
qemu_plugin_register_vcpu_tb_exec_cond_cb;
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
qemu_plugin_register_vcpu_tb_trans_cb;
qemu_plugin_request_time_control;
qemu_plugin_reset;
qemu_plugin_scoreboard_free;
qemu_plugin_scoreboard_find;
@ -51,5 +52,6 @@
qemu_plugin_u64_set;
qemu_plugin_u64_sum;
qemu_plugin_uninstall;
qemu_plugin_update_ns;
qemu_plugin_vcpu_for_each;
};

View File

@ -6,3 +6,8 @@ int64_t cpus_get_virtual_clock(void)
{
return cpu_get_clock();
}
/*
 * Stub for builds without the cpus accel layer: time warps are accepted
 * and silently ignored.
 */
void cpus_set_virtual_clock(int64_t new_time)
{
/* do nothing */
}

View File

@ -29,7 +29,7 @@ endif
if have_block or have_ga
stub_ss.add(files('replay-tools.c'))
# stubs for hooks in util/main-loop.c, util/async.c etc.
stub_ss.add(files('cpus-get-virtual-clock.c'))
stub_ss.add(files('cpus-virtual-clock.c'))
stub_ss.add(files('icount.c'))
stub_ss.add(files('graph-lock.c'))
if linux_io_uring.found()

View File

@ -229,6 +229,17 @@ int64_t cpus_get_virtual_clock(void)
return cpu_get_clock();
}
/*
* Signal the new virtual time to the accelerator. This is only needed
* by accelerators that need to track the changes as we warp time.
*/
void cpus_set_virtual_clock(int64_t new_time)
{
/* The hook is optional: accelerators that don't track warps skip it. */
if (cpus_accel && cpus_accel->set_virtual_clock) {
cpus_accel->set_virtual_clock(new_time);
}
}
/*
* return the time elapsed in VM between vm_start and vm_stop. Unless
* icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle

View File

@ -325,38 +325,6 @@ static void qtest_irq_handler(void *opaque, int n, int level)
}
}
static int64_t qtest_clock_counter;
/* Read the qtest virtual clock counter atomically. */
int64_t qtest_get_virtual_clock(void)
{
return qatomic_read_i64(&qtest_clock_counter);
}
/* Set the qtest virtual clock counter atomically to an absolute @count. */
static void qtest_set_virtual_clock(int64_t count)
{
qatomic_set_i64(&qtest_clock_counter, count);
}
/*
 * Step the qtest virtual clock forward to @dest, running any virtual
 * timers (main timer list and the main AioContext's list) that expire
 * along the way, then notify QEMU_CLOCK_VIRTUAL waiters.
 */
static void qtest_clock_warp(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
AioContext *aio_context;
assert(qtest_enabled());
aio_context = qemu_get_aio_context();
while (clock < dest) {
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
/* Never step past the next timer deadline in one go. */
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
qtest_set_virtual_clock(qtest_get_virtual_clock() + warp);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
static bool (*process_command_cb)(CharBackend *chr, gchar **words);
void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words))
@ -751,7 +719,8 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
}
qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
qemu_clock_advance_virtual_time(
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIi64"\n",
(int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
@ -777,7 +746,7 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
g_assert(words[1]);
ret = qemu_strtoi64(words[1], NULL, 0, &ns);
g_assert(ret == 0);
qtest_clock_warp(ns);
qemu_clock_advance_virtual_time(ns);
qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIi64"\n",
(int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));

View File

@ -68,6 +68,7 @@
#include "sysemu/numa.h"
#include "sysemu/hostmem.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "qemu/timer.h"
#include "chardev/char.h"
#include "qemu/bitmap.h"

View File

@ -33,7 +33,7 @@
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#define MDSCR_EL1_SS_SHIFT 0
#define MDSCR_EL1_MDE_SHIFT 15

View File

@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;

View File

@ -31,7 +31,7 @@
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qapi/visitor.h"

View File

@ -38,7 +38,7 @@
#include "hyperv.h"
#include "hyperv-proto.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"

View File

@ -39,7 +39,7 @@
#include "migration/qemu-file-types.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "exec/memattrs.h"
#include "exec/ram_addr.h"
#include "sysemu/hostmem.h"

View File

@ -40,7 +40,7 @@
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"

View File

@ -645,6 +645,11 @@ int64_t qemu_clock_get_ns(QEMUClockType type)
}
}
/* Forward the new virtual time to the accelerator layer. */
static void qemu_virtual_clock_set_ns(int64_t time)
{
    /*
     * 'return expr;' of a void call in a void function is a constraint
     * violation in ISO C (accepted only as a compiler extension), so
     * just make the call.
     */
    cpus_set_virtual_clock(time);
}
void init_clocks(QEMUTimerListNotifyCB *notify_cb)
{
QEMUClockType type;
@ -675,3 +680,24 @@ bool qemu_clock_run_all_timers(void)
return progress;
}
/*
 * Warp QEMU_CLOCK_VIRTUAL forward to @dest, running each virtual timer
 * (main timer list and the main AioContext's list) as it expires, then
 * notify clock waiters. Returns the resulting virtual time; if @dest is
 * in the past the clock is left unchanged.
 */
int64_t qemu_clock_advance_virtual_time(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
AioContext *aio_context;
aio_context = qemu_get_aio_context();
while (clock < dest) {
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
/* Never step past the next timer deadline in one go. */
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
qemu_virtual_clock_set_ns(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + warp);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
return clock;
}