maintainer updates (tests, gdbstub, plugins):

  - expand QOS_PATH_MAX_ELEMENT_SIZE to avoid LTO issues
  - support follow-fork-mode in gdbstub
  - new thread-safe scoreboard API for TCG plugins
  - suppress showing opcodes in plugin disassembly
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmXoY7oACgkQ+9DbCVqe
 KkTdTwf8D8nUB+Ee6LuglW36vtd1ETdMfUmfRis7RIBsXZZ0Tg4+8LyfKkNi1vCL
 UMdWQTkSW79RfXr21QEtETokwLZ0CWQMdxDAWfOiz4S+uDgQyBE+lwUsy0mHBmd7
 +J4SQb3adoZ+//9KMJhRU1wL9j3ygpEoKHVJonDObU6K5XuhE18JuBE44q7FqkWl
 0VhoLDgNxrf2PqT+LLP/O3MFLDXPVKbzrZYQF0IoqBTlcqShCoaykhSwiwCZ4Sqq
 NO9hVwZIOFOcOF4F6ZqRXaZrwERldoBwG+BeIx1ah20vKFVT12y02dQqdP/oKwe+
 /PXFXDdzs4yMOghb4Go6SiKlKT5g4A==
 =s1lF
 -----END PGP SIGNATURE-----

Merge tag 'pull-maintainer-updates-060324-1' of https://gitlab.com/stsquad/qemu into staging

maintainer updates (tests, gdbstub, plugins):

  - expand QOS_PATH_MAX_ELEMENT_SIZE to avoid LTO issues
  - support follow-fork-mode in gdbstub
  - new thread-safe scoreboard API for TCG plugins
  - suppress showing opcodes in plugin disassembly

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmXoY7oACgkQ+9DbCVqe
# KkTdTwf8D8nUB+Ee6LuglW36vtd1ETdMfUmfRis7RIBsXZZ0Tg4+8LyfKkNi1vCL
# UMdWQTkSW79RfXr21QEtETokwLZ0CWQMdxDAWfOiz4S+uDgQyBE+lwUsy0mHBmd7
# +J4SQb3adoZ+//9KMJhRU1wL9j3ygpEoKHVJonDObU6K5XuhE18JuBE44q7FqkWl
# 0VhoLDgNxrf2PqT+LLP/O3MFLDXPVKbzrZYQF0IoqBTlcqShCoaykhSwiwCZ4Sqq
# NO9hVwZIOFOcOF4F6ZqRXaZrwERldoBwG+BeIx1ah20vKFVT12y02dQqdP/oKwe+
# /PXFXDdzs4yMOghb4Go6SiKlKT5g4A==
# =s1lF
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 06 Mar 2024 12:38:18 GMT
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8  DF35 FBD0 DB09 5A9E 2A44

* tag 'pull-maintainer-updates-060324-1' of https://gitlab.com/stsquad/qemu: (29 commits)
  target/riscv: honour show_opcodes when disassembling
  target/loongarch: honour show_opcodes when disassembling
  disas/hppa: honour show_opcodes
  disas: introduce show_opcodes
  plugins: cleanup codepath for previous inline operation
  plugins: remove non per_vcpu inline operation from API
  contrib/plugins/howvec: migrate to new per_vcpu API
  contrib/plugins/hotblocks: migrate to new per_vcpu API
  tests/plugin/bb: migrate to new per_vcpu API
  tests/plugin/insn: migrate to new per_vcpu API
  tests/plugin/mem: migrate to new per_vcpu API
  tests/plugin: add test plugin for inline operations
  plugins: add inline operation per vcpu
  plugins: implement inline operation relative to cpu_index
  plugins: define qemu_plugin_u64
  plugins: scoreboard API
  tests/tcg: Add two follow-fork-mode tests
  gdbstub: Implement follow-fork-mode child
  gdbstub: Introduce gdb_handle_detach_user()
  gdbstub: Introduce gdb_handle_set_thread_user()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2024-03-06 16:56:20 +00:00
commit 8f6330a807
57 changed files with 1258 additions and 339 deletions

View file

@@ -133,16 +133,28 @@ static void gen_empty_udata_cb_no_rwg(void)
*/ */
static void gen_empty_inline_cb(void) static void gen_empty_inline_cb(void)
{ {
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
TCGv_i64 val = tcg_temp_ebb_new_i64(); TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
/* second operand will be replaced by immediate value */
tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);
tcg_gen_movi_ptr(ptr, 0); tcg_gen_movi_ptr(ptr, 0);
tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
tcg_gen_ld_i64(val, ptr, 0); tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */ /* second operand will be replaced by immediate value */
tcg_gen_addi_i64(val, val, 0xdeadface); tcg_gen_add_i64(val, val, val);
tcg_gen_st_i64(val, ptr, 0); tcg_gen_st_i64(val, ptr, 0);
tcg_temp_free_ptr(ptr); tcg_temp_free_ptr(ptr);
tcg_temp_free_i64(val); tcg_temp_free_i64(val);
tcg_temp_free_ptr(cpu_index_as_ptr);
tcg_temp_free_i32(cpu_index);
} }
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
@@ -290,12 +302,37 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op; return op;
} }
static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
{
return copy_op(begin_op, op, INDEX_op_ld_i32);
}
static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
op = copy_op(begin_op, op, INDEX_op_ext_i32_i64);
}
return op;
}
static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
op = copy_op(begin_op, op, INDEX_op_add_i32);
} else {
op = copy_op(begin_op, op, INDEX_op_add_i64);
}
return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{ {
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
/* 2x ld_i32 */ /* 2x ld_i32 */
op = copy_op(begin_op, op, INDEX_op_ld_i32); op = copy_ld_i32(begin_op, op);
op = copy_op(begin_op, op, INDEX_op_ld_i32); op = copy_ld_i32(begin_op, op);
} else { } else {
/* ld_i64 */ /* ld_i64 */
op = copy_op(begin_op, op, INDEX_op_ld_i64); op = copy_op(begin_op, op, INDEX_op_ld_i64);
@@ -331,6 +368,13 @@ static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
return op; return op;
} }
static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
{
op = copy_op(begin_op, op, INDEX_op_mul_i32);
op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
return op;
}
static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{ {
if (UINTPTR_MAX == UINT32_MAX) { if (UINTPTR_MAX == UINT32_MAX) {
@@ -396,18 +440,19 @@ static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op, TCGOp *begin_op, TCGOp *op,
int *unused) int *unused)
{ {
/* const_ptr */ char *ptr = cb->inline_insn.entry.score->data->data;
op = copy_const_ptr(&begin_op, op, cb->userp); size_t elem_size = g_array_get_element_size(
cb->inline_insn.entry.score->data);
size_t offset = cb->inline_insn.entry.offset;
/* ld_i64 */ op = copy_ld_i32(&begin_op, op);
op = copy_mul_i32(&begin_op, op, elem_size);
op = copy_ext_i32_ptr(&begin_op, op);
op = copy_const_ptr(&begin_op, op, ptr + offset);
op = copy_add_ptr(&begin_op, op);
op = copy_ld_i64(&begin_op, op); op = copy_ld_i64(&begin_op, op);
/* add_i64 */
op = copy_add_i64(&begin_op, op, cb->inline_insn.imm); op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
/* st_i64 */
op = copy_st_i64(&begin_op, op); op = copy_st_i64(&begin_op, op);
return op; return op;
} }

View file

@@ -641,7 +641,7 @@ static abi_long do_bsd_readlink(CPUArchState *env, abi_long arg1,
} }
if (strcmp(p1, "/proc/curproc/file") == 0) { if (strcmp(p1, "/proc/curproc/file") == 0) {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque; TaskState *ts = get_task_state(cpu);
strncpy(p2, ts->bprm->fullpath, arg3); strncpy(p2, ts->bprm->fullpath, arg3);
ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3); ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3);
} else { } else {

View file

@@ -208,7 +208,7 @@ static inline abi_long do_freebsd_fork(void *cpu_env)
*/ */
set_second_rval(cpu_env, child_flag); set_second_rval(cpu_env, child_flag);
fork_end(child_flag); fork_end(ret);
return ret; return ret;
} }
@ -252,7 +252,7 @@ static inline abi_long do_freebsd_rfork(void *cpu_env, abi_long flags)
* value: 0 for parent process, 1 for child process. * value: 0 for parent process, 1 for child process.
*/ */
set_second_rval(cpu_env, child_flag); set_second_rval(cpu_env, child_flag);
fork_end(child_flag); fork_end(ret);
return ret; return ret;
@ -285,7 +285,7 @@ static inline abi_long do_freebsd_pdfork(void *cpu_env, abi_ulong target_fdp,
* value: 0 for parent process, 1 for child process. * value: 0 for parent process, 1 for child process.
*/ */
set_second_rval(cpu_env, child_flag); set_second_rval(cpu_env, child_flag);
fork_end(child_flag); fork_end(ret);
return ret; return ret;
} }

View file

@@ -113,10 +113,13 @@ void fork_start(void)
start_exclusive(); start_exclusive();
cpu_list_lock(); cpu_list_lock();
mmap_fork_start(); mmap_fork_start();
gdbserver_fork_start();
} }
void fork_end(int child) void fork_end(pid_t pid)
{ {
bool child = pid == 0;
if (child) { if (child) {
CPUState *cpu, *next_cpu; CPUState *cpu, *next_cpu;
/* /*
@ -134,10 +137,12 @@ void fork_end(int child)
* state, so we don't need to end_exclusive() here. * state, so we don't need to end_exclusive() here.
*/ */
qemu_init_cpu_list(); qemu_init_cpu_list();
gdbserver_fork(thread_cpu); get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
gdbserver_fork_end(thread_cpu, pid);
} else { } else {
mmap_fork_end(child); mmap_fork_end(child);
cpu_list_unlock(); cpu_list_unlock();
gdbserver_fork_end(thread_cpu, pid);
end_exclusive(); end_exclusive();
} }
} }

View file

@@ -117,6 +117,11 @@ typedef struct TaskState {
struct target_sigaltstack sigaltstack_used; struct target_sigaltstack sigaltstack_used;
} __attribute__((aligned(16))) TaskState; } __attribute__((aligned(16))) TaskState;
static inline TaskState *get_task_state(CPUState *cs)
{
return cs->opaque;
}
void stop_all_tasks(void); void stop_all_tasks(void);
extern const char *interp_prefix; extern const char *interp_prefix;
extern const char *qemu_uname_release; extern const char *qemu_uname_release;
@ -187,7 +192,7 @@ void cpu_loop(CPUArchState *env);
char *target_strerror(int err); char *target_strerror(int err);
int get_osversion(void); int get_osversion(void);
void fork_start(void); void fork_start(void);
void fork_end(int child); void fork_end(pid_t pid);
#include "qemu/log.h" #include "qemu/log.h"

View file

@ -319,7 +319,7 @@ void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
int block_signals(void) int block_signals(void)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
sigset_t set; sigset_t set;
/* /*
@ -359,7 +359,7 @@ void dump_core_and_abort(int target_sig)
{ {
CPUState *cpu = thread_cpu; CPUState *cpu = thread_cpu;
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
int core_dumped = 0; int core_dumped = 0;
int host_sig; int host_sig;
struct sigaction act; struct sigaction act;
@ -421,7 +421,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info) target_siginfo_t *info)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig); trace_user_queue_signal(env, sig);
@ -476,7 +476,7 @@ void force_sig_fault(int sig, int code, abi_ulong addr)
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{ {
CPUState *cpu = thread_cpu; CPUState *cpu = thread_cpu;
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo; target_siginfo_t tinfo;
ucontext_t *uc = puc; ucontext_t *uc = puc;
struct emulated_sigtable *k; struct emulated_sigtable *k;
@ -585,7 +585,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
/* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */ /* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
int ret; int ret;
target_stack_t oss; target_stack_t oss;
@ -714,7 +714,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
static inline abi_ulong get_sigframe(struct target_sigaction *ka, static inline abi_ulong get_sigframe(struct target_sigaction *ka,
CPUArchState *env, size_t frame_size) CPUArchState *env, size_t frame_size)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
abi_ulong sp; abi_ulong sp;
/* Use default user stack */ /* Use default user stack */
@ -789,7 +789,7 @@ static int reset_signal_mask(target_ucontext_t *ucontext)
int i; int i;
sigset_t blocked; sigset_t blocked;
target_sigset_t target_set; target_sigset_t target_set;
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
for (i = 0; i < TARGET_NSIG_WORDS; i++) { for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]); __get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]);
@ -839,7 +839,7 @@ badframe:
void signal_init(void) void signal_init(void)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
struct sigaction act; struct sigaction act;
struct sigaction oact; struct sigaction oact;
int i; int i;
@ -878,7 +878,7 @@ static void handle_pending_signal(CPUArchState *env, int sig,
struct emulated_sigtable *k) struct emulated_sigtable *k)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct target_sigaction *sa; struct target_sigaction *sa;
int code; int code;
sigset_t set; sigset_t set;
@ -967,7 +967,7 @@ void process_pending_signals(CPUArchState *env)
int sig; int sig;
sigset_t *blocked_set, set; sigset_t *blocked_set, set;
struct emulated_sigtable *k; struct emulated_sigtable *k;
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
while (qatomic_read(&ts->signal_pending)) { while (qatomic_read(&ts->signal_pending)) {
sigfillset(&set); sigfillset(&set);

View file

@@ -34,8 +34,8 @@ static guint64 limit = 20;
*/ */
typedef struct { typedef struct {
uint64_t start_addr; uint64_t start_addr;
uint64_t exec_count; struct qemu_plugin_scoreboard *exec_count;
int trans_count; int trans_count;
unsigned long insns; unsigned long insns;
} ExecCount; } ExecCount;
@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
{ {
ExecCount *ea = (ExecCount *) a; ExecCount *ea = (ExecCount *) a;
ExecCount *eb = (ExecCount *) b; ExecCount *eb = (ExecCount *) b;
return ea->exec_count > eb->exec_count ? -1 : 1; uint64_t count_a =
qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count));
uint64_t count_b =
qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count));
return count_a > count_b ? -1 : 1;
}
static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
{
ExecCount *cnt = value;
qemu_plugin_scoreboard_free(cnt->exec_count);
} }
static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_exit(qemu_plugin_id_t id, void *p)
@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
GList *counts, *it; GList *counts, *it;
int i; int i;
g_mutex_lock(&lock);
g_string_append_printf(report, "%d entries in the hash table\n", g_string_append_printf(report, "%d entries in the hash table\n",
g_hash_table_size(hotblocks)); g_hash_table_size(hotblocks));
counts = g_hash_table_get_values(hotblocks); counts = g_hash_table_get_values(hotblocks);
@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
for (i = 0; i < limit && it->next; i++, it = it->next) { for (i = 0; i < limit && it->next; i++, it = it->next) {
ExecCount *rec = (ExecCount *) it->data; ExecCount *rec = (ExecCount *) it->data;
g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n", g_string_append_printf(
rec->start_addr, rec->trans_count, report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
rec->insns, rec->exec_count); rec->start_addr, rec->trans_count,
rec->insns,
qemu_plugin_u64_sum(
qemu_plugin_scoreboard_u64(rec->exec_count)));
} }
g_list_free(it); g_list_free(it);
} }
g_mutex_unlock(&lock);
qemu_plugin_outs(report->str); qemu_plugin_outs(report->str);
g_hash_table_foreach(hotblocks, exec_count_free, NULL);
g_hash_table_destroy(hotblocks);
} }
static void plugin_init(void) static void plugin_init(void)
@ -82,15 +96,9 @@ static void plugin_init(void)
static void vcpu_tb_exec(unsigned int cpu_index, void *udata) static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{ {
ExecCount *cnt; ExecCount *cnt = (ExecCount *)udata;
uint64_t hash = (uint64_t) udata; qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
cpu_index, 1);
g_mutex_lock(&lock);
cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
/* should always succeed */
g_assert(cnt);
cnt->exec_count++;
g_mutex_unlock(&lock);
} }
/* /*
@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
cnt->start_addr = pc; cnt->start_addr = pc;
cnt->trans_count = 1; cnt->trans_count = 1;
cnt->insns = insns; cnt->insns = insns;
cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt); g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
} }
g_mutex_unlock(&lock); g_mutex_unlock(&lock);
if (do_inline) { if (do_inline) {
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
&cnt->exec_count, 1); tb, QEMU_PLUGIN_INLINE_ADD_U64,
qemu_plugin_scoreboard_u64(cnt->exec_count), 1);
} else { } else {
qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
QEMU_PLUGIN_CB_NO_REGS, QEMU_PLUGIN_CB_NO_REGS,
(void *)hash); (void *)cnt);
} }
} }

View file

@ -43,13 +43,13 @@ typedef struct {
uint32_t mask; uint32_t mask;
uint32_t pattern; uint32_t pattern;
CountType what; CountType what;
uint64_t count; qemu_plugin_u64 count;
} InsnClassExecCount; } InsnClassExecCount;
typedef struct { typedef struct {
char *insn; char *insn;
uint32_t opcode; uint32_t opcode;
uint64_t count; qemu_plugin_u64 count;
InsnClassExecCount *class; InsnClassExecCount *class;
} InsnExecCount; } InsnExecCount;
@ -159,7 +159,9 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
{ {
InsnExecCount *ea = (InsnExecCount *) a; InsnExecCount *ea = (InsnExecCount *) a;
InsnExecCount *eb = (InsnExecCount *) b; InsnExecCount *eb = (InsnExecCount *) b;
return ea->count > eb->count ? -1 : 1; uint64_t count_a = qemu_plugin_u64_sum(ea->count);
uint64_t count_b = qemu_plugin_u64_sum(eb->count);
return count_a > count_b ? -1 : 1;
} }
static void free_record(gpointer data) static void free_record(gpointer data)
@ -167,12 +169,14 @@ static void free_record(gpointer data)
InsnExecCount *rec = (InsnExecCount *) data; InsnExecCount *rec = (InsnExecCount *) data;
g_free(rec->insn); g_free(rec->insn);
g_free(rec); g_free(rec);
qemu_plugin_scoreboard_free(rec->count.score);
} }
static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_exit(qemu_plugin_id_t id, void *p)
{ {
g_autoptr(GString) report = g_string_new("Instruction Classes:\n"); g_autoptr(GString) report = g_string_new("Instruction Classes:\n");
int i; int i;
uint64_t total_count;
GList *counts; GList *counts;
InsnClassExecCount *class = NULL; InsnClassExecCount *class = NULL;
@ -180,11 +184,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
class = &class_table[i]; class = &class_table[i];
switch (class->what) { switch (class->what) {
case COUNT_CLASS: case COUNT_CLASS:
if (class->count || verbose) { total_count = qemu_plugin_u64_sum(class->count);
if (total_count || verbose) {
g_string_append_printf(report, g_string_append_printf(report,
"Class: %-24s\t(%" PRId64 " hits)\n", "Class: %-24s\t(%" PRId64 " hits)\n",
class->class, class->class,
class->count); total_count);
} }
break; break;
case COUNT_INDIVIDUAL: case COUNT_INDIVIDUAL:
@ -212,7 +217,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
"Instr: %-24s\t(%" PRId64 " hits)" "Instr: %-24s\t(%" PRId64 " hits)"
"\t(op=0x%08x/%s)\n", "\t(op=0x%08x/%s)\n",
rec->insn, rec->insn,
rec->count, qemu_plugin_u64_sum(rec->count),
rec->opcode, rec->opcode,
rec->class ? rec->class ?
rec->class->class : "un-categorised"); rec->class->class : "un-categorised");
@ -221,6 +226,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
} }
g_hash_table_destroy(insns); g_hash_table_destroy(insns);
for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
for (int j = 0; j < class_tables[i].table_sz; ++j) {
qemu_plugin_scoreboard_free(class_tables[i].table[j].count.score);
}
}
qemu_plugin_outs(report->str); qemu_plugin_outs(report->str);
} }
@ -232,11 +243,12 @@ static void plugin_init(void)
static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{ {
uint64_t *count = (uint64_t *) udata; struct qemu_plugin_scoreboard *score = udata;
(*count)++; qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(score), cpu_index, 1);
} }
static uint64_t *find_counter(struct qemu_plugin_insn *insn) static struct qemu_plugin_scoreboard *find_counter(
struct qemu_plugin_insn *insn)
{ {
int i; int i;
uint64_t *cnt = NULL; uint64_t *cnt = NULL;
@ -265,7 +277,7 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn)
case COUNT_NONE: case COUNT_NONE:
return NULL; return NULL;
case COUNT_CLASS: case COUNT_CLASS:
return &class->count; return class->count.score;
case COUNT_INDIVIDUAL: case COUNT_INDIVIDUAL:
{ {
InsnExecCount *icount; InsnExecCount *icount;
@ -279,13 +291,16 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn)
icount->opcode = opcode; icount->opcode = opcode;
icount->insn = qemu_plugin_insn_disas(insn); icount->insn = qemu_plugin_insn_disas(insn);
icount->class = class; icount->class = class;
struct qemu_plugin_scoreboard *score =
qemu_plugin_scoreboard_new(sizeof(uint64_t));
icount->count = qemu_plugin_scoreboard_u64(score);
g_hash_table_insert(insns, GUINT_TO_POINTER(opcode), g_hash_table_insert(insns, GUINT_TO_POINTER(opcode),
(gpointer) icount); (gpointer) icount);
} }
g_mutex_unlock(&lock); g_mutex_unlock(&lock);
return &icount->count; return icount->count.score;
} }
default: default:
g_assert_not_reached(); g_assert_not_reached();
@ -300,14 +315,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
size_t i; size_t i;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
uint64_t *cnt;
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
cnt = find_counter(insn); struct qemu_plugin_scoreboard *cnt = find_counter(insn);
if (cnt) { if (cnt) {
if (do_inline) { if (do_inline) {
qemu_plugin_register_vcpu_insn_exec_inline( qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
insn, QEMU_PLUGIN_INLINE_ADD_U64, cnt, 1); insn, QEMU_PLUGIN_INLINE_ADD_U64,
qemu_plugin_scoreboard_u64(cnt), 1);
} else { } else {
qemu_plugin_register_vcpu_insn_exec_cb( qemu_plugin_register_vcpu_insn_exec_cb(
insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, cnt); insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, cnt);
@ -322,6 +337,14 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
{ {
int i; int i;
for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
for (int j = 0; j < class_tables[i].table_sz; ++j) {
struct qemu_plugin_scoreboard *score =
qemu_plugin_scoreboard_new(sizeof(uint64_t));
class_tables[i].table[j].count = qemu_plugin_scoreboard_u64(score);
}
}
/* Select a class table appropriate to the guest architecture */ /* Select a class table appropriate to the guest architecture */
for (i = 0; i < ARRAY_SIZE(class_tables); i++) { for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
ClassSelector *entry = &class_tables[i]; ClassSelector *entry = &class_tables[i];

View file

@@ -299,6 +299,7 @@ void disas(FILE *out, const void *code, size_t size)
s.info.buffer = code; s.info.buffer = code;
s.info.buffer_vma = (uintptr_t)code; s.info.buffer_vma = (uintptr_t)code;
s.info.buffer_length = size; s.info.buffer_length = size;
s.info.show_opcodes = true;
if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) { if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
return; return;

View file

@ -1972,9 +1972,11 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info)
insn = bfd_getb32 (buffer); insn = bfd_getb32 (buffer);
info->fprintf_func(info->stream, " %02x %02x %02x %02x ", if (info->show_opcodes) {
(insn >> 24) & 0xff, (insn >> 16) & 0xff, info->fprintf_func(info->stream, " %02x %02x %02x %02x ",
(insn >> 8) & 0xff, insn & 0xff); (insn >> 24) & 0xff, (insn >> 16) & 0xff,
(insn >> 8) & 0xff, insn & 0xff);
}
for (i = 0; i < NUMOPCODES; ++i) for (i = 0; i < NUMOPCODES; ++i)
{ {

View file

@ -5192,19 +5192,21 @@ print_insn_riscv(bfd_vma memaddr, struct disassemble_info *info, rv_isa isa)
} }
} }
switch (len) { if (info->show_opcodes) {
case 2: switch (len) {
(*info->fprintf_func)(info->stream, INST_FMT_2, inst); case 2:
break; (*info->fprintf_func)(info->stream, INST_FMT_2, inst);
case 4: break;
(*info->fprintf_func)(info->stream, INST_FMT_4, inst); case 4:
break; (*info->fprintf_func)(info->stream, INST_FMT_4, inst);
case 6: break;
(*info->fprintf_func)(info->stream, INST_FMT_6, inst); case 6:
break; (*info->fprintf_func)(info->stream, INST_FMT_6, inst);
default: break;
(*info->fprintf_func)(info->stream, INST_FMT_8, inst); default:
break; (*info->fprintf_func)(info->stream, INST_FMT_8, inst);
break;
}
} }
disasm_inst(buf, sizeof(buf), isa, memaddr, inst, disasm_inst(buf, sizeof(buf), isa, memaddr, inst,

View file

@ -1024,6 +1024,12 @@ static void handle_detach(GArray *params, void *user_ctx)
pid = get_param(params, 0)->val_ul; pid = get_param(params, 0)->val_ul;
} }
#ifdef CONFIG_USER_ONLY
if (gdb_handle_detach_user(pid)) {
return;
}
#endif
process = gdb_get_process(pid); process = gdb_get_process(pid);
gdb_process_breakpoint_remove_all(process); gdb_process_breakpoint_remove_all(process);
process->attached = false; process->attached = false;
@ -1099,6 +1105,7 @@ static void handle_cont_with_sig(GArray *params, void *user_ctx)
static void handle_set_thread(GArray *params, void *user_ctx) static void handle_set_thread(GArray *params, void *user_ctx)
{ {
uint32_t pid, tid;
CPUState *cpu; CPUState *cpu;
if (params->len != 2) { if (params->len != 2) {
@ -1116,8 +1123,14 @@ static void handle_set_thread(GArray *params, void *user_ctx)
return; return;
} }
cpu = gdb_get_cpu(get_param(params, 1)->thread_id.pid, pid = get_param(params, 1)->thread_id.pid;
get_param(params, 1)->thread_id.tid); tid = get_param(params, 1)->thread_id.tid;
#ifdef CONFIG_USER_ONLY
if (gdb_handle_set_thread_user(pid, tid)) {
return;
}
#endif
cpu = gdb_get_cpu(pid, tid);
if (!cpu) { if (!cpu) {
gdb_put_packet("E22"); gdb_put_packet("E22");
return; return;
@ -1655,9 +1668,15 @@ static void handle_query_supported(GArray *params, void *user_ctx)
g_string_append(gdbserver_state.str_buf, ";qXfer:exec-file:read+"); g_string_append(gdbserver_state.str_buf, ";qXfer:exec-file:read+");
#endif #endif
if (params->len && if (params->len) {
strstr(get_param(params, 0)->data, "multiprocess+")) { const char *gdb_supported = get_param(params, 0)->data;
gdbserver_state.multiprocess = true;
if (strstr(gdb_supported, "multiprocess+")) {
gdbserver_state.multiprocess = true;
}
#if defined(CONFIG_USER_ONLY)
gdb_handle_query_supported_user(gdb_supported);
#endif
} }
g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+"); g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+");

View file

@ -196,6 +196,9 @@ void gdb_handle_v_file_pread(GArray *params, void *user_ctx); /* user */
void gdb_handle_v_file_readlink(GArray *params, void *user_ctx); /* user */ void gdb_handle_v_file_readlink(GArray *params, void *user_ctx); /* user */
void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx); /* user */ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx); /* user */
void gdb_handle_set_catch_syscalls(GArray *params, void *user_ctx); /* user */ void gdb_handle_set_catch_syscalls(GArray *params, void *user_ctx); /* user */
void gdb_handle_query_supported_user(const char *gdb_supported); /* user */
bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid); /* user */
bool gdb_handle_detach_user(uint32_t pid); /* user */
void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */ void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */

View file

@ -204,7 +204,7 @@ int gdb_target_signal_to_gdb(int sig)
int gdb_get_cpu_index(CPUState *cpu) int gdb_get_cpu_index(CPUState *cpu)
{ {
TaskState *ts = (TaskState *) cpu->opaque; TaskState *ts = get_task_state(cpu);
return ts ? ts->ts_tid : -1; return ts ? ts->ts_tid : -1;
} }
@ -399,7 +399,7 @@ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx)
return; return;
} }
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
if (!ts || !ts->bprm || !ts->bprm->filename) { if (!ts || !ts->bprm || !ts->bprm->filename) {
gdb_put_packet("E00"); gdb_put_packet("E00");
return; return;

View file

@ -25,6 +25,61 @@
#define GDB_NR_SYSCALLS 1024 #define GDB_NR_SYSCALLS 1024
typedef unsigned long GDBSyscallsMask[BITS_TO_LONGS(GDB_NR_SYSCALLS)]; typedef unsigned long GDBSyscallsMask[BITS_TO_LONGS(GDB_NR_SYSCALLS)];
/*
* Forked child talks to its parent in order to let GDB enforce the
* follow-fork-mode. This happens inside a start_exclusive() section, so that
* the other threads, which may be forking too, do not interfere. The
* implementation relies on GDB not sending $vCont until it has detached
* either from the parent (follow-fork-mode child) or from the child
* (follow-fork-mode parent).
*
* The parent and the child share the GDB socket; at any given time only one
* of them is allowed to use it, as is reflected in the respective fork_state.
* This is negotiated via the fork_sockets pair as a reaction to $Hg.
*
* Below is a short summary of the possible state transitions:
*
* ENABLED : Terminal state.
* DISABLED : Terminal state.
* ACTIVE : Parent initial state.
* INACTIVE : Child initial state.
* ACTIVE -> DEACTIVATING: On $Hg.
* ACTIVE -> ENABLING : On $D.
* ACTIVE -> DISABLING : On $D.
* ACTIVE -> DISABLED : On communication error.
* DEACTIVATING -> INACTIVE : On gdb_read_byte() return.
* DEACTIVATING -> DISABLED : On communication error.
* INACTIVE -> ACTIVE : On $Hg in the peer.
* INACTIVE -> ENABLE : On $D in the peer.
* INACTIVE -> DISABLE : On $D in the peer.
* INACTIVE -> DISABLED : On communication error.
* ENABLING -> ENABLED : On gdb_read_byte() return.
* ENABLING -> DISABLED : On communication error.
* DISABLING -> DISABLED : On gdb_read_byte() return.
*/
enum GDBForkState {
/* Fully owning the GDB socket. */
GDB_FORK_ENABLED,
/* Working with the GDB socket; the peer is inactive. */
GDB_FORK_ACTIVE,
/* Handing off the GDB socket to the peer. */
GDB_FORK_DEACTIVATING,
/* The peer is working with the GDB socket. */
GDB_FORK_INACTIVE,
/* Asking the peer to close its GDB socket fd. */
GDB_FORK_ENABLING,
/* Asking the peer to take over, closing our GDB socket fd. */
GDB_FORK_DISABLING,
/* The peer has taken over, our GDB socket fd is closed. */
GDB_FORK_DISABLED,
};
enum GDBForkMessage {
GDB_FORK_ACTIVATE = 'a',
GDB_FORK_ENABLE = 'e',
GDB_FORK_DISABLE = 'd',
};
/* User-mode specific state */ /* User-mode specific state */
typedef struct { typedef struct {
int fd; int fd;
@ -36,6 +91,10 @@ typedef struct {
*/ */
bool catch_all_syscalls; bool catch_all_syscalls;
GDBSyscallsMask catch_syscalls_mask; GDBSyscallsMask catch_syscalls_mask;
bool fork_events;
enum GDBForkState fork_state;
int fork_sockets[2];
pid_t fork_peer_pid, fork_peer_tid;
} GDBUserState; } GDBUserState;
static GDBUserState gdbserver_user_state; static GDBUserState gdbserver_user_state;
@ -356,16 +415,193 @@ int gdbserver_start(const char *port_or_path)
return -1; return -1;
} }
/* Disable gdb stub for child processes. */ void gdbserver_fork_start(void)
void gdbserver_fork(CPUState *cpu)
{ {
if (!gdbserver_state.init || gdbserver_user_state.fd < 0) { if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
return; return;
} }
if (!gdbserver_user_state.fork_events ||
qemu_socketpair(AF_UNIX, SOCK_STREAM, 0,
gdbserver_user_state.fork_sockets) < 0) {
gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
return;
}
gdbserver_user_state.fork_state = GDB_FORK_INACTIVE;
gdbserver_user_state.fork_peer_pid = getpid();
gdbserver_user_state.fork_peer_tid = qemu_get_thread_id();
}
/*
 * Drop the GDB connection for this process: close the socket and remove
 * every per-CPU debugging artifact (breakpoints, single-stepping), then
 * flush translated code so stale debug instrumentation is regenerated.
 */
static void disable_gdbstub(CPUState *thread_cpu)
{
    CPUState *cpu;

    close(gdbserver_user_state.fd);
    gdbserver_user_state.fd = -1;
    CPU_FOREACH(cpu) {
        cpu_breakpoint_remove_all(cpu, BP_GDB);
        /* no cpu_watchpoint_remove_all for user-mode */
        cpu_single_step(cpu, 0);
    }
    tb_flush(thread_cpu);
}
/*
 * gdbserver_fork_end() - negotiate GDB socket ownership after fork().
 * @cpu: CPU of the calling thread.
 * @pid: fork() result as seen by the caller: 0 in the child, -1 on
 *       failure, otherwise the child's pid (parent side).
 *
 * Runs the follow-fork state machine described above.  On the parent
 * side a fork stop reply is sent to gdb; both sides then loop until
 * they reach a terminal state (ENABLED keeps the stub, DISABLED drops
 * it) or a communication error occurs.
 */
void gdbserver_fork_end(CPUState *cpu, pid_t pid)
{
    char b;
    int fd;

    if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
        return;
    }

    if (pid == -1) {
        /* fork() failed: tear down the socketpair created in fork_start(). */
        if (gdbserver_user_state.fork_state != GDB_FORK_DISABLED) {
            g_assert(gdbserver_user_state.fork_state == GDB_FORK_INACTIVE);
            close(gdbserver_user_state.fork_sockets[0]);
            close(gdbserver_user_state.fork_sockets[1]);
        }
        return;
    }

    if (gdbserver_user_state.fork_state == GDB_FORK_DISABLED) {
        /* Follow-fork is off: the child simply drops the stub. */
        if (pid == 0) {
            disable_gdbstub(cpu);
        }
        return;
    }

    if (pid == 0) {
        /* Child: keep fork_sockets[1] and adopt the gdb process slot. */
        close(gdbserver_user_state.fork_sockets[0]);
        fd = gdbserver_user_state.fork_sockets[1];
        g_assert(gdbserver_state.process_num == 1);
        g_assert(gdbserver_state.processes[0].pid ==
                 gdbserver_user_state.fork_peer_pid);
        g_assert(gdbserver_state.processes[0].attached);
        gdbserver_state.processes[0].pid = getpid();
    } else {
        /* Parent: keep fork_sockets[0]; the peer is now the child. */
        close(gdbserver_user_state.fork_sockets[1]);
        fd = gdbserver_user_state.fork_sockets[0];
        gdbserver_user_state.fork_state = GDB_FORK_ACTIVE;
        gdbserver_user_state.fork_peer_pid = pid;
        gdbserver_user_state.fork_peer_tid = pid;

        if (!gdbserver_state.allow_stop_reply) {
            goto fail;
        }
        /* Report the fork event to gdb as a T stop reply. */
        g_string_printf(gdbserver_state.str_buf,
                        "T%02xfork:p%02x.%02x;thread:p%02x.%02x;",
                        gdb_target_signal_to_gdb(gdb_target_sigtrap()),
                        pid, pid, (int)getpid(), qemu_get_thread_id());
        gdb_put_strbuf();
    }

    gdbserver_state.state = RS_IDLE;
    gdbserver_state.allow_stop_reply = false;
    /* NOTE(review): presumably set when gdb resumes us - confirm. */
    gdbserver_user_state.running_state = 0;
    for (;;) {
        switch (gdbserver_user_state.fork_state) {
        case GDB_FORK_ENABLED:
            if (gdbserver_user_state.running_state) {
                /* Terminal state and gdb let us run: done. */
                return;
            }
            QEMU_FALLTHROUGH;
        case GDB_FORK_ACTIVE:
            /* We own the GDB socket: process gdb's commands. */
            if (read(gdbserver_user_state.fd, &b, 1) != 1) {
                goto fail;
            }
            gdb_read_byte(b);
            break;
        case GDB_FORK_DEACTIVATING:
            /* Hand the GDB socket off to the peer. */
            b = GDB_FORK_ACTIVATE;
            if (write(fd, &b, 1) != 1) {
                goto fail;
            }
            gdbserver_user_state.fork_state = GDB_FORK_INACTIVE;
            break;
        case GDB_FORK_INACTIVE:
            /* Wait for the peer to tell us what to become. */
            if (read(fd, &b, 1) != 1) {
                goto fail;
            }
            switch (b) {
            case GDB_FORK_ACTIVATE:
                gdbserver_user_state.fork_state = GDB_FORK_ACTIVE;
                break;
            case GDB_FORK_ENABLE:
                close(fd);
                gdbserver_user_state.fork_state = GDB_FORK_ENABLED;
                break;
            case GDB_FORK_DISABLE:
                gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case GDB_FORK_ENABLING:
            /* We keep the stub: tell the peer to drop its end. */
            b = GDB_FORK_DISABLE;
            if (write(fd, &b, 1) != 1) {
                goto fail;
            }
            close(fd);
            gdbserver_user_state.fork_state = GDB_FORK_ENABLED;
            break;
        case GDB_FORK_DISABLING:
            /* The peer keeps the stub: tell it to take full ownership. */
            b = GDB_FORK_ENABLE;
            if (write(fd, &b, 1) != 1) {
                goto fail;
            }
            gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
            break;
        case GDB_FORK_DISABLED:
            /* Terminal state: drop the stub in this process. */
            close(fd);
            disable_gdbstub(cpu);
            return;
        default:
            g_assert_not_reached();
        }
    }

fail:
    /* Communication error: close the pair fd; the child loses the stub. */
    close(fd);
    if (pid == 0) {
        disable_gdbstub(cpu);
    }
}
/*
 * Handle the user-mode part of qSupported: remember whether gdb asked
 * for fork events, and always advertise our own fork-events support.
 */
void gdb_handle_query_supported_user(const char *gdb_supported)
{
    bool gdb_wants_fork_events = strstr(gdb_supported, "fork-events+") != NULL;

    if (gdb_wants_fork_events) {
        gdbserver_user_state.fork_events = true;
    }
    g_string_append(gdbserver_state.str_buf, ";fork-events+");
}
/*
 * Handle the user-mode part of $Hg: when gdb selects the peer process
 * while we are ACTIVE, start handing the GDB socket over to it.
 *
 * Returns true if the packet was consumed here, false to fall through
 * to the common handling.
 */
bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid)
{
    if (gdbserver_user_state.fork_state != GDB_FORK_ACTIVE) {
        return false;
    }
    if (pid != gdbserver_user_state.fork_peer_pid ||
        tid != gdbserver_user_state.fork_peer_tid) {
        return false;
    }
    gdbserver_user_state.fork_state = GDB_FORK_DEACTIVATING;
    gdb_put_packet("OK");
    return true;
}
/*
 * Handle the user-mode part of $D while follow-fork negotiation is in
 * progress.  Detaching from the peer means we keep the stub (ENABLING);
 * detaching from ourselves hands it to the peer (DISABLING).
 *
 * Returns true if the packet was consumed here, false to fall through
 * to the common handling.
 */
bool gdb_handle_detach_user(uint32_t pid)
{
    bool detach_from_peer;

    if (gdbserver_user_state.fork_state != GDB_FORK_ACTIVE) {
        return false;
    }
    detach_from_peer = pid == gdbserver_user_state.fork_peer_pid;
    if (detach_from_peer || pid == getpid()) {
        gdbserver_user_state.fork_state =
            detach_from_peer ? GDB_FORK_ENABLING : GDB_FORK_DISABLING;
        gdb_put_packet("OK");
        return true;
    }
    return false;
}
/* /*

View file

@ -396,6 +396,14 @@ typedef struct disassemble_info {
/* Command line options specific to the target disassembler. */ /* Command line options specific to the target disassembler. */
char * disassembler_options; char * disassembler_options;
/*
* When true instruct the disassembler it may preface the
* disassembly with the opcodes values if it wants to. This is
* mainly for the benefit of the plugin interface which doesn't want
* that.
*/
bool show_opcodes;
/* Field intended to be used by targets in any way they deem suitable. */ /* Field intended to be used by targets in any way they deem suitable. */
void *target_info; void *target_info;

View file

@ -46,10 +46,16 @@ static inline int gdb_handlesig(CPUState *cpu, int sig)
void gdb_signalled(CPUArchState *as, int sig); void gdb_signalled(CPUArchState *as, int sig);
/** /**
* gdbserver_fork() - disable gdb stub for child processes. * gdbserver_fork_start() - inform gdb of the upcoming fork()
* @cs: CPU
*/ */
void gdbserver_fork(CPUState *cs); void gdbserver_fork_start(void);
/**
* gdbserver_fork_end() - inform gdb of the completed fork()
* @cs: CPU
* @pid: 0 if in child process, -1 if fork failed, child process pid otherwise
*/
void gdbserver_fork_end(CPUState *cs, pid_t pid);
/** /**
* gdb_syscall_entry() - inform gdb of syscall entry and yield control to it * gdb_syscall_entry() - inform gdb of syscall entry and yield control to it

View file

@ -92,6 +92,7 @@ struct qemu_plugin_dyn_cb {
/* fields specific to each dyn_cb type go here */ /* fields specific to each dyn_cb type go here */
union { union {
struct { struct {
qemu_plugin_u64 entry;
enum qemu_plugin_op op; enum qemu_plugin_op op;
uint64_t imm; uint64_t imm;
} inline_insn; } inline_insn;
@ -112,6 +113,12 @@ struct qemu_plugin_insn {
bool mem_only; bool mem_only;
}; };
/* A scoreboard is an array of values, indexed by vcpu_index */
struct qemu_plugin_scoreboard {
GArray *data;
QLIST_ENTRY(qemu_plugin_scoreboard) entry;
};
/* /*
* qemu_plugin_insn allocate and cleanup functions. We don't expect to * qemu_plugin_insn allocate and cleanup functions. We don't expect to
* cleanup many of these structures. They are reused for each fresh * cleanup many of these structures. They are reused for each fresh

View file

@ -52,7 +52,11 @@ typedef uint64_t qemu_plugin_id_t;
* The plugins export the API they were built against by exposing the * The plugins export the API they were built against by exposing the
* symbol qemu_plugin_version which can be checked. * symbol qemu_plugin_version which can be checked.
* *
* version 2: removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus * version 2:
* - removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
* - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
* Those functions are replaced by *_per_vcpu variants, which guarantee
* thread-safety for operations.
*/ */
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version; extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
@ -222,6 +226,19 @@ void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
struct qemu_plugin_tb; struct qemu_plugin_tb;
/** struct qemu_plugin_insn - Opaque handle for a translated instruction */ /** struct qemu_plugin_insn - Opaque handle for a translated instruction */
struct qemu_plugin_insn; struct qemu_plugin_insn;
/** struct qemu_plugin_scoreboard - Opaque handle for a scoreboard */
struct qemu_plugin_scoreboard;
/**
* typedef qemu_plugin_u64 - uint64_t member of an entry in a scoreboard
*
* This type provides access to a specific uint64_t member in one given entry,
* located at a specified offset. Inline operations expect this as their entry.
*/
typedef struct {
    /* Scoreboard the entry belongs to. */
    struct qemu_plugin_scoreboard *score;
    /* Byte offset of the uint64_t member within one scoreboard entry. */
    size_t offset;
} qemu_plugin_u64;
/** /**
* enum qemu_plugin_cb_flags - type of callback * enum qemu_plugin_cb_flags - type of callback
@ -297,23 +314,20 @@ enum qemu_plugin_op {
}; };
/** /**
* qemu_plugin_register_vcpu_tb_exec_inline() - execution inline op * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
* @tb: the opaque qemu_plugin_tb handle for the translation * @tb: the opaque qemu_plugin_tb handle for the translation
* @op: the type of qemu_plugin_op (e.g. ADD_U64) * @op: the type of qemu_plugin_op (e.g. ADD_U64)
* @ptr: the target memory location for the op * @entry: entry to run op
* @imm: the op data (e.g. 1) * @imm: the op data (e.g. 1)
* *
* Insert an inline op to every time a translated unit executes. * Insert an inline op on a given scoreboard entry.
* Useful if you just want to increment a single counter somewhere in
* memory.
*
* Note: ops are not atomic so in multi-threaded/multi-smp situations
* you will get inexact results.
*/ */
QEMU_PLUGIN_API QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
enum qemu_plugin_op op, struct qemu_plugin_tb *tb,
void *ptr, uint64_t imm); enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
/** /**
* qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb * qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb
@ -331,19 +345,20 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *userdata); void *userdata);
/** /**
* qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
* @insn: the opaque qemu_plugin_insn handle for an instruction * @insn: the opaque qemu_plugin_insn handle for an instruction
* @op: the type of qemu_plugin_op (e.g. ADD_U64) * @op: the type of qemu_plugin_op (e.g. ADD_U64)
* @ptr: the target memory location for the op * @entry: entry to run op
* @imm: the op data (e.g. 1) * @imm: the op data (e.g. 1)
* *
* Insert an inline op to every time an instruction executes. Useful * Insert an inline op to every time an instruction executes.
* if you just want to increment a single counter somewhere in memory.
*/ */
QEMU_PLUGIN_API QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
enum qemu_plugin_op op, struct qemu_plugin_insn *insn,
void *ptr, uint64_t imm); enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
/** /**
* qemu_plugin_tb_n_insns() - query helper for number of insns in TB * qemu_plugin_tb_n_insns() - query helper for number of insns in TB
@ -553,24 +568,23 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
void *userdata); void *userdata);
/** /**
* qemu_plugin_register_vcpu_mem_inline() - register an inline op to any memory access * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
* @insn: handle for instruction to instrument * @insn: handle for instruction to instrument
* @rw: apply to reads, writes or both * @rw: apply to reads, writes or both
* @op: the op, of type qemu_plugin_op * @op: the op, of type qemu_plugin_op
* @ptr: pointer memory for the op * @entry: entry to run op
* @imm: immediate data for @op * @imm: immediate data for @op
* *
* This registers a inline op every memory access generated by the * This registers a inline op every memory access generated by the
* instruction. This provides for a lightweight but not thread-safe * instruction.
* way of counting the number of operations done.
*/ */
QEMU_PLUGIN_API QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
enum qemu_plugin_mem_rw rw, struct qemu_plugin_insn *insn,
enum qemu_plugin_op op, void *ptr, enum qemu_plugin_mem_rw rw,
uint64_t imm); enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
typedef void typedef void
(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index, (*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index,
@ -752,5 +766,75 @@ QEMU_PLUGIN_API
int qemu_plugin_read_register(struct qemu_plugin_register *handle, int qemu_plugin_read_register(struct qemu_plugin_register *handle,
GByteArray *buf); GByteArray *buf);
/**
* qemu_plugin_scoreboard_new() - alloc a new scoreboard
*
* @element_size: size (in bytes) for one entry
*
* Returns a pointer to a new scoreboard. It must be freed using
* qemu_plugin_scoreboard_free.
*/
QEMU_PLUGIN_API
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size);
/**
* qemu_plugin_scoreboard_free() - free a scoreboard
* @score: scoreboard to free
*/
QEMU_PLUGIN_API
void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
/**
* qemu_plugin_scoreboard_find() - get pointer to an entry of a scoreboard
* @score: scoreboard to query
* @vcpu_index: entry index
*
* Returns the address of the scoreboard entry matching a given vcpu_index.
* Note that this address may change if the scoreboard is resized.
*/
QEMU_PLUGIN_API
void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
unsigned int vcpu_index);
/*
 * Macros to define a qemu_plugin_u64: build the handle for the uint64_t
 * at the start of an entry, or for a named uint64_t member of a struct
 * entry.  Arguments and expansions are parenthesized so the macros stay
 * safe inside larger expressions (CERT PRE01-C / PRE02-C).
 */
#define qemu_plugin_scoreboard_u64(score) \
    ((qemu_plugin_u64) {(score), 0})
#define qemu_plugin_scoreboard_u64_in_struct(score, type, member) \
    ((qemu_plugin_u64) {(score), offsetof(type, member)})
/**
* qemu_plugin_u64_add() - add a value to a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
* @added: value to add
*/
QEMU_PLUGIN_API
void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t added);
/**
* qemu_plugin_u64_get() - get value of a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
*/
QEMU_PLUGIN_API
uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, unsigned int vcpu_index);
/**
* qemu_plugin_u64_set() - set value of a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
* @val: new value
*/
QEMU_PLUGIN_API
void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t val);
/**
* qemu_plugin_u64_sum() - return sum of all vcpu entries in a scoreboard
* @entry: entry to sum
*/
QEMU_PLUGIN_API
uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry);
#endif /* QEMU_QEMU_PLUGIN_H */ #endif /* QEMU_QEMU_PLUGIN_H */

View file

@ -134,7 +134,7 @@ extern char safe_syscall_start[];
extern char safe_syscall_end[]; extern char safe_syscall_end[];
#define safe_syscall(...) \ #define safe_syscall(...) \
safe_syscall_base(&((TaskState *)thread_cpu->opaque)->signal_pending, \ safe_syscall_base(&get_task_state(thread_cpu)->signal_pending, \
__VA_ARGS__) __VA_ARGS__)
#endif #endif

View file

@ -189,7 +189,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
ARMCPU *cpu = env_archcpu(env); ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
struct image_info *info = ts->info; struct image_info *info = ts->info;
int i; int i;

View file

@ -263,7 +263,7 @@ static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{ {
TaskState *ts = env_cpu(env)->opaque; TaskState *ts = get_task_state(env_cpu(env));
int rc = EmulateAll(opcode, &ts->fpa, env); int rc = EmulateAll(opcode, &ts->fpa, env);
int raise, enabled; int raise, enabled;
@ -514,7 +514,7 @@ void cpu_loop(CPUARMState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info; struct image_info *info = ts->info;
int i; int i;

View file

@ -177,7 +177,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig,
abi_ulong handler = 0; abi_ulong handler = 0;
abi_ulong handler_fdpic_GOT = 0; abi_ulong handler_fdpic_GOT = 0;
abi_ulong retcode; abi_ulong retcode;
bool is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); bool is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO; bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO;
bool thumb; bool thumb;

View file

@ -72,7 +72,7 @@ void cpu_loop(CPUCRISState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info; struct image_info *info = ts->info;
env->regs[0] = regs->r0; env->regs[0] = regs->r0;

View file

@ -4404,7 +4404,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
static int elf_core_dump(int signr, const CPUArchState *env) static int elf_core_dump(int signr, const CPUArchState *env)
{ {
const CPUState *cpu = env_cpu((CPUArchState *)env); const CPUState *cpu = env_cpu((CPUArchState *)env);
const TaskState *ts = (const TaskState *)cpu->opaque; const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
struct rlimit dumpsize; struct rlimit dumpsize;
CountAndSizeRegions css; CountAndSizeRegions css;
off_t offset, note_offset, data_offset; off_t offset, note_offset, data_offset;

View file

@ -112,7 +112,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
abi_ulong frame_addr, sp, haddr; abi_ulong frame_addr, sp, haddr;
struct target_rt_sigframe *frame; struct target_rt_sigframe *frame;
int i; int i;
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
sp = get_sp_from_cpustate(env); sp = get_sp_from_cpustate(env);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {

View file

@ -89,7 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr) abi_ulong stringp, int push_ptr)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
int n = sizeof(abi_ulong); int n = sizeof(abi_ulong);
abi_ulong envp; abi_ulong envp;
abi_ulong argv; abi_ulong argv;

View file

@ -95,7 +95,7 @@ void cpu_loop(CPUM68KState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info; struct image_info *info = ts->info;
env->pc = regs->pc; env->pc = regs->pc;

View file

@ -37,7 +37,7 @@ static inline void cpu_clone_regs_parent(CPUM68KState *env, unsigned flags)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls) static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
ts->tp_value = newtls; ts->tp_value = newtls;
} }

View file

@ -145,10 +145,13 @@ void fork_start(void)
mmap_fork_start(); mmap_fork_start();
cpu_list_lock(); cpu_list_lock();
qemu_plugin_user_prefork_lock(); qemu_plugin_user_prefork_lock();
gdbserver_fork_start();
} }
void fork_end(int child) void fork_end(pid_t pid)
{ {
bool child = pid == 0;
qemu_plugin_user_postfork(child); qemu_plugin_user_postfork(child);
mmap_fork_end(child); mmap_fork_end(child);
if (child) { if (child) {
@ -161,10 +164,11 @@ void fork_end(int child)
} }
} }
qemu_init_cpu_list(); qemu_init_cpu_list();
gdbserver_fork(thread_cpu); get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
} else { } else {
cpu_list_unlock(); cpu_list_unlock();
} }
gdbserver_fork_end(thread_cpu, pid);
/* /*
* qemu_init_cpu_list() reinitialized the child exclusive state, but we * qemu_init_cpu_list() reinitialized the child exclusive state, but we
* also need to keep current_cpu consistent, so call end_exclusive() for * also need to keep current_cpu consistent, so call end_exclusive() for

View file

@ -214,7 +214,7 @@ done_syscall:
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info; struct image_info *info = ts->info;
int i; int i;

View file

@ -486,7 +486,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
int i, err = 0; int i, err = 0;
#if defined(TARGET_PPC64) #if defined(TARGET_PPC64)
struct target_sigcontext *sc = 0; struct target_sigcontext *sc = 0;
struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; struct image_info *image = get_task_state(thread_cpu)->info;
#endif #endif
rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
@ -673,7 +673,7 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
} }
if (uold_ctx) { if (uold_ctx) {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) { if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
return -TARGET_EFAULT; return -TARGET_EFAULT;

View file

@ -162,6 +162,11 @@ typedef struct TaskState {
uint64_t start_boottime; uint64_t start_boottime;
} TaskState; } TaskState;
static inline TaskState *get_task_state(CPUState *cs)
{
return cs->opaque;
}
abi_long do_brk(abi_ulong new_brk); abi_long do_brk(abi_ulong new_brk);
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
int flags, mode_t mode, bool safe); int flags, mode_t mode, bool safe);

View file

@ -97,7 +97,7 @@ void cpu_loop(CPURISCVState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info; struct image_info *info = ts->info;
env->pc = regs->sepc; env->pc = regs->sepc;

View file

@ -113,7 +113,7 @@ int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
static inline void finish_sigsuspend_mask(int ret) static inline void finish_sigsuspend_mask(int ret)
{ {
if (ret != -QEMU_ERESTARTSYS) { if (ret != -QEMU_ERESTARTSYS) {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
ts->in_sigsuspend = 1; ts->in_sigsuspend = 1;
} }
} }

View file

@ -172,7 +172,7 @@ void target_to_host_old_sigset(sigset_t *sigset,
int block_signals(void) int block_signals(void)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
sigset_t set; sigset_t set;
/* It's OK to block everything including SIGSEGV, because we won't /* It's OK to block everything including SIGSEGV, because we won't
@ -194,7 +194,7 @@ int block_signals(void)
*/ */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
if (oldset) { if (oldset) {
*oldset = ts->signal_mask; *oldset = ts->signal_mask;
@ -237,7 +237,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
*/ */
void set_sigmask(const sigset_t *set) void set_sigmask(const sigset_t *set)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
ts->signal_mask = *set; ts->signal_mask = *set;
} }
@ -246,7 +246,7 @@ void set_sigmask(const sigset_t *set)
int on_sig_stack(unsigned long sp) int on_sig_stack(unsigned long sp)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
return (sp - ts->sigaltstack_used.ss_sp return (sp - ts->sigaltstack_used.ss_sp
< ts->sigaltstack_used.ss_size); < ts->sigaltstack_used.ss_size);
@ -254,7 +254,7 @@ int on_sig_stack(unsigned long sp)
int sas_ss_flags(unsigned long sp) int sas_ss_flags(unsigned long sp)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
: on_sig_stack(sp) ? SS_ONSTACK : 0); : on_sig_stack(sp) ? SS_ONSTACK : 0);
@ -265,7 +265,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
/* /*
* This is the X/Open sanctioned signal stack switching. * This is the X/Open sanctioned signal stack switching.
*/ */
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size; return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
@ -275,7 +275,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
void target_save_altstack(target_stack_t *uss, CPUArchState *env) void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
__put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp); __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags); __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
@ -284,7 +284,7 @@ void target_save_altstack(target_stack_t *uss, CPUArchState *env)
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env) abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
size_t minstacksize = TARGET_MINSIGSTKSZ; size_t minstacksize = TARGET_MINSIGSTKSZ;
target_stack_t ss; target_stack_t ss;
@ -571,7 +571,7 @@ static void signal_table_init(void)
void signal_init(void) void signal_init(void)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
struct sigaction act, oact; struct sigaction act, oact;
/* initialize signal conversion tables */ /* initialize signal conversion tables */
@ -730,7 +730,7 @@ static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig) void dump_core_and_abort(CPUArchState *env, int target_sig)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque; TaskState *ts = get_task_state(cpu);
int host_sig, core_dumped = 0; int host_sig, core_dumped = 0;
/* On exit, undo the remapping of SIGABRT. */ /* On exit, undo the remapping of SIGABRT. */
@ -769,7 +769,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info) target_siginfo_t *info)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig); trace_user_queue_signal(env, sig);
@ -954,7 +954,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{ {
CPUState *cpu = thread_cpu; CPUState *cpu = thread_cpu;
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo; target_siginfo_t tinfo;
host_sigcontext *uc = puc; host_sigcontext *uc = puc;
struct emulated_sigtable *k; struct emulated_sigtable *k;
@ -1174,7 +1174,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
sigset_t set; sigset_t set;
target_sigset_t target_old_set; target_sigset_t target_old_set;
struct target_sigaction *sa; struct target_sigaction *sa;
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
trace_user_handle_signal(cpu_env, sig); trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */ /* dequeue signal */
@ -1256,7 +1256,7 @@ void process_pending_signals(CPUArchState *cpu_env)
{ {
CPUState *cpu = env_cpu(cpu_env); CPUState *cpu = env_cpu(cpu_env);
int sig; int sig;
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
sigset_t set; sigset_t set;
sigset_t *blocked_set; sigset_t *blocked_set;
@ -1316,7 +1316,7 @@ void process_pending_signals(CPUArchState *cpu_env)
int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset, int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
target_ulong sigsize) target_ulong sigsize)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque; TaskState *ts = get_task_state(thread_cpu);
sigset_t *host_set = &ts->sigsuspend_mask; sigset_t *host_set = &ts->sigsuspend_mask;
target_sigset_t *target_sigset; target_sigset_t *target_sigset;

View file

@ -6515,7 +6515,7 @@ static void *clone_func(void *arg)
env = info->env; env = info->env;
cpu = env_cpu(env); cpu = env_cpu(env);
thread_cpu = cpu; thread_cpu = cpu;
ts = (TaskState *)cpu->opaque; ts = get_task_state(cpu);
info->tid = sys_gettid(); info->tid = sys_gettid();
task_settid(ts); task_settid(ts);
if (info->child_tidptr) if (info->child_tidptr)
@ -6557,7 +6557,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
flags &= ~(CLONE_VFORK | CLONE_VM); flags &= ~(CLONE_VFORK | CLONE_VM);
if (flags & CLONE_VM) { if (flags & CLONE_VM) {
TaskState *parent_ts = (TaskState *)cpu->opaque; TaskState *parent_ts = get_task_state(cpu);
new_thread_info info; new_thread_info info;
pthread_attr_t attr; pthread_attr_t attr;
@ -6669,7 +6669,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
if (ret == 0) { if (ret == 0) {
/* Child Process. */ /* Child Process. */
cpu_clone_regs_child(env, newsp, flags); cpu_clone_regs_child(env, newsp, flags);
fork_end(1); fork_end(ret);
/* There is a race condition here. The parent process could /* There is a race condition here. The parent process could
theoretically read the TID in the child process before the child theoretically read the TID in the child process before the child
tid is set. This would require using either ptrace tid is set. This would require using either ptrace
@ -6680,7 +6680,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
put_user_u32(sys_gettid(), child_tidptr); put_user_u32(sys_gettid(), child_tidptr);
if (flags & CLONE_PARENT_SETTID) if (flags & CLONE_PARENT_SETTID)
put_user_u32(sys_gettid(), parent_tidptr); put_user_u32(sys_gettid(), parent_tidptr);
ts = (TaskState *)cpu->opaque; ts = get_task_state(cpu);
if (flags & CLONE_SETTLS) if (flags & CLONE_SETTLS)
cpu_set_tls (env, newtls); cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID) if (flags & CLONE_CHILD_CLEARTID)
@ -6700,8 +6700,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
} }
#endif #endif
put_user_u32(pid_fd, parent_tidptr); put_user_u32(pid_fd, parent_tidptr);
} }
fork_end(0); fork_end(ret);
} }
g_assert(!cpu_in_exclusive_context(cpu)); g_assert(!cpu_in_exclusive_context(cpu));
} }
@ -7946,7 +7946,7 @@ int host_to_target_waitstatus(int status)
static int open_self_cmdline(CPUArchState *cpu_env, int fd) static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{ {
CPUState *cpu = env_cpu(cpu_env); CPUState *cpu = env_cpu(cpu_env);
struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; struct linux_binprm *bprm = get_task_state(cpu)->bprm;
int i; int i;
for (i = 0; i < bprm->argc; i++) { for (i = 0; i < bprm->argc; i++) {
@ -8146,7 +8146,7 @@ static int open_self_smaps(CPUArchState *cpu_env, int fd)
static int open_self_stat(CPUArchState *cpu_env, int fd) static int open_self_stat(CPUArchState *cpu_env, int fd)
{ {
CPUState *cpu = env_cpu(cpu_env); CPUState *cpu = env_cpu(cpu_env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
g_autoptr(GString) buf = g_string_new(NULL); g_autoptr(GString) buf = g_string_new(NULL);
int i; int i;
@ -8187,7 +8187,7 @@ static int open_self_stat(CPUArchState *cpu_env, int fd)
static int open_self_auxv(CPUArchState *cpu_env, int fd) static int open_self_auxv(CPUArchState *cpu_env, int fd)
{ {
CPUState *cpu = env_cpu(cpu_env); CPUState *cpu = env_cpu(cpu_env);
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
abi_ulong auxv = ts->info->saved_auxv; abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len; abi_ulong len = ts->info->auxv_len;
char *ptr; char *ptr;
@ -9012,7 +9012,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
pthread_mutex_lock(&clone_lock); pthread_mutex_lock(&clone_lock);
if (CPU_NEXT(first_cpu)) { if (CPU_NEXT(first_cpu)) {
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
if (ts->child_tidptr) { if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr); put_user_u32(0, ts->child_tidptr);
@ -9439,7 +9439,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_pause /* not on alpha */ #ifdef TARGET_NR_pause /* not on alpha */
case TARGET_NR_pause: case TARGET_NR_pause:
if (!block_signals()) { if (!block_signals()) {
sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); sigsuspend(&get_task_state(cpu)->signal_mask);
} }
return -TARGET_EINTR; return -TARGET_EINTR;
#endif #endif
@ -10005,7 +10005,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
sigset_t *set; sigset_t *set;
#if defined(TARGET_ALPHA) #if defined(TARGET_ALPHA)
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
/* target_to_host_old_sigset will bswap back */ /* target_to_host_old_sigset will bswap back */
abi_ulong mask = tswapal(arg1); abi_ulong mask = tswapal(arg1);
set = &ts->sigsuspend_mask; set = &ts->sigsuspend_mask;
@ -10406,7 +10406,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
case TARGET_NR_mprotect: case TARGET_NR_mprotect:
arg1 = cpu_untagged_addr(cpu, arg1); arg1 = cpu_untagged_addr(cpu, arg1);
{ {
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
/* Special hack to detect libc making the stack executable. */ /* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN) if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit && arg1 >= ts->info->stack_limit
@ -12537,7 +12537,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return do_set_thread_area(cpu_env, arg1); return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K) #elif defined(TARGET_M68K)
{ {
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
ts->tp_value = arg1; ts->tp_value = arg1;
return 0; return 0;
} }
@ -12551,7 +12551,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return do_get_thread_area(cpu_env, arg1); return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K) #elif defined(TARGET_M68K)
{ {
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
return ts->tp_value; return ts->tp_value;
} }
#else #else
@ -12676,7 +12676,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_set_tid_address) #if defined(TARGET_NR_set_tid_address)
case TARGET_NR_set_tid_address: case TARGET_NR_set_tid_address:
{ {
TaskState *ts = cpu->opaque; TaskState *ts = get_task_state(cpu);
ts->child_tidptr = arg1; ts->child_tidptr = arg1;
/* do not call host set_tid_address() syscall, instead return tid() */ /* do not call host set_tid_address() syscall, instead return tid() */
return get_errno(sys_gettid()); return get_errno(sys_gettid());

View file

@ -71,7 +71,7 @@ const char *target_strerror(int err);
int get_osversion(void); int get_osversion(void);
void init_qemu_uname_release(void); void init_qemu_uname_release(void);
void fork_start(void); void fork_start(void);
void fork_end(int child); void fork_end(pid_t pid);
/** /**
* probe_guest_base: * probe_guest_base:

View file

@ -74,7 +74,7 @@ static inline unsigned int vm_getl(CPUX86State *env,
void save_v86_state(CPUX86State *env) void save_v86_state(CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86; struct target_vm86plus_struct * target_v86;
if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0)) if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
@ -134,7 +134,7 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env) static inline int set_IF(CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
ts->v86flags |= VIF_MASK; ts->v86flags |= VIF_MASK;
if (ts->v86flags & VIP_MASK) { if (ts->v86flags & VIP_MASK) {
@ -147,7 +147,7 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env) static inline void clear_IF(CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
ts->v86flags &= ~VIF_MASK; ts->v86flags &= ~VIF_MASK;
} }
@ -165,7 +165,7 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, eflags, ts->v86mask); set_flags(ts->v86flags, eflags, ts->v86mask);
set_flags(env->eflags, eflags, SAFE_MASK); set_flags(env->eflags, eflags, SAFE_MASK);
@ -179,7 +179,7 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env) static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
set_flags(env->eflags, flags, SAFE_MASK); set_flags(env->eflags, flags, SAFE_MASK);
@ -193,7 +193,7 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env) static inline unsigned int get_vflags(CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
unsigned int flags; unsigned int flags;
flags = env->eflags & RETURN_MASK; flags = env->eflags & RETURN_MASK;
@ -210,7 +210,7 @@ static inline unsigned int get_vflags(CPUX86State *env)
static void do_int(CPUX86State *env, int intno) static void do_int(CPUX86State *env, int intno)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
uint32_t int_addr, segoffs, ssp; uint32_t int_addr, segoffs, ssp;
unsigned int sp; unsigned int sp;
@ -269,7 +269,7 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env) void handle_vm86_fault(CPUX86State *env)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
uint32_t csp, ssp; uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno; unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done; int data32, pref_done;
@ -394,7 +394,7 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr) int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86; struct target_vm86plus_struct * target_v86;
int ret; int ret;

View file

@ -157,7 +157,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
{ {
abi_ulong frame_addr; abi_ulong frame_addr;
struct target_rt_sigframe *frame; struct target_rt_sigframe *frame;
int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); int is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
abi_ulong handler = 0; abi_ulong handler = 0;
abi_ulong handler_fdpic_GOT = 0; abi_ulong handler_fdpic_GOT = 0;
uint32_t ra; uint32_t ra;

View file

@ -101,12 +101,15 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
} }
} }
void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
enum qemu_plugin_op op, struct qemu_plugin_tb *tb,
void *ptr, uint64_t imm) enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm)
{ {
if (!tb->mem_only) { if (!tb->mem_only) {
plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE], 0, op, ptr, imm); plugin_register_inline_op_on_entry(
&tb->cbs[PLUGIN_CB_INLINE], 0, op, entry, imm);
} }
} }
@ -125,13 +128,15 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
} }
} }
void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
enum qemu_plugin_op op, struct qemu_plugin_insn *insn,
void *ptr, uint64_t imm) enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm)
{ {
if (!insn->mem_only) { if (!insn->mem_only) {
plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], plugin_register_inline_op_on_entry(
0, op, ptr, imm); &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], 0, op, entry, imm);
} }
} }
@ -147,16 +152,18 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
void *udata) void *udata)
{ {
plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
cb, flags, rw, udata); cb, flags, rw, udata);
} }
void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
enum qemu_plugin_mem_rw rw, struct qemu_plugin_insn *insn,
enum qemu_plugin_op op, void *ptr, enum qemu_plugin_mem_rw rw,
uint64_t imm) enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm)
{ {
plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], plugin_register_inline_op_on_entry(
rw, op, ptr, imm); &insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], rw, op, entry, imm);
} }
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id, void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
@ -378,7 +385,7 @@ const char *qemu_plugin_path_to_binary(void)
{ {
char *path = NULL; char *path = NULL;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque; TaskState *ts = get_task_state(current_cpu);
path = g_strdup(ts->bprm->filename); path = g_strdup(ts->bprm->filename);
#endif #endif
return path; return path;
@ -388,7 +395,7 @@ uint64_t qemu_plugin_start_code(void)
{ {
uint64_t start = 0; uint64_t start = 0;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque; TaskState *ts = get_task_state(current_cpu);
start = ts->info->start_code; start = ts->info->start_code;
#endif #endif
return start; return start;
@ -398,7 +405,7 @@ uint64_t qemu_plugin_end_code(void)
{ {
uint64_t end = 0; uint64_t end = 0;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque; TaskState *ts = get_task_state(current_cpu);
end = ts->info->end_code; end = ts->info->end_code;
#endif #endif
return end; return end;
@ -408,7 +415,7 @@ uint64_t qemu_plugin_entry_code(void)
{ {
uint64_t entry = 0; uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque; TaskState *ts = get_task_state(current_cpu);
entry = ts->info->entry; entry = ts->info->entry;
#endif #endif
return entry; return entry;
@ -465,3 +472,56 @@ int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg)); return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg));
} }
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
return plugin_scoreboard_new(element_size);
}
void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
plugin_scoreboard_free(score);
}
void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
unsigned int vcpu_index)
{
g_assert(vcpu_index < qemu_plugin_num_vcpus());
/* we can't use g_array_index since entry size is not statically known */
char *base_ptr = score->data->data;
return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}
static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
unsigned int vcpu_index)
{
char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index);
return (uint64_t *)(ptr + entry.offset);
}
void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t added)
{
*plugin_u64_address(entry, vcpu_index) += added;
}
uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
unsigned int vcpu_index)
{
return *plugin_u64_address(entry, vcpu_index);
}
void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t val)
{
*plugin_u64_address(entry, vcpu_index) = val;
}
uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
{
uint64_t total = 0;
for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) {
total += qemu_plugin_u64_get(entry, i);
}
return total;
}

View file

@ -18,6 +18,7 @@
#include "qemu/lockable.h" #include "qemu/lockable.h"
#include "qemu/option.h" #include "qemu/option.h"
#include "qemu/plugin.h" #include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h" #include "qemu/rcu_queue.h"
#include "qemu/xxhash.h" #include "qemu/xxhash.h"
#include "qemu/rcu.h" #include "qemu/rcu.h"
@ -215,6 +216,35 @@ CPUPluginState *qemu_plugin_create_vcpu_state(void)
return g_new0(CPUPluginState, 1); return g_new0(CPUPluginState, 1);
} }
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
return;
}
bool need_realloc = FALSE;
while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
plugin.scoreboard_alloc_size *= 2;
need_realloc = TRUE;
}
if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
/* nothing to do, we just updated sizes for future scoreboards */
return;
}
/* cpus must be stopped, as tb might still use an existing scoreboard. */
start_exclusive();
struct qemu_plugin_scoreboard *score;
QLIST_FOREACH(score, &plugin.scoreboards, entry) {
g_array_set_size(score->data, plugin.scoreboard_alloc_size);
}
/* force all tb to be flushed, as scoreboard pointers were changed. */
tb_flush(cpu);
end_exclusive();
}
void qemu_plugin_vcpu_init_hook(CPUState *cpu) void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{ {
bool success; bool success;
@ -225,6 +255,7 @@ void qemu_plugin_vcpu_init_hook(CPUState *cpu)
success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index, success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
&cpu->cpu_index); &cpu->cpu_index);
g_assert(success); g_assert(success);
plugin_grow_scoreboards__locked(cpu);
qemu_rec_mutex_unlock(&plugin.lock); qemu_rec_mutex_unlock(&plugin.lock);
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT); plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
@ -285,17 +316,19 @@ static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1); return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
} }
void plugin_register_inline_op(GArray **arr, void plugin_register_inline_op_on_entry(GArray **arr,
enum qemu_plugin_mem_rw rw, enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr, enum qemu_plugin_op op,
uint64_t imm) qemu_plugin_u64 entry,
uint64_t imm)
{ {
struct qemu_plugin_dyn_cb *dyn_cb; struct qemu_plugin_dyn_cb *dyn_cb;
dyn_cb = plugin_get_dyn_cb(arr); dyn_cb = plugin_get_dyn_cb(arr);
dyn_cb->userp = ptr; dyn_cb->userp = NULL;
dyn_cb->type = PLUGIN_CB_INLINE; dyn_cb->type = PLUGIN_CB_INLINE;
dyn_cb->rw = rw; dyn_cb->rw = rw;
dyn_cb->inline_insn.entry = entry;
dyn_cb->inline_insn.op = op; dyn_cb->inline_insn.op = op;
dyn_cb->inline_insn.imm = imm; dyn_cb->inline_insn.imm = imm;
} }
@ -443,9 +476,13 @@ void qemu_plugin_flush_cb(void)
plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH); plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
} }
void exec_inline_op(struct qemu_plugin_dyn_cb *cb) void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index)
{ {
uint64_t *val = cb->userp; char *ptr = cb->inline_insn.entry.score->data->data;
size_t elem_size = g_array_get_element_size(
cb->inline_insn.entry.score->data);
size_t offset = cb->inline_insn.entry.offset;
uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);
switch (cb->inline_insn.op) { switch (cb->inline_insn.op) {
case QEMU_PLUGIN_INLINE_ADD_U64: case QEMU_PLUGIN_INLINE_ADD_U64:
@ -478,7 +515,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
vaddr, cb->userp); vaddr, cb->userp);
break; break;
case PLUGIN_CB_INLINE: case PLUGIN_CB_INLINE:
exec_inline_op(cb); exec_inline_op(cb, cpu->cpu_index);
break; break;
default: default:
g_assert_not_reached(); g_assert_not_reached();
@ -578,6 +615,8 @@ static void __attribute__((__constructor__)) plugin_init(void)
qemu_rec_mutex_init(&plugin.lock); qemu_rec_mutex_init(&plugin.lock);
plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal); plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal); plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
QLIST_INIT(&plugin.scoreboards);
plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
QTAILQ_INIT(&plugin.ctxs); QTAILQ_INIT(&plugin.ctxs);
qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16, qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
QHT_MODE_AUTO_RESIZE); QHT_MODE_AUTO_RESIZE);
@ -588,3 +627,27 @@ int plugin_num_vcpus(void)
{ {
return plugin.num_vcpus; return plugin.num_vcpus;
} }
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
struct qemu_plugin_scoreboard *score =
g_malloc0(sizeof(struct qemu_plugin_scoreboard));
score->data = g_array_new(FALSE, TRUE, element_size);
g_array_set_size(score->data, plugin.scoreboard_alloc_size);
qemu_rec_mutex_lock(&plugin.lock);
QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
qemu_rec_mutex_unlock(&plugin.lock);
return score;
}
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
qemu_rec_mutex_lock(&plugin.lock);
QLIST_REMOVE(score, entry);
qemu_rec_mutex_unlock(&plugin.lock);
g_array_free(score->data, TRUE);
g_free(score);
}

View file

@ -31,6 +31,8 @@ struct qemu_plugin_state {
* but with the HT we avoid adding a field to CPUState. * but with the HT we avoid adding a field to CPUState.
*/ */
GHashTable *cpu_ht; GHashTable *cpu_ht;
QLIST_HEAD(, qemu_plugin_scoreboard) scoreboards;
size_t scoreboard_alloc_size;
DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX); DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX);
/* /*
* @lock protects the struct as well as ctx->uninstalling. * @lock protects the struct as well as ctx->uninstalling.
@ -66,10 +68,11 @@ struct qemu_plugin_ctx {
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id); struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id);
void plugin_register_inline_op(GArray **arr, void plugin_register_inline_op_on_entry(GArray **arr,
enum qemu_plugin_mem_rw rw, enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr, enum qemu_plugin_op op,
uint64_t imm); qemu_plugin_u64 entry,
uint64_t imm);
void plugin_reset_uninstall(qemu_plugin_id_t id, void plugin_reset_uninstall(qemu_plugin_id_t id,
qemu_plugin_simple_cb_t cb, qemu_plugin_simple_cb_t cb,
@ -97,8 +100,12 @@ void plugin_register_vcpu_mem_cb(GArray **arr,
enum qemu_plugin_mem_rw rw, enum qemu_plugin_mem_rw rw,
void *udata); void *udata);
void exec_inline_op(struct qemu_plugin_dyn_cb *cb); void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index);
int plugin_num_vcpus(void); int plugin_num_vcpus(void);
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size);
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
#endif /* PLUGIN_H */ #endif /* PLUGIN_H */

View file

@ -27,20 +27,27 @@
qemu_plugin_register_vcpu_idle_cb; qemu_plugin_register_vcpu_idle_cb;
qemu_plugin_register_vcpu_init_cb; qemu_plugin_register_vcpu_init_cb;
qemu_plugin_register_vcpu_insn_exec_cb; qemu_plugin_register_vcpu_insn_exec_cb;
qemu_plugin_register_vcpu_insn_exec_inline; qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu;
qemu_plugin_register_vcpu_mem_cb; qemu_plugin_register_vcpu_mem_cb;
qemu_plugin_register_vcpu_mem_inline; qemu_plugin_register_vcpu_mem_inline_per_vcpu;
qemu_plugin_register_vcpu_resume_cb; qemu_plugin_register_vcpu_resume_cb;
qemu_plugin_register_vcpu_syscall_cb; qemu_plugin_register_vcpu_syscall_cb;
qemu_plugin_register_vcpu_syscall_ret_cb; qemu_plugin_register_vcpu_syscall_ret_cb;
qemu_plugin_register_vcpu_tb_exec_cb; qemu_plugin_register_vcpu_tb_exec_cb;
qemu_plugin_register_vcpu_tb_exec_inline; qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
qemu_plugin_register_vcpu_tb_trans_cb; qemu_plugin_register_vcpu_tb_trans_cb;
qemu_plugin_reset; qemu_plugin_reset;
qemu_plugin_scoreboard_free;
qemu_plugin_scoreboard_find;
qemu_plugin_scoreboard_new;
qemu_plugin_start_code; qemu_plugin_start_code;
qemu_plugin_tb_get_insn; qemu_plugin_tb_get_insn;
qemu_plugin_tb_n_insns; qemu_plugin_tb_n_insns;
qemu_plugin_tb_vaddr; qemu_plugin_tb_vaddr;
qemu_plugin_u64_add;
qemu_plugin_u64_get;
qemu_plugin_u64_set;
qemu_plugin_u64_sum;
qemu_plugin_uninstall; qemu_plugin_uninstall;
qemu_plugin_vcpu_for_each; qemu_plugin_vcpu_for_each;
}; };

View file

@ -214,7 +214,7 @@ static target_ulong syscall_err;
static inline uint32_t get_swi_errno(CPUState *cs) static inline uint32_t get_swi_errno(CPUState *cs)
{ {
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
return ts->swi_errno; return ts->swi_errno;
#else #else
@ -226,7 +226,7 @@ static void common_semi_cb(CPUState *cs, uint64_t ret, int err)
{ {
if (err) { if (err) {
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
ts->swi_errno = err; ts->swi_errno = err;
#else #else
syscall_err = err; syscall_err = err;
@ -586,7 +586,7 @@ void do_common_semihosting(CPUState *cs)
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
const char *cmdline; const char *cmdline;
#else #else
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
#endif #endif
GET_ARG(0); GET_ARG(0);
GET_ARG(1); GET_ARG(1);
@ -664,7 +664,7 @@ void do_common_semihosting(CPUState *cs)
target_ulong retvals[4]; target_ulong retvals[4];
int i; int i;
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque; TaskState *ts = get_task_state(cs);
target_ulong limit; target_ulong limit;
#else #else
LayoutInfo info = common_semi_find_bases(cs); LayoutInfo info = common_semi_find_bases(cs);

View file

@ -120,10 +120,15 @@ static const char *get_csr_name(unsigned num)
csr_names[num] : "Undefined CSR"; csr_names[num] : "Undefined CSR";
} }
#define output(C, INSN, FMT, ...) \ #define output(C, INSN, FMT, ...) \
{ \ { \
(C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT, \ if ((C)->info->show_opcodes) { \
(C)->insn, INSN, ##__VA_ARGS__); \ (C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT,\
(C)->insn, INSN, ##__VA_ARGS__); \
} else { \
(C)->info->fprintf_func((C)->info->stream, "%-9s\t" FMT, \
INSN, ##__VA_ARGS__); \
} \
} }
#include "decode-insns.c.inc" #include "decode-insns.c.inc"

View file

@ -17,27 +17,25 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
typedef struct { typedef struct {
GMutex lock;
int index;
uint64_t bb_count; uint64_t bb_count;
uint64_t insn_count; uint64_t insn_count;
} CPUCount; } CPUCount;
/* Used by the inline & linux-user counts */ static struct qemu_plugin_scoreboard *counts;
static bool do_inline; static qemu_plugin_u64 bb_count;
static CPUCount inline_count; static qemu_plugin_u64 insn_count;
static bool do_inline;
/* Dump running CPU total on idle? */ /* Dump running CPU total on idle? */
static bool idle_report; static bool idle_report;
static GPtrArray *counts;
static int max_cpus;
static void gen_one_cpu_report(CPUCount *count, GString *report) static void gen_one_cpu_report(CPUCount *count, GString *report,
unsigned int cpu_index)
{ {
if (count->bb_count) { if (count->bb_count) {
g_string_append_printf(report, "CPU%d: " g_string_append_printf(report, "CPU%d: "
"bb's: %" PRIu64", insns: %" PRIu64 "\n", "bb's: %" PRIu64", insns: %" PRIu64 "\n",
count->index, cpu_index,
count->bb_count, count->insn_count); count->bb_count, count->insn_count);
} }
} }
@ -46,20 +44,23 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
{ {
g_autoptr(GString) report = g_string_new(""); g_autoptr(GString) report = g_string_new("");
if (do_inline || !max_cpus) { for (int i = 0; i < qemu_plugin_num_vcpus(); ++i) {
g_string_printf(report, "bb's: %" PRIu64", insns: %" PRIu64 "\n", CPUCount *count = qemu_plugin_scoreboard_find(counts, i);
inline_count.bb_count, inline_count.insn_count); gen_one_cpu_report(count, report, i);
} else {
g_ptr_array_foreach(counts, (GFunc) gen_one_cpu_report, report);
} }
g_string_append_printf(report, "Total: "
"bb's: %" PRIu64", insns: %" PRIu64 "\n",
qemu_plugin_u64_sum(bb_count),
qemu_plugin_u64_sum(insn_count));
qemu_plugin_outs(report->str); qemu_plugin_outs(report->str);
qemu_plugin_scoreboard_free(counts);
} }
static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index) static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index)
{ {
CPUCount *count = g_ptr_array_index(counts, cpu_index); CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index);
g_autoptr(GString) report = g_string_new(""); g_autoptr(GString) report = g_string_new("");
gen_one_cpu_report(count, report); gen_one_cpu_report(count, report, cpu_index);
if (report->len > 0) { if (report->len > 0) {
g_string_prepend(report, "Idling "); g_string_prepend(report, "Idling ");
@ -69,14 +70,11 @@ static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index)
static void vcpu_tb_exec(unsigned int cpu_index, void *udata) static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{ {
CPUCount *count = max_cpus ? CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index);
g_ptr_array_index(counts, cpu_index) : &inline_count;
uintptr_t n_insns = (uintptr_t)udata; uintptr_t n_insns = (uintptr_t)udata;
g_mutex_lock(&count->lock);
count->insn_count += n_insns; count->insn_count += n_insns;
count->bb_count++; count->bb_count++;
g_mutex_unlock(&count->lock);
} }
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
@ -84,11 +82,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
size_t n_insns = qemu_plugin_tb_n_insns(tb); size_t n_insns = qemu_plugin_tb_n_insns(tb);
if (do_inline) { if (do_inline) {
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
&inline_count.bb_count, 1); tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count, 1);
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
&inline_count.insn_count, tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, n_insns);
n_insns);
} else { } else {
qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
QEMU_PLUGIN_CB_NO_REGS, QEMU_PLUGIN_CB_NO_REGS,
@ -121,18 +118,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
} }
} }
if (info->system_emulation && !do_inline) { counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
max_cpus = info->system.max_vcpus; bb_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, bb_count);
counts = g_ptr_array_new(); insn_count = qemu_plugin_scoreboard_u64_in_struct(
for (i = 0; i < max_cpus; i++) { counts, CPUCount, insn_count);
CPUCount *count = g_new0(CPUCount, 1);
g_mutex_init(&count->lock);
count->index = i;
g_ptr_array_add(counts, count);
}
} else if (!do_inline) {
g_mutex_init(&inline_count.lock);
}
if (idle_report) { if (idle_report) {
qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle); qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);

186
tests/plugin/inline.c Normal file
View file

@ -0,0 +1,186 @@
/*
* Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
*
* Demonstrates and tests usage of inline ops.
*
* License: GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <qemu-plugin.h>
typedef struct {
uint64_t count_tb;
uint64_t count_tb_inline;
uint64_t count_insn;
uint64_t count_insn_inline;
uint64_t count_mem;
uint64_t count_mem_inline;
} CPUCount;
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 count_tb;
static qemu_plugin_u64 count_tb_inline;
static qemu_plugin_u64 count_insn;
static qemu_plugin_u64 count_insn_inline;
static qemu_plugin_u64 count_mem;
static qemu_plugin_u64 count_mem_inline;
static uint64_t global_count_tb;
static uint64_t global_count_insn;
static uint64_t global_count_mem;
static unsigned int max_cpu_index;
static GMutex tb_lock;
static GMutex insn_lock;
static GMutex mem_lock;
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
static void stats_insn(void)
{
const uint64_t expected = global_count_insn;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_insn_inline);
printf("insn: %" PRIu64 "\n", expected);
printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
static void stats_tb(void)
{
const uint64_t expected = global_count_tb;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_tb_inline);
printf("tb: %" PRIu64 "\n", expected);
printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
static void stats_mem(void)
{
const uint64_t expected = global_count_mem;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_mem);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_mem_inline);
printf("mem: %" PRIu64 "\n", expected);
printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
/*
 * atexit callback: dump the per-vcpu counters, assert the callback and
 * inline counts agree on every vcpu, cross-check the global sums, then
 * release the scoreboard.
 */
static void plugin_exit(qemu_plugin_id_t id, void *udata)
{
    const unsigned int num_cpus = qemu_plugin_num_vcpus();
    /* every vcpu index we saw must lie inside [0, num_cpus) */
    g_assert(num_cpus == max_cpu_index + 1);

    /* unsigned index: avoids signed/unsigned comparison with num_cpus */
    for (unsigned int i = 0; i < num_cpus; ++i) {
        const uint64_t tb = qemu_plugin_u64_get(count_tb, i);
        const uint64_t tb_inline = qemu_plugin_u64_get(count_tb_inline, i);
        const uint64_t insn = qemu_plugin_u64_get(count_insn, i);
        const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i);
        const uint64_t mem = qemu_plugin_u64_get(count_mem, i);
        const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i);
        /* %u matches the unsigned loop index (was %d with an int index) */
        printf("cpu %u: tb (%" PRIu64 ", %" PRIu64 ") | "
               "insn (%" PRIu64 ", %" PRIu64 ") | "
               "mem (%" PRIu64 ", %" PRIu64 ")"
               "\n",
               i, tb, tb_inline, insn, insn_inline, mem, mem_inline);
        g_assert(tb == tb_inline);
        g_assert(insn == insn_inline);
        g_assert(mem == mem_inline);
    }

    stats_tb();
    stats_insn();
    stats_mem();

    qemu_plugin_scoreboard_free(counts);
}
/*
 * Per-tb execution callback: bump this vcpu's scoreboard slot, then
 * update the mutex-protected reference counter and high-water vcpu index.
 */
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
    /* the scoreboard slot is private to this vcpu; no lock needed */
    qemu_plugin_u64_add(count_tb, cpu_index, 1);

    g_mutex_lock(&tb_lock);
    if (cpu_index > max_cpu_index) {
        max_cpu_index = cpu_index;
    }
    global_count_tb++;
    g_mutex_unlock(&tb_lock);
}
/*
 * Per-instruction execution callback: update the mutex-protected
 * reference counter and this vcpu's scoreboard slot.
 */
static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
{
    g_mutex_lock(&insn_lock);
    global_count_insn++;
    g_mutex_unlock(&insn_lock);

    /* scoreboard slot is per-vcpu, so it needs no locking */
    qemu_plugin_u64_add(count_insn, cpu_index, 1);
}
/*
 * Memory-access callback: update the mutex-protected reference counter
 * and this vcpu's scoreboard slot. meminfo/vaddr/userdata are unused.
 */
static void vcpu_mem_access(unsigned int cpu_index,
                            qemu_plugin_meminfo_t info,
                            uint64_t vaddr,
                            void *userdata)
{
    g_mutex_lock(&mem_lock);
    global_count_mem++;
    g_mutex_unlock(&mem_lock);

    /* scoreboard slot is per-vcpu, so it needs no locking */
    qemu_plugin_u64_add(count_mem, cpu_index, 1);
}
/*
 * Translation callback: instrument every tb (and every insn in it) with
 * both a callback and the equivalent inline op, so their counts can be
 * compared at exit.
 */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n_insns = qemu_plugin_tb_n_insns(tb);

    /* tb-level: callback + inline counter pair */
    qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                         QEMU_PLUGIN_CB_NO_REGS, 0);
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);

    for (size_t i = 0; i < n_insns; ++i) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

        /* insn-level: callback + inline counter pair */
        qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, 0);
        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
            insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);

        /* mem-level: callback + inline counter pair, reads and writes */
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_access,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, 0);
        qemu_plugin_register_vcpu_mem_inline_per_vcpu(
            insn, QEMU_PLUGIN_MEM_RW, QEMU_PLUGIN_INLINE_ADD_U64,
            count_mem_inline, 1);
    }
}
/*
 * Plugin entry point: allocate the per-vcpu scoreboard, bind a
 * qemu_plugin_u64 handle to each CPUCount field, and register the
 * translation and exit callbacks. Takes no plugin arguments.
 */
QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));

    /* bind handles in CPUCount declaration order */
    count_tb = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_tb);
    count_tb_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_tb_inline);
    count_insn = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_insn);
    count_insn_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_insn_inline);
    count_mem = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_mem);
    count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_mem_inline);

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

View file

@ -16,25 +16,21 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
#define MAX_CPUS 8 /* lets not go nuts */ static qemu_plugin_u64 insn_count;
typedef struct {
uint64_t insn_count;
} InstructionCount;
static InstructionCount counts[MAX_CPUS];
static uint64_t inline_insn_count;
static bool do_inline; static bool do_inline;
static bool do_size; static bool do_size;
static GArray *sizes; static GArray *sizes;
typedef struct {
uint64_t hits;
uint64_t last_hit;
uint64_t total_delta;
} MatchCount;
typedef struct { typedef struct {
char *match_string; char *match_string;
uint64_t hits[MAX_CPUS]; struct qemu_plugin_scoreboard *counts; /* MatchCount */
uint64_t last_hit[MAX_CPUS];
uint64_t total_delta[MAX_CPUS];
GPtrArray *history[MAX_CPUS];
} Match; } Match;
static GArray *matches; static GArray *matches;
@ -67,41 +63,40 @@ static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{ {
unsigned int i = cpu_index % MAX_CPUS; qemu_plugin_u64_add(insn_count, cpu_index, 1);
InstructionCount *c = &counts[i];
c->insn_count++;
} }
static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata) static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata)
{ {
unsigned int i = cpu_index % MAX_CPUS;
Instruction *insn = (Instruction *) udata; Instruction *insn = (Instruction *) udata;
Match *match = insn->match; Match *insn_match = insn->match;
MatchCount *match = qemu_plugin_scoreboard_find(insn_match->counts,
cpu_index);
g_autoptr(GString) ts = g_string_new(""); g_autoptr(GString) ts = g_string_new("");
insn->hits++; insn->hits++;
g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits", g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits",
insn->vaddr, insn->disas, insn->hits); insn->vaddr, insn->disas, insn->hits);
uint64_t icount = counts[i].insn_count; uint64_t icount = qemu_plugin_u64_get(insn_count, cpu_index);
uint64_t delta = icount - match->last_hit[i]; uint64_t delta = icount - match->last_hit;
match->hits[i]++; match->hits++;
match->total_delta[i] += delta; match->total_delta += delta;
g_string_append_printf(ts, g_string_append_printf(ts,
", %"PRId64" match hits, " " , cpu %u,"
"Δ+%"PRId64 " since last match," " %"PRId64" match hits,"
" Δ+%"PRId64 " since last match,"
" %"PRId64 " avg insns/match\n", " %"PRId64 " avg insns/match\n",
match->hits[i], delta, cpu_index,
match->total_delta[i] / match->hits[i]); match->hits, delta,
match->total_delta / match->hits);
match->last_hit[i] = icount; match->last_hit = icount;
qemu_plugin_outs(ts->str); qemu_plugin_outs(ts->str);
g_ptr_array_add(match->history[i], insn);
} }
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
@ -113,8 +108,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
if (do_inline) { if (do_inline) {
qemu_plugin_register_vcpu_insn_exec_inline( qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
insn, QEMU_PLUGIN_INLINE_ADD_U64, &inline_insn_count, 1); insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
} else { } else {
uint64_t vaddr = qemu_plugin_insn_vaddr(insn); uint64_t vaddr = qemu_plugin_insn_vaddr(insn);
qemu_plugin_register_vcpu_insn_exec_cb( qemu_plugin_register_vcpu_insn_exec_cb(
@ -136,10 +131,9 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
* information about the instruction which we also need to * information about the instruction which we also need to
* save if there is a hit. * save if there is a hit.
*/ */
if (matches) { if (matches->len) {
char *insn_disas = qemu_plugin_insn_disas(insn); char *insn_disas = qemu_plugin_insn_disas(insn);
int j; for (int j = 0; j < matches->len; j++) {
for (j = 0; j < matches->len; j++) {
Match *m = &g_array_index(matches, Match, j); Match *m = &g_array_index(matches, Match, j);
if (g_str_has_prefix(insn_disas, m->match_string)) { if (g_str_has_prefix(insn_disas, m->match_string)) {
Instruction *rec = g_new0(Instruction, 1); Instruction *rec = g_new0(Instruction, 1);
@ -169,36 +163,33 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
"len %d bytes: %ld insns\n", i, *cnt); "len %d bytes: %ld insns\n", i, *cnt);
} }
} }
} else if (do_inline) {
g_string_append_printf(out, "insns: %" PRIu64 "\n", inline_insn_count);
} else { } else {
uint64_t total_insns = 0; for (i = 0; i < qemu_plugin_num_vcpus(); i++) {
for (i = 0; i < MAX_CPUS; i++) { g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
InstructionCount *c = &counts[i]; i, qemu_plugin_u64_get(insn_count, i));
if (c->insn_count) {
g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
i, c->insn_count);
total_insns += c->insn_count;
}
} }
g_string_append_printf(out, "total insns: %" PRIu64 "\n", g_string_append_printf(out, "total insns: %" PRIu64 "\n",
total_insns); qemu_plugin_u64_sum(insn_count));
} }
qemu_plugin_outs(out->str); qemu_plugin_outs(out->str);
qemu_plugin_scoreboard_free(insn_count.score);
for (i = 0; i < matches->len; ++i) {
Match *m = &g_array_index(matches, Match, i);
g_free(m->match_string);
qemu_plugin_scoreboard_free(m->counts);
}
g_array_free(matches, TRUE);
g_array_free(sizes, TRUE);
} }
/* Add a match to the array of matches */ /* Add a match to the array of matches */
static void parse_match(char *match) static void parse_match(char *match)
{ {
Match new_match = { .match_string = match }; Match new_match = {
int i; .match_string = g_strdup(match),
for (i = 0; i < MAX_CPUS; i++) { .counts = qemu_plugin_scoreboard_new(sizeof(MatchCount)) };
new_match.history[i] = g_ptr_array_new();
}
if (!matches) {
matches = g_array_new(false, true, sizeof(Match));
}
g_array_append_val(matches, new_match); g_array_append_val(matches, new_match);
} }
@ -206,6 +197,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
const qemu_info_t *info, const qemu_info_t *info,
int argc, char **argv) int argc, char **argv)
{ {
matches = g_array_new(false, true, sizeof(Match));
/* null terminated so 0 is not a special case */
sizes = g_array_new(true, true, sizeof(unsigned long));
for (int i = 0; i < argc; i++) { for (int i = 0; i < argc; i++) {
char *opt = argv[i]; char *opt = argv[i];
g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
@ -227,9 +222,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
} }
} }
if (do_size) { insn_count = qemu_plugin_scoreboard_u64(
sizes = g_array_new(true, true, sizeof(unsigned long)); qemu_plugin_scoreboard_new(sizeof(uint64_t)));
}
/* Register init, translation block and exit callbacks */ /* Register init, translation block and exit callbacks */
qemu_plugin_register_vcpu_init_cb(id, vcpu_init); qemu_plugin_register_vcpu_init_cb(id, vcpu_init);

View file

@ -16,9 +16,14 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
static uint64_t inline_mem_count; typedef struct {
static uint64_t cb_mem_count; uint64_t mem_count;
static uint64_t io_count; uint64_t io_count;
} CPUCount;
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 mem_count;
static qemu_plugin_u64 io_count;
static bool do_inline, do_callback; static bool do_inline, do_callback;
static bool do_haddr; static bool do_haddr;
static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW; static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
@ -27,16 +32,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
{ {
g_autoptr(GString) out = g_string_new(""); g_autoptr(GString) out = g_string_new("");
if (do_inline) { if (do_inline || do_callback) {
g_string_printf(out, "inline mem accesses: %" PRIu64 "\n", inline_mem_count); g_string_printf(out, "mem accesses: %" PRIu64 "\n",
} qemu_plugin_u64_sum(mem_count));
if (do_callback) {
g_string_append_printf(out, "callback mem accesses: %" PRIu64 "\n", cb_mem_count);
} }
if (do_haddr) { if (do_haddr) {
g_string_append_printf(out, "io accesses: %" PRIu64 "\n", io_count); g_string_append_printf(out, "io accesses: %" PRIu64 "\n",
qemu_plugin_u64_sum(io_count));
} }
qemu_plugin_outs(out->str); qemu_plugin_outs(out->str);
qemu_plugin_scoreboard_free(counts);
} }
static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
@ -46,12 +51,12 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
struct qemu_plugin_hwaddr *hwaddr; struct qemu_plugin_hwaddr *hwaddr;
hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr); hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
if (qemu_plugin_hwaddr_is_io(hwaddr)) { if (qemu_plugin_hwaddr_is_io(hwaddr)) {
io_count++; qemu_plugin_u64_add(io_count, cpu_index, 1);
} else { } else {
cb_mem_count++; qemu_plugin_u64_add(mem_count, cpu_index, 1);
} }
} else { } else {
cb_mem_count++; qemu_plugin_u64_add(mem_count, cpu_index, 1);
} }
} }
@ -64,9 +69,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
if (do_inline) { if (do_inline) {
qemu_plugin_register_vcpu_mem_inline(insn, rw, qemu_plugin_register_vcpu_mem_inline_per_vcpu(
QEMU_PLUGIN_INLINE_ADD_U64, insn, rw,
&inline_mem_count, 1); QEMU_PLUGIN_INLINE_ADD_U64,
mem_count, 1);
} }
if (do_callback) { if (do_callback) {
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
@ -117,6 +123,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
} }
} }
if (do_inline && do_callback) {
fprintf(stderr,
"can't enable inline and callback counting at the same time\n");
return -1;
}
counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
mem_count = qemu_plugin_scoreboard_u64_in_struct(
counts, CPUCount, mem_count);
io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count);
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
return 0; return 0;

View file

@ -1,6 +1,6 @@
t = [] t = []
if get_option('plugins') if get_option('plugins')
foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall'] foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall']
if host_os == 'windows' if host_os == 'windows'
t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c', t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c',
include_directories: '../../include/qemu', include_directories: '../../include/qemu',

View file

@ -24,7 +24,7 @@
#include "libqos-malloc.h" #include "libqos-malloc.h"
/* maximum path length */ /* maximum path length */
#define QOS_PATH_MAX_ELEMENT_SIZE 64 #define QOS_PATH_MAX_ELEMENT_SIZE 128
typedef struct QOSGraphObject QOSGraphObject; typedef struct QOSGraphObject QOSGraphObject;
typedef struct QOSGraphNode QOSGraphNode; typedef struct QOSGraphNode QOSGraphNode;

View file

@ -168,7 +168,7 @@ RUN_TESTS+=$(EXTRA_RUNS)
# Some plugins need additional arguments above the default to fully # Some plugins need additional arguments above the default to fully
# exercise things. We can define them on a per-test basis here. # exercise things. We can define them on a per-test basis here.
run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true$(COMMA)callback=true run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true
ifeq ($(filter %-softmmu, $(TARGET)),) ifeq ($(filter %-softmmu, $(TARGET)),)
run-%: % run-%: %

View file

@ -106,6 +106,20 @@ run-gdbstub-catch-syscalls: catch-syscalls
--bin $< --test $(MULTIARCH_SRC)/gdbstub/catch-syscalls.py, \ --bin $< --test $(MULTIARCH_SRC)/gdbstub/catch-syscalls.py, \
hitting a syscall catchpoint) hitting a syscall catchpoint)
run-gdbstub-follow-fork-mode-child: follow-fork-mode
$(call run-test, $@, $(GDB_SCRIPT) \
--gdb $(GDB) \
--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-child.py, \
following children on fork)
run-gdbstub-follow-fork-mode-parent: follow-fork-mode
$(call run-test, $@, $(GDB_SCRIPT) \
--gdb $(GDB) \
--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \
following parents on fork)
else else
run-gdbstub-%: run-gdbstub-%:
$(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support") $(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support")
@ -113,7 +127,8 @@ endif
EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \ EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \
run-gdbstub-proc-mappings run-gdbstub-thread-breakpoint \ run-gdbstub-proc-mappings run-gdbstub-thread-breakpoint \
run-gdbstub-registers run-gdbstub-prot-none \ run-gdbstub-registers run-gdbstub-prot-none \
run-gdbstub-catch-syscalls run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \
run-gdbstub-follow-fork-mode-parent
# ARM Compatible Semi Hosting Tests # ARM Compatible Semi Hosting Tests
# #

View file

@ -0,0 +1,56 @@
/*
* Test GDB's follow-fork-mode.
*
* fork() a chain of processes.
* Parents sends one byte to their children, and children return their
* position in the chain, in order to prove that they survived GDB's fork()
* handling.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <assert.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
/*
 * Deliberately empty: called right after each fork() so the gdb test
 * scripts have a stable symbol to set a breakpoint on.
 */
void break_after_fork(void)
{
}
/*
 * Fork a chain of `depth` processes. Each parent sends its child one
 * byte over a pipe and waits; each child checks the byte and continues
 * the chain. The innermost child exits with `depth`, and every parent
 * propagates that status minus one, so the outermost process exits 0
 * only if the whole chain survived (i.e. gdb's fork handling did not
 * kill or corrupt any process).
 */
int main(void)
{
int depth = 42, err, i, fd[2], status;
pid_t child, pid;
ssize_t n;
char b;

for (i = 0; i < depth; i++) {
err = pipe(fd);
assert(err == 0);
child = fork();
/* breakpoint anchor for the gdb scripts, hit in parent and child */
break_after_fork();
assert(child != -1);
if (child == 0) {
/* child: receive one byte, verify it, then continue the chain */
close(fd[1]);
n = read(fd[0], &b, 1);
close(fd[0]);
assert(n == 1);
assert(b == (char)i);
} else {
/* parent: send one byte, then wait for the child's status */
close(fd[0]);
b = (char)i;
n = write(fd[1], &b, 1);
close(fd[1]);
assert(n == 1);
pid = waitpid(child, &status, 0);
assert(pid == child);
assert(WIFEXITED(status));
/* pass the child's exit code up the chain, decremented by one */
return WEXITSTATUS(status) - 1;
}
}
/* innermost child: start the countdown */
return depth;
}

View file

@ -0,0 +1,40 @@
"""Test GDB's follow-fork-mode child.
SPDX-License-Identifier: GPL-2.0-or-later
"""
from test_gdbstub import main, report
def run_test():
    """Run through the tests one by one"""
    gdb.execute("set follow-fork-mode child")
    # The parent's breakpoints must be cleared when we follow the child.
    gdb.execute("break break_after_fork")
    # Likewise for the parent's syscall catchpoints, on architectures
    # that support them.
    have_fork_syscall = False
    for fork_syscall in ("fork", "clone", "clone2", "clone3"):
        try:
            gdb.execute("catch syscall {}".format(fork_syscall))
            have_fork_syscall = True
        except gdb.error:
            # This architecture does not know this syscall; skip it.
            pass
    gdb.execute("continue")
    for i in range(42):
        if have_fork_syscall:
            # Stop at the syscall entry.
            if i % 2 == 0:
                # Check that the parent single-stepping is turned off.
                gdb.execute("si")
            else:
                gdb.execute("continue")
            # Stop at the syscall exit.
            gdb.execute("continue")
        # Stop at break_after_fork().
        gdb.execute("continue")
    exitcode = int(gdb.parse_and_eval("$_exitcode"))
    report(exitcode == 42, "{} == 42".format(exitcode))


main(run_test)

View file

@ -0,0 +1,16 @@
"""Test GDB's follow-fork-mode parent.
SPDX-License-Identifier: GPL-2.0-or-later
"""
from test_gdbstub import main, report
def run_test():
    """Run through the tests one by one"""
    gdb.execute("set follow-fork-mode parent")
    # Stay with the parent; the whole fork chain must run to completion.
    gdb.execute("continue")
    exitcode = int(gdb.parse_and_eval("$_exitcode"))
    report(exitcode == 0, "{} == 0".format(exitcode))


main(run_test)