1
0
mirror of https://gitlab.com/qemu-project/qemu synced 2024-07-08 20:17:27 +00:00

maintainer updates (tests, gdbstub, plugins):

- expand QOS_PATH_MAX_ELEMENT_SIZE to avoid LTO issues
- support fork-follow-mode in gdbstub
- new thread-safe scoreboard API for TCG plugins
- suppress showing opcodes in plugin disassembly
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmXoY7oACgkQ+9DbCVqe
 KkTdTwf8D8nUB+Ee6LuglW36vtd1ETdMfUmfRis7RIBsXZZ0Tg4+8LyfKkNi1vCL
 UMdWQTkSW79RfXr21QEtETokwLZ0CWQMdxDAWfOiz4S+uDgQyBE+lwUsy0mHBmd7
 +J4SQb3adoZ+//9KMJhRU1wL9j3ygpEoKHVJonDObU6K5XuhE18JuBE44q7FqkWl
 0VhoLDgNxrf2PqT+LLP/O3MFLDXPVKbzrZYQF0IoqBTlcqShCoaykhSwiwCZ4Sqq
 NO9hVwZIOFOcOF4F6ZqRXaZrwERldoBwG+BeIx1ah20vKFVT12y02dQqdP/oKwe+
 /PXFXDdzs4yMOghb4Go6SiKlKT5g4A==
 =s1lF
 -----END PGP SIGNATURE-----

Merge tag 'pull-maintainer-updates-060324-1' of https://gitlab.com/stsquad/qemu into staging

maintainer updates (tests, gdbstub, plugins):

  - expand QOS_PATH_MAX_ELEMENT_SIZE to avoid LTO issues
  - support fork-follow-mode in gdbstub
  - new thread-safe scoreboard API for TCG plugins
  - suppress showing opcodes in plugin disassembly

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmXoY7oACgkQ+9DbCVqe
# KkTdTwf8D8nUB+Ee6LuglW36vtd1ETdMfUmfRis7RIBsXZZ0Tg4+8LyfKkNi1vCL
# UMdWQTkSW79RfXr21QEtETokwLZ0CWQMdxDAWfOiz4S+uDgQyBE+lwUsy0mHBmd7
# +J4SQb3adoZ+//9KMJhRU1wL9j3ygpEoKHVJonDObU6K5XuhE18JuBE44q7FqkWl
# 0VhoLDgNxrf2PqT+LLP/O3MFLDXPVKbzrZYQF0IoqBTlcqShCoaykhSwiwCZ4Sqq
# NO9hVwZIOFOcOF4F6ZqRXaZrwERldoBwG+BeIx1ah20vKFVT12y02dQqdP/oKwe+
# /PXFXDdzs4yMOghb4Go6SiKlKT5g4A==
# =s1lF
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 06 Mar 2024 12:38:18 GMT
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8  DF35 FBD0 DB09 5A9E 2A44

* tag 'pull-maintainer-updates-060324-1' of https://gitlab.com/stsquad/qemu: (29 commits)
  target/riscv: honour show_opcodes when disassembling
  target/loongarch: honour show_opcodes when disassembling
  disas/hppa: honour show_opcodes
  disas: introduce show_opcodes
  plugins: cleanup codepath for previous inline operation
  plugins: remove non per_vcpu inline operation from API
  contrib/plugins/howvec: migrate to new per_vcpu API
  contrib/plugins/hotblocks: migrate to new per_vcpu API
  tests/plugin/bb: migrate to new per_vcpu API
  tests/plugin/insn: migrate to new per_vcpu API
  tests/plugin/mem: migrate to new per_vcpu API
  tests/plugin: add test plugin for inline operations
  plugins: add inline operation per vcpu
  plugins: implement inline operation relative to cpu_index
  plugins: define qemu_plugin_u64
  plugins: scoreboard API
  tests/tcg: Add two follow-fork-mode tests
  gdbstub: Implement follow-fork-mode child
  gdbstub: Introduce gdb_handle_detach_user()
  gdbstub: Introduce gdb_handle_set_thread_user()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2024-03-06 16:56:20 +00:00
commit 8f6330a807
57 changed files with 1258 additions and 339 deletions

View File

@ -133,16 +133,28 @@ static void gen_empty_udata_cb_no_rwg(void)
*/
static void gen_empty_inline_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
/* second operand will be replaced by immediate value */
tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface);
/* second operand will be replaced by immediate value */
tcg_gen_add_i64(val, val, val);
tcg_gen_st_i64(val, ptr, 0);
tcg_temp_free_ptr(ptr);
tcg_temp_free_i64(val);
tcg_temp_free_ptr(cpu_index_as_ptr);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
@ -290,12 +302,37 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op;
}
/* Copy a 32-bit load from the template op stream (emits INDEX_op_ld_i32). */
static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
{
return copy_op(begin_op, op, INDEX_op_ld_i32);
}
/*
 * Copy an i32 -> host-pointer-width widening op.  When host pointers are
 * 32 bits wide a plain i32 move suffices; otherwise the i32 is widened
 * to i64 with ext_i32_i64.
 */
static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX != UINT32_MAX) {
        /* 64-bit host pointer: widen the 32-bit value. */
        return copy_op(begin_op, op, INDEX_op_ext_i32_i64);
    }
    /* 32-bit host pointer: a move is enough. */
    return copy_op(begin_op, op, INDEX_op_mov_i32);
}
/*
 * Copy a pointer-sized add, selecting the add opcode that matches the
 * host pointer width.
 */
static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX != UINT32_MAX) {
        /* 64-bit host pointers */
        return copy_op(begin_op, op, INDEX_op_add_i64);
    }
    /* 32-bit host pointers */
    return copy_op(begin_op, op, INDEX_op_add_i32);
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* 2x ld_i32 */
op = copy_op(begin_op, op, INDEX_op_ld_i32);
op = copy_op(begin_op, op, INDEX_op_ld_i32);
op = copy_ld_i32(begin_op, op);
op = copy_ld_i32(begin_op, op);
} else {
/* ld_i64 */
op = copy_op(begin_op, op, INDEX_op_ld_i64);
@ -331,6 +368,13 @@ static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
return op;
}
/*
 * Copy a 32-bit multiply and patch its second source operand to the
 * constant @v (the template emitted a placeholder operand there).
 */
static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
{
op = copy_op(begin_op, op, INDEX_op_mul_i32);
op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
return op;
}
static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
@ -396,18 +440,19 @@ static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op,
int *unused)
{
/* const_ptr */
op = copy_const_ptr(&begin_op, op, cb->userp);
char *ptr = cb->inline_insn.entry.score->data->data;
size_t elem_size = g_array_get_element_size(
cb->inline_insn.entry.score->data);
size_t offset = cb->inline_insn.entry.offset;
/* ld_i64 */
op = copy_ld_i32(&begin_op, op);
op = copy_mul_i32(&begin_op, op, elem_size);
op = copy_ext_i32_ptr(&begin_op, op);
op = copy_const_ptr(&begin_op, op, ptr + offset);
op = copy_add_ptr(&begin_op, op);
op = copy_ld_i64(&begin_op, op);
/* add_i64 */
op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
/* st_i64 */
op = copy_st_i64(&begin_op, op);
return op;
}

View File

@ -641,7 +641,7 @@ static abi_long do_bsd_readlink(CPUArchState *env, abi_long arg1,
}
if (strcmp(p1, "/proc/curproc/file") == 0) {
CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque;
TaskState *ts = get_task_state(cpu);
strncpy(p2, ts->bprm->fullpath, arg3);
ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3);
} else {

View File

@ -208,7 +208,7 @@ static inline abi_long do_freebsd_fork(void *cpu_env)
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
}
@ -252,7 +252,7 @@ static inline abi_long do_freebsd_rfork(void *cpu_env, abi_long flags)
* value: 0 for parent process, 1 for child process.
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
@ -285,7 +285,7 @@ static inline abi_long do_freebsd_pdfork(void *cpu_env, abi_ulong target_fdp,
* value: 0 for parent process, 1 for child process.
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
}

View File

@ -113,10 +113,13 @@ void fork_start(void)
start_exclusive();
cpu_list_lock();
mmap_fork_start();
gdbserver_fork_start();
}
void fork_end(int child)
void fork_end(pid_t pid)
{
bool child = pid == 0;
if (child) {
CPUState *cpu, *next_cpu;
/*
@ -134,10 +137,12 @@ void fork_end(int child)
* state, so we don't need to end_exclusive() here.
*/
qemu_init_cpu_list();
gdbserver_fork(thread_cpu);
get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
gdbserver_fork_end(thread_cpu, pid);
} else {
mmap_fork_end(child);
cpu_list_unlock();
gdbserver_fork_end(thread_cpu, pid);
end_exclusive();
}
}

View File

@ -117,6 +117,11 @@ typedef struct TaskState {
struct target_sigaltstack sigaltstack_used;
} __attribute__((aligned(16))) TaskState;
/* Accessor for the per-CPU TaskState stored in @cs->opaque. */
static inline TaskState *get_task_state(CPUState *cs)
{
return cs->opaque;
}
void stop_all_tasks(void);
extern const char *interp_prefix;
extern const char *qemu_uname_release;
@ -187,7 +192,7 @@ void cpu_loop(CPUArchState *env);
char *target_strerror(int err);
int get_osversion(void);
void fork_start(void);
void fork_end(int child);
void fork_end(pid_t pid);
#include "qemu/log.h"

View File

@ -319,7 +319,7 @@ void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
int block_signals(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
sigset_t set;
/*
@ -359,7 +359,7 @@ void dump_core_and_abort(int target_sig)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
int core_dumped = 0;
int host_sig;
struct sigaction act;
@ -421,7 +421,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig);
@ -476,7 +476,7 @@ void force_sig_fault(int sig, int code, abi_ulong addr)
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
CPUState *cpu = thread_cpu;
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo;
ucontext_t *uc = puc;
struct emulated_sigtable *k;
@ -585,7 +585,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
/* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
int ret;
target_stack_t oss;
@ -714,7 +714,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
static inline abi_ulong get_sigframe(struct target_sigaction *ka,
CPUArchState *env, size_t frame_size)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
abi_ulong sp;
/* Use default user stack */
@ -789,7 +789,7 @@ static int reset_signal_mask(target_ucontext_t *ucontext)
int i;
sigset_t blocked;
target_sigset_t target_set;
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]);
@ -839,7 +839,7 @@ badframe:
void signal_init(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
struct sigaction act;
struct sigaction oact;
int i;
@ -878,7 +878,7 @@ static void handle_pending_signal(CPUArchState *env, int sig,
struct emulated_sigtable *k)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct target_sigaction *sa;
int code;
sigset_t set;
@ -967,7 +967,7 @@ void process_pending_signals(CPUArchState *env)
int sig;
sigset_t *blocked_set, set;
struct emulated_sigtable *k;
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
while (qatomic_read(&ts->signal_pending)) {
sigfillset(&set);

View File

@ -34,8 +34,8 @@ static guint64 limit = 20;
*/
typedef struct {
uint64_t start_addr;
uint64_t exec_count;
int trans_count;
struct qemu_plugin_scoreboard *exec_count;
int trans_count;
unsigned long insns;
} ExecCount;
@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
{
ExecCount *ea = (ExecCount *) a;
ExecCount *eb = (ExecCount *) b;
return ea->exec_count > eb->exec_count ? -1 : 1;
uint64_t count_a =
qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count));
uint64_t count_b =
qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count));
return count_a > count_b ? -1 : 1;
}
static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
{
ExecCount *cnt = value;
qemu_plugin_scoreboard_free(cnt->exec_count);
}
static void plugin_exit(qemu_plugin_id_t id, void *p)
@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
GList *counts, *it;
int i;
g_mutex_lock(&lock);
g_string_append_printf(report, "%d entries in the hash table\n",
g_hash_table_size(hotblocks));
counts = g_hash_table_get_values(hotblocks);
@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
for (i = 0; i < limit && it->next; i++, it = it->next) {
ExecCount *rec = (ExecCount *) it->data;
g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
rec->start_addr, rec->trans_count,
rec->insns, rec->exec_count);
g_string_append_printf(
report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
rec->start_addr, rec->trans_count,
rec->insns,
qemu_plugin_u64_sum(
qemu_plugin_scoreboard_u64(rec->exec_count)));
}
g_list_free(it);
}
g_mutex_unlock(&lock);
qemu_plugin_outs(report->str);
g_hash_table_foreach(hotblocks, exec_count_free, NULL);
g_hash_table_destroy(hotblocks);
}
static void plugin_init(void)
@ -82,15 +96,9 @@ static void plugin_init(void)
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
ExecCount *cnt;
uint64_t hash = (uint64_t) udata;
g_mutex_lock(&lock);
cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
/* should always succeed */
g_assert(cnt);
cnt->exec_count++;
g_mutex_unlock(&lock);
ExecCount *cnt = (ExecCount *)udata;
qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
cpu_index, 1);
}
/*
@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
cnt->start_addr = pc;
cnt->trans_count = 1;
cnt->insns = insns;
cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
}
g_mutex_unlock(&lock);
if (do_inline) {
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
&cnt->exec_count, 1);
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
tb, QEMU_PLUGIN_INLINE_ADD_U64,
qemu_plugin_scoreboard_u64(cnt->exec_count), 1);
} else {
qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
QEMU_PLUGIN_CB_NO_REGS,
(void *)hash);
(void *)cnt);
}
}

View File

@ -43,13 +43,13 @@ typedef struct {
uint32_t mask;
uint32_t pattern;
CountType what;
uint64_t count;
qemu_plugin_u64 count;
} InsnClassExecCount;
typedef struct {
char *insn;
uint32_t opcode;
uint64_t count;
qemu_plugin_u64 count;
InsnClassExecCount *class;
} InsnExecCount;
@ -159,7 +159,9 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
{
InsnExecCount *ea = (InsnExecCount *) a;
InsnExecCount *eb = (InsnExecCount *) b;
return ea->count > eb->count ? -1 : 1;
uint64_t count_a = qemu_plugin_u64_sum(ea->count);
uint64_t count_b = qemu_plugin_u64_sum(eb->count);
return count_a > count_b ? -1 : 1;
}
static void free_record(gpointer data)
@ -167,12 +169,14 @@ static void free_record(gpointer data)
InsnExecCount *rec = (InsnExecCount *) data;
g_free(rec->insn);
g_free(rec);
qemu_plugin_scoreboard_free(rec->count.score);
}
static void plugin_exit(qemu_plugin_id_t id, void *p)
{
g_autoptr(GString) report = g_string_new("Instruction Classes:\n");
int i;
uint64_t total_count;
GList *counts;
InsnClassExecCount *class = NULL;
@ -180,11 +184,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
class = &class_table[i];
switch (class->what) {
case COUNT_CLASS:
if (class->count || verbose) {
total_count = qemu_plugin_u64_sum(class->count);
if (total_count || verbose) {
g_string_append_printf(report,
"Class: %-24s\t(%" PRId64 " hits)\n",
class->class,
class->count);
total_count);
}
break;
case COUNT_INDIVIDUAL:
@ -212,7 +217,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
"Instr: %-24s\t(%" PRId64 " hits)"
"\t(op=0x%08x/%s)\n",
rec->insn,
rec->count,
qemu_plugin_u64_sum(rec->count),
rec->opcode,
rec->class ?
rec->class->class : "un-categorised");
@ -221,6 +226,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
}
g_hash_table_destroy(insns);
for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
for (int j = 0; j < class_tables[i].table_sz; ++j) {
qemu_plugin_scoreboard_free(class_tables[i].table[j].count.score);
}
}
qemu_plugin_outs(report->str);
}
@ -232,11 +243,12 @@ static void plugin_init(void)
static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{
uint64_t *count = (uint64_t *) udata;
(*count)++;
struct qemu_plugin_scoreboard *score = udata;
qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(score), cpu_index, 1);
}
static uint64_t *find_counter(struct qemu_plugin_insn *insn)
static struct qemu_plugin_scoreboard *find_counter(
struct qemu_plugin_insn *insn)
{
int i;
uint64_t *cnt = NULL;
@ -265,7 +277,7 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn)
case COUNT_NONE:
return NULL;
case COUNT_CLASS:
return &class->count;
return class->count.score;
case COUNT_INDIVIDUAL:
{
InsnExecCount *icount;
@ -279,13 +291,16 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn)
icount->opcode = opcode;
icount->insn = qemu_plugin_insn_disas(insn);
icount->class = class;
struct qemu_plugin_scoreboard *score =
qemu_plugin_scoreboard_new(sizeof(uint64_t));
icount->count = qemu_plugin_scoreboard_u64(score);
g_hash_table_insert(insns, GUINT_TO_POINTER(opcode),
(gpointer) icount);
}
g_mutex_unlock(&lock);
return &icount->count;
return icount->count.score;
}
default:
g_assert_not_reached();
@ -300,14 +315,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
size_t i;
for (i = 0; i < n; i++) {
uint64_t *cnt;
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
cnt = find_counter(insn);
struct qemu_plugin_scoreboard *cnt = find_counter(insn);
if (cnt) {
if (do_inline) {
qemu_plugin_register_vcpu_insn_exec_inline(
insn, QEMU_PLUGIN_INLINE_ADD_U64, cnt, 1);
qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
insn, QEMU_PLUGIN_INLINE_ADD_U64,
qemu_plugin_scoreboard_u64(cnt), 1);
} else {
qemu_plugin_register_vcpu_insn_exec_cb(
insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, cnt);
@ -322,6 +337,14 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
{
int i;
for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
for (int j = 0; j < class_tables[i].table_sz; ++j) {
struct qemu_plugin_scoreboard *score =
qemu_plugin_scoreboard_new(sizeof(uint64_t));
class_tables[i].table[j].count = qemu_plugin_scoreboard_u64(score);
}
}
/* Select a class table appropriate to the guest architecture */
for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
ClassSelector *entry = &class_tables[i];

View File

@ -299,6 +299,7 @@ void disas(FILE *out, const void *code, size_t size)
s.info.buffer = code;
s.info.buffer_vma = (uintptr_t)code;
s.info.buffer_length = size;
s.info.show_opcodes = true;
if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
return;

View File

@ -1972,9 +1972,11 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info)
insn = bfd_getb32 (buffer);
info->fprintf_func(info->stream, " %02x %02x %02x %02x ",
(insn >> 24) & 0xff, (insn >> 16) & 0xff,
(insn >> 8) & 0xff, insn & 0xff);
if (info->show_opcodes) {
info->fprintf_func(info->stream, " %02x %02x %02x %02x ",
(insn >> 24) & 0xff, (insn >> 16) & 0xff,
(insn >> 8) & 0xff, insn & 0xff);
}
for (i = 0; i < NUMOPCODES; ++i)
{

View File

@ -5192,19 +5192,21 @@ print_insn_riscv(bfd_vma memaddr, struct disassemble_info *info, rv_isa isa)
}
}
switch (len) {
case 2:
(*info->fprintf_func)(info->stream, INST_FMT_2, inst);
break;
case 4:
(*info->fprintf_func)(info->stream, INST_FMT_4, inst);
break;
case 6:
(*info->fprintf_func)(info->stream, INST_FMT_6, inst);
break;
default:
(*info->fprintf_func)(info->stream, INST_FMT_8, inst);
break;
if (info->show_opcodes) {
switch (len) {
case 2:
(*info->fprintf_func)(info->stream, INST_FMT_2, inst);
break;
case 4:
(*info->fprintf_func)(info->stream, INST_FMT_4, inst);
break;
case 6:
(*info->fprintf_func)(info->stream, INST_FMT_6, inst);
break;
default:
(*info->fprintf_func)(info->stream, INST_FMT_8, inst);
break;
}
}
disasm_inst(buf, sizeof(buf), isa, memaddr, inst,

View File

@ -1024,6 +1024,12 @@ static void handle_detach(GArray *params, void *user_ctx)
pid = get_param(params, 0)->val_ul;
}
#ifdef CONFIG_USER_ONLY
if (gdb_handle_detach_user(pid)) {
return;
}
#endif
process = gdb_get_process(pid);
gdb_process_breakpoint_remove_all(process);
process->attached = false;
@ -1099,6 +1105,7 @@ static void handle_cont_with_sig(GArray *params, void *user_ctx)
static void handle_set_thread(GArray *params, void *user_ctx)
{
uint32_t pid, tid;
CPUState *cpu;
if (params->len != 2) {
@ -1116,8 +1123,14 @@ static void handle_set_thread(GArray *params, void *user_ctx)
return;
}
cpu = gdb_get_cpu(get_param(params, 1)->thread_id.pid,
get_param(params, 1)->thread_id.tid);
pid = get_param(params, 1)->thread_id.pid;
tid = get_param(params, 1)->thread_id.tid;
#ifdef CONFIG_USER_ONLY
if (gdb_handle_set_thread_user(pid, tid)) {
return;
}
#endif
cpu = gdb_get_cpu(pid, tid);
if (!cpu) {
gdb_put_packet("E22");
return;
@ -1655,9 +1668,15 @@ static void handle_query_supported(GArray *params, void *user_ctx)
g_string_append(gdbserver_state.str_buf, ";qXfer:exec-file:read+");
#endif
if (params->len &&
strstr(get_param(params, 0)->data, "multiprocess+")) {
gdbserver_state.multiprocess = true;
if (params->len) {
const char *gdb_supported = get_param(params, 0)->data;
if (strstr(gdb_supported, "multiprocess+")) {
gdbserver_state.multiprocess = true;
}
#if defined(CONFIG_USER_ONLY)
gdb_handle_query_supported_user(gdb_supported);
#endif
}
g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+");

View File

@ -196,6 +196,9 @@ void gdb_handle_v_file_pread(GArray *params, void *user_ctx); /* user */
void gdb_handle_v_file_readlink(GArray *params, void *user_ctx); /* user */
void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx); /* user */
void gdb_handle_set_catch_syscalls(GArray *params, void *user_ctx); /* user */
void gdb_handle_query_supported_user(const char *gdb_supported); /* user */
bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid); /* user */
bool gdb_handle_detach_user(uint32_t pid); /* user */
void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */

View File

@ -204,7 +204,7 @@ int gdb_target_signal_to_gdb(int sig)
int gdb_get_cpu_index(CPUState *cpu)
{
TaskState *ts = (TaskState *) cpu->opaque;
TaskState *ts = get_task_state(cpu);
return ts ? ts->ts_tid : -1;
}
@ -399,7 +399,7 @@ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx)
return;
}
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
if (!ts || !ts->bprm || !ts->bprm->filename) {
gdb_put_packet("E00");
return;

View File

@ -25,6 +25,61 @@
#define GDB_NR_SYSCALLS 1024
typedef unsigned long GDBSyscallsMask[BITS_TO_LONGS(GDB_NR_SYSCALLS)];
/*
* Forked child talks to its parent in order to let GDB enforce the
* follow-fork-mode. This happens inside a start_exclusive() section, so that
* the other threads, which may be forking too, do not interfere. The
* implementation relies on GDB not sending $vCont until it has detached
* either from the parent (follow-fork-mode child) or from the child
* (follow-fork-mode parent).
*
* The parent and the child share the GDB socket; at any given time only one
* of them is allowed to use it, as is reflected in the respective fork_state.
* This is negotiated via the fork_sockets pair as a reaction to $Hg.
*
* Below is a short summary of the possible state transitions:
*
* ENABLED : Terminal state.
* DISABLED : Terminal state.
* ACTIVE : Parent initial state.
* INACTIVE : Child initial state.
* ACTIVE -> DEACTIVATING: On $Hg.
* ACTIVE -> ENABLING : On $D.
* ACTIVE -> DISABLING : On $D.
* ACTIVE -> DISABLED : On communication error.
* DEACTIVATING -> INACTIVE : On gdb_read_byte() return.
* DEACTIVATING -> DISABLED : On communication error.
* INACTIVE -> ACTIVE : On $Hg in the peer.
* INACTIVE -> ENABLE : On $D in the peer.
* INACTIVE -> DISABLE : On $D in the peer.
* INACTIVE -> DISABLED : On communication error.
* ENABLING -> ENABLED : On gdb_read_byte() return.
* ENABLING -> DISABLED : On communication error.
* DISABLING -> DISABLED : On gdb_read_byte() return.
*/
enum GDBForkState {
/* Fully owning the GDB socket. */
GDB_FORK_ENABLED,
/* Working with the GDB socket; the peer is inactive. */
GDB_FORK_ACTIVE,
/* Handing off the GDB socket to the peer. */
GDB_FORK_DEACTIVATING,
/* The peer is working with the GDB socket. */
GDB_FORK_INACTIVE,
/* Asking the peer to close its GDB socket fd. */
GDB_FORK_ENABLING,
/* Asking the peer to take over, closing our GDB socket fd. */
GDB_FORK_DISABLING,
/* The peer has taken over, our GDB socket fd is closed. */
GDB_FORK_DISABLED,
};
/*
 * Single-byte messages exchanged between the forked parent and child
 * over the fork_sockets pair while negotiating GDB socket ownership.
 */
enum GDBForkMessage {
GDB_FORK_ACTIVATE = 'a',
GDB_FORK_ENABLE = 'e',
GDB_FORK_DISABLE = 'd',
};
/* User-mode specific state */
typedef struct {
int fd;
@ -36,6 +91,10 @@ typedef struct {
*/
bool catch_all_syscalls;
GDBSyscallsMask catch_syscalls_mask;
bool fork_events;
enum GDBForkState fork_state;
int fork_sockets[2];
pid_t fork_peer_pid, fork_peer_tid;
} GDBUserState;
static GDBUserState gdbserver_user_state;
@ -356,16 +415,193 @@ int gdbserver_start(const char *port_or_path)
return -1;
}
/* Disable gdb stub for child processes. */
void gdbserver_fork(CPUState *cpu)
void gdbserver_fork_start(void)
{
if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
return;
}
if (!gdbserver_user_state.fork_events ||
qemu_socketpair(AF_UNIX, SOCK_STREAM, 0,
gdbserver_user_state.fork_sockets) < 0) {
gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
return;
}
gdbserver_user_state.fork_state = GDB_FORK_INACTIVE;
gdbserver_user_state.fork_peer_pid = getpid();
gdbserver_user_state.fork_peer_tid = qemu_get_thread_id();
}
static void disable_gdbstub(CPUState *thread_cpu)
{
CPUState *cpu;
close(gdbserver_user_state.fd);
gdbserver_user_state.fd = -1;
cpu_breakpoint_remove_all(cpu, BP_GDB);
/* no cpu_watchpoint_remove_all for user-mode */
CPU_FOREACH(cpu) {
cpu_breakpoint_remove_all(cpu, BP_GDB);
/* no cpu_watchpoint_remove_all for user-mode */
cpu_single_step(cpu, 0);
}
tb_flush(thread_cpu);
}
void gdbserver_fork_end(CPUState *cpu, pid_t pid)
{
char b;
int fd;
if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
return;
}
if (pid == -1) {
if (gdbserver_user_state.fork_state != GDB_FORK_DISABLED) {
g_assert(gdbserver_user_state.fork_state == GDB_FORK_INACTIVE);
close(gdbserver_user_state.fork_sockets[0]);
close(gdbserver_user_state.fork_sockets[1]);
}
return;
}
if (gdbserver_user_state.fork_state == GDB_FORK_DISABLED) {
if (pid == 0) {
disable_gdbstub(cpu);
}
return;
}
if (pid == 0) {
close(gdbserver_user_state.fork_sockets[0]);
fd = gdbserver_user_state.fork_sockets[1];
g_assert(gdbserver_state.process_num == 1);
g_assert(gdbserver_state.processes[0].pid ==
gdbserver_user_state.fork_peer_pid);
g_assert(gdbserver_state.processes[0].attached);
gdbserver_state.processes[0].pid = getpid();
} else {
close(gdbserver_user_state.fork_sockets[1]);
fd = gdbserver_user_state.fork_sockets[0];
gdbserver_user_state.fork_state = GDB_FORK_ACTIVE;
gdbserver_user_state.fork_peer_pid = pid;
gdbserver_user_state.fork_peer_tid = pid;
if (!gdbserver_state.allow_stop_reply) {
goto fail;
}
g_string_printf(gdbserver_state.str_buf,
"T%02xfork:p%02x.%02x;thread:p%02x.%02x;",
gdb_target_signal_to_gdb(gdb_target_sigtrap()),
pid, pid, (int)getpid(), qemu_get_thread_id());
gdb_put_strbuf();
}
gdbserver_state.state = RS_IDLE;
gdbserver_state.allow_stop_reply = false;
gdbserver_user_state.running_state = 0;
for (;;) {
switch (gdbserver_user_state.fork_state) {
case GDB_FORK_ENABLED:
if (gdbserver_user_state.running_state) {
return;
}
QEMU_FALLTHROUGH;
case GDB_FORK_ACTIVE:
if (read(gdbserver_user_state.fd, &b, 1) != 1) {
goto fail;
}
gdb_read_byte(b);
break;
case GDB_FORK_DEACTIVATING:
b = GDB_FORK_ACTIVATE;
if (write(fd, &b, 1) != 1) {
goto fail;
}
gdbserver_user_state.fork_state = GDB_FORK_INACTIVE;
break;
case GDB_FORK_INACTIVE:
if (read(fd, &b, 1) != 1) {
goto fail;
}
switch (b) {
case GDB_FORK_ACTIVATE:
gdbserver_user_state.fork_state = GDB_FORK_ACTIVE;
break;
case GDB_FORK_ENABLE:
close(fd);
gdbserver_user_state.fork_state = GDB_FORK_ENABLED;
break;
case GDB_FORK_DISABLE:
gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
break;
default:
g_assert_not_reached();
}
break;
case GDB_FORK_ENABLING:
b = GDB_FORK_DISABLE;
if (write(fd, &b, 1) != 1) {
goto fail;
}
close(fd);
gdbserver_user_state.fork_state = GDB_FORK_ENABLED;
break;
case GDB_FORK_DISABLING:
b = GDB_FORK_ENABLE;
if (write(fd, &b, 1) != 1) {
goto fail;
}
gdbserver_user_state.fork_state = GDB_FORK_DISABLED;
break;
case GDB_FORK_DISABLED:
close(fd);
disable_gdbstub(cpu);
return;
default:
g_assert_not_reached();
}
}
fail:
close(fd);
if (pid == 0) {
disable_gdbstub(cpu);
}
}
/*
 * User-mode part of the qSupported handshake: record whether GDB
 * advertised "fork-events+", and advertise our own fork-events support
 * in the reply buffer.
 */
void gdb_handle_query_supported_user(const char *gdb_supported)
{
if (strstr(gdb_supported, "fork-events+")) {
gdbserver_user_state.fork_events = true;
}
g_string_append(gdbserver_state.str_buf, ";fork-events+");
}
/*
 * Handle $Hg directed at the fork peer: if we currently own the GDB
 * socket (ACTIVE) and the requested thread belongs to the peer, request
 * a hand-off by entering DEACTIVATING (gdbserver_fork_end()'s loop
 * completes the transfer) and acknowledge with "OK".
 *
 * Returns true when the packet was fully handled here; false to let the
 * generic handler process it.
 */
bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid)
{
if (gdbserver_user_state.fork_state == GDB_FORK_ACTIVE &&
pid == gdbserver_user_state.fork_peer_pid &&
tid == gdbserver_user_state.fork_peer_tid) {
gdbserver_user_state.fork_state = GDB_FORK_DEACTIVATING;
gdb_put_packet("OK");
return true;
}
return false;
}
/*
 * Handle $D (detach) while fork negotiation is ACTIVE.  Detaching from
 * the peer means GDB follows us, so we move to ENABLING (tell the peer
 * to close its socket fd); detaching from ourselves (pid == getpid())
 * means GDB follows the peer, so we move to DISABLING.  Either way the
 * packet is acknowledged with "OK" and reported as handled.
 *
 * Returns true when the packet was consumed here; false otherwise.
 */
bool gdb_handle_detach_user(uint32_t pid)
{
bool enable;
if (gdbserver_user_state.fork_state == GDB_FORK_ACTIVE) {
enable = pid == gdbserver_user_state.fork_peer_pid;
if (enable || pid == getpid()) {
gdbserver_user_state.fork_state = enable ? GDB_FORK_ENABLING :
GDB_FORK_DISABLING;
gdb_put_packet("OK");
return true;
}
}
return false;
}
/*

View File

@ -396,6 +396,14 @@ typedef struct disassemble_info {
/* Command line options specific to the target disassembler. */
char * disassembler_options;
/*
* When true, the disassembler may preface the disassembly with the raw
* opcode bytes if it chooses to. This exists mainly for the benefit of
* the plugin interface, which does not want that prefix.
*/
bool show_opcodes;
/* Field intended to be used by targets in any way they deem suitable. */
void *target_info;

View File

@ -46,10 +46,16 @@ static inline int gdb_handlesig(CPUState *cpu, int sig)
void gdb_signalled(CPUArchState *as, int sig);
/**
* gdbserver_fork() - disable gdb stub for child processes.
* @cs: CPU
* gdbserver_fork_start() - inform gdb of the upcoming fork()
*/
void gdbserver_fork(CPUState *cs);
void gdbserver_fork_start(void);
/**
* gdbserver_fork_end() - inform gdb of the completed fork()
* @cs: CPU
* @pid: 0 if in child process, -1 if fork failed, child process pid otherwise
*/
void gdbserver_fork_end(CPUState *cs, pid_t pid);
/**
* gdb_syscall_entry() - inform gdb of syscall entry and yield control to it

View File

@ -92,6 +92,7 @@ struct qemu_plugin_dyn_cb {
/* fields specific to each dyn_cb type go here */
union {
struct {
qemu_plugin_u64 entry;
enum qemu_plugin_op op;
uint64_t imm;
} inline_insn;
@ -112,6 +113,12 @@ struct qemu_plugin_insn {
bool mem_only;
};
/* A scoreboard is an array of values, indexed by vcpu_index */
struct qemu_plugin_scoreboard {
GArray *data;
QLIST_ENTRY(qemu_plugin_scoreboard) entry;
};
/*
* qemu_plugin_insn allocate and cleanup functions. We don't expect to
* cleanup many of these structures. They are reused for each fresh

View File

@ -52,7 +52,11 @@ typedef uint64_t qemu_plugin_id_t;
* The plugins export the API they were built against by exposing the
* symbol qemu_plugin_version which can be checked.
*
* version 2: removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
* version 2:
* - removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
* - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
* Those functions are replaced by *_per_vcpu variants, which guarantee
* thread-safety for operations.
*/
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
@ -222,6 +226,19 @@ void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
struct qemu_plugin_tb;
/** struct qemu_plugin_insn - Opaque handle for a translated instruction */
struct qemu_plugin_insn;
/** struct qemu_plugin_scoreboard - Opaque handle for a scoreboard */
struct qemu_plugin_scoreboard;
/**
* typedef qemu_plugin_u64 - uint64_t member of an entry in a scoreboard
*
* This type allows access to a specific uint64_t member in one given entry,
* located at a specified offset. Inline operations expect this as an entry.
*/
typedef struct {
struct qemu_plugin_scoreboard *score;
size_t offset;
} qemu_plugin_u64;
/**
* enum qemu_plugin_cb_flags - type of callback
@ -297,23 +314,20 @@ enum qemu_plugin_op {
};
/**
* qemu_plugin_register_vcpu_tb_exec_inline() - execution inline op
* qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
* @tb: the opaque qemu_plugin_tb handle for the translation
* @op: the type of qemu_plugin_op (e.g. ADD_U64)
* @ptr: the target memory location for the op
* @entry: entry to run op
* @imm: the op data (e.g. 1)
*
* Insert an inline op to every time a translated unit executes.
* Useful if you just want to increment a single counter somewhere in
* memory.
*
* Note: ops are not atomic so in multi-threaded/multi-smp situations
* you will get inexact results.
* Insert an inline op on a given scoreboard entry.
*/
QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
enum qemu_plugin_op op,
void *ptr, uint64_t imm);
void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
struct qemu_plugin_tb *tb,
enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
/**
* qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb
@ -331,19 +345,20 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *userdata);
/**
* qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op
* qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
* @insn: the opaque qemu_plugin_insn handle for an instruction
* @op: the type of qemu_plugin_op (e.g. ADD_U64)
* @ptr: the target memory location for the op
* @entry: entry to run op
* @imm: the op data (e.g. 1)
*
* Insert an inline op to every time an instruction executes. Useful
* if you just want to increment a single counter somewhere in memory.
* Insert an inline op to be run every time an instruction executes.
*/
QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
enum qemu_plugin_op op,
void *ptr, uint64_t imm);
void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
struct qemu_plugin_insn *insn,
enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
/**
* qemu_plugin_tb_n_insns() - query helper for number of insns in TB
@ -553,24 +568,23 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
void *userdata);
/**
* qemu_plugin_register_vcpu_mem_inline() - register an inline op to any memory access
* qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
* @insn: handle for instruction to instrument
* @rw: apply to reads, writes or both
* @op: the op, of type qemu_plugin_op
* @ptr: pointer memory for the op
* @entry: entry to run op
* @imm: immediate data for @op
*
* This registers an inline op for every memory access generated by the
* instruction. This provides for a lightweight but not thread-safe
* way of counting the number of operations done.
* instruction.
*/
QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr,
uint64_t imm);
void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
struct qemu_plugin_insn *insn,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
typedef void
(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index,
@ -752,5 +766,75 @@ QEMU_PLUGIN_API
int qemu_plugin_read_register(struct qemu_plugin_register *handle,
GByteArray *buf);
/**
* qemu_plugin_scoreboard_new() - alloc a new scoreboard
*
* @element_size: size (in bytes) for one entry
*
* Returns a pointer to a new scoreboard. It must be freed using
* qemu_plugin_scoreboard_free.
*/
QEMU_PLUGIN_API
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size);
/**
* qemu_plugin_scoreboard_free() - free a scoreboard
* @score: scoreboard to free
*/
QEMU_PLUGIN_API
void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
/**
* qemu_plugin_scoreboard_find() - get pointer to an entry of a scoreboard
* @score: scoreboard to query
* @vcpu_index: entry index
*
* Returns the address of the entry matching a given vcpu_index in the
* scoreboard. Note that this address may change if the scoreboard is resized.
*/
QEMU_PLUGIN_API
void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
unsigned int vcpu_index);
/* Macros to define a qemu_plugin_u64 */
#define qemu_plugin_scoreboard_u64(score) \
(qemu_plugin_u64) {score, 0}
#define qemu_plugin_scoreboard_u64_in_struct(score, type, member) \
(qemu_plugin_u64) {score, offsetof(type, member)}
/**
* qemu_plugin_u64_add() - add a value to a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
* @added: value to add
*/
QEMU_PLUGIN_API
void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t added);
/**
* qemu_plugin_u64_get() - get value of a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
*/
QEMU_PLUGIN_API
uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, unsigned int vcpu_index);
/**
* qemu_plugin_u64_set() - set value of a qemu_plugin_u64 for a given vcpu
* @entry: entry to query
* @vcpu_index: entry index
* @val: new value
*/
QEMU_PLUGIN_API
void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
uint64_t val);
/**
* qemu_plugin_u64_sum() - return sum of all vcpu entries in a scoreboard
* @entry: entry to sum
*/
QEMU_PLUGIN_API
uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry);
#endif /* QEMU_QEMU_PLUGIN_H */

View File

@ -134,7 +134,7 @@ extern char safe_syscall_start[];
extern char safe_syscall_end[];
#define safe_syscall(...) \
safe_syscall_base(&((TaskState *)thread_cpu->opaque)->signal_pending, \
safe_syscall_base(&get_task_state(thread_cpu)->signal_pending, \
__VA_ARGS__)
#endif

View File

@ -189,7 +189,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
struct image_info *info = ts->info;
int i;

View File

@ -263,7 +263,7 @@ static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
TaskState *ts = env_cpu(env)->opaque;
TaskState *ts = get_task_state(env_cpu(env));
int rc = EmulateAll(opcode, &ts->fpa, env);
int raise, enabled;
@ -514,7 +514,7 @@ void cpu_loop(CPUARMState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
int i;

View File

@ -177,7 +177,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig,
abi_ulong handler = 0;
abi_ulong handler_fdpic_GOT = 0;
abi_ulong retcode;
bool is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info);
bool is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO;
bool thumb;

View File

@ -72,7 +72,7 @@ void cpu_loop(CPUCRISState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->regs[0] = regs->r0;

View File

@ -4404,7 +4404,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
static int elf_core_dump(int signr, const CPUArchState *env)
{
const CPUState *cpu = env_cpu((CPUArchState *)env);
const TaskState *ts = (const TaskState *)cpu->opaque;
const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
struct rlimit dumpsize;
CountAndSizeRegions css;
off_t offset, note_offset, data_offset;

View File

@ -112,7 +112,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
abi_ulong frame_addr, sp, haddr;
struct target_rt_sigframe *frame;
int i;
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
sp = get_sp_from_cpustate(env);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {

View File

@ -89,7 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
int n = sizeof(abi_ulong);
abi_ulong envp;
abi_ulong argv;

View File

@ -95,7 +95,7 @@ void cpu_loop(CPUM68KState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->pc = regs->pc;

View File

@ -37,7 +37,7 @@ static inline void cpu_clone_regs_parent(CPUM68KState *env, unsigned flags)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
ts->tp_value = newtls;
}

View File

@ -145,10 +145,13 @@ void fork_start(void)
mmap_fork_start();
cpu_list_lock();
qemu_plugin_user_prefork_lock();
gdbserver_fork_start();
}
void fork_end(int child)
void fork_end(pid_t pid)
{
bool child = pid == 0;
qemu_plugin_user_postfork(child);
mmap_fork_end(child);
if (child) {
@ -161,10 +164,11 @@ void fork_end(int child)
}
}
qemu_init_cpu_list();
gdbserver_fork(thread_cpu);
get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
} else {
cpu_list_unlock();
}
gdbserver_fork_end(thread_cpu, pid);
/*
* qemu_init_cpu_list() reinitialized the child exclusive state, but we
* also need to keep current_cpu consistent, so call end_exclusive() for

View File

@ -214,7 +214,7 @@ done_syscall:
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
int i;

View File

@ -486,7 +486,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
int i, err = 0;
#if defined(TARGET_PPC64)
struct target_sigcontext *sc = 0;
struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
struct image_info *image = get_task_state(thread_cpu)->info;
#endif
rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
@ -673,7 +673,7 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
}
if (uold_ctx) {
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
return -TARGET_EFAULT;

View File

@ -162,6 +162,11 @@ typedef struct TaskState {
uint64_t start_boottime;
} TaskState;
/*
 * get_task_state() - return the TaskState associated with a vCPU.
 * @cs: the CPU
 *
 * In user-mode emulation each CPUState stores its TaskState in the
 * generic @opaque field; this accessor replaces the open-coded
 * "(TaskState *)cs->opaque" casts at call sites.
 */
static inline TaskState *get_task_state(CPUState *cs)
{
    return cs->opaque;
}
abi_long do_brk(abi_ulong new_brk);
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
int flags, mode_t mode, bool safe);

View File

@ -97,7 +97,7 @@ void cpu_loop(CPURISCVState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->pc = regs->sepc;

View File

@ -113,7 +113,7 @@ int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
static inline void finish_sigsuspend_mask(int ret)
{
if (ret != -QEMU_ERESTARTSYS) {
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
ts->in_sigsuspend = 1;
}
}

View File

@ -172,7 +172,7 @@ void target_to_host_old_sigset(sigset_t *sigset,
int block_signals(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
sigset_t set;
/* It's OK to block everything including SIGSEGV, because we won't
@ -194,7 +194,7 @@ int block_signals(void)
*/
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
if (oldset) {
*oldset = ts->signal_mask;
@ -237,7 +237,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
*/
void set_sigmask(const sigset_t *set)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
ts->signal_mask = *set;
}
@ -246,7 +246,7 @@ void set_sigmask(const sigset_t *set)
int on_sig_stack(unsigned long sp)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
return (sp - ts->sigaltstack_used.ss_sp
< ts->sigaltstack_used.ss_size);
@ -254,7 +254,7 @@ int on_sig_stack(unsigned long sp)
int sas_ss_flags(unsigned long sp)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
: on_sig_stack(sp) ? SS_ONSTACK : 0);
@ -265,7 +265,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
/*
* This is the X/Open sanctioned signal stack switching.
*/
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
@ -275,7 +275,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
__put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
@ -284,7 +284,7 @@ void target_save_altstack(target_stack_t *uss, CPUArchState *env)
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
size_t minstacksize = TARGET_MINSIGSTKSZ;
target_stack_t ss;
@ -571,7 +571,7 @@ static void signal_table_init(void)
void signal_init(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
struct sigaction act, oact;
/* initialize signal conversion tables */
@ -730,7 +730,7 @@ static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque;
TaskState *ts = get_task_state(cpu);
int host_sig, core_dumped = 0;
/* On exit, undo the remapping of SIGABRT. */
@ -769,7 +769,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig);
@ -954,7 +954,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo;
host_sigcontext *uc = puc;
struct emulated_sigtable *k;
@ -1174,7 +1174,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
sigset_t set;
target_sigset_t target_old_set;
struct target_sigaction *sa;
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */
@ -1256,7 +1256,7 @@ void process_pending_signals(CPUArchState *cpu_env)
{
CPUState *cpu = env_cpu(cpu_env);
int sig;
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
sigset_t set;
sigset_t *blocked_set;
@ -1316,7 +1316,7 @@ void process_pending_signals(CPUArchState *cpu_env)
int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
target_ulong sigsize)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
TaskState *ts = get_task_state(thread_cpu);
sigset_t *host_set = &ts->sigsuspend_mask;
target_sigset_t *target_sigset;

View File

@ -6515,7 +6515,7 @@ static void *clone_func(void *arg)
env = info->env;
cpu = env_cpu(env);
thread_cpu = cpu;
ts = (TaskState *)cpu->opaque;
ts = get_task_state(cpu);
info->tid = sys_gettid();
task_settid(ts);
if (info->child_tidptr)
@ -6557,7 +6557,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
flags &= ~(CLONE_VFORK | CLONE_VM);
if (flags & CLONE_VM) {
TaskState *parent_ts = (TaskState *)cpu->opaque;
TaskState *parent_ts = get_task_state(cpu);
new_thread_info info;
pthread_attr_t attr;
@ -6669,7 +6669,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
if (ret == 0) {
/* Child Process. */
cpu_clone_regs_child(env, newsp, flags);
fork_end(1);
fork_end(ret);
/* There is a race condition here. The parent process could
theoretically read the TID in the child process before the child
tid is set. This would require using either ptrace
@ -6680,7 +6680,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
put_user_u32(sys_gettid(), child_tidptr);
if (flags & CLONE_PARENT_SETTID)
put_user_u32(sys_gettid(), parent_tidptr);
ts = (TaskState *)cpu->opaque;
ts = get_task_state(cpu);
if (flags & CLONE_SETTLS)
cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID)
@ -6700,8 +6700,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
}
#endif
put_user_u32(pid_fd, parent_tidptr);
}
fork_end(0);
}
fork_end(ret);
}
g_assert(!cpu_in_exclusive_context(cpu));
}
@ -7946,7 +7946,7 @@ int host_to_target_waitstatus(int status)
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
CPUState *cpu = env_cpu(cpu_env);
struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
struct linux_binprm *bprm = get_task_state(cpu)->bprm;
int i;
for (i = 0; i < bprm->argc; i++) {
@ -8146,7 +8146,7 @@ static int open_self_smaps(CPUArchState *cpu_env, int fd)
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
CPUState *cpu = env_cpu(cpu_env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
g_autoptr(GString) buf = g_string_new(NULL);
int i;
@ -8187,7 +8187,7 @@ static int open_self_stat(CPUArchState *cpu_env, int fd)
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
CPUState *cpu = env_cpu(cpu_env);
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len;
char *ptr;
@ -9012,7 +9012,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
pthread_mutex_lock(&clone_lock);
if (CPU_NEXT(first_cpu)) {
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr);
@ -9439,7 +9439,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_pause /* not on alpha */
case TARGET_NR_pause:
if (!block_signals()) {
sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
sigsuspend(&get_task_state(cpu)->signal_mask);
}
return -TARGET_EINTR;
#endif
@ -10005,7 +10005,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
sigset_t *set;
#if defined(TARGET_ALPHA)
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
/* target_to_host_old_sigset will bswap back */
abi_ulong mask = tswapal(arg1);
set = &ts->sigsuspend_mask;
@ -10406,7 +10406,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
case TARGET_NR_mprotect:
arg1 = cpu_untagged_addr(cpu, arg1);
{
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
/* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit
@ -12537,7 +12537,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
{
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
ts->tp_value = arg1;
return 0;
}
@ -12551,7 +12551,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
{
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
return ts->tp_value;
}
#else
@ -12676,7 +12676,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_set_tid_address)
case TARGET_NR_set_tid_address:
{
TaskState *ts = cpu->opaque;
TaskState *ts = get_task_state(cpu);
ts->child_tidptr = arg1;
/* do not call host set_tid_address() syscall, instead return tid() */
return get_errno(sys_gettid());

View File

@ -71,7 +71,7 @@ const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
void fork_start(void);
void fork_end(int child);
void fork_end(pid_t pid);
/**
* probe_guest_base:

View File

@ -74,7 +74,7 @@ static inline unsigned int vm_getl(CPUX86State *env,
void save_v86_state(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86;
if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
@ -134,7 +134,7 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
ts->v86flags |= VIF_MASK;
if (ts->v86flags & VIP_MASK) {
@ -147,7 +147,7 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
ts->v86flags &= ~VIF_MASK;
}
@ -165,7 +165,7 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, eflags, ts->v86mask);
set_flags(env->eflags, eflags, SAFE_MASK);
@ -179,7 +179,7 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
set_flags(env->eflags, flags, SAFE_MASK);
@ -193,7 +193,7 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
unsigned int flags;
flags = env->eflags & RETURN_MASK;
@ -210,7 +210,7 @@ static inline unsigned int get_vflags(CPUX86State *env)
static void do_int(CPUX86State *env, int intno)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
uint32_t int_addr, segoffs, ssp;
unsigned int sp;
@ -269,7 +269,7 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done;
@ -394,7 +394,7 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86;
int ret;

View File

@ -157,7 +157,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
{
abi_ulong frame_addr;
struct target_rt_sigframe *frame;
int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info);
int is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
abi_ulong handler = 0;
abi_ulong handler_fdpic_GOT = 0;
uint32_t ra;

View File

@ -101,12 +101,15 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
}
}
void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
enum qemu_plugin_op op,
void *ptr, uint64_t imm)
/*
 * Register an inline operation on a scoreboard entry, executed every
 * time the translation block runs.  Silently ignored for blocks that
 * are instrumented for memory accesses only.
 */
void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
    struct qemu_plugin_tb *tb,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (tb->mem_only) {
        return;
    }
    plugin_register_inline_op_on_entry(
        &tb->cbs[PLUGIN_CB_INLINE], 0, op, entry, imm);
}
@ -125,13 +128,15 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
}
}
void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
enum qemu_plugin_op op,
void *ptr, uint64_t imm)
/*
 * Register an inline operation on a scoreboard entry, executed every
 * time the instruction runs.  Silently ignored for instructions that
 * are instrumented for memory accesses only.
 */
void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (insn->mem_only) {
        return;
    }
    plugin_register_inline_op_on_entry(
        &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], 0, op, entry, imm);
}
@ -147,16 +152,18 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
void *udata)
{
plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
cb, flags, rw, udata);
cb, flags, rw, udata);
}
void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr,
uint64_t imm)
/*
 * Register an inline operation on a scoreboard entry for every memory
 * access generated by the instruction, filtered by @rw.
 */
void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_mem_rw rw,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    /* attach to the instruction's memory-callback inline list */
    GArray **cbs = &insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    plugin_register_inline_op_on_entry(cbs, rw, op, entry, imm);
}
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
@ -378,7 +385,7 @@ const char *qemu_plugin_path_to_binary(void)
{
char *path = NULL;
#ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque;
TaskState *ts = get_task_state(current_cpu);
path = g_strdup(ts->bprm->filename);
#endif
return path;
@ -388,7 +395,7 @@ uint64_t qemu_plugin_start_code(void)
{
uint64_t start = 0;
#ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque;
TaskState *ts = get_task_state(current_cpu);
start = ts->info->start_code;
#endif
return start;
@ -398,7 +405,7 @@ uint64_t qemu_plugin_end_code(void)
{
uint64_t end = 0;
#ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque;
TaskState *ts = get_task_state(current_cpu);
end = ts->info->end_code;
#endif
return end;
@ -408,7 +415,7 @@ uint64_t qemu_plugin_entry_code(void)
{
uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY
TaskState *ts = (TaskState *) current_cpu->opaque;
TaskState *ts = get_task_state(current_cpu);
entry = ts->info->entry;
#endif
return entry;
@ -465,3 +472,56 @@ int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg));
}
/* Public API wrapper: allocate a scoreboard (see plugin_scoreboard_new()). */
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
    return plugin_scoreboard_new(element_size);
}
/* Public API wrapper: release a scoreboard (see plugin_scoreboard_free()). */
void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    plugin_scoreboard_free(score);
}
void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
unsigned int vcpu_index)
{
g_assert(vcpu_index < qemu_plugin_num_vcpus());
/* we can't use g_array_index since entry size is not statically known */
char *base_ptr = score->data->data;
return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}
/* Resolve @entry for @vcpu_index down to the raw uint64_t location. */
static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
                                    unsigned int vcpu_index)
{
    char *base = qemu_plugin_scoreboard_find(entry.score, vcpu_index);

    return (uint64_t *)(base + entry.offset);
}
/* Add @added to @entry's counter for @vcpu_index (plain RMW, not atomic). */
void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t added)
{
    uint64_t *counter = plugin_u64_address(entry, vcpu_index);

    *counter += added;
}
/* Read the current value of @entry for @vcpu_index. */
uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
                             unsigned int vcpu_index)
{
    return *plugin_u64_address(entry, vcpu_index);
}
/* Overwrite the value of @entry for @vcpu_index with @val. */
void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t val)
{
    *plugin_u64_address(entry, vcpu_index) = val;
}
/*
 * Sum @entry across all currently-initialized vcpus.  The result is a
 * snapshot; counters may keep changing while we iterate.
 */
uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
{
    uint64_t sum = 0;
    int n_vcpus = qemu_plugin_num_vcpus();

    for (int vcpu = 0; vcpu < n_vcpus; vcpu++) {
        sum += qemu_plugin_u64_get(entry, vcpu);
    }
    return sum;
}

View File

@ -18,6 +18,7 @@
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
@ -215,6 +216,35 @@ CPUPluginState *qemu_plugin_create_vcpu_state(void)
return g_new0(CPUPluginState, 1);
}
/*
 * Grow all live scoreboards so they can be indexed by @cpu->cpu_index.
 * Called with plugin.lock held (see qemu_plugin_vcpu_init_hook()).
 *
 * The allocated size only ever doubles, so existing entries keep their
 * index; however each GArray's backing storage may move, so every TB
 * (which may have cached entry pointers in generated code) must be
 * flushed under exclusive execution.
 */
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
        return;
    }
    /*
     * Double until the new vcpu index fits.  Past the guard above this
     * loop always runs at least once, so the sizes have changed by the
     * time we get below (the original "need_realloc" flag was dead).
     * Use C99 false/true rather than glib's FALSE/TRUE for a C bool.
     */
    while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
        plugin.scoreboard_alloc_size *= 2;
    }
    if (QLIST_EMPTY(&plugin.scoreboards)) {
        /* nothing to resize now; we only updated sizes for future ones */
        return;
    }
    /* cpus must be stopped, as a tb might still use an existing scoreboard */
    start_exclusive();
    struct qemu_plugin_scoreboard *score;
    QLIST_FOREACH(score, &plugin.scoreboards, entry) {
        g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    }
    /* force all tb to be flushed, as scoreboard pointers were changed */
    tb_flush(cpu);
    end_exclusive();
}
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
bool success;
@ -225,6 +255,7 @@ void qemu_plugin_vcpu_init_hook(CPUState *cpu)
success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
&cpu->cpu_index);
g_assert(success);
plugin_grow_scoreboards__locked(cpu);
qemu_rec_mutex_unlock(&plugin.lock);
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
@ -285,17 +316,19 @@ static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}
void plugin_register_inline_op(GArray **arr,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr,
uint64_t imm)
/*
 * Append an inline-op dynamic callback to @arr, targeting scoreboard
 * entry @entry.  @rw is only meaningful for memory callbacks.
 */
void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *cb = plugin_get_dyn_cb(arr);

    cb->type = PLUGIN_CB_INLINE;
    cb->rw = rw;
    /* inline ops carry their target in inline_insn, not userp */
    cb->userp = NULL;
    cb->inline_insn.entry = entry;
    cb->inline_insn.op = op;
    cb->inline_insn.imm = imm;
}
@ -443,9 +476,13 @@ void qemu_plugin_flush_cb(void)
plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index)
{
uint64_t *val = cb->userp;
char *ptr = cb->inline_insn.entry.score->data->data;
size_t elem_size = g_array_get_element_size(
cb->inline_insn.entry.score->data);
size_t offset = cb->inline_insn.entry.offset;
uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);
switch (cb->inline_insn.op) {
case QEMU_PLUGIN_INLINE_ADD_U64:
@ -478,7 +515,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
vaddr, cb->userp);
break;
case PLUGIN_CB_INLINE:
exec_inline_op(cb);
exec_inline_op(cb, cpu->cpu_index);
break;
default:
g_assert_not_reached();
@ -578,6 +615,8 @@ static void __attribute__((__constructor__)) plugin_init(void)
qemu_rec_mutex_init(&plugin.lock);
plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
QLIST_INIT(&plugin.scoreboards);
plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
QTAILQ_INIT(&plugin.ctxs);
qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
QHT_MODE_AUTO_RESIZE);
@ -588,3 +627,27 @@ int plugin_num_vcpus(void)
{
return plugin.num_vcpus;
}
/*
 * Allocate a new scoreboard of @element_size-sized entries, sized for
 * the current vcpu allocation, and link it into the global list so it
 * is grown when more vcpus appear.
 */
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    /* g_new0 is type-checked; matches g_new0(CPUPluginState, 1) above */
    struct qemu_plugin_scoreboard *score =
        g_new0(struct qemu_plugin_scoreboard, 1);
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);
    return score;
}
/*
 * Tear down a scoreboard created by plugin_scoreboard_new().
 *
 * The entry is unlinked from plugin.scoreboards under the plugin lock
 * so the global list stays consistent; the backing array and the
 * descriptor itself are then freed.
 */
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);
    /* TRUE: free the element data as well as the GArray wrapper */
    g_array_free(score->data, TRUE);
    g_free(score);
}

View File

@ -31,6 +31,8 @@ struct qemu_plugin_state {
* but with the HT we avoid adding a field to CPUState.
*/
GHashTable *cpu_ht;
QLIST_HEAD(, qemu_plugin_scoreboard) scoreboards;
size_t scoreboard_alloc_size;
DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX);
/*
* @lock protects the struct as well as ctx->uninstalling.
@ -66,10 +68,11 @@ struct qemu_plugin_ctx {
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id);
void plugin_register_inline_op(GArray **arr,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr,
uint64_t imm);
void plugin_register_inline_op_on_entry(GArray **arr,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op,
qemu_plugin_u64 entry,
uint64_t imm);
void plugin_reset_uninstall(qemu_plugin_id_t id,
qemu_plugin_simple_cb_t cb,
@ -97,8 +100,12 @@ void plugin_register_vcpu_mem_cb(GArray **arr,
enum qemu_plugin_mem_rw rw,
void *udata);
void exec_inline_op(struct qemu_plugin_dyn_cb *cb);
void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index);
int plugin_num_vcpus(void);
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size);
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
#endif /* PLUGIN_H */

View File

@ -27,20 +27,27 @@
qemu_plugin_register_vcpu_idle_cb;
qemu_plugin_register_vcpu_init_cb;
qemu_plugin_register_vcpu_insn_exec_cb;
qemu_plugin_register_vcpu_insn_exec_inline;
qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu;
qemu_plugin_register_vcpu_mem_cb;
qemu_plugin_register_vcpu_mem_inline;
qemu_plugin_register_vcpu_mem_inline_per_vcpu;
qemu_plugin_register_vcpu_resume_cb;
qemu_plugin_register_vcpu_syscall_cb;
qemu_plugin_register_vcpu_syscall_ret_cb;
qemu_plugin_register_vcpu_tb_exec_cb;
qemu_plugin_register_vcpu_tb_exec_inline;
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
qemu_plugin_register_vcpu_tb_trans_cb;
qemu_plugin_reset;
qemu_plugin_scoreboard_free;
qemu_plugin_scoreboard_find;
qemu_plugin_scoreboard_new;
qemu_plugin_start_code;
qemu_plugin_tb_get_insn;
qemu_plugin_tb_n_insns;
qemu_plugin_tb_vaddr;
qemu_plugin_u64_add;
qemu_plugin_u64_get;
qemu_plugin_u64_set;
qemu_plugin_u64_sum;
qemu_plugin_uninstall;
qemu_plugin_vcpu_for_each;
};

View File

@ -214,7 +214,7 @@ static target_ulong syscall_err;
static inline uint32_t get_swi_errno(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
return ts->swi_errno;
#else
@ -226,7 +226,7 @@ static void common_semi_cb(CPUState *cs, uint64_t ret, int err)
{
if (err) {
#ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
ts->swi_errno = err;
#else
syscall_err = err;
@ -586,7 +586,7 @@ void do_common_semihosting(CPUState *cs)
#if !defined(CONFIG_USER_ONLY)
const char *cmdline;
#else
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
#endif
GET_ARG(0);
GET_ARG(1);
@ -664,7 +664,7 @@ void do_common_semihosting(CPUState *cs)
target_ulong retvals[4];
int i;
#ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque;
TaskState *ts = get_task_state(cs);
target_ulong limit;
#else
LayoutInfo info = common_semi_find_bases(cs);

View File

@ -120,10 +120,15 @@ static const char *get_csr_name(unsigned num)
csr_names[num] : "Undefined CSR";
}
#define output(C, INSN, FMT, ...) \
{ \
(C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT, \
(C)->insn, INSN, ##__VA_ARGS__); \
/*
 * Emit one disassembled instruction via the disassemble_info callbacks.
 * The raw 32-bit opcode word is prefixed only when the caller asked for
 * it through info->show_opcodes; otherwise just mnemonic + operands.
 */
#define output(C, INSN, FMT, ...)                                    \
{                                                                    \
    if ((C)->info->show_opcodes) {                                   \
        (C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT,\
                                (C)->insn, INSN, ##__VA_ARGS__);     \
    } else {                                                         \
        (C)->info->fprintf_func((C)->info->stream, "%-9s\t" FMT,     \
                                INSN, ##__VA_ARGS__);                \
    }                                                                \
}
#include "decode-insns.c.inc"

View File

@ -17,27 +17,25 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
typedef struct {
GMutex lock;
int index;
uint64_t bb_count;
uint64_t insn_count;
} CPUCount;
/* Used by the inline & linux-user counts */
static bool do_inline;
static CPUCount inline_count;
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 bb_count;
static qemu_plugin_u64 insn_count;
static bool do_inline;
/* Dump running CPU total on idle? */
static bool idle_report;
static GPtrArray *counts;
static int max_cpus;
static void gen_one_cpu_report(CPUCount *count, GString *report)
static void gen_one_cpu_report(CPUCount *count, GString *report,
unsigned int cpu_index)
{
if (count->bb_count) {
g_string_append_printf(report, "CPU%d: "
"bb's: %" PRIu64", insns: %" PRIu64 "\n",
count->index,
cpu_index,
count->bb_count, count->insn_count);
}
}
@ -46,20 +44,23 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
{
g_autoptr(GString) report = g_string_new("");
if (do_inline || !max_cpus) {
g_string_printf(report, "bb's: %" PRIu64", insns: %" PRIu64 "\n",
inline_count.bb_count, inline_count.insn_count);
} else {
g_ptr_array_foreach(counts, (GFunc) gen_one_cpu_report, report);
for (int i = 0; i < qemu_plugin_num_vcpus(); ++i) {
CPUCount *count = qemu_plugin_scoreboard_find(counts, i);
gen_one_cpu_report(count, report, i);
}
g_string_append_printf(report, "Total: "
"bb's: %" PRIu64", insns: %" PRIu64 "\n",
qemu_plugin_u64_sum(bb_count),
qemu_plugin_u64_sum(insn_count));
qemu_plugin_outs(report->str);
qemu_plugin_scoreboard_free(counts);
}
static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index)
{
CPUCount *count = g_ptr_array_index(counts, cpu_index);
CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index);
g_autoptr(GString) report = g_string_new("");
gen_one_cpu_report(count, report);
gen_one_cpu_report(count, report, cpu_index);
if (report->len > 0) {
g_string_prepend(report, "Idling ");
@ -69,14 +70,11 @@ static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index)
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
CPUCount *count = max_cpus ?
g_ptr_array_index(counts, cpu_index) : &inline_count;
CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index);
uintptr_t n_insns = (uintptr_t)udata;
g_mutex_lock(&count->lock);
count->insn_count += n_insns;
count->bb_count++;
g_mutex_unlock(&count->lock);
}
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
@ -84,11 +82,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
size_t n_insns = qemu_plugin_tb_n_insns(tb);
if (do_inline) {
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
&inline_count.bb_count, 1);
qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
&inline_count.insn_count,
n_insns);
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count, 1);
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, n_insns);
} else {
qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
QEMU_PLUGIN_CB_NO_REGS,
@ -121,18 +118,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
}
}
if (info->system_emulation && !do_inline) {
max_cpus = info->system.max_vcpus;
counts = g_ptr_array_new();
for (i = 0; i < max_cpus; i++) {
CPUCount *count = g_new0(CPUCount, 1);
g_mutex_init(&count->lock);
count->index = i;
g_ptr_array_add(counts, count);
}
} else if (!do_inline) {
g_mutex_init(&inline_count.lock);
}
counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
bb_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, bb_count);
insn_count = qemu_plugin_scoreboard_u64_in_struct(
counts, CPUCount, insn_count);
if (idle_report) {
qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);

186
tests/plugin/inline.c Normal file
View File

@ -0,0 +1,186 @@
/*
* Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
*
* Demonstrates and tests usage of inline ops.
*
* License: GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <qemu-plugin.h>
typedef struct {
uint64_t count_tb;
uint64_t count_tb_inline;
uint64_t count_insn;
uint64_t count_insn_inline;
uint64_t count_mem;
uint64_t count_mem_inline;
} CPUCount;
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 count_tb;
static qemu_plugin_u64 count_tb_inline;
static qemu_plugin_u64 count_insn;
static qemu_plugin_u64 count_insn_inline;
static qemu_plugin_u64 count_mem;
static qemu_plugin_u64 count_mem_inline;
static uint64_t global_count_tb;
static uint64_t global_count_insn;
static uint64_t global_count_mem;
static unsigned int max_cpu_index;
static GMutex tb_lock;
static GMutex insn_lock;
static GMutex mem_lock;
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
static void stats_insn(void)
{
const uint64_t expected = global_count_insn;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_insn_inline);
printf("insn: %" PRIu64 "\n", expected);
printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
static void stats_tb(void)
{
const uint64_t expected = global_count_tb;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_tb_inline);
printf("tb: %" PRIu64 "\n", expected);
printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
static void stats_mem(void)
{
const uint64_t expected = global_count_mem;
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_mem);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_mem_inline);
printf("mem: %" PRIu64 "\n", expected);
printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
}
/*
 * Atexit hook: print each vCPU's counter pairs, assert that the
 * callback-based and inline counters agree per vCPU, then validate the
 * summed counters against the global totals and free the scoreboard.
 */
static void plugin_exit(qemu_plugin_id_t id, void *udata)
{
    const unsigned int nb_cpus = qemu_plugin_num_vcpus();

    /* every vCPU index observed during execution must be accounted for */
    g_assert(nb_cpus == max_cpu_index + 1);

    for (int cpu = 0; cpu < nb_cpus; ++cpu) {
        const uint64_t tb = qemu_plugin_u64_get(count_tb, cpu);
        const uint64_t tb_inline = qemu_plugin_u64_get(count_tb_inline, cpu);
        const uint64_t insn = qemu_plugin_u64_get(count_insn, cpu);
        const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, cpu);
        const uint64_t mem = qemu_plugin_u64_get(count_mem, cpu);
        const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, cpu);

        printf("cpu %d: tb (%" PRIu64 ", %" PRIu64 ") | "
               "insn (%" PRIu64 ", %" PRIu64 ") | "
               "mem (%" PRIu64 ", %" PRIu64 ")"
               "\n",
               cpu, tb, tb_inline, insn, insn_inline, mem, mem_inline);

        g_assert(tb == tb_inline);
        g_assert(insn == insn_inline);
        g_assert(mem == mem_inline);
    }

    stats_tb();
    stats_insn();
    stats_mem();

    qemu_plugin_scoreboard_free(counts);
}
/*
 * TB-execution callback: bump this vCPU's TB counter, then update the
 * mutex-protected global total and the highest vCPU index seen (both
 * are used by plugin_exit() to cross-check the per-vCPU sums).
 */
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
    qemu_plugin_u64_add(count_tb, cpu_index, 1);
    g_mutex_lock(&tb_lock);
    max_cpu_index = MAX(max_cpu_index, cpu_index);
    global_count_tb++;
    g_mutex_unlock(&tb_lock);
}
/*
 * Instruction-execution callback: per-vCPU counter plus a locked global
 * total; the global is the reference value checked in stats_insn().
 */
static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
{
    qemu_plugin_u64_add(count_insn, cpu_index, 1);
    g_mutex_lock(&insn_lock);
    global_count_insn++;
    g_mutex_unlock(&insn_lock);
}
/*
 * Memory-access callback: count every access on this vCPU and in the
 * locked global total.  info/vaddr/userdata are unused here; the
 * callback only tallies how often it fires.
 */
static void vcpu_mem_access(unsigned int cpu_index,
                            qemu_plugin_meminfo_t info,
                            uint64_t vaddr,
                            void *userdata)
{
    qemu_plugin_u64_add(count_mem, cpu_index, 1);
    g_mutex_lock(&mem_lock);
    global_count_mem++;
    g_mutex_unlock(&mem_lock);
}
/*
 * Translation hook: instrument each TB, each instruction and each
 * memory access twice -- once with a regular callback and once with an
 * inline ADD_U64 op -- so plugin_exit() can verify that both counting
 * mechanisms produce identical results.
 */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    qemu_plugin_register_vcpu_tb_exec_cb(
        tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, 0);
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);
    for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
        qemu_plugin_register_vcpu_insn_exec_cb(
            insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, 0);
        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
            insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);
        /* QEMU_PLUGIN_MEM_RW: count both loads and stores */
        qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, 0);
        qemu_plugin_register_vcpu_mem_inline_per_vcpu(
            insn, QEMU_PLUGIN_MEM_RW,
            QEMU_PLUGIN_INLINE_ADD_U64,
            count_mem_inline, 1);
    }
}
/*
 * Plugin entry point: allocate the per-vCPU CPUCount scoreboard, expose
 * each struct field as a qemu_plugin_u64 handle, and hook translation
 * plus exit.  argc/argv are unused: this test takes no options.
 */
QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));

    /* one handle per CPUCount field, grouped callback/inline pairwise */
    count_tb = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_tb);
    count_tb_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_tb_inline);
    count_insn = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_insn);
    count_insn_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_insn_inline);
    count_mem = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_mem);
    count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
        counts, CPUCount, count_mem_inline);

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

View File

@ -16,25 +16,21 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
#define MAX_CPUS 8 /* lets not go nuts */
typedef struct {
uint64_t insn_count;
} InstructionCount;
static InstructionCount counts[MAX_CPUS];
static uint64_t inline_insn_count;
static qemu_plugin_u64 insn_count;
static bool do_inline;
static bool do_size;
static GArray *sizes;
typedef struct {
uint64_t hits;
uint64_t last_hit;
uint64_t total_delta;
} MatchCount;
typedef struct {
char *match_string;
uint64_t hits[MAX_CPUS];
uint64_t last_hit[MAX_CPUS];
uint64_t total_delta[MAX_CPUS];
GPtrArray *history[MAX_CPUS];
struct qemu_plugin_scoreboard *counts; /* MatchCount */
} Match;
static GArray *matches;
@ -67,41 +63,40 @@ static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{
unsigned int i = cpu_index % MAX_CPUS;
InstructionCount *c = &counts[i];
c->insn_count++;
qemu_plugin_u64_add(insn_count, cpu_index, 1);
}
static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata)
{
unsigned int i = cpu_index % MAX_CPUS;
Instruction *insn = (Instruction *) udata;
Match *match = insn->match;
Match *insn_match = insn->match;
MatchCount *match = qemu_plugin_scoreboard_find(insn_match->counts,
cpu_index);
g_autoptr(GString) ts = g_string_new("");
insn->hits++;
g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits",
insn->vaddr, insn->disas, insn->hits);
uint64_t icount = counts[i].insn_count;
uint64_t delta = icount - match->last_hit[i];
uint64_t icount = qemu_plugin_u64_get(insn_count, cpu_index);
uint64_t delta = icount - match->last_hit;
match->hits[i]++;
match->total_delta[i] += delta;
match->hits++;
match->total_delta += delta;
g_string_append_printf(ts,
", %"PRId64" match hits, "
"Δ+%"PRId64 " since last match,"
" , cpu %u,"
" %"PRId64" match hits,"
" Δ+%"PRId64 " since last match,"
" %"PRId64 " avg insns/match\n",
match->hits[i], delta,
match->total_delta[i] / match->hits[i]);
cpu_index,
match->hits, delta,
match->total_delta / match->hits);
match->last_hit[i] = icount;
match->last_hit = icount;
qemu_plugin_outs(ts->str);
g_ptr_array_add(match->history[i], insn);
}
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
@ -113,8 +108,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
if (do_inline) {
qemu_plugin_register_vcpu_insn_exec_inline(
insn, QEMU_PLUGIN_INLINE_ADD_U64, &inline_insn_count, 1);
qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
} else {
uint64_t vaddr = qemu_plugin_insn_vaddr(insn);
qemu_plugin_register_vcpu_insn_exec_cb(
@ -136,10 +131,9 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
* information about the instruction which we also need to
* save if there is a hit.
*/
if (matches) {
if (matches->len) {
char *insn_disas = qemu_plugin_insn_disas(insn);
int j;
for (j = 0; j < matches->len; j++) {
for (int j = 0; j < matches->len; j++) {
Match *m = &g_array_index(matches, Match, j);
if (g_str_has_prefix(insn_disas, m->match_string)) {
Instruction *rec = g_new0(Instruction, 1);
@ -169,36 +163,33 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
"len %d bytes: %ld insns\n", i, *cnt);
}
}
} else if (do_inline) {
g_string_append_printf(out, "insns: %" PRIu64 "\n", inline_insn_count);
} else {
uint64_t total_insns = 0;
for (i = 0; i < MAX_CPUS; i++) {
InstructionCount *c = &counts[i];
if (c->insn_count) {
g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
i, c->insn_count);
total_insns += c->insn_count;
}
for (i = 0; i < qemu_plugin_num_vcpus(); i++) {
g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
i, qemu_plugin_u64_get(insn_count, i));
}
g_string_append_printf(out, "total insns: %" PRIu64 "\n",
total_insns);
qemu_plugin_u64_sum(insn_count));
}
qemu_plugin_outs(out->str);
qemu_plugin_scoreboard_free(insn_count.score);
for (i = 0; i < matches->len; ++i) {
Match *m = &g_array_index(matches, Match, i);
g_free(m->match_string);
qemu_plugin_scoreboard_free(m->counts);
}
g_array_free(matches, TRUE);
g_array_free(sizes, TRUE);
}
/* Add a match to the array of matches */
static void parse_match(char *match)
{
Match new_match = { .match_string = match };
int i;
for (i = 0; i < MAX_CPUS; i++) {
new_match.history[i] = g_ptr_array_new();
}
if (!matches) {
matches = g_array_new(false, true, sizeof(Match));
}
Match new_match = {
.match_string = g_strdup(match),
.counts = qemu_plugin_scoreboard_new(sizeof(MatchCount)) };
g_array_append_val(matches, new_match);
}
@ -206,6 +197,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
const qemu_info_t *info,
int argc, char **argv)
{
matches = g_array_new(false, true, sizeof(Match));
/* null terminated so 0 is not a special case */
sizes = g_array_new(true, true, sizeof(unsigned long));
for (int i = 0; i < argc; i++) {
char *opt = argv[i];
g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
@ -227,9 +222,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
}
}
if (do_size) {
sizes = g_array_new(true, true, sizeof(unsigned long));
}
insn_count = qemu_plugin_scoreboard_u64(
qemu_plugin_scoreboard_new(sizeof(uint64_t)));
/* Register init, translation block and exit callbacks */
qemu_plugin_register_vcpu_init_cb(id, vcpu_init);

View File

@ -16,9 +16,14 @@
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
static uint64_t inline_mem_count;
static uint64_t cb_mem_count;
static uint64_t io_count;
typedef struct {
uint64_t mem_count;
uint64_t io_count;
} CPUCount;
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 mem_count;
static qemu_plugin_u64 io_count;
static bool do_inline, do_callback;
static bool do_haddr;
static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
@ -27,16 +32,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
{
g_autoptr(GString) out = g_string_new("");
if (do_inline) {
g_string_printf(out, "inline mem accesses: %" PRIu64 "\n", inline_mem_count);
}
if (do_callback) {
g_string_append_printf(out, "callback mem accesses: %" PRIu64 "\n", cb_mem_count);
if (do_inline || do_callback) {
g_string_printf(out, "mem accesses: %" PRIu64 "\n",
qemu_plugin_u64_sum(mem_count));
}
if (do_haddr) {
g_string_append_printf(out, "io accesses: %" PRIu64 "\n", io_count);
g_string_append_printf(out, "io accesses: %" PRIu64 "\n",
qemu_plugin_u64_sum(io_count));
}
qemu_plugin_outs(out->str);
qemu_plugin_scoreboard_free(counts);
}
static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
@ -46,12 +51,12 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
struct qemu_plugin_hwaddr *hwaddr;
hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
if (qemu_plugin_hwaddr_is_io(hwaddr)) {
io_count++;
qemu_plugin_u64_add(io_count, cpu_index, 1);
} else {
cb_mem_count++;
qemu_plugin_u64_add(mem_count, cpu_index, 1);
}
} else {
cb_mem_count++;
qemu_plugin_u64_add(mem_count, cpu_index, 1);
}
}
@ -64,9 +69,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
if (do_inline) {
qemu_plugin_register_vcpu_mem_inline(insn, rw,
QEMU_PLUGIN_INLINE_ADD_U64,
&inline_mem_count, 1);
qemu_plugin_register_vcpu_mem_inline_per_vcpu(
insn, rw,
QEMU_PLUGIN_INLINE_ADD_U64,
mem_count, 1);
}
if (do_callback) {
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
@ -117,6 +123,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
}
}
if (do_inline && do_callback) {
fprintf(stderr,
"can't enable inline and callback counting at the same time\n");
return -1;
}
counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
mem_count = qemu_plugin_scoreboard_u64_in_struct(
counts, CPUCount, mem_count);
io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count);
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
return 0;

View File

@ -1,6 +1,6 @@
t = []
if get_option('plugins')
foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall']
foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall']
if host_os == 'windows'
t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c',
include_directories: '../../include/qemu',

View File

@ -24,7 +24,7 @@
#include "libqos-malloc.h"
/* maximum path length */
#define QOS_PATH_MAX_ELEMENT_SIZE 64
#define QOS_PATH_MAX_ELEMENT_SIZE 128
typedef struct QOSGraphObject QOSGraphObject;
typedef struct QOSGraphNode QOSGraphNode;

View File

@ -168,7 +168,7 @@ RUN_TESTS+=$(EXTRA_RUNS)
# Some plugins need additional arguments above the default to fully
# exercise things. We can define them on a per-test basis here.
run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true$(COMMA)callback=true
run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true
ifeq ($(filter %-softmmu, $(TARGET)),)
run-%: %

View File

@ -106,6 +106,20 @@ run-gdbstub-catch-syscalls: catch-syscalls
--bin $< --test $(MULTIARCH_SRC)/gdbstub/catch-syscalls.py, \
hitting a syscall catchpoint)
run-gdbstub-follow-fork-mode-child: follow-fork-mode
$(call run-test, $@, $(GDB_SCRIPT) \
--gdb $(GDB) \
--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-child.py, \
following children on fork)
run-gdbstub-follow-fork-mode-parent: follow-fork-mode
$(call run-test, $@, $(GDB_SCRIPT) \
--gdb $(GDB) \
--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \
following parents on fork)
else
run-gdbstub-%:
$(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support")
@ -113,7 +127,8 @@ endif
EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \
run-gdbstub-proc-mappings run-gdbstub-thread-breakpoint \
run-gdbstub-registers run-gdbstub-prot-none \
run-gdbstub-catch-syscalls
run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \
run-gdbstub-follow-fork-mode-parent
# ARM Compatible Semi Hosting Tests
#

View File

@ -0,0 +1,56 @@
/*
* Test GDB's follow-fork-mode.
*
* fork() a chain of processes.
* Parents sends one byte to their children, and children return their
* position in the chain, in order to prove that they survived GDB's fork()
* handling.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <assert.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
/*
 * Deliberately empty: exists only to give the GDB test scripts a stable
 * symbol to set a breakpoint on immediately after each fork().
 */
void break_after_fork(void)
{
}
/*
 * Build a chain of `depth` forked processes.
 *
 * Each parent writes one byte to its child over a pipe and then waits
 * for it; each child checks the byte and loops around to fork the next
 * link.  The deepest child exits with `depth`, and every parent exits
 * with its child's status minus one, so the original process exits 0
 * only if the whole chain survived the debugger's fork handling.
 */
int main(void)
{
    const int depth = 42;

    for (int i = 0; i < depth; i++) {
        int pipefd[2];
        int rc = pipe(pipefd);
        assert(rc == 0);

        pid_t child = fork();
        break_after_fork();
        assert(child != -1);

        if (child == 0) {
            /* Child: receive the parent's byte, then continue the chain. */
            char byte;
            close(pipefd[1]);
            ssize_t nread = read(pipefd[0], &byte, 1);
            close(pipefd[0]);
            assert(nread == 1);
            assert(byte == (char)i);
        } else {
            /* Parent: poke the child, then propagate its exit status. */
            char byte = (char)i;
            close(pipefd[0]);
            ssize_t nwritten = write(pipefd[1], &byte, 1);
            close(pipefd[1]);
            assert(nwritten == 1);

            int status;
            pid_t waited = waitpid(child, &status, 0);
            assert(waited == child);
            assert(WIFEXITED(status));
            return WEXITSTATUS(status) - 1;
        }
    }

    /* only the deepest child gets here */
    return depth;
}

View File

@ -0,0 +1,40 @@
"""Test GDB's follow-fork-mode child.
SPDX-License-Identifier: GPL-2.0-or-later
"""
from test_gdbstub import main, report
def run_test():
    """Run through the tests one by one"""
    # Follow the child after each fork(); the parent keeps running freely.
    gdb.execute("set follow-fork-mode child")
    # Check that the parent breakpoints are unset.
    gdb.execute("break break_after_fork")
    # Check that the parent syscall catchpoints are unset.
    # Skip this check on the architectures that don't have them.
    have_fork_syscall = False
    for fork_syscall in ("fork", "clone", "clone2", "clone3"):
        try:
            gdb.execute("catch syscall {}".format(fork_syscall))
        except gdb.error:
            # this architecture doesn't know that syscall name
            pass
        else:
            have_fork_syscall = True
    gdb.execute("continue")
    # One iteration per fork in the chain built by follow-fork-mode.c.
    for i in range(42):
        if have_fork_syscall:
            # syscall entry.
            if i % 2 == 0:
                # Check that the parent single-stepping is turned off.
                gdb.execute("si")
            else:
                gdb.execute("continue")
            # syscall exit.
            gdb.execute("continue")
        # break_after_fork()
        gdb.execute("continue")
    # The deepest child exits with 42 (see follow-fork-mode.c).
    exitcode = int(gdb.parse_and_eval("$_exitcode"))
    report(exitcode == 42, "{} == 42".format(exitcode))
main(run_test)

View File

@ -0,0 +1,16 @@
"""Test GDB's follow-fork-mode parent.
SPDX-License-Identifier: GPL-2.0-or-later
"""
from test_gdbstub import main, report
def run_test():
    """Run through the tests one by one"""
    # Stay attached to the parent across every fork() in the chain.
    gdb.execute("set follow-fork-mode parent")
    gdb.execute("continue")
    # The original (outermost) process exits 0 (see follow-fork-mode.c).
    exitcode = int(gdb.parse_and_eval("$_exitcode"))
    report(exitcode == 0, "{} == 0".format(exitcode))
main(run_test)