mirror of
https://gitlab.com/qemu-project/qemu
synced 2024-10-07 03:29:37 +00:00
trace: Allocate cpu->trace_dstate in place
There's little point in dynamically allocating the bitmap if we know at compile-time the max number of events we want to support. Thus, make room in the struct for the bitmap, which will make things easier later: this paves the way for upcoming changes, in which we'll use a u32 to fully capture cpu->trace_dstate. This change also increases performance by saving a dereference and improving locality — note that this is important since upcoming work makes reading this bitmap fairly common. Signed-off-by: Emilio G. Cota <cota@braap.org> Reviewed-by: Lluís Vilanova <vilanova@ac.upc.edu> Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu> Message-id: 149915725977.6295.15069969323605305641.stgit@frigg.lan Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
4871b51b92
commit
d01c05c955
|
@ -259,6 +259,7 @@ typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
|
||||||
struct qemu_work_item;
|
struct qemu_work_item;
|
||||||
|
|
||||||
#define CPU_UNSET_NUMA_NODE_ID -1
|
#define CPU_UNSET_NUMA_NODE_ID -1
|
||||||
|
#define CPU_TRACE_DSTATE_MAX_EVENTS 32
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* CPUState:
|
* CPUState:
|
||||||
|
@ -370,12 +371,8 @@ struct CPUState {
|
||||||
struct KVMState *kvm_state;
|
struct KVMState *kvm_state;
|
||||||
struct kvm_run *kvm_run;
|
struct kvm_run *kvm_run;
|
||||||
|
|
||||||
/*
|
/* Used for events with 'vcpu' and *without* the 'disabled' properties */
|
||||||
* Used for events with 'vcpu' and *without* the 'disabled' properties.
|
DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
|
||||||
* Dynamically allocated based on bitmap requried to hold up to
|
|
||||||
* trace_get_vcpu_event_count() entries.
|
|
||||||
*/
|
|
||||||
unsigned long *trace_dstate;
|
|
||||||
|
|
||||||
/* TODO Move common fields from CPUArchState here. */
|
/* TODO Move common fields from CPUArchState here. */
|
||||||
int cpu_index; /* used by alpha TCG */
|
int cpu_index; /* used by alpha TCG */
|
||||||
|
|
|
@ -380,7 +380,6 @@ static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
|
||||||
|
|
||||||
static void cpu_common_initfn(Object *obj)
|
static void cpu_common_initfn(Object *obj)
|
||||||
{
|
{
|
||||||
uint32_t count;
|
|
||||||
CPUState *cpu = CPU(obj);
|
CPUState *cpu = CPU(obj);
|
||||||
CPUClass *cc = CPU_GET_CLASS(obj);
|
CPUClass *cc = CPU_GET_CLASS(obj);
|
||||||
|
|
||||||
|
@ -395,18 +394,11 @@ static void cpu_common_initfn(Object *obj)
|
||||||
QTAILQ_INIT(&cpu->breakpoints);
|
QTAILQ_INIT(&cpu->breakpoints);
|
||||||
QTAILQ_INIT(&cpu->watchpoints);
|
QTAILQ_INIT(&cpu->watchpoints);
|
||||||
|
|
||||||
count = trace_get_vcpu_event_count();
|
|
||||||
if (count) {
|
|
||||||
cpu->trace_dstate = bitmap_new(count);
|
|
||||||
}
|
|
||||||
|
|
||||||
cpu_exec_initfn(cpu);
|
cpu_exec_initfn(cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cpu_common_finalize(Object *obj)
|
static void cpu_common_finalize(Object *obj)
|
||||||
{
|
{
|
||||||
CPUState *cpu = CPU(obj);
|
|
||||||
g_free(cpu->trace_dstate);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int64_t cpu_common_get_arch_id(CPUState *cpu)
|
static int64_t cpu_common_get_arch_id(CPUState *cpu)
|
||||||
|
|
|
@ -65,8 +65,15 @@ void trace_event_register_group(TraceEvent **events)
|
||||||
size_t i;
|
size_t i;
|
||||||
for (i = 0; events[i] != NULL; i++) {
|
for (i = 0; events[i] != NULL; i++) {
|
||||||
events[i]->id = next_id++;
|
events[i]->id = next_id++;
|
||||||
if (events[i]->vcpu_id != TRACE_VCPU_EVENT_NONE) {
|
if (events[i]->vcpu_id == TRACE_VCPU_EVENT_NONE) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (likely(next_vcpu_id < CPU_TRACE_DSTATE_MAX_EVENTS)) {
|
||||||
events[i]->vcpu_id = next_vcpu_id++;
|
events[i]->vcpu_id = next_vcpu_id++;
|
||||||
|
} else {
|
||||||
|
error_report("WARNING: too many vcpu trace events; dropping '%s'",
|
||||||
|
events[i]->name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
event_groups = g_renew(TraceEventGroup, event_groups, nevent_groups + 1);
|
event_groups = g_renew(TraceEventGroup, event_groups, nevent_groups + 1);
|
||||||
|
|
Loading…
Reference in a new issue