accel/tcg: Move breakpoint recognition outside translation
Trigger breakpoints before beginning translation of a TB
that would begin with a BP.  Thus we never generate code
for the BP at all.

Single-step instructions within a page containing a BP so
that we are sure to check each insn for the BP as above.

We no longer need to flush any TBs when changing BPs.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/286
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/404
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/489
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 11c1d5f8ca
commit 10c37828b2
3 changed files with 89 additions and 46 deletions
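The core of the change, visible in the accel/tcg/cpu-exec.c hunks below, is how check_for_breakpoints reacts when the pc merely shares a page with a breakpoint: it rewrites cflags so that the next TB holds exactly one instruction and cannot chain directly to a successor, forcing execution back through helper_lookup_tb_ptr (and thus another breakpoint check) after every insn. The standalone sketch below isolates that bit manipulation; the flag names mirror QEMU's CF_* constants, but the values here are illustrative, not the real definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's CF_* cflags bits (values invented). */
#define CF_COUNT_MASK  0x000001ffu  /* low bits: max insns per TB */
#define CF_NO_GOTO_TB  0x00000200u  /* forbid direct TB-to-TB chaining */

/* Mirrors the adjustment applied when pc shares a page with a breakpoint. */
static uint32_t single_step_cflags(uint32_t cflags)
{
    /* Clear any previous insn budget, then request exactly one insn
       per TB and disable goto_tb chaining. */
    return (cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
}

int main(void)
{
    uint32_t cflags = 0x80;  /* pretend the current budget is 128 insns */
    printf("%#x -> %#x\n", cflags, single_step_cflags(cflags));
    /* prints "0x80 -> 0x201": one insn per TB, no direct chaining */
    return 0;
}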
accel/tcg/cpu-exec.c
@@ -222,6 +222,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }
 
+static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+                                  uint32_t *cflags)
+{
+    CPUBreakpoint *bp;
+    bool match_page = false;
+
+    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
+        return false;
+    }
+
+    /*
+     * Singlestep overrides breakpoints.
+     * This requirement is visible in the record-replay tests, where
+     * we would fail to make forward progress in reverse-continue.
+     *
+     * TODO: gdb singlestep should only override gdb breakpoints,
+     * so that one could (gdb) singlestep into the guest kernel's
+     * architectural breakpoint handler.
+     */
+    if (cpu->singlestep_enabled) {
+        return false;
+    }
+
+    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+        /*
+         * If we have an exact pc match, trigger the breakpoint.
+         * Otherwise, note matches within the page.
+         */
+        if (pc == bp->pc) {
+            bool match_bp = false;
+
+            if (bp->flags & BP_GDB) {
+                match_bp = true;
+            } else if (bp->flags & BP_CPU) {
+#ifdef CONFIG_USER_ONLY
+                g_assert_not_reached();
+#else
+                CPUClass *cc = CPU_GET_CLASS(cpu);
+                assert(cc->tcg_ops->debug_check_breakpoint);
+                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+#endif
+            }
+
+            if (match_bp) {
+                cpu->exception_index = EXCP_DEBUG;
+                return true;
+            }
+        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
+            match_page = true;
+        }
+    }
+
+    /*
+     * Within the same page as a breakpoint, single-step,
+     * returning to helper_lookup_tb_ptr after each insn looking
+     * for the actual breakpoint.
+     *
+     * TODO: Perhaps better to record all of the TBs associated
+     * with a given virtual page that contains a breakpoint, and
+     * then invalidate them when a new overlapping breakpoint is
+     * set on the page.  Non-overlapping TBs would not be
+     * invalidated, nor would any TB need to be invalidated as
+     * breakpoints are removed.
+     */
+    if (match_page) {
+        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+    }
+    return false;
+}
+
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
@@ -235,11 +305,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
+    uint32_t flags, cflags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
-    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+    cflags = curr_cflags(cpu);
+    if (check_for_breakpoints(cpu, pc, &cflags)) {
+        cpu_loop_exit(cpu);
+    }
+
+    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
@@ -346,6 +421,12 @@ void cpu_exec_step_atomic(CPUState *cpu)
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+       /*
+        * No need to check_for_breakpoints here.
+        * We only arrive in cpu_exec_step_atomic after beginning execution
+        * of an insn that includes an atomic operation we can't handle.
+        * Any breakpoint for this insn will have been recognized earlier.
+        */
 
        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
@@ -837,6 +918,8 @@ int cpu_exec(CPUState *cpu)
            target_ulong cs_base, pc;
            uint32_t flags, cflags;
 
+           cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
@@ -851,7 +934,9 @@ int cpu_exec(CPUState *cpu)
                cpu->cflags_next_tb = -1;
            }
 
-           cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+           if (check_for_breakpoints(cpu, pc, &cflags)) {
+               break;
+           }
 
            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
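For BP_CPU (architectural) breakpoints, the new check defers to the target's debug_check_breakpoint hook, as seen in the first hunk above. The hook name and its CPUState-taking signature come straight from that hunk; everything else in this sketch — the target name, the cast macro, and the enable-bit test — is hypothetical, shown only to indicate where a target would plug in.

/* Hypothetical target-side wiring for the BP_CPU path (illustrative only). */
static bool mycpu_debug_check_breakpoint(CPUState *cs)
{
    MyCPU *cpu = MYCPU(cs);                /* hypothetical QOM cast macro */
    /* Fire only if the (invented) architectural debug-enable bit is set. */
    return cpu->env.ctrl & CTRL_DBGEN;
}

static const struct TCGCPUOps mycpu_tcg_ops = {
    /* ...other hooks... */
    .debug_check_breakpoint = mycpu_debug_check_breakpoint,
};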
accel/tcg/translator.c
@@ -50,7 +50,6 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                      CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
-    int bp_insn = 0;
     bool plugin_enabled;
 
     /* Initialize DisasContext */
@@ -85,27 +84,6 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
             plugin_gen_insn_start(cpu, db);
         }
 
-        /* Pass breakpoint hits to target for further processing */
-        if (!db->singlestep_enabled
-            && unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
-            CPUBreakpoint *bp;
-            QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
-                if (bp->pc == db->pc_next) {
-                    if (ops->breakpoint_check(db, cpu, bp)) {
-                        bp_insn = 1;
-                        break;
-                    }
-                }
-            }
-            /* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
-               that only one more instruction is to be executed.  Otherwise
-               it should use DISAS_NORETURN when generating an exception,
-               but may use a DISAS_TARGET_* value for Something Else.  */
-            if (db->is_jmp > DISAS_TOO_MANY) {
-                break;
-            }
-        }
-
         /* Disassemble one instruction.  The translate_insn hook should
            update db->pc_next and db->is_jmp to indicate what should be
            done next -- either exiting this loop or locate the start of
@@ -144,7 +122,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
 
     /* Emit code to exit the TB, as indicated by db->is_jmp.  */
     ops->tb_stop(db, cpu);
-    gen_tb_end(db->tb, db->num_insns - bp_insn);
+    gen_tb_end(db->tb, db->num_insns);
 
     if (plugin_enabled) {
         plugin_gen_tb_end(cpu);
cpu.c
@@ -225,11 +225,6 @@ void tb_invalidate_phys_addr(target_ulong addr)
     tb_invalidate_phys_page_range(addr, addr + 1);
     mmap_unlock();
 }
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    tb_invalidate_phys_addr(pc);
-}
 #else
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
 {
@@ -250,17 +245,6 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
     ram_addr = memory_region_get_ram_addr(mr) + addr;
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
 }
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    /*
-     * There may not be a virtual to physical translation for the pc
-     * right now, but there may exist cached TB for this pc.
-     * Flush the whole TB cache to force re-translation of such TBs.
-     * This is heavyweight, but we're debugging anyway.
-     */
-    tb_flush(cpu);
-}
 #endif
 
 /* Add a breakpoint.  */
@@ -286,8 +270,6 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
     }
 
-    breakpoint_invalidate(cpu, pc);
-
     if (breakpoint) {
         *breakpoint = bp;
     }
@@ -320,8 +302,6 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
 {
     QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
 
-    breakpoint_invalidate(cpu, bp->pc);
-
     trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
     g_free(bp);
 }