target/riscv: Remove atomic accesses to MIP CSR
Instead of relying on atomics to access the MIP register, let's update our helper function to instead just lock the iothread mutex before writing. This follows the same concept as used in PPC for handling interrupts.

Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Palmer Dabbelt <palmer@dabbelt.com>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
parent f480f6e8c5
commit 7ec5d3030b
4 changed files with 21 additions and 43 deletions
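The core of the change is the locking idiom in the new riscv_cpu_update_mip() below: take the iothread mutex unless the caller already holds it, update env->mip, keep CPU_INTERRUPT_HARD consistent with the pending bits, and drop the mutex only if this function took it. The following condensed sketch is not part of the diff; the helper name and the uint32_t *reg parameter are illustrative, while qemu_mutex_iothread_locked(), qemu_mutex_lock_iothread(), qemu_mutex_unlock_iothread(), cpu_interrupt() and cpu_reset_interrupt() are the existing QEMU APIs (CPUState and the interrupt calls are assumed to come from the usual QEMU CPU headers).

/*
 * Condensed sketch (not from the diff) of the conditional-BQL idiom used by
 * the new riscv_cpu_update_mip().  Helper name and 'reg' are illustrative.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* iothread mutex helpers */

static void set_pending_bits_locked(CPUState *cs, uint32_t *reg,
                                    uint32_t mask, uint32_t value)
{
    bool locked = false;

    /* Take the iothread mutex only if the caller does not already hold it. */
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    /* Change only the bits selected by 'mask'. */
    *reg = (*reg & ~mask) | (value & mask);

    /* Keep CPU_INTERRUPT_HARD consistent with the pending bits. */
    if (*reg) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}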
target/riscv/cpu.c
@@ -224,8 +224,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 #ifndef CONFIG_USER_ONLY
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
-    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ",
-                 (target_ulong)atomic_read(&env->mip));
+    qemu_fprintf(f, " %s 0x%x\n", "mip ", env->mip);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
@@ -275,7 +274,7 @@ static bool riscv_cpu_has_work(CPUState *cs)
      * Definition of the WFI instruction requires it to ignore the privilege
      * mode and delegation registers, but respect individual enables
      */
-    return (atomic_read(&env->mip) & env->mie) != 0;
+    return (env->mip & env->mie) != 0;
 #else
     return true;
 #endif
target/riscv/cpu.h
@@ -121,15 +121,6 @@ struct CPURISCVState {
     target_ulong mhartid;
     target_ulong mstatus;
 
-    /*
-     * CAUTION! Unlike the rest of this struct, mip is accessed asynchonously
-     * by I/O threads. It should be read with atomic_read. It should be updated
-     * using riscv_cpu_update_mip with the iothread mutex held. The iothread
-     * mutex must be held because mip must be consistent with the CPU inturrept
-     * state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
-     * wuth the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
-     * mip is 32-bits to allow atomic_read on 32-bit hosts.
-     */
     uint32_t mip;
     uint32_t miclaim;
target/riscv/cpu_helper.c
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/log.h"
+#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg-op.h"
@@ -38,7 +39,7 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
 {
     target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
     target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
-    target_ulong pending = atomic_read(&env->mip) & env->mie;
+    target_ulong pending = env->mip & env->mie;
     target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
     target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
     target_ulong irqs = (pending & ~env->mideleg & -mie) |
@@ -92,42 +93,29 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
     }
 }
 
-struct CpuAsyncInfo {
-    uint32_t new_mip;
-};
-
-static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state,
-                                            run_on_cpu_data data)
-{
-    struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr;
-
-    if (info->new_mip) {
-        cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
-    } else {
-        cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
-    }
-
-    g_free(info);
-}
-
 uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
 {
     CPURISCVState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
-    struct CpuAsyncInfo *info;
-    uint32_t old, new, cmp = atomic_read(&env->mip);
+    uint32_t old = env->mip;
+    bool locked = false;
 
-    do {
-        old = cmp;
-        new = (old & ~mask) | (value & mask);
-        cmp = atomic_cmpxchg(&env->mip, old, new);
-    } while (old != cmp);
+    if (!qemu_mutex_iothread_locked()) {
+        locked = true;
+        qemu_mutex_lock_iothread();
+    }
 
-    info = g_new(struct CpuAsyncInfo, 1);
-    info->new_mip = new;
+    env->mip = (env->mip & ~mask) | (value & mask);
 
-    async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async,
-                     RUN_ON_CPU_HOST_PTR(info));
+    if (env->mip) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    }
+
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
 
     return old;
 }
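For reference (not part of this commit), interrupt sources such as the timer and external-interrupt device models are expected to go through riscv_cpu_update_mip() rather than touch env->mip directly; the helper takes the iothread mutex itself if the caller does not already hold it. A hedged sketch of such a caller, assuming MIP_MTIP and BOOL_TO_MASK() from target/riscv/cpu.h and a RISCVCPU pointer held by the device model:

/* Hypothetical caller: drive the machine timer-interrupt pending bit. */
static void timer_set_mtip(RISCVCPU *cpu, bool level)
{
    /* Sets or clears MIP.MTIP; locking is handled inside the helper. */
    riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
}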
target/riscv/csr.c
@@ -579,7 +579,7 @@ static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value,
     if (mask) {
         old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
     } else {
-        old_mip = atomic_read(&env->mip);
+        old_mip = env->mip;
     }
 
     if (ret_value) {
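The mask/value convention is the same at both layers: only the bits selected by the mask change, and the pre-update value is returned, which is exactly what a CSR read-modify-write needs. A small self-contained illustration of that arithmetic (plain C with a hypothetical helper, not QEMU code):

#include <assert.h>
#include <stdint.h>

/* Same update rule as riscv_cpu_update_mip(): change only the masked bits
 * and return the previous value. */
static uint32_t update_masked(uint32_t *reg, uint32_t mask, uint32_t value)
{
    uint32_t old = *reg;
    *reg = (*reg & ~mask) | (value & mask);
    return old;
}

int main(void)
{
    uint32_t mip = 0x080;                             /* MTIP (bit 7) pending */
    uint32_t old = update_masked(&mip, 0x002, 0x002); /* set SSIP (bit 1) */
    assert(old == 0x080 && mip == 0x082);
    uint32_t old2 = update_masked(&mip, 0x080, 0);    /* clear MTIP */
    assert(old2 == 0x082 && mip == 0x002);
    return 0;
}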