Mirror of https://gitlab.com/qemu-project/qemu, synced 2024-11-05 20:35:44 +00:00
commit 61a67f71dd
Every vCPU now uses a separate set of TBs for each set of dynamic tracing
event state values. Each set of TBs can be used by any number of vCPUs to
maximize TB reuse when vCPUs have the same tracing state.

This feature is later used by tracetool to optimize tracing of guest code
events.

The maximum number of TB sets is defined as 2^E, where E is the number of
events that have the 'vcpu' property (their state is stored in
CPUState->trace_dstate).

For this to work, a change on the dynamic tracing state of a vCPU will
force it to flush its virtual TB cache (which is only indexed by address),
and fall back to the physical TB cache (which now contains the vCPU's
dynamic tracing state as part of the hashing function).

Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-id: 149915775266.6295.10060144081246467690.stgit@frigg.lan
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
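For illustration only, and not part of the file below: with E events carrying
the 'vcpu' property, each vCPU's trace_dstate is an E-bit value, so at most
2^E distinct TB sets can coexist, and a TB may only be reused by a vCPU whose
current value matches the one the TB was generated under. The minimal sketch
below shows how such a value could be folded into a physical-cache hash; the
function name and hash mixing are hypothetical, not QEMU's actual hash.

#include <stdint.h>

/* Hypothetical sketch: mix the vCPU's dynamic tracing state into the
 * physical TB hash so lookups only match TBs generated under the same
 * tracing state.  With E 'vcpu' events, trace_dstate takes at most 2^E
 * values, hence at most 2^E sets of TBs. */
static inline uint32_t sketch_tb_hash(uint64_t phys_pc, uint64_t pc,
                                      uint32_t flags, uint32_t trace_dstate)
{
    uint32_t h = (uint32_t)(phys_pc ^ (phys_pc >> 32));
    h = h * 31 + (uint32_t)(pc ^ (pc >> 32));
    h = h * 31 + flags;
    h = h * 31 + trace_dstate;  /* differing tracing state -> different bucket */
    return h;
}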
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/tb-hash.h"
#include "disas/disas.h"
#include "exec/log.h"

/* 32-bit helpers */

int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)
{
    return arg1 / arg2;
}

int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2)
{
    return arg1 % arg2;
}

uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2)
{
    return arg1 / arg2;
}

uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2)
{
    return arg1 % arg2;
}

/* 64-bit helpers */

uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2)
{
    return arg1 << arg2;
}

uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2)
{
    return arg1 >> arg2;
}

int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2)
{
    return arg1 >> arg2;
}

int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2)
{
    return arg1 / arg2;
}

int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2)
{
    return arg1 % arg2;
}

uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2)
{
    return arg1 / arg2;
}

uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2)
{
    return arg1 % arg2;
}

uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2)
{
    uint64_t l, h;
    mulu64(&l, &h, arg1, arg2);
    return h;
}

int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2)
{
    uint64_t l, h;
    muls64(&l, &h, arg1, arg2);
    return h;
}

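/* For the clz/ctz helpers below, zero_val is the value to return when the
   input is 0, letting each target define its own result for that case.  */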
uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val)
{
    return arg ? clz32(arg) : zero_val;
}

uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val)
{
    return arg ? ctz32(arg) : zero_val;
}

uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val)
{
    return arg ? clz64(arg) : zero_val;
}

uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val)
{
    return arg ? ctz64(arg) : zero_val;
}

uint32_t HELPER(clrsb_i32)(uint32_t arg)
{
    return clrsb32(arg);
}

uint64_t HELPER(clrsb_i64)(uint64_t arg)
{
    return clrsb64(arg);
}

uint32_t HELPER(ctpop_i32)(uint32_t arg)
{
    return ctpop32(arg);
}

uint64_t HELPER(ctpop_i64)(uint64_t arg)
{
    return ctpop64(arg);
}

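/*
 * Look up the TB starting at 'addr' for the current CPU state: first in the
 * per-vCPU tb_jmp_cache (indexed only by a hash of the address), then in the
 * global physical TB hash table, whose hash also covers the vCPU's dynamic
 * tracing state.  Return a pointer to the TB's generated code so the caller
 * can chain to it directly, or to the epilogue if no matching TB exists yet,
 * in which case execution returns to the main loop to translate one.
 */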
void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, addr_hash;

    addr_hash = tb_jmp_cache_hash_func(addr);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[addr_hash]);
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    if (unlikely(!(tb
                   && tb->pc == addr
                   && tb->cs_base == cs_base
                   && tb->flags == flags
                   && tb->trace_vcpu_dstate == *cpu->trace_dstate))) {
        tb = tb_htable_lookup(cpu, addr, cs_base, flags);
        if (!tb) {
            return tcg_ctx.code_gen_epilogue;
        }
        atomic_set(&cpu->tb_jmp_cache[addr_hash], tb);
    }

    qemu_log_mask_and_addr(CPU_LOG_EXEC, addr,
                           "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
                           tb->tc_ptr, cpu->cpu_index, addr,
                           lookup_symbol(addr));
    return tb->tc_ptr;
}

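/*
 * Called from generated code when an atomic operation cannot be performed
 * inline on this host; exits the CPU loop so the instruction can be
 * re-executed under exclusive (serialized) execution.
 */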
void HELPER(exit_atomic)(CPUArchState *env)
{
    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
}

#ifndef CONFIG_SOFTMMU
/* The softmmu versions of these helpers are in cputlb.c. */

/* Do not allow unaligned operations to proceed.  Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               int size, uintptr_t retaddr)
{
    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
    }
    return g2h(addr);
}

/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())

#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS

/* Expand atomic_template.h once per operand size to generate the user-mode
   atomic helpers (compare-and-swap and related read-modify-write ops). */
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* The following is only callable from other helpers, and matches up
   with the softmmu version. */

#ifdef CONFIG_ATOMIC128

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)

#define DATA_SIZE 16
#include "atomic_template.h"
#endif /* CONFIG_ATOMIC128 */

#endif /* !CONFIG_SOFTMMU */