target/i386: Add tcg/access.[ch]
Provide a method to amortize page lookup across large blocks.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 70581940ca
commit 24f6813924

3 changed files with 210 additions and 0 deletions
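
The intended usage pattern is to pay the page-table walk once per block and
then issue many direct host-memory accesses against the cached pointers.
A minimal hypothetical sketch (the helper name and the 32-byte size below
are illustrative, not part of this commit):

/* Store a 32-byte block with a single up-front page lookup. */
static void save_block(CPUX86State *env, target_ulong ptr,
                       const uint64_t *data, uintptr_t ra)
{
    X86Access ac;

    /* One (or at most two) page lookups for the whole region... */
    access_prepare(&ac, env, ptr, 32, MMU_DATA_STORE, ra);

    /* ...then each store is a bounds check plus a host memory write. */
    for (int i = 0; i < 4; i++) {
        access_stq(&ac, ptr + 8 * i, data[i]);
    }
}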

target/i386/tcg/access.c (new file, 169 lines)
@@ -0,0 +1,169 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Access guest memory in blocks. */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "access.h"

void access_prepare_mmu(X86Access *ret, CPUX86State *env,
                        vaddr vaddr, unsigned size,
                        MMUAccessType type, int mmu_idx, uintptr_t ra)
{
    int size1, size2;
    void *haddr1, *haddr2;

    assert(size > 0 && size <= TARGET_PAGE_SIZE);

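    /*
     * Note (illustrative, not part of the patch): TARGET_PAGE_MASK is
     * ~(TARGET_PAGE_SIZE - 1), so -(vaddr | TARGET_PAGE_MASK) is the
     * number of bytes from vaddr to the end of its page.  With 4 KiB
     * pages, vaddr = 0x1ff8 and size = 16 give size1 = MIN(16, 8) = 8
     * bytes on the first page, leaving size2 = 8 on the second.
     */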
    size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK));
    size2 = size - size1;

    memset(ret, 0, sizeof(*ret));
    ret->vaddr = vaddr;
    ret->size = size;
    ret->size1 = size1;
    ret->mmu_idx = mmu_idx;
    ret->env = env;
    ret->ra = ra;

    haddr1 = probe_access(env, vaddr, size1, type, mmu_idx, ra);
    ret->haddr1 = haddr1;

    if (unlikely(size2)) {
        haddr2 = probe_access(env, vaddr + size1, size2, type, mmu_idx, ra);
        if (haddr2 == haddr1 + size1) {
            ret->size1 = size;
        } else {
#ifdef CONFIG_USER_ONLY
            g_assert_not_reached();
#else
            ret->haddr2 = haddr2;
#endif
        }
    }
}

void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
                    unsigned size, MMUAccessType type, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    access_prepare_mmu(ret, env, vaddr, size, type, mmu_idx, ra);
}

static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
{
    vaddr offset = addr - ac->vaddr;

    assert(addr >= ac->vaddr);

#ifdef CONFIG_USER_ONLY
    assert(offset <= ac->size1 - len);
    return ac->haddr1 + offset;
#else
    if (likely(offset <= ac->size1 - len)) {
        return ac->haddr1 + offset;
    }
    assert(offset <= ac->size - len);
    /*
     * If the address is not naturally aligned, it might span both pages.
     * Only return ac->haddr2 if the area is entirely within the second
     * page, otherwise fall back to slow accesses.
     */
    if (likely(offset >= ac->size1)) {
        return ac->haddr2 + (offset - ac->size1);
    }
    return NULL;
#endif
}

#ifdef CONFIG_USER_ONLY
# define test_ptr(p) true
#else
# define test_ptr(p) likely(p)
#endif
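
/*
 * Note (illustrative, not part of the patch): under CONFIG_USER_ONLY,
 * access_ptr() asserts on failure rather than returning NULL, so
 * test_ptr() folds to true and the slow-path branches below compile
 * away.  In system mode, a NULL pointer routes the access through the
 * cpu_*_mmuidx_ra() helpers instead.
 */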

uint8_t access_ldb(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (test_ptr(p)) {
        return ldub_p(p);
    }
    return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint16_t access_ldw(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (test_ptr(p)) {
        return lduw_le_p(p);
    }
    return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint32_t access_ldl(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (test_ptr(p)) {
        return ldl_le_p(p);
    }
    return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint64_t access_ldq(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (test_ptr(p)) {
        return ldq_le_p(p);
    }
    return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

void access_stb(X86Access *ac, vaddr addr, uint8_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (test_ptr(p)) {
        stb_p(p, val);
    } else {
        cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stw(X86Access *ac, vaddr addr, uint16_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (test_ptr(p)) {
        stw_le_p(p, val);
    } else {
        cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stl(X86Access *ac, vaddr addr, uint32_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (test_ptr(p)) {
        stl_le_p(p, val);
    } else {
        cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stq(X86Access *ac, vaddr addr, uint64_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (test_ptr(p)) {
        stq_le_p(p, val);
    } else {
        cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}
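
The cross-page case, illustrated (a hypothetical sketch, not part of this
commit): a region that straddles a page boundary is probed once for both
pages, and any access that falls entirely within either page is still
served directly from the cached host pointers.

static uint64_t load_split_pair(CPUX86State *env, target_ulong page_end,
                                uintptr_t ra)
{
    X86Access ac;

    /* 16 bytes centered on a page boundary: 8 on each page. */
    access_prepare(&ac, env, page_end - 8, 16, MMU_DATA_LOAD, ra);

    /* Each load lies within one page, so both hit haddr1/haddr2. */
    return access_ldq(&ac, page_end - 8) + access_ldq(&ac, page_end);
}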

target/i386/tcg/access.h (new file, 40 lines)
@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Access guest memory in blocks. */

#ifndef X86_TCG_ACCESS_H
#define X86_TCG_ACCESS_H

/* An access covers at most sizeof(X86XSaveArea), at most 2 pages. */
typedef struct X86Access {
    target_ulong vaddr;
    void *haddr1;
    void *haddr2;
    uint16_t size;
    uint16_t size1;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers.  These are internal details, so we store the rest
     * to do the access here instead of passing it around in the helpers.
     */
    int mmu_idx;
    CPUX86State *env;
    uintptr_t ra;
} X86Access;

void access_prepare_mmu(X86Access *ret, CPUX86State *env,
                        vaddr vaddr, unsigned size,
                        MMUAccessType type, int mmu_idx, uintptr_t ra);
void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
                    unsigned size, MMUAccessType type, uintptr_t ra);

uint8_t access_ldb(X86Access *ac, vaddr addr);
uint16_t access_ldw(X86Access *ac, vaddr addr);
uint32_t access_ldl(X86Access *ac, vaddr addr);
uint64_t access_ldq(X86Access *ac, vaddr addr);

void access_stb(X86Access *ac, vaddr addr, uint8_t val);
void access_stw(X86Access *ac, vaddr addr, uint16_t val);
void access_stl(X86Access *ac, vaddr addr, uint32_t val);
void access_stq(X86Access *ac, vaddr addr, uint64_t val);

#endif

target/i386/tcg/meson.build
@@ -1,4 +1,5 @@
 i386_ss.add(when: 'CONFIG_TCG', if_true: files(
+  'access.c',
   'bpt_helper.c',
   'cc_helper.c',
   'excp_helper.c',