Add tb_page_addr_t

The page tracking code in exec.c is used by both userspace and system
emulation. Userspace emulation uses it to track virtual pages, and system
emulation to track ram pages. Introduce a new type to hold this kind of
address.

Signed-off-by: Paul Brook <paul@codesourcery.com>
parent 376a790970
commit 41c1b1c9eb
3 changed files with 52 additions and 37 deletions
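Before the diff, a brief illustration of the new type. This is a minimal standalone sketch, not QEMU code: the target_ulong and ram_addr_t typedefs below are stand-ins added only so the snippet compiles on its own, while the conditional tb_page_addr_t definition mirrors the one this commit adds to exec-all.h.

#include <stdint.h>

/* Stand-in typedefs so the sketch is self-contained; QEMU defines the real
   target_ulong and ram_addr_t elsewhere, with configuration-dependent widths. */
typedef uint64_t target_ulong;   /* guest virtual address */
typedef uint64_t ram_addr_t;     /* offset into guest RAM */

/* The new type: one name for "address of a tracked page" that resolves to
   whichever address space the page tracking code actually indexes. */
#if defined(CONFIG_USER_ONLY)
typedef target_ulong tb_page_addr_t;   /* user mode tracks guest virtual pages */
#else
typedef ram_addr_t tb_page_addr_t;     /* system mode tracks ram pages */
#endif

With the type in place, the bulk of the diff is mechanical: page-tracking helpers such as get_page_addr_code(), tb_link_page(), tb_phys_invalidate() and tb_invalidate_phys_page_range() take tb_page_addr_t instead of a mix of target_ulong and target_phys_addr_t, so a single signature serves both build modes.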
@@ -125,12 +125,13 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
 {
     TranslationBlock *tb, **ptb1;
     unsigned int h;
-    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
+    tb_page_addr_t phys_pc, phys_page1, phys_page2;
+    target_ulong virt_page2;
 
     tb_invalidated_flag = 0;
 
     /* find translated block using physical mappings */
-    phys_pc = get_phys_addr_code(env, pc);
+    phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
     phys_page2 = -1;
     h = tb_phys_hash_func(phys_pc);
@@ -147,7 +148,7 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
             if (tb->page_addr[1] != -1) {
                 virt_page2 = (pc & TARGET_PAGE_MASK) +
                     TARGET_PAGE_SIZE;
-                phys_page2 = get_phys_addr_code(env, virt_page2);
+                phys_page2 = get_page_addr_code(env, virt_page2);
                 if (tb->page_addr[1] == phys_page2)
                     goto found;
             } else {

exec-all.h (25 changed lines)

@@ -25,6 +25,15 @@
 /* allow to see translation results - the slowdown should be negligible, so we leave it */
 #define DEBUG_DISAS
 
+/* Page tracking code uses ram addresses in system mode, and virtual
+   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
+   type. */
+#if defined(CONFIG_USER_ONLY)
+typedef target_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
 /* is_jmp field values */
 #define DISAS_NEXT    0 /* next instruction can be analyzed */
 #define DISAS_JUMP    1 /* only pc was modified dynamically */
@@ -81,7 +90,7 @@ TranslationBlock *tb_gen_code(CPUState *env,
 void cpu_exec_init(CPUState *env);
 void QEMU_NORETURN cpu_loop_exit(void);
 int page_unprotect(target_ulong address, unsigned long pc, void *puc);
-void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access);
 void tb_invalidate_page_range(target_ulong start, target_ulong end);
 void tlb_flush_page(CPUState *env, target_ulong addr);
@@ -136,7 +145,7 @@ struct TranslationBlock {
     /* first and second physical page containing code. The lower bit
        of the pointer tells the index in page_next[] */
     struct TranslationBlock *page_next[2];
-    target_ulong page_addr[2];
+    tb_page_addr_t page_addr[2];
 
     /* the following data are used to directly call another TB from
        the code of this one. */
@@ -170,7 +179,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
             | (tmp & TB_JMP_ADDR_MASK));
 }
 
-static inline unsigned int tb_phys_hash_func(unsigned long pc)
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
 {
     return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
 }
@@ -178,9 +187,9 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc)
 TranslationBlock *tb_alloc(target_ulong pc);
 void tb_free(TranslationBlock *tb);
 void tb_flush(CPUState *env);
-void tb_link_phys(TranslationBlock *tb,
-                  target_ulong phys_pc, target_ulong phys_page2);
-void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
+void tb_link_page(TranslationBlock *tb,
+                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 
 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 extern uint8_t *code_gen_ptr;
@@ -305,7 +314,7 @@ void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
 #endif
 
 #if defined(CONFIG_USER_ONLY)
-static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
+static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
 {
     return addr;
 }
@@ -313,7 +322,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
 /* NOTE: this function can trigger an exception */
 /* NOTE2: the returned address is not exactly the physical address: it
    is the offset relative to phys_ram_base */
-static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
+static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
 {
     int mmu_idx, page_index, pd;
     void *p;

exec.c (57 changed lines)

@@ -135,16 +135,14 @@ typedef struct PageDesc {
 #endif
 } PageDesc;
 
-typedef struct PhysPageDesc {
-    /* offset in host memory of the page + io_index in the low bits */
-    ram_addr_t phys_offset;
-    ram_addr_t region_offset;
-} PhysPageDesc;
-
-/* In system mode we want L1_MAP to be based on physical addresses,
+/* In system mode we want L1_MAP to be based on ram offsets,
    while in user mode we want it to be based on virtual addresses. */
 #if !defined(CONFIG_USER_ONLY)
+#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
+# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
+#else
 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
+#endif
 #else
 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
 #endif
@@ -188,6 +186,12 @@ unsigned long qemu_host_page_mask;
 static void *l1_map[V_L1_SIZE];
 
 #if !defined(CONFIG_USER_ONLY)
+typedef struct PhysPageDesc {
+    /* offset in host memory of the page + io_index in the low bits */
+    ram_addr_t phys_offset;
+    ram_addr_t region_offset;
+} PhysPageDesc;
+
 /* This is a multi-level map on the physical address space.
    The bottom level has pointers to PhysPageDesc. */
 static void *l1_phys_map[P_L1_SIZE];
@@ -301,8 +305,12 @@ static void page_init(void)
 #endif
 }
 
-static PageDesc *page_find_alloc(target_ulong index, int alloc)
+static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 {
+    PageDesc *pd;
+    void **lp;
+    int i;
+
 #if defined(CONFIG_USER_ONLY)
     /* We can't use qemu_malloc because it may recurse into a locked mutex.
        Neither can we record the new pages we reserve while allocating a
@@ -328,10 +336,6 @@ static PageDesc *page_find_alloc(target_ulong index, int alloc)
     do { P = qemu_mallocz(SIZE); } while (0)
 #endif
 
-    PageDesc *pd;
-    void **lp;
-    int i;
-
     /* Level 1. Always allocated. */
     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
 
@@ -374,7 +378,7 @@ static PageDesc *page_find_alloc(target_ulong index, int alloc)
     return pd + (index & (L2_SIZE - 1));
 }
 
-static inline PageDesc *page_find(target_ulong index)
+static inline PageDesc *page_find(tb_page_addr_t index)
 {
     return page_find_alloc(index, 0);
 }
@@ -791,12 +795,12 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }
 
-void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
     CPUState *env;
     PageDesc *p;
     unsigned int h, n1;
-    target_phys_addr_t phys_pc;
+    tb_page_addr_t phys_pc;
     TranslationBlock *tb1, *tb2;
 
     /* remove the TB from the hash list */
@@ -908,10 +912,11 @@ TranslationBlock *tb_gen_code(CPUState *env,
 {
     TranslationBlock *tb;
     uint8_t *tc_ptr;
-    target_ulong phys_pc, phys_page2, virt_page2;
+    tb_page_addr_t phys_pc, phys_page2;
+    target_ulong virt_page2;
     int code_gen_size;
 
-    phys_pc = get_phys_addr_code(env, pc);
+    phys_pc = get_page_addr_code(env, pc);
     tb = tb_alloc(pc);
     if (!tb) {
         /* flush must be done */
@@ -933,9 +938,9 @@ TranslationBlock *tb_gen_code(CPUState *env,
     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     phys_page2 = -1;
     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
-        phys_page2 = get_phys_addr_code(env, virt_page2);
+        phys_page2 = get_page_addr_code(env, virt_page2);
     }
-    tb_link_phys(tb, phys_pc, phys_page2);
+    tb_link_page(tb, phys_pc, phys_page2);
     return tb;
 }
@@ -944,12 +949,12 @@ TranslationBlock *tb_gen_code(CPUState *env,
    the same physical page. 'is_cpu_write_access' should be true if called
    from a real cpu write access: the virtual CPU will exit the current
    TB if code is modified inside this TB. */
-void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
 {
     TranslationBlock *tb, *tb_next, *saved_tb;
     CPUState *env = cpu_single_env;
-    target_ulong tb_start, tb_end;
+    tb_page_addr_t tb_start, tb_end;
     PageDesc *p;
     int n;
 #ifdef TARGET_HAS_PRECISE_SMC
@@ -1051,7 +1056,7 @@ void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
 }
 
 /* len must be <= 8 and start must be a multiple of len */
-static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
+static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
 {
     PageDesc *p;
     int offset, b;
@@ -1078,7 +1083,7 @@ static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
 }
 
 #if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_phys_addr_t addr,
+static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                     unsigned long pc, void *puc)
 {
     TranslationBlock *tb;
@@ -1140,7 +1145,7 @@ static void tb_invalidate_phys_page(target_phys_addr_t addr,
 
 /* add the tb in the target page and protect it if necessary */
 static inline void tb_alloc_page(TranslationBlock *tb,
-                                 unsigned int n, target_ulong page_addr)
+                                 unsigned int n, tb_page_addr_t page_addr)
 {
     PageDesc *p;
     TranslationBlock *last_first_tb;
@@ -1221,8 +1226,8 @@ void tb_free(TranslationBlock *tb)
 
 /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */
-void tb_link_phys(TranslationBlock *tb,
-                  target_ulong phys_pc, target_ulong phys_page2)
+void tb_link_page(TranslationBlock *tb,
+                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
 {
     unsigned int h;
     TranslationBlock **ptb;