Mirror of https://gitlab.com/qemu-project/qemu, synced 2024-11-05 20:35:44 +00:00
accel/tcg: Restrict page_collection structure to system TB maintenance
Only the system emulation part of TB maintenance uses the page_collection structure. Restrict its declaration (and the functions requiring it) to tb-maint.c.

Convert the 'len' argument of tb_invalidate_phys_page_fast__locked() from signed to unsigned.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20221209093649.43738-6-philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 8112426549
parent f349e92e8e

2 changed files with 7 additions and 15 deletions
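The change is the standard C technique for restricting a type to a single translation unit: drop the forward declaration and helper prototypes from the shared header, and mark the definitions static in the .c file. A minimal, self-contained sketch of that pattern (hypothetical names, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* What stays in the shared header: only the coarse entry point. */
void invalidate_range_fast(uintptr_t start, unsigned len);

/* What becomes private to the .c file: the collection type and its
 * lock/unlock helpers, no longer visible to other translation units. */
struct collection {
    uintptr_t start, end;
};

static struct collection *collection_lock(uintptr_t start, uintptr_t end)
{
    static struct collection c;     /* stand-in for allocation + locking */
    c.start = start;
    c.end = end;
    return &c;
}

static void collection_unlock(struct collection *set)
{
    (void)set;                      /* stand-in for unlock + free */
}

void invalidate_range_fast(uintptr_t start, unsigned len)
{
    /* Note the unsigned length: a byte count cannot be negative. */
    struct collection *set = collection_lock(start, start + len);
    printf("invalidate [%#lx, %#lx)\n",
           (unsigned long)set->start, (unsigned long)set->end);
    collection_unlock(set);
}

int main(void)
{
    invalidate_range_fast(0x1000, 16);
    return 0;
}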
accel/tcg/internal.h
@@ -36,16 +36,9 @@ void page_table_config_init(void);
 #endif
 
 #ifdef CONFIG_SOFTMMU
-struct page_collection;
-void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
-                                          tb_page_addr_t start, int len,
-                                          uintptr_t retaddr);
-struct page_collection *page_collection_lock(tb_page_addr_t start,
-                                             tb_page_addr_t end);
 void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                    unsigned size,
                                    uintptr_t retaddr);
-void page_collection_unlock(struct page_collection *set);
 G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 #endif /* CONFIG_SOFTMMU */
 
accel/tcg/tb-maint.c
@@ -513,8 +513,8 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
  * intersecting TBs.
  * Locking order: acquire locks in ascending order of page index.
  */
-struct page_collection *
-page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
+static struct page_collection *page_collection_lock(tb_page_addr_t start,
+                                                    tb_page_addr_t end)
 {
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
@@ -558,7 +558,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
     return set;
 }
 
-void page_collection_unlock(struct page_collection *set)
+static void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
     g_tree_destroy(set->tree);
@@ -1186,9 +1186,9 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 /*
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
-void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
-                                          tb_page_addr_t start, int len,
-                                          uintptr_t retaddr)
+static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
+                                                 tb_page_addr_t start,
+                                                 unsigned len, uintptr_t ra)
 {
     PageDesc *p;
 
@@ -1198,8 +1198,7 @@ void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     }
 
     assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
-                                          retaddr);
+    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
 }
 
 /*
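For context on why static linkage suffices here: the one entry point left in the header, tb_invalidate_phys_range_fast(), lives in tb-maint.c itself and composes the three now-private helpers, roughly along these lines (a paraphrased sketch of the surrounding code, not part of this diff):

void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr)
{
    struct page_collection *pages;

    /* Lock every page the range touches, invalidate, then unlock. */
    pages = page_collection_lock(ram_addr, ram_addr + size);
    tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
    page_collection_unlock(pages);
}

Since every caller of the page_collection helpers sits in this file, no other translation unit needs the struct tag or the prototypes, and the compiler can now warn about any future out-of-file use.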