exec: pass client mask to cpu_physical_memory_set_dirty_range
This cuts in half the cost of bitmap operations (which will become more
expensive when made atomic) during migration on non-VRAM regions.

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
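To illustrate why, here is a minimal standalone sketch of the scheme (toy names and layout, not the QEMU code itself): with a client mask, the helper touches only the bitmaps of clients that are actually logging, so dirtying an ordinary RAM page during migration costs one bitmap update instead of the two (migration + VGA) that the old *_nocode helper always performed.

/* Toy model of the masked dirty-range update; names and layout are
 * illustrative only, not the QEMU implementation. */
#include <stdio.h>

enum { CLIENT_VGA, CLIENT_CODE, CLIENT_MIGRATION, CLIENT_NUM };

static unsigned long dirty[CLIENT_NUM]; /* one toy "bitmap" per client */
static int bitmap_ops;                  /* counts per-client updates */

static void set_dirty_range(int first_page, int npages, unsigned mask)
{
    for (int c = 0; c < CLIENT_NUM; c++) {
        if (mask & (1u << c)) {         /* skip clients that do not log */
            dirty[c] |= ((1ul << npages) - 1) << first_page;
            bitmap_ops++;
        }
    }
}

int main(void)
{
    /* Ordinary RAM during migration: only the migration client logs. */
    set_dirty_range(0, 4, 1u << CLIENT_MIGRATION);
    printf("updates = %d\n", bitmap_ops); /* 1, instead of 2 without a mask */
    return 0;
}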
parent fc377bcf61
commit 58d2707e87

3 changed files with 29 additions and 27 deletions
exec.c (20 changes)
@@ -1351,7 +1351,8 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
     block->used_length = newsize;
-    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
+    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
+                                        DIRTY_CLIENTS_ALL);
     memory_region_set_size(block->mr, newsize);
     if (block->resized) {
         block->resized(block->idstr, newsize, block->host);
@@ -1425,7 +1426,8 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         }
     }
     cpu_physical_memory_set_dirty_range(new_block->offset,
-                                        new_block->used_length);
+                                        new_block->used_length,
+                                        DIRTY_CLIENTS_ALL);

     if (new_block->host) {
         qemu_ram_setup_dump(new_block->host, new_block->max_length);
@@ -1813,7 +1815,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     default:
         abort();
     }
-    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
+    /* Set both VGA and migration bits for simplicity and to remove
+     * the notdirty callback faster.
+     */
+    cpu_physical_memory_set_dirty_range(ram_addr, size,
+                                        DIRTY_CLIENTS_NOCODE);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (!cpu_physical_memory_is_clean(ram_addr)) {
@@ -2259,9 +2265,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
             tb_invalidate_phys_range(addr, addr + length);
             dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
         }
-        if (dirty_log_mask) {
-            cpu_physical_memory_set_dirty_range_nocode(addr, length);
-        }
+        cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
     } else {
         xen_modified_memory(addr, length);
     }
@@ -3014,9 +3018,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,

         dirty_log_mask = memory_region_get_dirty_log_mask(mr);
         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
-        if (dirty_log_mask) {
-            cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
-        }
+        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
         r = MEMTX_OK;
     }
     if (result) {
include/exec/ram_addr.h (33 changes)
@@ -41,6 +41,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr);

 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

+#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
+#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
+
 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                  ram_addr_t length,
                                                  unsigned client)
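As a quick sanity check of the two new masks, the standalone snippet below prints their values; the DIRTY_MEMORY_* constants are restated here under the assumption of QEMU's usual client numbering (VGA = 0, CODE = 1, MIGRATION = 2) rather than pulled from its headers.

/* Standalone check of the new mask macros; the DIRTY_MEMORY_* values
 * below assume QEMU's client numbering instead of including its headers. */
#include <stdio.h>

#define DIRTY_MEMORY_VGA        0
#define DIRTY_MEMORY_CODE       1
#define DIRTY_MEMORY_MIGRATION  2
#define DIRTY_MEMORY_NUM        3

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

int main(void)
{
    printf("DIRTY_CLIENTS_ALL    = 0x%x\n", DIRTY_CLIENTS_ALL);    /* 0x7 */
    printf("DIRTY_CLIENTS_NOCODE = 0x%x\n", DIRTY_CLIENTS_NOCODE); /* 0x5 */
    return 0;
}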
@@ -103,28 +106,23 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }

-static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
-                                                              ram_addr_t length)
-{
-    unsigned long end, page;
-
-    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
-    page = start >> TARGET_PAGE_BITS;
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
-    xen_modified_memory(start, length);
-}
-
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
-                                                       ram_addr_t length)
+                                                       ram_addr_t length,
+                                                       uint8_t mask)
 {
     unsigned long end, page;

     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
-    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+    }
+    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+    }
+    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+    }
     xen_modified_memory(start, length);
 }

@@ -172,7 +170,8 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                 addr = page_number * TARGET_PAGE_SIZE;
                 ram_addr = start + addr;
                 cpu_physical_memory_set_dirty_range(ram_addr,
-                                                    TARGET_PAGE_SIZE * hpratio);
+                                                    TARGET_PAGE_SIZE * hpratio,
+                                                    DIRTY_CLIENTS_ALL);
             } while (c != 0);
         }
     }
memory.c (3 changes)
@@ -1461,7 +1461,8 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                              hwaddr size)
 {
     assert(mr->terminates);
-    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
+    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size,
+                                        memory_region_get_dirty_log_mask(mr));
 }

 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
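Across the call sites above, the mask comes from one of three places: DIRTY_CLIENTS_ALL when RAM is added or resized (every client must see the pages as dirty), DIRTY_CLIENTS_NOCODE in the notdirty write path (the code bitmap is handled separately by TB invalidation), and the region's own dirty log mask here in memory_region_set_dirty. Below is a hedged sketch of how such a per-region mask can be derived; toy_region and its fields are illustrative stand-ins, not the MemoryRegion API.

/* Sketch only: a simplified model of deriving a client mask from a
 * region's logging state. toy_region is a stand-in, not QEMU's
 * MemoryRegion; the migration bit is forced on while global dirty
 * logging (i.e. migration) is active. */
#include <stdint.h>
#include <stdbool.h>

#define DIRTY_MEMORY_MIGRATION  2

typedef struct {
    uint8_t dirty_log_mask;   /* clients that requested logging, e.g. VGA */
} toy_region;

static bool global_dirty_log; /* true while migration is running */

static uint8_t toy_get_dirty_log_mask(const toy_region *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

int main(void)
{
    toy_region ram = { .dirty_log_mask = 0 }; /* plain RAM: nothing logs */
    global_dirty_log = true;                  /* migration starts */
    return toy_get_dirty_log_mask(&ram);      /* only the migration bit */
}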