migration: Introduce ram_transferred_add()

Replace direct manipulation of ram_counters.transferred with a
function.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
David Edmondson 2021-12-21 09:34:40 +00:00 committed by Juan Quintela
parent 9e7d1223ac
commit 4c2d0f6dca

migration/ram.c

@@ -387,6 +387,11 @@ uint64_t ram_bytes_remaining(void)
 
 MigrationStats ram_counters;
 
+static void ram_transferred_add(uint64_t bytes)
+{
+    ram_counters.transferred += bytes;
+}
+
 /* used by the search for pages to send */
 struct PageSearchStatus {
     /* Current block being searched */
@@ -767,7 +772,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
      * RAM_SAVE_FLAG_CONTINUE.
      */
     xbzrle_counters.bytes += bytes_xbzrle - 8;
-    ram_counters.transferred += bytes_xbzrle;
+    ram_transferred_add(bytes_xbzrle);
 
     return 1;
 }
@@ -1208,7 +1213,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
 
     if (len) {
         ram_counters.duplicate++;
-        ram_counters.transferred += len;
+        ram_transferred_add(len);
         return 1;
     }
     return -1;
@@ -1235,7 +1240,7 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     }
 
     if (bytes_xmit) {
-        ram_counters.transferred += bytes_xmit;
+        ram_transferred_add(bytes_xmit);
         *pages = 1;
     }
 
@@ -1266,8 +1271,8 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                             uint8_t *buf, bool async)
 {
-    ram_counters.transferred += save_page_header(rs, rs->f, block,
-                                                 offset | RAM_SAVE_FLAG_PAGE);
+    ram_transferred_add(save_page_header(rs, rs->f, block,
+                                         offset | RAM_SAVE_FLAG_PAGE));
     if (async) {
         qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
                               migrate_release_ram() &
@@ -1275,7 +1280,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     } else {
         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
     }
-    ram_counters.transferred += TARGET_PAGE_SIZE;
+    ram_transferred_add(TARGET_PAGE_SIZE);
     ram_counters.normal++;
     return 1;
 }
@@ -1367,7 +1372,7 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
 static void
 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
 {
-    ram_counters.transferred += bytes_xmit;
+    ram_transferred_add(bytes_xmit);
 
     if (param->zero_page) {
         ram_counters.duplicate++;
@@ -2284,7 +2289,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
         ram_counters.duplicate += pages;
     } else {
         ram_counters.normal += pages;
-        ram_counters.transferred += size;
+        ram_transferred_add(size);
         qemu_update_position(f, size);
     }
 }
@@ -3040,7 +3045,7 @@ out:
         multifd_send_sync_main(rs->f);
         qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
         qemu_fflush(f);
-        ram_counters.transferred += 8;
+        ram_transferred_add(8);
 
         ret = qemu_file_get_error(f);
     }
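
For readers skimming the diff, a minimal self-contained sketch of the pattern
introduced above (the struct is trimmed to the one field touched here, and
main() is purely illustrative; only ram_transferred_add() mirrors the real
helper):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-in for QEMU's MigrationStats, reduced to the field used here. */
    typedef struct {
        uint64_t transferred;
    } MigrationStats;

    static MigrationStats ram_counters;

    /* All byte accounting funnels through one helper instead of each call
     * site bumping ram_counters.transferred directly. */
    static void ram_transferred_add(uint64_t bytes)
    {
        ram_counters.transferred += bytes;
    }

    int main(void)
    {
        ram_transferred_add(8);       /* e.g. an 8-byte EOS marker */
        ram_transferred_add(4096);    /* e.g. one 4 KiB guest page */
        printf("transferred: %" PRIu64 " bytes\n", ram_counters.transferred);
        return 0;
    }

Centralising the update means later changes to the accounting (tracing, extra
counters) touch one function rather than every call site.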