void cpu_throttle_dirty_sync_timer_tick(void *opaque)
{
- uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+ uint64_t sync_cnt = qatomic_read(&mig_stats.dirty_sync_count);
/*
* The first iteration copies all memory anyhow and has no
}
end:
- throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
+ throttle_dirty_sync_count_prev = qatomic_read(&mig_stats.dirty_sync_count);
timer_mod(throttle_dirty_sync_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
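The mechanical substitution this series applies, shown once here so the remaining hunks read easily: every Stat64 accessor becomes the matching qatomic macro on a plain uint64_t. Below is a stand-alone sketch of the semantics being relied on, not the QEMU macros themselves; it assumes a 64-bit host and the GCC/Clang __atomic builtins (which qemu/atomic.h wraps), and uses relaxed ordering for illustration even though the real qatomic_add may be stronger:

    #include <stdint.h>

    /* stat64_get(&s)    -> qatomic_read(&s)   : atomic 64-bit load  */
    /* stat64_set(&s, v) -> qatomic_set(&s, v) : atomic 64-bit store */
    /* stat64_add(&s, v) -> qatomic_add(&s, v) : atomic fetch-add    */

    static uint64_t counter;

    static uint64_t sketch_read(uint64_t *p)
    {
        return __atomic_load_n(p, __ATOMIC_RELAXED);
    }

    static void sketch_set(uint64_t *p, uint64_t v)
    {
        __atomic_store_n(p, v, __ATOMIC_RELAXED);
    }

    static void sketch_add(uint64_t *p, uint64_t v)
    {
        __atomic_fetch_add(p, v, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        sketch_add(&counter, 2);
        sketch_set(&counter, sketch_read(&counter) + 1);
        return counter == 3 ? 0 : 1;   /* single-threaded sanity check */
    }

On a 64-bit host a uint64_t access is a single machine word, so nothing is lost relative to Stat64, which was essentially a portability shim for hosts without cheap 64-bit atomics.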
*/
#include "qemu/osdep.h"
-#include "qemu/stats64.h"
+#include "qemu/atomic.h"
#include "qemu-file.h"
#include "trace.h"
#include "migration-stats.h"
return false;
}
- uint64_t rate_limit_start = stat64_get(&mig_stats.rate_limit_start);
+ uint64_t rate_limit_start = qatomic_read(&mig_stats.rate_limit_start);
uint64_t rate_limit_current = migration_transferred_bytes();
uint64_t rate_limit_used = rate_limit_current - rate_limit_start;
uint64_t migration_rate_get(void)
{
- return stat64_get(&mig_stats.rate_limit_max);
+ return qatomic_read(&mig_stats.rate_limit_max);
}
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
/*
* 'limit' is per second. But we check it each BUFFER_DELAY milliseconds.
*/
- stat64_set(&mig_stats.rate_limit_max, limit / XFER_LIMIT_RATIO);
+ qatomic_set(&mig_stats.rate_limit_max, limit / XFER_LIMIT_RATIO);
}
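For concreteness, a worked example of the division above, assuming BUFFER_DELAY is 100 (its value in current QEMU, in milliseconds):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BUFFER_DELAY     100                   /* ms between rate checks */
    #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) /* 10 cycles per second */

    int main(void)
    {
        uint64_t limit = 128 * 1024 * 1024;        /* 128 MiB/s requested */
        uint64_t per_cycle = limit / XFER_LIMIT_RATIO;

        /* 134217728 / 10 = 13421772: ~12.8 MiB allowed per 100 ms cycle */
        printf("%" PRIu64 " bytes per cycle\n", per_cycle);
        return 0;
    }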
void migration_rate_reset(void)
{
- stat64_set(&mig_stats.rate_limit_start, migration_transferred_bytes());
+ qatomic_set(&mig_stats.rate_limit_start, migration_transferred_bytes());
}
uint64_t migration_transferred_bytes(void)
{
- uint64_t multifd = stat64_get(&mig_stats.multifd_bytes);
- uint64_t rdma = stat64_get(&mig_stats.rdma_bytes);
- uint64_t qemu_file = stat64_get(&mig_stats.qemu_file_transferred);
+ uint64_t multifd = qatomic_read(&mig_stats.multifd_bytes);
+ uint64_t rdma = qatomic_read(&mig_stats.rdma_bytes);
+ uint64_t qemu_file = qatomic_read(&mig_stats.qemu_file_transferred);
trace_migration_transferred_bytes(qemu_file, multifd, rdma);
return qemu_file + multifd + rdma;
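Worth noting, because the conversion does not change it: the total is assembled from three independent relaxed loads, so it is a momentary approximation rather than a consistent snapshot across all three counters, which is fine for progress reporting. A sketch of that property, with hypothetical names and plain C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t qemu_file_bytes, multifd_bytes, rdma_bytes;

    /*
     * Each load is individually atomic (no torn 64-bit reads), but a
     * writer may bump one counter between our three loads, so the sum
     * can transiently lag by an update or two.
     */
    static uint64_t total_transferred(void)
    {
        return atomic_load_explicit(&qemu_file_bytes, memory_order_relaxed) +
               atomic_load_explicit(&multifd_bytes, memory_order_relaxed) +
               atomic_load_explicit(&rdma_bytes, memory_order_relaxed);
    }

    int main(void)
    {
        atomic_fetch_add_explicit(&multifd_bytes, 4096, memory_order_relaxed);
        return total_transferred() == 4096 ? 0 : 1;
    }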
#ifndef QEMU_MIGRATION_STATS_H
#define QEMU_MIGRATION_STATS_H
-#include "qemu/stats64.h"
-
/*
* Amount of time to allocate to each "chunk" of bandwidth-throttled
* data.
/*
* These are the ram migration statistic counters. It is loosely
- * based on MigrationStats. We change to Stat64 any counter that
- * needs to be updated using atomic ops (can be accessed by more than
- * one thread).
+ * based on MigrationStats. Counters that can be accessed by more
+ * than one thread are updated with qatomic ops.
*/
typedef struct {
/*
* since last iteration, not counting what the guest has dirtied
* since we synchronized bitmaps.
*/
- Stat64 dirty_bytes_last_sync;
+ uint64_t dirty_bytes_last_sync;
/*
* Number of pages dirtied per second.
*/
- Stat64 dirty_pages_rate;
+ uint64_t dirty_pages_rate;
/*
* Number of times we have synchronized guest bitmaps.
*/
- Stat64 dirty_sync_count;
+ uint64_t dirty_sync_count;
/*
* Number of times zero copy failed to send any page using zero
* copy.
*/
- Stat64 dirty_sync_missed_zero_copy;
+ uint64_t dirty_sync_missed_zero_copy;
/*
* Number of bytes sent at migration completion stage while the
* guest is stopped.
*/
- Stat64 downtime_bytes;
+ uint64_t downtime_bytes;
/*
* Number of bytes sent through multifd channels.
*/
- Stat64 multifd_bytes;
+ uint64_t multifd_bytes;
/*
* Number of pages transferred that were not full of zeros.
*/
- Stat64 normal_pages;
+ uint64_t normal_pages;
/*
* Number of bytes sent during postcopy.
*/
- Stat64 postcopy_bytes;
+ uint64_t postcopy_bytes;
/*
* Number of postcopy page faults that we have handled during
* postcopy stage.
*/
- Stat64 postcopy_requests;
+ uint64_t postcopy_requests;
/*
* Number of bytes sent during precopy stage.
*/
- Stat64 precopy_bytes;
+ uint64_t precopy_bytes;
/*
* Number of bytes transferred with QEMUFile.
*/
- Stat64 qemu_file_transferred;
+ uint64_t qemu_file_transferred;
/*
* Amount of transferred data at the start of current cycle.
*/
- Stat64 rate_limit_start;
+ uint64_t rate_limit_start;
/*
* Maximum amount of data we can send in a cycle.
*/
- Stat64 rate_limit_max;
+ uint64_t rate_limit_max;
/*
* Number of bytes sent through RDMA.
*/
- Stat64 rdma_bytes;
+ uint64_t rdma_bytes;
/*
* Number of pages transferred that were full of zeros.
*/
- Stat64 zero_pages;
+ uint64_t zero_pages;
} MigrationAtomicStats;
extern MigrationAtomicStats mig_stats;
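To make the concurrency contract of this struct concrete: migration threads increment counters without a lock, and the QMP query path reads them without a lock. A self-contained sketch under those assumptions (SketchStats and sender are hypothetical names; build with -pthread):

    #include <inttypes.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        _Atomic uint64_t normal_pages;
        _Atomic uint64_t zero_pages;
    } SketchStats;

    static SketchStats stats;

    static void *sender(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            atomic_fetch_add_explicit(&stats.normal_pages, 1,
                                      memory_order_relaxed);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, sender, NULL);
        pthread_create(&b, NULL, sender, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* Always 200000: the increments are atomic even with no lock. */
        printf("%" PRIu64 "\n",
               atomic_load_explicit(&stats.normal_pages,
                                    memory_order_relaxed));
        return 0;
    }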
info->ram = g_malloc0(sizeof(*info->ram));
info->ram->transferred = migration_transferred_bytes();
info->ram->total = ram_bytes_total();
- info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
- info->ram->normal = stat64_get(&mig_stats.normal_pages);
+ info->ram->duplicate = qatomic_read(&mig_stats.zero_pages);
+ info->ram->normal = qatomic_read(&mig_stats.normal_pages);
info->ram->normal_bytes = info->ram->normal * page_size;
info->ram->mbps = s->mbps;
info->ram->dirty_sync_count =
- stat64_get(&mig_stats.dirty_sync_count);
+ qatomic_read(&mig_stats.dirty_sync_count);
info->ram->dirty_sync_missed_zero_copy =
- stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
+ qatomic_read(&mig_stats.dirty_sync_missed_zero_copy);
info->ram->postcopy_requests =
- stat64_get(&mig_stats.postcopy_requests);
+ qatomic_read(&mig_stats.postcopy_requests);
info->ram->page_size = page_size;
- info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
+ info->ram->multifd_bytes = qatomic_read(&mig_stats.multifd_bytes);
info->ram->pages_per_second = s->pages_per_second;
- info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
- info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
- info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);
+ info->ram->precopy_bytes = qatomic_read(&mig_stats.precopy_bytes);
+ info->ram->downtime_bytes = qatomic_read(&mig_stats.downtime_bytes);
+ info->ram->postcopy_bytes = qatomic_read(&mig_stats.postcopy_bytes);
if (migrate_xbzrle()) {
info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
if (s->state != MIGRATION_STATUS_COMPLETED) {
info->ram->remaining = ram_bytes_remaining();
info->ram->dirty_pages_rate =
- stat64_get(&mig_stats.dirty_pages_rate);
+ qatomic_read(&mig_stats.dirty_pages_rate);
}
if (migrate_dirty_limit() && dirtylimit_in_service()) {
* if we haven't sent anything, we don't want to
* recalculate. 10000 is a small enough number for our purposes
*/
- if (stat64_get(&mig_stats.dirty_pages_rate) &&
+ if (qatomic_read(&mig_stats.dirty_pages_rate) &&
transferred > 10000) {
s->expected_downtime =
- stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
+ qatomic_read(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
}
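A worked instance of the estimate above, with round numbers of my choosing: if the last bitmap sync left 500,000,000 bytes dirty and the measured bandwidth is 125,000 bytes/ms (roughly 1 Gbps), then expected_downtime = 500000000 / 125000 = 4000 ms, so a default 300 ms downtime limit would keep the migration iterating rather than converging.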
migration_rate_reset();
return -1;
}
- stat64_add(&mig_stats.multifd_bytes, p->packet_len);
+ qatomic_add(&mig_stats.multifd_bytes, p->packet_len);
}
return 0;
pages->normal_num = i;
out:
- stat64_add(&mig_stats.normal_pages, pages->normal_num);
- stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
+ qatomic_add(&mig_stats.normal_pages, pages->normal_num);
+ qatomic_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
}
void multifd_recv_zero_page_process(MultiFDRecvParams *p)
* operations on both 32bit / 64 bits hosts. It means on 32bit systems
* multifd will overflow the packet_num easier, but that should be
* fine.
- *
- * Another option is to use QEMU's Stat64 then it'll be 64 bits on all
- * hosts, however so far it does not support atomic fetch_add() yet.
- * Make it easy for now.
*/
uintptr_t packet_num;
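Given the comment above, packet_num is meant to be bumped with an atomic post-increment that simply wraps sooner on 32-bit hosts. A sketch of just that behavior, with a hypothetical alloc_packet_num() (the real increment site is elsewhere in multifd.c):

    #include <stdint.h>

    static uintptr_t next_packet_num;

    /*
     * Atomic post-increment; uintptr_t is 32 bits on a 32-bit host, so
     * the value wraps at 2^32 there, which the comment above deems fine.
     */
    static uintptr_t alloc_packet_num(void)
    {
        return __atomic_fetch_add(&next_packet_num, 1, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        return (alloc_packet_num() == 0 && alloc_packet_num() == 1) ? 0 : 1;
    }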
/*
if (ret != 0) {
return -1;
}
- stat64_add(&mig_stats.multifd_bytes, size);
+ qatomic_add(&mig_stats.multifd_bytes, size);
return 0;
}
return -1;
}
if (ret == 1) {
- stat64_add(&mig_stats.dirty_sync_missed_zero_copy, 1);
+ qatomic_add(&mig_stats.dirty_sync_missed_zero_copy, 1);
}
return ret;
break;
}
- stat64_add(&mig_stats.multifd_bytes, total_size);
+ qatomic_add(&mig_stats.multifd_bytes, total_size);
p->next_packet_size = 0;
multifd_send_data_clear(p->data);
break;
}
/* p->next_packet_size will always be zero for a SYNC packet */
- stat64_add(&mig_stats.multifd_bytes, p->packet_len);
+ qatomic_add(&mig_stats.multifd_bytes, p->packet_len);
}
qatomic_set(&p->pending_sync, MULTIFD_SYNC_NONE);
qemu_file_set_error_obj(f, -EIO, local_error);
} else {
uint64_t size = iov_size(f->iov, f->iovcnt);
- stat64_add(&mig_stats.qemu_file_transferred, size);
+ qatomic_add(&mig_stats.qemu_file_transferred, size);
}
qemu_iovec_release_ram(f);
return;
}
- stat64_add(&mig_stats.qemu_file_transferred, buflen);
+ qatomic_add(&mig_stats.qemu_file_transferred, buflen);
}
uint64_t qemu_file_transferred(QEMUFile *f)
{
- uint64_t ret = stat64_get(&mig_stats.qemu_file_transferred);
+ uint64_t ret = qatomic_read(&mig_stats.qemu_file_transferred);
int i;
g_assert(qemu_file_is_writable(f));
void ram_transferred_add(uint64_t bytes)
{
if (runstate_is_running()) {
- stat64_add(&mig_stats.precopy_bytes, bytes);
+ qatomic_add(&mig_stats.precopy_bytes, bytes);
} else if (migration_in_postcopy()) {
- stat64_add(&mig_stats.postcopy_bytes, bytes);
+ qatomic_add(&mig_stats.postcopy_bytes, bytes);
} else {
- stat64_add(&mig_stats.downtime_bytes, bytes);
+ qatomic_add(&mig_stats.downtime_bytes, bytes);
}
}
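In table form, my reading of the branches above (no new behavior, just the bucket selection):

    /*
     *   state when bytes are sent        counter incremented
     *   -------------------------        -------------------
     *   guest running (precopy)          precopy_bytes
     *   not running, postcopy active     postcopy_bytes
     *   not running, completion stage    downtime_bytes
     */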
/* We don't care if this fails to allocate a new cache page
* as long as it updated an old one */
cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
- stat64_get(&mig_stats.dirty_sync_count));
+ qatomic_read(&mig_stats.dirty_sync_count));
}
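The dirty_sync_count read here is handed to the page cache as an age/generation stamp. As an illustration of generation-tagged caching in general (a deliberate simplification of my own; the real PageCache uses the stamp for freshness and replacement decisions rather than exactly this check):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t addr;
        uint64_t generation;   /* sync count at insertion time */
        bool valid;
    } SketchCacheEntry;

    /*
     * Gate hits on the current generation so entries inserted before
     * the latest bitmap sync are treated as stale.
     */
    static bool sketch_is_cached(const SketchCacheEntry *e,
                                 uint64_t addr, uint64_t current_gen)
    {
        return e->valid && e->addr == addr && e->generation == current_gen;
    }

    int main(void)
    {
        SketchCacheEntry e = { .addr = 0x1000, .generation = 3, .valid = true };

        /* Hits in generation 3; misses once generation 4 begins. */
        return (sketch_is_cached(&e, 0x1000, 3) &&
                !sketch_is_cached(&e, 0x1000, 4)) ? 0 : 1;
    }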
#define ENCODING_FLAG_XBZRLE 0x1
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
QEMUFile *file = pss->pss_channel;
- uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
+ uint64_t generation = qatomic_read(&mig_stats.dirty_sync_count);
if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
xbzrle_counters.cache_miss++;
uint64_t ram_get_total_transferred_pages(void)
{
- return stat64_get(&mig_stats.normal_pages) +
- stat64_get(&mig_stats.zero_pages) +
- xbzrle_counters.pages;
+ return qatomic_read(&mig_stats.normal_pages) +
+ qatomic_read(&mig_stats.zero_pages) +
+ xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
/* calculate period counters */
- stat64_set(&mig_stats.dirty_pages_rate,
+ qatomic_set(&mig_stats.dirty_pages_rate,
rs->num_dirty_pages_period * 1000 /
(end_time - rs->time_last_bitmap_sync));
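Worked example with made-up numbers for the formula above: 50,000 pages dirtied over a 500 ms period gives 50000 * 1000 / 500 = 100,000 pages/s; at 4 KiB per page that is roughly 390 MiB/s of dirtying for the convergence logic to weigh against available bandwidth. Multiplying by 1000 before dividing keeps integer precision.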
RAMBlock *block;
int64_t end_time;
- stat64_add(&mig_stats.dirty_sync_count, 1);
+ qatomic_add(&mig_stats.dirty_sync_count, 1);
if (!rs->time_last_bitmap_sync) {
rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(rs, block);
}
- stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
+ qatomic_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
}
}
rs->bytes_xfer_prev = migration_transferred_bytes();
}
if (migrate_events()) {
- uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
+ uint64_t generation = qatomic_read(&mig_stats.dirty_sync_count);
qapi_event_send_migration_pass(generation);
}
}
return 0;
}
- stat64_add(&mig_stats.zero_pages, 1);
+ qatomic_add(&mig_stats.zero_pages, 1);
if (migrate_mapped_ram()) {
/* zero pages are not transferred with mapped-ram */
}
}
ram_transferred_add(TARGET_PAGE_SIZE);
- stat64_add(&mig_stats.normal_pages, 1);
+ qatomic_add(&mig_stats.normal_pages, 1);
return 1;
}
RAMBlock *ramblock;
RAMState *rs = ram_state;
- stat64_add(&mig_stats.postcopy_requests, 1);
+ qatomic_add(&mig_stats.postcopy_requests, 1);
RCU_READ_LOCK_GUARD();
if (!rbname) {
* would think that head.len would be the more similar
* thing to a correct value.
*/
- stat64_add(&mig_stats.zero_pages,
- sge.length / qemu_target_page_size());
+ qatomic_add(&mig_stats.zero_pages,
+ sge.length / qemu_target_page_size());
return 1;
}
}
set_bit(chunk, block->transit_bitmap);
- stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
+ qatomic_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
/*
* We are adding to transferred the amount of data written, but no
* overhead at all. I will assume that RDMA is magical and doesn't
* need to transfer anything more than sizeof(send_wr) + sge.length,
* but this being RDMA, who knows.
*/
- stat64_add(&mig_stats.rdma_bytes, sge.length);
+ qatomic_add(&mig_stats.rdma_bytes, sge.length);
ram_transferred_add(sge.length);
rdma->total_writes++;