/* number of vCPUs currently suspended */
int smp_cpus_down;
+ /*
+ * Fast path for looking up vcpu_index from tid. NOTE: this result
+ * only reflects the vcpu setup when postcopy is running. It may not
+ * always match the current vcpu setup, because vcpus can be hot
+ * attached/detached after migration completes. However, it should
+ * remain stable for as long as blocktime is using the structure.
+ */
+ GHashTable *tid_to_vcpu_hash;
+ /* Count of non-vCPU faults. This is only for debugging purposes. */
+ uint64_t non_vcpu_faults;
+
/*
* Handler for exit event, necessary for
* releasing whole blocktime_ctx
*/
Notifier exit_notifier;
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
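+ /* This also frees the boxed vcpu index values via the g_free destroy func */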
+ g_hash_table_destroy(ctx->tid_to_vcpu_hash);
g_free(ctx->vcpu_blocktime_start);
g_free(ctx->vcpu_blocktime_total);
g_free(ctx->vcpu_faults_count);
g_free(ctx->vcpu_addr);
g_free(ctx);
}
+static GHashTable *blocktime_init_tid_to_vcpu_hash(void)
+{
+ /*
+ * TID as an unsigned int can be directly used as the key. However,
+ * CPU index can NOT be directly used as the value, because the index
+ * can be 0, which is indistinguishable from NULL: a lookup could
+ * never tell "vCPU 0" apart from "not found". Hence use an
+ * indirection (a heap-allocated int) for the CPU index.
+ */
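+ /* Keys are direct ints (no destructor); values are freed with g_free() */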
+ GHashTable *table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
+ NULL, g_free);
+ CPUState *cpu;
+
+ /*
+ * Initialize the tid->cpu_index mapping for lookups. The caller must
+ * make sure that by the time we reach here the CPU topology is frozen
+ * and will stay stable for the whole blocktime trapping period.
+ */
+ CPU_FOREACH(cpu) {
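+ /* Box the index on the heap so that vcpu 0 does not collide with NULL */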
+ int *value = g_new(int, 1);
+
+ *value = cpu->cpu_index;
+ g_hash_table_insert(table,
+ GUINT_TO_POINTER((uint32_t)cpu->thread_id),
+ value);
+ trace_postcopy_blocktime_tid_cpu_map(cpu->cpu_index, cpu->thread_id);
+ }
+
+ return table;
+}
+
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
MachineState *ms = MACHINE(qdev_get_machine());
ctx->vcpu_blocktime_total = g_new0(uint64_t, smp_cpus);
ctx->vcpu_faults_count = g_new0(uint64_t, smp_cpus);
ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
+ ctx->tid_to_vcpu_hash = blocktime_init_tid_to_vcpu_hash();
+
ctx->exit_notifier.notify = migration_exit_cb;
qemu_add_exit_notifier(&ctx->exit_notifier);
return ctx;
}
-static int get_mem_fault_cpu_index(uint32_t pid)
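+/*
+ * Resolve which vCPU triggered a fault from its thread ID. Returns
+ * the vCPU index, or -1 if @tid does not belong to any vCPU thread.
+ */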
+static int blocktime_get_vcpu(PostcopyBlocktimeContext *ctx, uint32_t tid)
{
- CPUState *cpu_iter;
+ int *found;
- CPU_FOREACH(cpu_iter) {
- if (cpu_iter->thread_id == pid) {
- trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
- return cpu_iter->cpu_index;
- }
+ found = g_hash_table_lookup(ctx->tid_to_vcpu_hash, GUINT_TO_POINTER(tid));
+ if (!found) {
+ /*
+ * NOTE: this is possible because QEMU's non-vCPU threads can also
+ * access a missing page. Or, when KVM async page fault is enabled,
+ * a fault can even come from a kworker.
+ */
+ return -1;
}
- trace_get_mem_fault_cpu_index(-1, pid);
- return -1;
+
+ return *found;
}
static uint64_t get_current_ns(void)
if (!dc || ptid == 0) {
return;
}
- cpu = get_mem_fault_cpu_index(ptid);
+ cpu = blocktime_get_vcpu(dc, ptid);
if (cpu < 0) {
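+ /* Not a fault from a vCPU thread; count it for debugging only */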
+ dc->non_vcpu_faults++;
return;
}