KVM: selftests: Add infrastructure for getting vCPU binary stats
author     Sean Christopherson <seanjc@google.com>
           Sat, 11 Jan 2025 00:50:48 +0000 (16:50 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Fri, 14 Feb 2025 15:02:13 +0000 (07:02 -0800)
Now that the binary stats cache infrastructure is largely scope agnostic,
add support for vCPU-scoped stats.  As with VM stats, open and cache the
stats FD when the vCPU is created so that it's guaranteed to be valid when
vcpu_get_stat() is invoked.
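
For reference, a call like vcpu_get_stat(vcpu, halt_exits) expands to
roughly the following (a sketch of the statement-expression accessor added
in the kvm_util.h hunk below; #stat stringifies the name passed to
kvm_get_stat()):

	({
		uint64_t data;

		kvm_get_stat(&vcpu->stats, "halt_exits", &data, 1);
		data;	/* value of the statement expression */
	})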

Account for the extra per-vCPU file descriptor in kvm_set_files_rlimit(),
so that tests that create large VMs don't run afoul of resource limits.
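
For illustration, the accounting amounts to the standalone sketch below,
using only plain libc; the real helper is kvm_set_files_rlimit() in
lib/kvm_util.c (updated in the diff on this page), and the function name
here is hypothetical:

	#include <stdint.h>
	#include <sys/resource.h>

	/*
	 * Two fds per vCPU (the vCPU fd and its binary stats fd), plus an
	 * arbitrary amount of slack for the VM fd, /dev/kvm, memslot
	 * backing files, etc.
	 */
	static void bump_nofile_limit(uint32_t nr_vcpus)
	{
		int nr_fds_wanted = nr_vcpus * 2 + 100;
		struct rlimit rl;

		if (getrlimit(RLIMIT_NOFILE, &rl))
			return;

		if (rl.rlim_cur < (rlim_t)nr_fds_wanted) {
			rl.rlim_cur = nr_fds_wanted;
			/* Raising the hard limit needs CAP_SYS_RESOURCE. */
			if (rl.rlim_max < (rlim_t)nr_fds_wanted)
				rl.rlim_max = nr_fds_wanted;
			setrlimit(RLIMIT_NOFILE, &rl);	/* best effort in this sketch */
		}
	}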

To sanity check that the infrastructure actually works, and to get a bit
of bonus coverage, add an assert in x86's xapic_ipi_test to verify that
the number of HLTs executed by the test matches the number of HLT exits
observed by KVM.
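
With this in place, VM- and vCPU-scoped stats share one accessor pattern;
e.g. a test holding the usual vm/vcpu handles from the selftests harness
could do the following (a usage sketch: halt_exits is the stat read by the
xapic_ipi_test hunk below, while remote_tlb_flush is shown only as an
example of a VM-scoped stat):

	uint64_t flushes = vm_get_stat(vm, remote_tlb_flush);
	uint64_t halts = vcpu_get_stat(vcpu, halt_exits);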

Tested-by: Manali Shukla <Manali.Shukla@amd.com>
Link: https://lore.kernel.org/r/20250111005049.1247555-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/x86/xapic_ipi_test.c

diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index d4670b5962ab569961575c42baad55d415403f57..373912464fb4036de21eb628fb9045060ef3edbb 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -61,6 +61,7 @@ struct kvm_vcpu {
 #ifdef __x86_64__
        struct kvm_cpuid2 *cpuid;
 #endif
+       struct kvm_binary_stats stats;
        struct kvm_dirty_gfn *dirty_gfns;
        uint32_t fetch_index;
        uint32_t dirty_gfns_count;
@@ -534,17 +535,20 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
                    struct kvm_stats_desc *desc, uint64_t *data,
                    size_t max_elements);
 
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
-                  size_t max_elements);
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+                 uint64_t *data, size_t max_elements);
 
-#define vm_get_stat(vm, stat)                          \
-({                                                     \
-       uint64_t data;                                  \
-                                                       \
-       __vm_get_stat(vm, #stat, &data, 1);             \
-       data;                                           \
+#define __get_stat(stats, stat)                                                        \
+({                                                                             \
+       uint64_t data;                                                          \
+                                                                               \
+       kvm_get_stat(stats, #stat, &data, 1);                                   \
+       data;                                                                   \
 })
 
+#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
+#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
+
 void vm_create_irqchip(struct kvm_vm *vm);
 
 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index faeeb576be909f5da391b9c1a55e7f6f65bc615c..279ad8946040cb2b631203db1de8458f31365458 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -415,10 +415,11 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
 void kvm_set_files_rlimit(uint32_t nr_vcpus)
 {
        /*
-        * Number of file descriptors required, nr_vpucs vCPU fds + an arbitrary
-        * number for everything else.
+        * Each vCPU will open two file descriptors: the vCPU itself and the
+        * vCPU's binary stats file descriptor.  Add an arbitrary amount of
+        * buffer for all other files a test may open.
         */
-       int nr_fds_wanted = nr_vcpus + 100;
+       int nr_fds_wanted = nr_vcpus * 2 + 100;
        struct rlimit rl;
 
        /*
@@ -746,6 +747,8 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
        ret = close(vcpu->fd);
        TEST_ASSERT(!ret,  __KVM_SYSCALL_ERROR("close()", ret));
 
+       kvm_stats_release(&vcpu->stats);
+
        list_del(&vcpu->list);
 
        vcpu_arch_free(vcpu);
@@ -1339,6 +1342,11 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
        TEST_ASSERT(vcpu->run != MAP_FAILED,
                    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
 
+       if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
+               vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
+       else
+               vcpu->stats.fd = -1;
+
        /* Add to linked-list of VCPUs. */
        list_add(&vcpu->list, &vm->vcpus);
 
@@ -2251,23 +2259,9 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
                    desc->name, size, ret);
 }
 
-/*
- * Read the data of the named stat
- *
- * Input Args:
- *   vm - the VM for which the stat should be read
- *   stat_name - the name of the stat to read
- *   max_elements - the maximum number of 8-byte values to read into data
- *
- * Output Args:
- *   data - the buffer into which stat data should be read
- *
- * Read the data values of a specified stat from the binary stats interface.
- */
-void __vm_get_stat(struct kvm_vm *vm, const char *name, uint64_t *data,
-                  size_t max_elements)
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+                 uint64_t *data, size_t max_elements)
 {
-       struct kvm_binary_stats *stats = &vm->stats;
        struct kvm_stats_desc *desc;
        size_t size_desc;
        int i;
diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
index a76078a08ff82f527e5b62d4e96234647c957a38..574a944763b79bafd1e7f2499ab626f29975c68a 100644
--- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
@@ -465,6 +465,8 @@ int main(int argc, char *argv[])
        cancel_join_vcpu_thread(threads[0], params[0].vcpu);
        cancel_join_vcpu_thread(threads[1], params[1].vcpu);
 
+       TEST_ASSERT_EQ(data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
+
        fprintf(stderr,
                "Test successful after running for %d seconds.\n"
                "Sending vCPU sent %lu IPIs to halting vCPU\n"