]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: selftests: Move Hyper-V specific functions out of processor.c
authorVitaly Kuznetsov <vkuznets@redhat.com>
Fri, 16 Aug 2024 13:01:38 +0000 (15:01 +0200)
committerSean Christopherson <seanjc@google.com>
Thu, 22 Aug 2024 19:14:42 +0000 (12:14 -0700)
Since 'hyperv.c' already exists for Hyper-V specific functions, move the
Hyper-V specific functions out of processor.c and into it.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20240816130139.286246-2-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86_64/hyperv.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/hyperv.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c

index fa65b908b13e5b9dd425075e9751e7fa638e1984..a2e7cf7ee0ad1ab1d2dce76168ebb177434e2fef 100644 (file)
@@ -343,4 +343,8 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
 #define HV_INVARIANT_TSC_EXPOSED               BIT_ULL(0)
 
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+
 #endif /* !SELFTEST_KVM_HYPERV_H */
index a0c1440017bb362f82c0d92ef7da7ef935782f38..e247f99e0473dfd37880a353d7a680d2243d2755 100644 (file)
@@ -25,6 +25,10 @@ extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 extern uint64_t guest_tsc_khz;
 
+#ifndef MAX_NR_CPUID_ENTRIES
+#define MAX_NR_CPUID_ENTRIES 100
+#endif
+
 /* Forced emulation prefix, used to invoke the emulator unconditionally. */
 #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
 
@@ -908,8 +912,6 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
                                               uint32_t function, uint32_t index);
 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
 
 static inline uint32_t kvm_cpu_fms(void)
 {
@@ -1009,7 +1011,6 @@ static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
 }
 
 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
 
 static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
                                                              uint32_t function,
index efb7e7a1354dc4b05b5064fa0a5cc58a62b13688..b4a5e4ad710545c54cf0b4dae21fef22fcf0b45d 100644 (file)
@@ -8,6 +8,65 @@
 #include "processor.h"
 #include "hyperv.h"
 
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+{
+       static struct kvm_cpuid2 *cpuid;
+       int kvm_fd;
+
+       if (cpuid)
+               return cpuid;
+
+       cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+       kvm_fd = open_kvm_dev_path_or_exit();
+
+       kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+       close(kvm_fd);
+       return cpuid;
+}
+
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+       static struct kvm_cpuid2 *cpuid_full;
+       const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+       int i, nent = 0;
+
+       if (!cpuid_full) {
+               cpuid_sys = kvm_get_supported_cpuid();
+               cpuid_hv = kvm_get_supported_hv_cpuid();
+
+               cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
+               if (!cpuid_full) {
+                       perror("malloc");
+                       abort();
+               }
+
+               /* Need to skip KVM CPUID leaves 0x400000xx */
+               for (i = 0; i < cpuid_sys->nent; i++) {
+                       if (cpuid_sys->entries[i].function >= 0x40000000 &&
+                           cpuid_sys->entries[i].function < 0x40000100)
+                               continue;
+                       cpuid_full->entries[nent] = cpuid_sys->entries[i];
+                       nent++;
+               }
+
+               memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+                      cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+               cpuid_full->nent = nent + cpuid_hv->nent;
+       }
+
+       vcpu_init_cpuid(vcpu, cpuid_full);
+}
+
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+
+       vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+       return cpuid;
+}
+
 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
                                                       vm_vaddr_t *p_hv_pages_gva)
 {
index 153739f2e201fc17f3c075805aa3e674a9708ea9..7876f052ca39f150c44655b31cc1587e2ec11b8e 100644 (file)
@@ -19,8 +19,6 @@
 #define KERNEL_DS      0x10
 #define KERNEL_TSS     0x18
 
-#define MAX_NR_CPUID_ENTRIES 100
-
 vm_vaddr_t exception_handlers;
 bool host_cpu_is_amd;
 bool host_cpu_is_intel;
@@ -1195,65 +1193,6 @@ void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
        GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
 }
 
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
-{
-       static struct kvm_cpuid2 *cpuid;
-       int kvm_fd;
-
-       if (cpuid)
-               return cpuid;
-
-       cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
-       kvm_fd = open_kvm_dev_path_or_exit();
-
-       kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
-       close(kvm_fd);
-       return cpuid;
-}
-
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
-{
-       static struct kvm_cpuid2 *cpuid_full;
-       const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
-       int i, nent = 0;
-
-       if (!cpuid_full) {
-               cpuid_sys = kvm_get_supported_cpuid();
-               cpuid_hv = kvm_get_supported_hv_cpuid();
-
-               cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
-               if (!cpuid_full) {
-                       perror("malloc");
-                       abort();
-               }
-
-               /* Need to skip KVM CPUID leaves 0x400000xx */
-               for (i = 0; i < cpuid_sys->nent; i++) {
-                       if (cpuid_sys->entries[i].function >= 0x40000000 &&
-                           cpuid_sys->entries[i].function < 0x40000100)
-                               continue;
-                       cpuid_full->entries[nent] = cpuid_sys->entries[i];
-                       nent++;
-               }
-
-               memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
-                      cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
-               cpuid_full->nent = nent + cpuid_hv->nent;
-       }
-
-       vcpu_init_cpuid(vcpu, cpuid_full);
-}
-
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
-
-       vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
-       return cpuid;
-}
-
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
 {
        const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
index e149d0574961c07e3055f5c72094a174ad6b85c6..2585087cdf5cbe4cf01c9dd267c24d2d7d6d0e6f 100644 (file)
@@ -10,6 +10,7 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
+#include "hyperv.h"
 
 #define HCALL_REGION_GPA       0xc0000000ULL
 #define HCALL_REGION_SLOT      10