git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/xen: Move kvm_xen_hvm_config field into kvm_xen
author: Sean Christopherson <seanjc@google.com>
Sat, 15 Feb 2025 01:14:37 +0000 (17:14 -0800)
committer: Sean Christopherson <seanjc@google.com>
Mon, 24 Feb 2025 16:59:59 +0000 (08:59 -0800)
Now that all KVM usage of the Xen HVM config information is buried behind
CONFIG_KVM_XEN=y, move the per-VM kvm_xen_hvm_config field out of kvm_arch
and into kvm_xen.

No functional change intended.

Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Link: https://lore.kernel.org/r/20250215011437.1203084-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/kvm/xen.h

index f31fca4c496848767b76bb16798b4703467d6d62..9df725e528b1462a53799025704b5d9bf41804d4 100644 (file)
@@ -1188,6 +1188,8 @@ struct kvm_xen {
        struct gfn_to_pfn_cache shinfo_cache;
        struct idr evtchn_ports;
        unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+
+       struct kvm_xen_hvm_config hvm_config;
 };
 #endif
 
@@ -1419,7 +1421,6 @@ struct kvm_arch {
 
 #ifdef CONFIG_KVM_XEN
        struct kvm_xen xen;
-       struct kvm_xen_hvm_config xen_hvm_config;
 #endif
 
        bool backwards_tsc_observed;
index 12c60adb7349be7d86fed1c6ba4ed4df61af1e21..f97d4d435e7f19c63aab053200666af2eaa33b15 100644 (file)
@@ -3188,7 +3188,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
         */
        bool xen_pvclock_tsc_unstable =
-               ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
+               ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
 #endif
 
        kernel_ns = 0;
index 5b94825001a7a78660f0c386920aad89745615d7..8aef7cd243490e232d23bcf7a2e39c540b95deb4 100644 (file)
@@ -1280,10 +1280,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
                 * Note, truncation is a non-issue as 'lm' is guaranteed to be
                 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
                 */
-               hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
-                                    : kvm->arch.xen_hvm_config.blob_addr_32;
-               u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
-                                 : kvm->arch.xen_hvm_config.blob_size_32;
+               hva_t blob_addr = lm ? kvm->arch.xen.hvm_config.blob_addr_64
+                                    : kvm->arch.xen.hvm_config.blob_addr_32;
+               u8 blob_size = lm ? kvm->arch.xen.hvm_config.blob_size_64
+                                 : kvm->arch.xen.hvm_config.blob_size_32;
                u8 *page;
                int ret;
 
@@ -1335,13 +1335,13 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 
        mutex_lock(&kvm->arch.xen.xen_lock);
 
-       if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
+       if (xhc->msr && !kvm->arch.xen.hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
-       else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
+       else if (!xhc->msr && kvm->arch.xen.hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);
 
-       old_flags = kvm->arch.xen_hvm_config.flags;
-       memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
+       old_flags = kvm->arch.xen.hvm_config.flags;
+       memcpy(&kvm->arch.xen.hvm_config, xhc, sizeof(*xhc));
 
        mutex_unlock(&kvm->arch.xen.xen_lock);
 
@@ -1422,7 +1422,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
        int i;
 
        if (!lapic_in_kernel(vcpu) ||
-           !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
+           !(vcpu->kvm->arch.xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
                return false;
 
        if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
@@ -2300,6 +2300,6 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
        }
        idr_destroy(&kvm->arch.xen.evtchn_ports);
 
-       if (kvm->arch.xen_hvm_config.msr)
+       if (kvm->arch.xen.hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);
 }
index 1e3a913dfb94259bb9c09dc3fed45c111463c841..d191103d8163442c725b01d7eba831879eba0afa 100644 (file)
@@ -53,7 +53,7 @@ static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
-               kvm->arch.xen_hvm_config.msr;
+               kvm->arch.xen.hvm_config.msr;
 }
 
 static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
@@ -61,13 +61,13 @@ static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
        if (!static_branch_unlikely(&kvm_xen_enabled.key))
                return false;
 
-       return msr && msr == kvm->arch.xen_hvm_config.msr;
+       return msr && msr == kvm->arch.xen.hvm_config.msr;
 }
 
 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
 {
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
-               (kvm->arch.xen_hvm_config.flags &
+               (kvm->arch.xen.hvm_config.flags &
                 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
 }