From 6fbef8615d3588450045f014c6015d0beeff6cf2 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Wed, 25 Jun 2025 17:12:21 -0700
Subject: [PATCH] KVM: x86: Replace growing set of *_in_guest bools with a u64

Store each "disabled exit" boolean in a single bit rather than a byte.

No functional change intended.

Suggested-by: Sean Christopherson
Signed-off-by: Jim Mattson
Link: https://lore.kernel.org/r/20250530185239.2335185-2-jmattson@google.com
Reviewed-by: Xiaoyao Li
Link: https://lore.kernel.org/r/20250626001225.744268-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h |  5 +----
 arch/x86/kvm/svm/svm.c          |  2 +-
 arch/x86/kvm/vmx/vmx.c          |  2 +-
 arch/x86/kvm/x86.c              |  9 +--------
 arch/x86/kvm/x86.h              | 13 +++++++++----
 5 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8f38c24cce63b..f2f641583a503 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1392,10 +1392,7 @@ struct kvm_arch {
 
 	gpa_t wall_clock;
 
-	bool mwait_in_guest;
-	bool hlt_in_guest;
-	bool pause_in_guest;
-	bool cstate_in_guest;
+	u64 disabled_exits;
 
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 803574920e410..1261447ffcdd7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5012,7 +5012,7 @@ static int svm_vm_init(struct kvm *kvm)
 	}
 
 	if (!pause_filter_count || !pause_filter_thresh)
-		kvm->arch.pause_in_guest = true;
+		kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
 
 	if (enable_apicv) {
 		int ret = avic_vm_init(kvm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f81710d7d992f..b064e50c6e64e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7515,7 +7515,7 @@ free_vpid:
 int vmx_vm_init(struct kvm *kvm)
 {
 	if (!ple_gap)
-		kvm->arch.pause_in_guest = true;
+		kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
 
 	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
 		switch (l1tf_mitigation) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ceea434d297ea..6dda7bf4c44c8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6616,14 +6616,7 @@ split_irqchip_unlock:
 		    (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
 			pr_warn_once(SMT_RSB_MSG);
 
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
-			kvm->arch.pause_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
-			kvm->arch.mwait_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
-			kvm->arch.hlt_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
-			kvm->arch.cstate_in_guest = true;
+		kvm_disable_exits(kvm, cap->args[0]);
 		r = 0;
 disable_exits_unlock:
 		mutex_unlock(&kvm->lock);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 832f0faf47791..17ec8436e5650 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -499,24 +499,29 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 	__rem;						\
 	})
 
+static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
+{
+	kvm->arch.disabled_exits |= mask;
+}
+
 static inline bool kvm_mwait_in_guest(struct kvm *kvm)
 {
-	return kvm->arch.mwait_in_guest;
+	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
 }
 
 static inline bool kvm_hlt_in_guest(struct kvm *kvm)
 {
-	return kvm->arch.hlt_in_guest;
+	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
 }
 
 static inline bool kvm_pause_in_guest(struct kvm *kvm)
 {
-	return kvm->arch.pause_in_guest;
+	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
 }
 
 static inline bool kvm_cstate_in_guest(struct kvm *kvm)
 {
-	return kvm->arch.cstate_in_guest;
+	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
 }
 
 static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
-- 
2.47.2
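
Note for context (not part of the patch): the sketch below shows how the bits that end up in
kvm->arch.disabled_exits normally arrive, assuming a VMM that enables the standard
KVM_CAP_X86_DISABLE_EXITS capability on the VM fd before creating any vCPUs. The vm_fd
variable, the disable_hlt_mwait_exits() helper name, and the choice of HLT/MWAIT are
illustrative only.

/*
 * Hypothetical userspace sketch: ask KVM not to exit on HLT and MWAIT.
 * KVM_ENABLE_CAP on the VM fd passes args[0] to kvm_vm_ioctl_enable_cap(),
 * which with this patch calls kvm_disable_exits() to OR the requested
 * bits into kvm->arch.disabled_exits.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int disable_hlt_mwait_exits(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;
	cap.args[0] = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_MWAIT;

	/* Must be done before any vCPUs are created. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}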