git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: nVMX: Add macros to simplify nested MSR interception setting
author: Dapeng Mi <dapeng1.mi@linux.intel.com>
Sat, 6 Dec 2025 00:17:06 +0000 (16:17 -0800)
committer: Sean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 19:52:13 +0000 (11:52 -0800)
Add macros nested_vmx_merge_msr_bitmaps_xxx() to simplify nested MSR
interception setting. No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-31-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/nested.c

index 40777278eabbf2986a9039ebb6b877bc628bbc2d..b56ed2b1ac674c88f1c7e94674b945e2507be1e4 100644 (file)
@@ -617,6 +617,19 @@ static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
                                                   msr_bitmap_l0, msr);
 }
 
+#define nested_vmx_merge_msr_bitmaps(msr, type)        \
+       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1,    \
+                                        msr_bitmap_l0, msr, type)
+
+#define nested_vmx_merge_msr_bitmaps_read(msr) \
+       nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_R)
+
+#define nested_vmx_merge_msr_bitmaps_write(msr) \
+       nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_W)
+
+#define nested_vmx_merge_msr_bitmaps_rw(msr) \
+       nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
+
 /*
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
@@ -700,23 +713,13 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
         * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
         */
 #ifdef CONFIG_X86_64
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_FS_BASE, MSR_TYPE_RW);
-
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_GS_BASE, MSR_TYPE_RW);
-
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+       nested_vmx_merge_msr_bitmaps_rw(MSR_FS_BASE);
+       nested_vmx_merge_msr_bitmaps_rw(MSR_GS_BASE);
+       nested_vmx_merge_msr_bitmaps_rw(MSR_KERNEL_GS_BASE);
 #endif
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
-
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_IA32_PRED_CMD, MSR_TYPE_W);
-
-       nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
-                                        MSR_IA32_FLUSH_CMD, MSR_TYPE_W);
+       nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_SPEC_CTRL);
+       nested_vmx_merge_msr_bitmaps_write(MSR_IA32_PRED_CMD);
+       nested_vmx_merge_msr_bitmaps_write(MSR_IA32_FLUSH_CMD);
 
        nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
                                         MSR_IA32_APERF, MSR_TYPE_R);