git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: nSVM: Merge MSRPM in 64-bit chunks on 64-bit kernels
author: Sean Christopherson <seanjc@google.com>
Tue, 10 Jun 2025 22:57:34 +0000 (15:57 -0700)
committer: Sean Christopherson <seanjc@google.com>
Fri, 20 Jun 2025 20:07:36 +0000 (13:07 -0700)
When merging L0 and L1 MSRPMs as part of nested VMRUN emulation, access
the bitmaps using "unsigned long" chunks, i.e. use 8-byte access for
64-bit kernels instead of arbitrarily working on 4-byte chunks.

Opportunistically rename local variables in nested_svm_merge_msrpm() to
more precisely/accurately reflect their purpose ("offset" in particular is
extremely ambiguous).

Link: https://lore.kernel.org/r/20250610225737.156318-30-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/nested.c

index 7ca45361ced36a56759eb30e25fbf0f6f877af0c..749f7b866ac808f9d12b2a8ab1402553a0498f12 100644 (file)
@@ -196,6 +196,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
  */
 static int nested_svm_msrpm_merge_offsets[6] __ro_after_init;
 static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
+typedef unsigned long nsvm_msrpm_merge_t;
 
 int __init nested_svm_init_msrpm_merge_offsets(void)
 {
@@ -230,10 +231,10 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
                        return -EIO;
 
                /*
-                * Merging is done in 32-bit chunks to reduce the number of
-                * accesses to L1's bitmap.
+                * Merging is done in chunks to reduce the number of accesses
+                * to L1's bitmap.
                 */
-               offset = bit_nr / BITS_PER_BYTE / sizeof(u32);
+               offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
 
                for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
                        if (nested_svm_msrpm_merge_offsets[j] == offset)
@@ -261,8 +262,8 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
 static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       u32 *msrpm02 = svm->nested.msrpm;
-       u32 *msrpm01 = svm->msrpm;
+       nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
+       nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
        int i;
 
        /*
@@ -289,15 +290,15 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
                const int p = nested_svm_msrpm_merge_offsets[i];
-               u32 value;
-               u64 offset;
+               nsvm_msrpm_merge_t l1_val;
+               gpa_t gpa;
 
-               offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
+               gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
 
-               if (kvm_vcpu_read_guest(vcpu, offset, &value, 4))
+               if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
                        return false;
 
-               msrpm02[p] = msrpm01[p] | value;
+               msrpm02[p] = msrpm01[p] | l1_val;
        }
 
        svm->nested.force_msr_bitmap_recalc = false;