git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: nSVM: Raise #UD if unhandled VMMCALL isn't intercepted by L1
author: Kevin Cheng <chengkev@google.com>
Wed, 4 Mar 2026 00:22:22 +0000 (16:22 -0800)
committer: Sean Christopherson <seanjc@google.com>
Thu, 5 Mar 2026 00:08:55 +0000 (16:08 -0800)
Explicitly synthesize a #UD for VMMCALL if L2 is active, L1 does NOT want
to intercept VMMCALL, nested_svm_l2_tlb_flush_enabled() is true, and the
hypercall is something other than one of the supported Hyper-V hypercalls.
When all of the above conditions are met, KVM will intercept VMMCALL but
never forward it to L1, i.e. will let L2 make hypercalls as if it were L1.

The TLFS says a whole lot of nothing about this scenario, so go with the
architectural behavior, which says that VMMCALL #UDs if it's not
intercepted.

Opportunistically do a 2-for-1 stub trade by stub-ifying the new API
instead of the helpers it uses.  The last remaining "single" stub will
soon be dropped as well.

Suggested-by: Sean Christopherson <seanjc@google.com>
Fixes: 3f4a812edf5c ("KVM: nSVM: hyper-v: Enable L2 TLB flush")
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Kevin Cheng <chengkev@google.com>
Link: https://patch.msgid.link/20260228033328.2285047-5-chengkev@google.com
[sean: rewrite changelog and comment, tag for stable, remove defunct stubs]
Reviewed-by: Yosry Ahmed <yosry@kernel.org>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://patch.msgid.link/20260304002223.1105129-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/hyperv.h
arch/x86/kvm/svm/hyperv.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c

index 6ce160ffa6786564ab02cd04171a9d2ec18e648c..6301f79fcbae7c221d9cb88120fdc4cc2874147b 100644 (file)
@@ -305,14 +305,6 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
 {
        return false;
 }
-static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
-{
-       return false;
-}
-static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
-{
-       return false;
-}
 static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
 {
        return 0;
index d3f8bfc05832ee0a2249cbaeed22f081027a9a2e..9af03970d40c287601afe00207c1c220c5a9c246 100644 (file)
@@ -41,6 +41,13 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
        return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
 }
 
+static inline bool nested_svm_is_l2_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+       return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
+              nested_svm_l2_tlb_flush_enabled(vcpu) &&
+              kvm_hv_is_tlb_flush_hcall(vcpu);
+}
+
 void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
 #else /* CONFIG_KVM_HYPERV */
 static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) {}
@@ -48,6 +55,10 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
 {
        return false;
 }
+static inline bool nested_svm_is_l2_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
 static inline void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) {}
 #endif /* CONFIG_KVM_HYPERV */
 
index 5b639d98bf09d83d2374801c7a7541c147e1e001..0f7893a7cb040a291485782ec64be358d226a6ca 100644 (file)
@@ -1738,9 +1738,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
        }
        case SVM_EXIT_VMMCALL:
                /* Hyper-V L2 TLB flush hypercall is handled by L0 */
-               if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
-                   nested_svm_l2_tlb_flush_enabled(vcpu) &&
-                   kvm_hv_is_tlb_flush_hcall(vcpu))
+               if (nested_svm_is_l2_tlb_flush_hcall(vcpu))
                        return NESTED_EXIT_HOST;
                break;
        default:
index 7efa7170929232f2965febc788fc523a3cea1867..9e6864cf58d34e453084ef9e2c45801d97ff742f 100644 (file)
@@ -52,6 +52,7 @@
 #include "svm.h"
 #include "svm_ops.h"
 
+#include "hyperv.h"
 #include "kvm_onhyperv.h"
 #include "svm_onhyperv.h"
 
@@ -3248,6 +3249,22 @@ static int bus_lock_exit(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static int vmmcall_interception(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Inject a #UD if L2 is active and the VMMCALL isn't a Hyper-V TLB
+        * hypercall, as VMMCALL #UDs if it's not intercepted, and this path is
+        * reachable if and only if L1 doesn't want to intercept VMMCALL or has
+        * enabled L0 (KVM) handling of Hyper-V L2 TLB flush hypercalls.
+        */
+       if (is_guest_mode(vcpu) && !nested_svm_is_l2_tlb_flush_hcall(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
+       }
+
+       return kvm_emulate_hypercall(vcpu);
+}
+
 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_READ_CR0]                     = cr_interception,
        [SVM_EXIT_READ_CR3]                     = cr_interception,
@@ -3298,7 +3315,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
        [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
        [SVM_EXIT_VMRUN]                        = vmrun_interception,
-       [SVM_EXIT_VMMCALL]                      = kvm_emulate_hypercall,
+       [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
        [SVM_EXIT_VMLOAD]                       = vmload_interception,
        [SVM_EXIT_VMSAVE]                       = vmsave_interception,
        [SVM_EXIT_STGI]                         = stgi_interception,