From: Yosry Ahmed
Date: Mon, 16 Mar 2026 20:27:27 +0000 (+0000)
Subject: KVM: SVM: Move RAX legality check to SVM insn interception handlers
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=d2fbeb61e1451eba09eb3249aaf1f01d4c5c1f8b;p=thirdparty%2Fkernel%2Fstable.git

KVM: SVM: Move RAX legality check to SVM insn interception handlers

When #GP is intercepted by KVM, the #GP interception handler checks
whether the GPA in RAX is legal and, if not, reinjects the #GP.
Otherwise, it calls into the appropriate interception handler for
VMRUN/VMLOAD/VMSAVE. The interception handlers themselves do not check
RAX.

However, the interception handlers do need the RAX check: if the guest
has a smaller MAXPHYADDR, RAX could be legal from the hardware's
perspective (i.e. the CPU does not inject a #GP) but illegal from the
vCPU's perspective. Note that with allow_smaller_maxphyaddr, neither
NPT nor VLS can be used, so VMLOAD/VMSAVE have to be intercepted, and
RAX can always be checked against the vCPU's MAXPHYADDR.

Move the check into the interception handlers for VMRUN/VMLOAD/VMSAVE,
as the CPU does not check RAX before the interception. Read RAX using
kvm_register_read() to avoid page_address_valid() spuriously failing
on 32-bit due to garbage in the upper bits.

Keep the check in the #GP intercept handler for the nested case, where
a #VMEXIT is synthesized into L1, as the RAX check is still needed
there and takes precedence over the intercept.

Opportunistically add a FIXME about the #VMEXIT being synthesized into
L1, as it needs to be conditional.

Signed-off-by: Yosry Ahmed
Link: https://patch.msgid.link/20260316202732.3164936-5-yosry@kernel.org
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 88e878160229..16f4bc4f48f5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1103,7 +1103,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 	if (WARN_ON_ONCE(!svm->nested.initialized))
 		return -EINVAL;
 
-	vmcb12_gpa = svm->vmcb->save.rax;
+	vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	if (!page_address_valid(vcpu, vmcb12_gpa)) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
 
 	ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
 	if (ret) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f68958447e58..3472916657e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2185,6 +2185,7 @@ static int intr_interception(struct kvm_vcpu *vcpu)
 
 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
 {
+	u64 vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb12;
 	struct kvm_host_map map;
@@ -2193,7 +2194,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
 	if (nested_svm_check_permissions(vcpu))
 		return 1;
 
-	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+	if (!page_address_valid(vcpu, vmcb12_gpa)) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
 	if (ret) {
 		if (ret == -EINVAL)
 			kvm_inject_gp(vcpu, 0);
@@ -2285,12 +2291,18 @@ static int gp_interception(struct kvm_vcpu *vcpu)
 	/* FIXME: Handle SVM instructions through the emulator */
 	svm_exit_code = svm_get_decoded_instr_exit_code(vcpu);
 	if (svm_exit_code) {
-		if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
-			goto reinject;
-
 		if (!is_guest_mode(vcpu))
 			return svm_invoke_exit_handler(vcpu, svm_exit_code);
 
+		if (!page_address_valid(vcpu, kvm_register_read(vcpu, VCPU_REGS_RAX)))
+			goto reinject;
+
+		/*
+		 * FIXME: Only synthesize a #VMEXIT if L1 sets the intercept,
+		 * but only after the VMLOAD/VMSAVE exit handlers can properly
+		 * handle VMLOAD/VMSAVE from L2 with VLS enabled in L1 (i.e.
+		 * RAX is an L2 GPA that needs translation through L1's NPT).
+		 */
 		nested_svm_simple_vmexit(svm, svm_exit_code);
 		return 1;
 	}
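
As a standalone illustration of the MAXPHYADDR mismatch described above
(not kernel code: gpa_legal() and rsvd_bits_mask() are hypothetical
stand-ins for the kernel's page_address_valid() and rsvd_bits(), and the
MAXPHYADDR values are made-up examples), the sketch below shows how a
GPA in RAX can pass the physical CPU's reserved-bit check, so no #GP is
injected by hardware, while still being illegal for a vCPU with a
smaller MAXPHYADDR:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mask of reserved physical-address bits [63:maxphyaddr]. */
	static uint64_t rsvd_bits_mask(unsigned int maxphyaddr)
	{
		return ~0ULL << maxphyaddr;
	}

	/*
	 * Roughly what page_address_valid() enforces: the GPA must be
	 * page-aligned and must not set any bits at or above MAXPHYADDR.
	 */
	static bool gpa_legal(uint64_t gpa, unsigned int maxphyaddr)
	{
		return !(gpa & 0xfffULL) && !(gpa & rsvd_bits_mask(maxphyaddr));
	}

	int main(void)
	{
		/* Assumed example limits: host CPU wider than the vCPU. */
		const unsigned int host_maxphyaddr  = 46;
		const unsigned int guest_maxphyaddr = 40;

		/* A page-aligned GPA between the two limits. */
		const uint64_t rax = 1ULL << 42;

		printf("CPU accepts RAX (no #GP): %d\n",
		       gpa_legal(rax, host_maxphyaddr));
		printf("vCPU must reject RAX:     %d\n",
		       !gpa_legal(rax, guest_maxphyaddr));
		return 0;
	}

Both lines print 1: the hardware check passes, so the only place the
illegal GPA can be caught on the vCPU's behalf is the software check in
the interception handlers, which is exactly where this patch moves it.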