git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: VMX: Always reflect SGX EPCM #PFs back into the guest
author: Sean Christopherson <seanjc@google.com>
Fri, 21 Nov 2025 22:20:18 +0000 (14:20 -0800)
committer: Sean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 18:51:46 +0000 (10:51 -0800)
When handling intercepted #PFs, reflect EPCM (Enclave Page Cache Map)
violations, i.e. #PFs with the SGX flag set, back into the guest.  KVM
doesn't shadow EPCM entries (the EPCM deals only with virtual/linear
addresses), and so EPCM violations cannot be due to KVM interference,
and more importantly can't be resolved by KVM.

On pre-SGX2 hardware, EPCM violations are delivered as #GP(0) faults, but
on SGX2+ hardware, they are delivered as #PF(SGX).  Failure to account for
the SGX2 behavior could put a vCPU into an infinite loop due to KVM not
realizing the #PF is the guest's responsibility.

Take care to deliver the EPCM violation as a #GP(0) if the _guest_ CPU
model is only SGX1.

Fixes: 72add915fbd5 ("KVM: VMX: Enable SGX virtualization for SGX1, SGX2 and LC")
Cc: Kai Huang <kai.huang@intel.com>
Reviewed-by: Richard Lyu <richard.lyu@suse.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://patch.msgid.link/20251121222018.348987-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c

index 6b96f7aea20bd3f39389da8c9a1cf890d2e29b05..97d696014f9a442bb77e0b413b5466fba580ac51 100644 (file)
@@ -5303,12 +5303,53 @@ static bool is_xfd_nm_fault(struct kvm_vcpu *vcpu)
               !kvm_is_cr0_bit_set(vcpu, X86_CR0_TS);
 }
 
+static int vmx_handle_page_fault(struct kvm_vcpu *vcpu, u32 error_code)
+{
+       unsigned long cr2 = vmx_get_exit_qual(vcpu);
+
+       if (vcpu->arch.apf.host_apf_flags)
+               goto handle_pf;
+
+       /* When using EPT, KVM intercepts #PF only to detect illegal GPAs. */
+       WARN_ON_ONCE(enable_ept && !allow_smaller_maxphyaddr);
+
+       /*
+        * On SGX2 hardware, EPCM violations are delivered as #PF with the SGX
+        * flag set in the error code (SGX1 hardware generates #GP(0)).  EPCM
+        * violations have nothing to do with shadow paging and can never be
+        * resolved by KVM; always reflect them into the guest.
+        */
+       if (error_code & PFERR_SGX_MASK) {
+               WARN_ON_ONCE(!IS_ENABLED(CONFIG_X86_SGX_KVM) ||
+                            !cpu_feature_enabled(X86_FEATURE_SGX2));
+
+               if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX2))
+                       kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
+               else
+                       kvm_inject_gp(vcpu, 0);
+               return 1;
+       }
+
+       /*
+        * If EPT is enabled, fixup and inject the #PF.  KVM intercepts #PFs
+        * only to set PFERR_RSVD as appropriate (hardware won't set RSVD due
+        * to the GPA being legal with respect to host.MAXPHYADDR).
+        */
+       if (enable_ept) {
+               kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
+               return 1;
+       }
+
+handle_pf:
+       return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
+}
+
 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
        u32 intr_info, ex_no, error_code;
-       unsigned long cr2, dr6;
+       unsigned long dr6;
        u32 vect_info;
 
        vect_info = vmx->idt_vectoring_info;
@@ -5383,19 +5424,8 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       if (is_page_fault(intr_info)) {
-               cr2 = vmx_get_exit_qual(vcpu);
-               if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
-                       /*
-                        * EPT will cause page fault only if we need to
-                        * detect illegal GPAs.
-                        */
-                       WARN_ON_ONCE(!allow_smaller_maxphyaddr);
-                       kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
-                       return 1;
-               } else
-                       return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
-       }
+       if (is_page_fault(intr_info))
+               return vmx_handle_page_fault(vcpu, error_code);
 
        ex_no = intr_info & INTR_INFO_VECTOR_MASK;