KVM: arm64: nv: Forward FEAT_XNX permissions to the shadow stage-2
author    Oliver Upton <oupton@kernel.org>
          Mon, 24 Nov 2025 19:01:45 +0000 (11:01 -0800)
committer Oliver Upton <oupton@kernel.org>
          Mon, 24 Nov 2025 22:24:45 +0000 (14:24 -0800)
Add support for FEAT_XNX to shadow stage-2 MMUs, being careful to only
evaluate XN[0] when the feature is actually exposed to the VM.
Restructure the layering of permissions in the fault handlers to assume
pX and uX, then restrict based on the guest's stage-2 afterwards.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-4-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
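
For reference, the stage-2 XN[1:0] field (descriptor bits [54:53]) decoded
by the new helpers encodes the following execute permissions; without
FEAT_XNX, bit 53 is RES0 and only the legacy XN bit (bit 54) is evaluated:

    XN[1:0]   exec at EL1   exec at EL0
    0b00      yes           yes
    0b01      no            yes
    0b10      no            no          (legacy XN)
    0b11      yes           no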
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/nested.c

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index f7c06a840963d396604b6d6a5a0d3408e4f1aff4..5d967b60414cdcb6099ed3be74c5ea9b224867be 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -120,9 +120,42 @@ static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
        return trans->writable;
 }
 
-static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
+static inline bool kvm_has_xnx(struct kvm *kvm)
 {
-       return !(trans->desc & BIT(54));
+       return cpus_have_final_cap(ARM64_HAS_XNX) &&
+               kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP);
+}
+
+static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans)
+{
+       u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+       if (!kvm_has_xnx(kvm))
+               xn &= 0b10;     /* XN[0] (bit 53) is RES0 without FEAT_XNX */
+
+       switch (xn) {
+       case 0b00:
+       case 0b01:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans)
+{
+       u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+       if (!kvm_has_xnx(kvm))
+               xn &= 0b10;     /* XN[0] (bit 53) is RES0 without FEAT_XNX */
+
+       switch (xn) {
+       case 0b00:
+       case 0b11:
+               return true;
+       default:
+               return false;
+       }
 }
 
 extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
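
As a sanity check, the decode logic above can be modelled in a freestanding
program. This is a minimal sketch, not kernel code: S2_XN_SHIFT, the
s2_exec_el{0,1}() helpers, and the has_xnx parameter are illustrative
stand-ins for KVM_PTE_LEAF_ATTR_HI_S2_XN, FIELD_GET(), the
kvm_s2_trans_exec_el{0,1}() helpers, and kvm_has_xnx():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for KVM_PTE_LEAF_ATTR_HI_S2_XN: descriptor bits [54:53]. */
    #define S2_XN_SHIFT 53

    static bool s2_exec_el0(uint64_t desc, bool has_xnx)
    {
            unsigned int xn = (desc >> S2_XN_SHIFT) & 3;

            if (!has_xnx)
                    xn &= 2;        /* XN[0] is RES0 without FEAT_XNX */

            return xn == 0 || xn == 1;
    }

    static bool s2_exec_el1(uint64_t desc, bool has_xnx)
    {
            unsigned int xn = (desc >> S2_XN_SHIFT) & 3;

            if (!has_xnx)
                    xn &= 2;

            return xn == 0 || xn == 3;
    }

    int main(void)
    {
            /* Print the decode for all four encodings, with and without XNX. */
            for (unsigned int xn = 0; xn < 4; xn++) {
                    uint64_t desc = (uint64_t)xn << S2_XN_SHIFT;

                    printf("XN=%u%u  xnx: EL1=%d EL0=%d | no-xnx: EL1=%d EL0=%d\n",
                           (xn >> 1) & 1, xn & 1,
                           s2_exec_el1(desc, true), s2_exec_el0(desc, true),
                           s2_exec_el1(desc, false), s2_exec_el0(desc, false));
            }
            return 0;
    }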
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7cc964af8d305313643d4ebffc53a69ff2ecff60..96f1786c72fe0a762e2b27331b8ab2d741847392 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1521,6 +1521,16 @@ static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
        *prot |= kvm_encode_nested_level(nested);
 }
 
+static void adjust_nested_exec_perms(struct kvm *kvm,
+                                    struct kvm_s2_trans *nested,
+                                    enum kvm_pgtable_prot *prot)
+{
+       if (!kvm_s2_trans_exec_el0(kvm, nested))
+               *prot &= ~KVM_PGTABLE_PROT_UX;
+       if (!kvm_s2_trans_exec_el1(kvm, nested))
+               *prot &= ~KVM_PGTABLE_PROT_PX;
+}
+
 #define KVM_PGTABLE_WALK_MEMABORT_FLAGS (KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED)
 
 static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1572,11 +1582,12 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (writable)
                prot |= KVM_PGTABLE_PROT_W;
 
-       if (exec_fault ||
-           (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
-            (!nested || kvm_s2_trans_executable(nested))))
+       if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
                prot |= KVM_PGTABLE_PROT_X;
 
+       if (nested)
+               adjust_nested_exec_perms(kvm, nested, &prot);
+
        kvm_fault_lock(kvm);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
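
Both abort paths now follow the same optimistic-then-restrict ordering. A
condensed view of the flow (an illustrative summary, assuming
KVM_PGTABLE_PROT_X implies both the PX and UX attributes):

    prot = KVM_PGTABLE_PROT_R;
    if (writable)
            prot |= KVM_PGTABLE_PROT_W;

    /* Optimistically grant exec at both EL1 and EL0 ... */
    if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
            prot |= KVM_PGTABLE_PROT_X;

    /* ... then drop PX/UX wherever the guest's stage-2 denies exec. */
    if (nested)
            adjust_nested_exec_perms(kvm, nested, &prot);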
@@ -1851,11 +1862,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        prot |= KVM_PGTABLE_PROT_NORMAL_NC;
                else
                        prot |= KVM_PGTABLE_PROT_DEVICE;
-       } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
-                  (!nested || kvm_s2_trans_executable(nested))) {
+       } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
                prot |= KVM_PGTABLE_PROT_X;
        }
 
+       if (nested)
+               adjust_nested_exec_perms(kvm, nested, &prot);
+
        /*
         * Under the premise of getting a FSC_PERM fault, we just need to relax
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index f04cda40545b101f3fcde7a9cf63b7fdfe27af0c..92b2a69f0b895a88d6dbcbdf7df35e26875749d9 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -788,7 +788,10 @@ int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
                return 0;
 
        if (kvm_vcpu_trap_is_iabt(vcpu)) {
-               forward_fault = !kvm_s2_trans_executable(trans);
+               if (vcpu_mode_priv(vcpu))
+                       forward_fault = !kvm_s2_trans_exec_el1(vcpu->kvm, trans);
+               else
+                       forward_fault = !kvm_s2_trans_exec_el0(vcpu->kvm, trans);
        } else {
                bool write_fault = kvm_is_write_fault(vcpu);