]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Use CAST instruction for swapping guest descriptor
authorYeoreum Yun <yeoreum.yun@arm.com>
Sat, 14 Mar 2026 17:51:32 +0000 (17:51 +0000)
committerCatalin Marinas <catalin.marinas@arm.com>
Fri, 27 Mar 2026 12:52:45 +0000 (12:52 +0000)
Use the CAST instruction to swap the guest descriptor when FEAT_LSUI
is enabled, avoiding the need to clear the PAN bit.

FEAT_LSUI is introduced in Armv9.6, where FEAT_PAN is mandatory, so one might
assume that FEAT_PAN is always present whenever FEAT_LSUI is. However, this
assumption may not always hold:

 - Some CPUs may advertise FEAT_LSUI but lack FEAT_PAN.
 - Virtualization or ID register overrides may expose invalid feature
   combinations.

Therefore, instead of disabling FEAT_LSUI when FEAT_PAN is absent, wrap
LSUI instructions with uaccess_ttbr0_enable()/disable() when
ARM64_SW_TTBR0_PAN is enabled.

Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kvm/at.c

index 6588ea251ed777bc74025ff7260c13410a0a5419..1adf88a5732840e93d45ebe11f4a6795dcd64710 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/esr.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
+#include <asm/lsui.h>
 
 static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
 {
@@ -1681,6 +1682,35 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
        }
 }
 
+/*
+ * Atomically swap a guest stage-1 descriptor from @old to @new using the
+ * FEAT_LSUI CAST (compare-and-swap, unprivileged) instruction on the
+ * user-mapped page-table entry at @ptep.
+ *
+ * Returns 0 on success, -EAGAIN if the descriptor no longer held @old
+ * (the caller lost a race and may retry), or the error recorded by the
+ * exception fixup if the user access faulted.
+ */
+static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
+{
+       u64 tmp = old;
+       int ret = 0;
+
+       /*
+        * CAST accesses the descriptor with unprivileged permissions, so
+        * the PAN bit never needs to be cleared.  With ARM64_SW_TTBR0_PAN,
+        * however, the user mapping itself must first be installed, hence
+        * the uaccess_ttbr0_enable()/disable() bracket (a no-op when SW
+        * PAN is not in use).
+        */
+       uaccess_ttbr0_enable();
+
+       asm volatile(__LSUI_PREAMBLE
+                    "1: cast   %[old], %[new], %[addr]\n"
+                    "2:\n"
+                    _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+                    : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
+                    : [new] "r" (new)
+                    : "memory");
+
+       uaccess_ttbr0_disable();
+
+       /* Faulted on the user access: return the fixup-supplied error. */
+       if (ret)
+               return ret;
+       /*
+        * CAST updates %[old] with the value it observed in memory; if that
+        * differs from the expected value saved in tmp, the swap did not
+        * take place.
+        */
+       if (tmp != old)
+               return -EAGAIN;
+
+       return ret;
+}
+
 static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
 {
        u64 tmp = old;
@@ -1756,7 +1786,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
                return -EPERM;
 
        ptep = (u64 __user *)hva + offset;
-       if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
+       if (cpus_have_final_cap(ARM64_HAS_LSUI))
+               r = __lsui_swap_desc(ptep, old, new);
+       else if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
                r = __lse_swap_desc(ptep, old, new);
        else
                r = __llsc_swap_desc(ptep, old, new);