KVM: arm64: Add helper for swapping guest descriptor
author Oliver Upton <oupton@kernel.org>
Mon, 24 Nov 2025 19:01:52 +0000 (11:01 -0800)
committer Oliver Upton <oupton@kernel.org>
Mon, 1 Dec 2025 08:44:02 +0000 (00:44 -0800)
Implementing FEAT_HAFDBS in KVM's software PTWs requires the ability to
CAS a descriptor to update the in-memory value. Add an accessor to do
exactly that, coping with the fact that guest descriptors are in user
memory (duh).

While FEAT_LSE is required on any system that implements NV, KVM now
uses the stage-1 PTW for non-nested use cases, meaning an LL/SC
implementation is necessary as well.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-11-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/at.c

index 5d967b60414cdcb6099ed3be74c5ea9b224867be..6dbc2908aed923425212fa792ad1cce2a80a94dc 100644 (file)
@@ -403,4 +403,6 @@ void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
                (FIX_VNCR - __c);                               \
        })
 
+int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);
+
 #endif /* __ARM64_KVM_NESTED_H */
index a295a37dd3b1306e6684091f3c5c0074f5599719..581c4c49d9cd12132a613aba27d5c32df4c21e93 100644 (file)
@@ -1650,3 +1650,90 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
                return ret;
        }
 }
+
+static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
+{
+       u64 tmp = old;
+       int ret = 0;
+
+       uaccess_enable_privileged();
+
+       asm volatile(__LSE_PREAMBLE
+                    "1: cas    %[old], %[new], %[addr]\n"
+                    "2:\n"
+                    _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+                    : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
+                    : [new] "r" (new)
+                    : "memory");
+
+       uaccess_disable_privileged();
+
+       if (ret)
+               return ret;
+       if (tmp != old)
+               return -EAGAIN;
+
+       return ret;
+}
+
+static int __llsc_swap_desc(u64 __user *ptep, u64 old, u64 new)
+{
+       int ret = 1;
+       u64 tmp;
+
+       uaccess_enable_privileged();
+
+       asm volatile("prfm      pstl1strm, %[addr]\n"
+                    "1: ldxr   %[tmp], %[addr]\n"
+                    "sub       %[tmp], %[tmp], %[old]\n"
+                    "cbnz      %[tmp], 3f\n"
+                    "2: stlxr  %w[ret], %[new], %[addr]\n"
+                    "3:\n"
+                    _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])
+                    _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])
+                    : [ret] "+r" (ret), [addr] "+Q" (*ptep), [tmp] "=&r" (tmp)
+                    : [old] "r" (old), [new] "r" (new)
+                    : "memory");
+
+       uaccess_disable_privileged();
+
+       /* STLXR didn't update the descriptor, or the compare failed */
+       if (ret == 1)
+               return -EAGAIN;
+
+       return ret;
+}
+
+int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
+{
+       struct kvm_memory_slot *slot;
+       unsigned long hva;
+       u64 __user *ptep;
+       bool writable;
+       int offset;
+       gfn_t gfn;
+       int r;
+
+       lockdep_assert(srcu_read_lock_held(&kvm->srcu));
+
+       gfn = ipa >> PAGE_SHIFT;
+       offset = offset_in_page(ipa);
+       slot = gfn_to_memslot(kvm, gfn);
+       hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
+       if (kvm_is_error_hva(hva))
+               return -EINVAL;
+       if (!writable)
+               return -EPERM;
+
+       ptep = (u64 __user *)(hva + offset);
+       if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
+               r = __lse_swap_desc(ptep, old, new);
+       else
+               r = __llsc_swap_desc(ptep, old, new);
+
+       if (r < 0)
+               return r;
+
+       mark_page_dirty_in_slot(kvm, slot, gfn);
+       return 0;
+}
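
For illustration only (not part of this patch): since both swap paths
return -EAGAIN when the compare fails or the STLXR loses the exclusive,
a FEAT_HAFDBS walker would be expected to wrap the helper in a retry
loop, re-reading the descriptor each time around. The kvm_at_set_af()
name below is hypothetical; kvm_read_guest() and PTE_AF are existing
kernel symbols, and a real walker would also re-validate the descriptor
(e.g. confirm it is still valid) before retrying.

/*
 * Hypothetical caller sketch: set the Access Flag on an in-memory
 * guest descriptor via __kvm_at_swap_desc(). Assumes kvm->srcu is
 * held by the caller, as the helper requires.
 */
static int kvm_at_set_af(struct kvm *kvm, gpa_t desc_ipa, u64 desc)
{
	int r;

	do {
		r = __kvm_at_swap_desc(kvm, desc_ipa, desc, desc | PTE_AF);
		if (r != -EAGAIN)
			break;

		/* Lost the race: re-read the descriptor and try again. */
		r = kvm_read_guest(kvm, desc_ipa, &desc, sizeof(desc));
	} while (!r);

	return r;
}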