KVM: arm64: Implement HW access flag management in stage-1 SW PTW
author     Oliver Upton <oupton@kernel.org>
           Mon, 24 Nov 2025 19:01:54 +0000 (11:01 -0800)
committer  Oliver Upton <oupton@kernel.org>
           Mon, 1 Dec 2025 08:44:02 +0000 (00:44 -0800)
Atomically update the Access flag at stage-1 when the guest has
configured the MMU to do so. Make the implementation choice (and liberal
interpretation of speculation) that any access type updates the Access
flag, including AT and CMO instructions.

Restart the entire walk by returning to the exception-generating
instruction in the case of a failed Access flag update.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-13-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
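
The atomic update relies on __kvm_at_swap_desc(), which is introduced earlier
in this series and is not part of this diff. Below is a rough, self-contained
sketch of the compare-and-swap behaviour the walker depends on; the function
name, the use of C11 atomics, and the -EAGAIN return value are illustrative
assumptions, not the kernel implementation, which has to perform the update on
guest memory at the descriptor's physical address.

/*
 * Illustrative sketch only: models what kvm_swap_s1_desc() expects from
 * __kvm_at_swap_desc() once the old/new values have been converted to the
 * guest's in-memory byte order. The real helper works on the descriptor's
 * guest physical address; here a plain pointer stands in for it.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

static int swap_desc_sketch(_Atomic uint64_t *desc_ptr, uint64_t old, uint64_t new)
{
	/*
	 * Update the descriptor only if it still holds the value observed
	 * during the walk; otherwise report failure so the caller can
	 * restart the walk.
	 */
	if (atomic_compare_exchange_strong(desc_ptr, &old, new))
		return 0;

	return -EAGAIN;	/* assumed error value; the exact code is not shown in this diff */
}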
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/at.c

arch/arm64/include/asm/kvm_nested.h
index 6dbc2908aed923425212fa792ad1cce2a80a94dc..905c658057a4319e25f7168ef34849d1c5c5ccb8 100644
@@ -353,6 +353,7 @@ struct s1_walk_info {
        bool                    be;
        bool                    s2;
        bool                    pa52bit;
+       bool                    ha;
 };
 
 struct s1_walk_result {
arch/arm64/kvm/at.c
index 2a99380ada6f37dd7d599f13e80e6be225d04926..e39f814d247fb3e2480280238cbe7cb8cd008b63 100644
@@ -346,6 +346,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 
        wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);
 
+       wi->ha = tcr & TCR_HA;
+
        return 0;
 
 addrsz:
@@ -380,10 +382,24 @@ static int kvm_read_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 *desc,
        return 0;
 }
 
+static int kvm_swap_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 old, u64 new,
+                           struct s1_walk_info *wi)
+{
+       if (wi->be) {
+               old = cpu_to_be64(old);
+               new = cpu_to_be64(new);
+       } else {
+               old = cpu_to_le64(old);
+               new = cpu_to_le64(new);
+       }
+
+       return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
+}
+
 static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
                   struct s1_walk_result *wr, u64 va)
 {
-       u64 va_top, va_bottom, baddr, desc;
+       u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
        int level, stride, ret;
 
        level = wi->sl;
@@ -393,7 +409,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
        va_top = get_ia_size(wi) - 1;
 
        while (1) {
-               u64 index, ipa;
+               u64 index;
 
                va_bottom = (3 - level) * stride + wi->pgshift;
                index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
@@ -438,6 +454,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
                        return ret;
                }
 
+               new_desc = desc;
+
                /* Invalid descriptor */
                if (!(desc & BIT(0)))
                        goto transfault;
@@ -490,6 +508,17 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
        if (check_output_size(baddr & GENMASK(52, va_bottom), wi))
                goto addrsz;
 
+       if (wi->ha)
+               new_desc |= PTE_AF;
+
+       if (new_desc != desc) {
+               ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
+               if (ret)
+                       return ret;
+
+               desc = new_desc;
+       }
+
        if (!(desc & PTE_AF)) {
                fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
                return -EACCES;
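
When the swap in the hunk above fails, walk_s1() returns the error rather than
synthesizing a fault; per the commit message, the vCPU re-executes the
exception-generating instruction, so the whole walk is effectively retried. A
minimal sketch of that retry semantic follows, assuming a lost race is
reported as -EAGAIN; retry_walk() and do_walk() are invented names, and in KVM
the retry happens by re-entering the guest, not by looping in the host.

#include <errno.h>
#include <stdint.h>

/*
 * Conceptual model only: a failed Access-flag update is never reported to
 * the guest as a fault. The walk is simply redone from the beginning, which
 * the kernel achieves by returning to the faulting instruction.
 */
static int retry_walk(int (*do_walk)(uint64_t va), uint64_t va)
{
	int ret;

	do {
		ret = do_walk(va);	/* full stage-1 walk, including the AF update */
	} while (ret == -EAGAIN);	/* lost the race: start over */

	return ret;
}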