git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Ignore -EAGAIN when mapping in pages for the pKVM host
author: Will Deacon <will@kernel.org>
Mon, 30 Mar 2026 14:48:11 +0000 (15:48 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 30 Mar 2026 15:58:07 +0000 (16:58 +0100)
If the host takes a stage-2 translation fault on two CPUs at the same
time, one of them will get back -EAGAIN from the page-table mapping code
when it runs into the mapping installed by the other.

Rather than handle this explicitly in handle_host_mem_abort(), pass the
new KVM_PGTABLE_WALK_IGNORE_EAGAIN flag to kvm_pgtable_stage2_map() from
__host_stage2_idmap() and return -EEXIST if host_stage2_adjust_range()
finds a valid pte. This will avoid having to test for -EAGAIN on the
reclaim path in subsequent patches.

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-11-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/mem_protect.c

index d815265bd374fa0a83443877302a323598f71057..7d22893ab1dcefe06e5e459becbd7a8e530c0dfb 100644 (file)
@@ -461,8 +461,15 @@ static bool range_is_memory(u64 start, u64 end)
 static inline int __host_stage2_idmap(u64 start, u64 end,
                                      enum kvm_pgtable_prot prot)
 {
+       /*
+        * We don't make permission changes to the host idmap after
+        * initialisation, so we can squash -EAGAIN to save callers
+        * having to treat it like success in the case that they try to
+        * map something that is already mapped.
+        */
        return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
-                                     prot, &host_s2_pool, 0);
+                                     prot, &host_s2_pool,
+                                     KVM_PGTABLE_WALK_IGNORE_EAGAIN);
 }
 
 /*
@@ -504,7 +511,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
                return ret;
 
        if (kvm_pte_valid(pte))
-               return -EAGAIN;
+               return -EEXIST;
 
        if (pte) {
                WARN_ON(addr_is_memory(addr) &&
@@ -609,7 +616,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 {
        struct kvm_vcpu_fault_info fault;
        u64 esr, addr;
-       int ret = 0;
 
        esr = read_sysreg_el2(SYS_ESR);
        if (!__get_fault_info(esr, &fault)) {
@@ -628,8 +634,13 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
        BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
        addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;
 
-       ret = host_stage2_idmap(addr);
-       BUG_ON(ret && ret != -EAGAIN);
+       switch (host_stage2_idmap(addr)) {
+       case -EEXIST:
+       case 0:
+               break;
+       default:
+               BUG();
+       }
 }
 
 struct check_walk_data {