]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Kill KVM_PGTABLE_S2_NOFWB
author: Marc Zyngier <maz@kernel.org>
Fri, 23 Jan 2026 19:16:36 +0000 (19:16 +0000)
committer: Marc Zyngier <maz@kernel.org>
Sun, 25 Jan 2026 16:17:21 +0000 (16:17 +0000)
Nobody is using this flag anymore, so remove it. This allows
some cleanup by removing stage2_has_fwb(), which can be replaced
by a direct check on the capability.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260123191637.715429-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c

index 9ce51a637da0aebbd234e8d99a13a093caf82736..2198b6242883225e82b3e2a400b837bd5851a904 100644 (file)
@@ -229,15 +229,12 @@ struct kvm_pgtable_mm_ops {
 
 /**
  * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
- * @KVM_PGTABLE_S2_NOFWB:      Don't enforce Normal-WB even if the CPUs have
- *                             ARM64_HAS_STAGE2_FWB.
  * @KVM_PGTABLE_S2_IDMAP:      Only use identity mappings.
  * @KVM_PGTABLE_S2_AS_S1:      Final memory attributes are that of Stage-1.
  */
 enum kvm_pgtable_stage2_flags {
-       KVM_PGTABLE_S2_NOFWB                    = BIT(0),
-       KVM_PGTABLE_S2_IDMAP                    = BIT(1),
-       KVM_PGTABLE_S2_AS_S1                    = BIT(2),
+       KVM_PGTABLE_S2_IDMAP                    = BIT(0),
+       KVM_PGTABLE_S2_AS_S1                    = BIT(1),
 };
 
 /**
index c52a24c15ff284d21fd8b70bcedca23795d87f69..00e33a16494bd0d7f32eac97314c44e6d44bc3c1 100644 (file)
@@ -631,14 +631,6 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
        return vtcr;
 }
 
-static bool stage2_has_fwb(struct kvm_pgtable *pgt)
-{
-       if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
-               return false;
-
-       return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
-}
-
 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                phys_addr_t addr, size_t size)
 {
@@ -661,14 +653,13 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 
 #define KVM_S2_MEMATTR(pgt, attr)                                      \
        ({                                                              \
+               bool __fwb = cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); \
                kvm_pte_t __attr;                                       \
                                                                        \
                if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1)                \
-                       __attr = PAGE_S2_MEMATTR(AS_S1,                 \
-                                                stage2_has_fwb(pgt));  \
+                       __attr = PAGE_S2_MEMATTR(AS_S1, __fwb);         \
                else                                                    \
-                       __attr = PAGE_S2_MEMATTR(attr,                  \
-                                                stage2_has_fwb(pgt));  \
+                       __attr = PAGE_S2_MEMATTR(attr, __fwb);          \
                                                                        \
                __attr;                                                 \
        })
@@ -880,7 +871,7 @@ static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
         * system supporting FWB as the optimization is entirely
         * pointless when the unmap walker needs to perform CMOs.
         */
-       return system_supports_tlb_range() && stage2_has_fwb(pgt);
+       return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
 }
 
 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
@@ -1160,7 +1151,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
                if (mm_ops->page_count(childp) != 1)
                        return 0;
        } else if (stage2_pte_cacheable(pgt, ctx->old)) {
-               need_flush = !stage2_has_fwb(pgt);
+               need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
        }
 
        /*
@@ -1390,7 +1381,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
                .arg    = pgt,
        };
 
-       if (stage2_has_fwb(pgt))
+       if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                return 0;
 
        return kvm_pgtable_walk(pgt, addr, size, &walker);