From: Marc Zyngier Date: Fri, 23 Jan 2026 19:16:36 +0000 (+0000) Subject: KVM: arm64: Kill KVM_PGTABLE_S2_NOFWB X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4f27fe82aa304c50b24f92da72b4895cf73b54ba;p=thirdparty%2Fkernel%2Fstable.git KVM: arm64: Kill KVM_PGTABLE_S2_NOFWB Nobody is using this flag anymore, so remove it. This allows some cleanup by removing stage2_has_fwb(), which can be replaced by a direct check on the capability. Reviewed-by: Joey Gouly Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Link: https://patch.msgid.link/20260123191637.715429-5-maz@kernel.org Signed-off-by: Marc Zyngier --- diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 9ce51a637da0..2198b6242883 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -229,15 +229,12 @@ struct kvm_pgtable_mm_ops { /** * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags. - * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have - * ARM64_HAS_STAGE2_FWB. * @KVM_PGTABLE_S2_IDMAP: Only use identity mappings. * @KVM_PGTABLE_S2_AS_S1: Final memory attributes are that of Stage-1. 
*/ enum kvm_pgtable_stage2_flags { - KVM_PGTABLE_S2_NOFWB = BIT(0), - KVM_PGTABLE_S2_IDMAP = BIT(1), - KVM_PGTABLE_S2_AS_S1 = BIT(2), + KVM_PGTABLE_S2_IDMAP = BIT(0), + KVM_PGTABLE_S2_AS_S1 = BIT(1), }; /** diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index c52a24c15ff2..00e33a16494b 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -631,14 +631,6 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) return vtcr; } -static bool stage2_has_fwb(struct kvm_pgtable *pgt) -{ - if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) - return false; - - return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); -} - void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, size_t size) { @@ -661,14 +653,13 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, #define KVM_S2_MEMATTR(pgt, attr) \ ({ \ + bool __fwb = cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); \ kvm_pte_t __attr; \ \ if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1) \ - __attr = PAGE_S2_MEMATTR(AS_S1, \ - stage2_has_fwb(pgt)); \ + __attr = PAGE_S2_MEMATTR(AS_S1, __fwb); \ else \ - __attr = PAGE_S2_MEMATTR(attr, \ - stage2_has_fwb(pgt)); \ + __attr = PAGE_S2_MEMATTR(attr, __fwb); \ \ __attr; \ }) @@ -880,7 +871,7 @@ static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt) * system supporting FWB as the optimization is entirely * pointless when the unmap walker needs to perform CMOs. 
*/ - return system_supports_tlb_range() && stage2_has_fwb(pgt); + return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); } static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx, @@ -1160,7 +1151,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, if (mm_ops->page_count(childp) != 1) return 0; } else if (stage2_pte_cacheable(pgt, ctx->old)) { - need_flush = !stage2_has_fwb(pgt); + need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); } /* @@ -1390,7 +1381,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) .arg = pgt, }; - if (stage2_has_fwb(pgt)) + if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) return 0; return kvm_pgtable_walk(pgt, addr, size, &walker);