/**
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
- * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
- * ARM64_HAS_STAGE2_FWB.
* @KVM_PGTABLE_S2_IDMAP: Only use identity mappings.
* @KVM_PGTABLE_S2_AS_S1: Final memory attributes are those of Stage-1.
*/
enum kvm_pgtable_stage2_flags {
- KVM_PGTABLE_S2_NOFWB = BIT(0),
- KVM_PGTABLE_S2_IDMAP = BIT(1),
- KVM_PGTABLE_S2_AS_S1 = BIT(2),
+ KVM_PGTABLE_S2_IDMAP = BIT(0),
+ KVM_PGTABLE_S2_AS_S1 = BIT(1),
};
/**
return vtcr;
}
-static bool stage2_has_fwb(struct kvm_pgtable *pgt)
-{
- if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- return false;
-
- return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
-}
-
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
phys_addr_t addr, size_t size)
{
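+/*
+ * Compute the final stage-2 memory attributes: the stage-1 attributes
+ * are reused when KVM_PGTABLE_S2_AS_S1 is set, otherwise @attr is
+ * encoded, honouring FWB whenever the CPUs support it.
+ */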
#define KVM_S2_MEMATTR(pgt, attr) \
({ \
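+ /* The FWB cpucap is finalized at boot, so evaluate it only once. */ \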
+ bool __fwb = cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); \
kvm_pte_t __attr; \
\
if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1) \
- __attr = PAGE_S2_MEMATTR(AS_S1, \
- stage2_has_fwb(pgt)); \
+ __attr = PAGE_S2_MEMATTR(AS_S1, __fwb); \
else \
- __attr = PAGE_S2_MEMATTR(attr, \
- stage2_has_fwb(pgt)); \
+ __attr = PAGE_S2_MEMATTR(attr, __fwb); \
\
__attr; \
})
* system supporting FWB, as the optimization is entirely
* pointless when the unmap walker needs to perform CMOs.
*/
- return system_supports_tlb_range() && stage2_has_fwb(pgt);
+ return system_supports_tlb_range() &&
+        cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}
static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (mm_ops->page_count(childp) != 1)
return 0;
} else if (stage2_pte_cacheable(pgt, ctx->old)) {
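+ /* Without FWB, the data cache must be cleaned when unmapping. */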
- need_flush = !stage2_has_fwb(pgt);
+ need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}
/*
.arg = pgt,
};
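+ /* With FWB, memory is mapped Normal-WB and needs no CMOs. */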
- if (stage2_has_fwb(pgt))
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return 0;
return kvm_pgtable_walk(pgt, addr, size, &walker);