KVM: arm64: Assume non-PFNMAP/MIXEDMAP VMAs can be mapped cacheable
author Ankit Agrawal <ankita@nvidia.com>
Sat, 5 Jul 2025 07:17:13 +0000 (07:17 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Mon, 7 Jul 2025 14:50:20 +0000 (07:50 -0700)
Despite its name, kvm_is_device_pfn() is actually used to determine if a
given PFN has a kernel mapping that can be used to perform cache
maintenance, as it calls pfn_is_map_memory() internally.

Expand the helper into its single callsite and further condition the
check on the VMA having either VM_PFNMAP or VM_MIXEDMAP set. VMAs that
set neither of these flags must always contain Normal, struct page
backed memory with valid aliases in the kernel address space.
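
To make the semantic shift concrete, here is a condensed, hypothetical
sketch of the decision before and after this patch (treat_as_device() is
an illustrative name only; no such helper exists in the diff below):

    static bool treat_as_device(vm_flags_t vm_flags, unsigned long pfn)
    {
            /*
             * Old behaviour, kvm_is_device_pfn(): any PFN without a
             * kernel mapping was treated as device memory:
             *
             *     return !pfn_is_map_memory(pfn);
             *
             * New behaviour: only VM_PFNMAP/VM_MIXEDMAP VMAs may carry
             * memory without a struct page, so PFNs from ordinary VMAs
             * are always eligible for a cacheable Normal mapping.
             */
            return (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) &&
                   !pfn_is_map_memory(pfn);
    }

Note that the diff snapshots vma->vm_flags into a local vm_flags before
the VMA pointer is dropped, since the VMA must not be dereferenced after
the mmap lock is released.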

Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Donald Dutile <ddutile@redhat.com>
Signed-off-by: Ankit Agrawal <ankita@nvidia.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250705071717.5062-3-ankita@nvidia.com
[ Oliver: fixed typos, refined changelog ]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/mmu.c

index 1601ab9527d44646c79a299cfa425ab03056f925..5fe24f30999d3f891444c1984f4e3dad82d6206e 100644
@@ -193,11 +193,6 @@ int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
        return 0;
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-       return !pfn_is_map_memory(pfn);
-}
-
 static void *stage2_memcache_zalloc_page(void *arg)
 {
        struct kvm_mmu_memory_cache *mc = arg;
@@ -1492,6 +1487,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        struct kvm_pgtable *pgt;
        struct page *page;
+       vm_flags_t vm_flags;
        enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
 
        if (fault_is_perm)
@@ -1619,6 +1615,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
 
+       vm_flags = vma->vm_flags;
+
        /* Don't use the VMA after the unlock -- it may have vanished */
        vma = NULL;
 
@@ -1642,7 +1640,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;
 
-       if (kvm_is_device_pfn(pfn)) {
+       if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
                /*
                 * If the page was identified as device early by looking at
                 * the VMA flags, vma_pagesize is already representing the