mm: remove remaining uses of PFN_DEV
author Alistair Popple <apopple@nvidia.com>
Thu, 19 Jun 2025 08:57:55 +0000 (18:57 +1000)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 10 Jul 2025 05:42:16 +0000 (22:42 -0700)
PFN_DEV was used by callers of dax_direct_access() to determine, via
pfn_t_has_page(), whether the returned PFN has an associated page.
However, all DAX PFNs now require an associated ZONE_DEVICE page, so
callers can assume a page exists.
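
As a minimal sketch of the caller-side simplification (not code from
this patch; dax_pgoff_to_page() is a hypothetical helper):

    #include <linux/dax.h>
    #include <linux/pfn_t.h>

    static struct page *dax_pgoff_to_page(struct dax_device *dax_dev,
                                          pgoff_t pgoff)
    {
            void *kaddr;
            pfn_t pfn;
            long nr;

            nr = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS,
                                   &kaddr, &pfn);
            if (nr < 0)
                    return NULL;

            /* Previously: if (!pfn_t_has_page(pfn)) return NULL;
             * every DAX PFN now has a ZONE_DEVICE page, so the
             * check can be dropped. */
            return pfn_t_to_page(pfn);
    }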

Other users of PFN_DEV were setting it before calling vmf_insert_mixed().
This is unnecessary, as the flag is no longer checked; vmf_insert_mixed()
instead relies on pfn_valid() to determine whether there is an associated
page.
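
The resulting pattern, applied throughout the diff below, looks roughly
like this hedged sketch (example_fault() is a placeholder name):

    #include <linux/mm.h>
    #include <linux/pfn_t.h>

    static vm_fault_t example_fault(struct vm_fault *vmf, unsigned long pfn)
    {
            /* No PFN_DEV: a flags value of 0 is enough;
             * vmf_insert_mixed() uses pfn_valid() to decide whether
             * a struct page backs this pfn. */
            return vmf_insert_mixed(vmf->vma, vmf->address,
                                    __pfn_to_pfn_t(pfn, 0));
    }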

Link: https://lkml.kernel.org/r/74b293aebc21b941090bc3e7aeafa91b57c821a5.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/gpu/drm/gma500/fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/s390/block/dcssblk.c
drivers/vfio/pci/vfio_pci_core.c
fs/cramfs/inode.c
mm/memory.c

diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59816d6ac66a6d584915e907379e78..109efdc96ac5a3879d62b5505c4fc5ca2d93b223 100644
@@ -33,7 +33,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        for (i = 0; i < page_num; ++i) {
-               err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+               err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
                if (unlikely(err & VM_FAULT_ERROR))
                        break;
                address += PAGE_SIZE;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b9c67e4ca360545024d59dcaff0964e9451cb598..9df05b2b7ba04056b1341f42bb05a375714cfb51 100644
@@ -371,8 +371,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       return vmf_insert_mixed(vma, vmf->address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
+       return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -468,7 +467,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 
        for (i = n; i > 0; i--) {
                ret = vmf_insert_mixed(vma,
-                       vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+                       vaddr, __pfn_to_pfn_t(pfn, 0));
                if (ret & VM_FAULT_ERROR)
                        break;
                pfn += priv->usergart[fmt].stride_pfn;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cdc7b2f16b884f17516944dde0f50153578d8d0c..249ae403f69874cf5f56d730211ca8ef30ce2507 100644
@@ -923,8 +923,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
        if (kaddr)
                *kaddr = __va(dev_info->start + offset);
        if (pfn)
-               *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
-                                     PFN_DEV);
+               *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
 
        return (dev_sz - offset) / PAGE_SIZE;
 }
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 6328c3a05bcdd44c1e76c4d399d3b050d05a999b..3f2ad5fb4c172840a47268d675fd5514cb88d5e9 100644
@@ -1669,14 +1669,12 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
                break;
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
        case PMD_ORDER:
-               ret = vmf_insert_pfn_pmd(vmf,
-                                        __pfn_to_pfn_t(pfn, PFN_DEV), false);
+               ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
                break;
 #endif
 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
        case PUD_ORDER:
-               ret = vmf_insert_pfn_pud(vmf,
-                                        __pfn_to_pfn_t(pfn, PFN_DEV), false);
+               ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
                break;
 #endif
        default:
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b84d1747a0205a4aa4dd22d1df9f6b2f7b64b33a..820a664cfec76b3323bc03d46af471ac74da649b 100644
@@ -412,7 +412,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
                for (i = 0; i < pages && !ret; i++) {
                        vm_fault_t vmf;
                        unsigned long off = i * PAGE_SIZE;
-                       pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
+                       pfn_t pfn = phys_to_pfn_t(address + off, 0);
                        vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
                        if (vmf & VM_FAULT_ERROR)
                                ret = vm_fault_to_errno(vmf, 0);
diff --git a/mm/memory.c b/mm/memory.c
index 833426fa5fe0357e7162f8e54102ff94cbbbf9ca..ea1388d2a87a746ab2bae1202e27db6f95eaf0b7 100644
@@ -2557,7 +2557,7 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
        pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
                        false);
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);