PFN_DEV was used by callers of dax_direct_access() to figure out, via
pfn_t_has_page(), whether the returned PFN is associated with a page.
However, all DAX PFNs now require an associated ZONE_DEVICE page, so
callers can assume a page exists.
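
For example (an illustrative sketch, not part of this patch), a
dax_direct_access() caller that previously looked up the page with

	if (pfn_t_has_page(pfn))
		page = pfn_t_to_page(pfn);
	else
		page = NULL;

can now take the page unconditionally:

	page = pfn_t_to_page(pfn);
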
Other users of PFN_DEV set it before calling vmf_insert_mixed(). This
is unnecessary as the flag is no longer checked; vmf_insert_mixed()
instead relies on pfn_valid() to determine whether there is an
associated page.
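
Conceptually (a simplified sketch of the mm/memory.c mixed-map path,
assuming its static insert_page()/insert_pfn() helpers and local
err/pgprot variables; error handling and the
CONFIG_ARCH_HAS_PTE_SPECIAL case are omitted), the decision reduces
to:

	if (pfn_t_valid(pfn))
		/* pfn_valid(): a struct page exists, insert it refcounted */
		err = insert_page(vma, addr, pfn_t_to_page(pfn), pgprot);
	else
		/* no struct page backs this PFN, insert it as a raw mapping */
		return insert_pfn(vma, addr, pfn, pgprot, false);
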
Link: https://lkml.kernel.org/r/74b293aebc21b941090bc3e7aeafa91b57c821a5.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; ++i) {
- err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
if (unlikely(err & VM_FAULT_ERROR))
break;
address += PAGE_SIZE;
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
}
/* Special handling for the case of faulting in 2d tiled buffers */
for (i = n; i > 0; i--) {
ret = vmf_insert_mixed(vma,
- vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ vaddr, __pfn_to_pfn_t(pfn, 0));
if (ret & VM_FAULT_ERROR)
break;
pfn += priv->usergart[fmt].stride_pfn;
if (kaddr)
*kaddr = __va(dev_info->start + offset);
if (pfn)
- *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
- PFN_DEV);
+ *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
return (dev_sz - offset) / PAGE_SIZE;
}
break;
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
case PMD_ORDER:
- ret = vmf_insert_pfn_pmd(vmf,
- __pfn_to_pfn_t(pfn, PFN_DEV), false);
+ ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
break;
#endif
#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
case PUD_ORDER:
- ret = vmf_insert_pfn_pud(vmf,
- __pfn_to_pfn_t(pfn, PFN_DEV), false);
+ ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
break;
#endif
default:
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
- pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
+ pfn_t pfn = phys_to_pfn_t(address + off, 0);
vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
- return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+ return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
false);
}
EXPORT_SYMBOL(vmf_insert_pfn_prot);