git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: remove callers of pfn_t functionality
author: Alistair Popple <apopple@nvidia.com>
Thu, 19 Jun 2025 08:58:05 +0000 (18:58 +1000)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 10 Jul 2025 05:42:19 +0000 (22:42 -0700)
All PFN_* pfn_t flags have been removed.  Therefore there is no longer a
need for the pfn_t type and all uses can be replaced with normal pfns.

Link: https://lkml.kernel.org/r/bbedfa576c9822f8032494efbe43544628698b1f.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
43 files changed:
arch/x86/mm/pat/memtype.c
drivers/dax/device.c
drivers/dax/hmem/hmem.c
drivers/dax/kmem.c
drivers/dax/pmem.c
drivers/dax/super.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/gma500/fbdev.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/v3d/v3d_bo.c
drivers/hwtracing/intel_th/msu.c
drivers/md/dm-linear.c
drivers/md/dm-log-writes.c
drivers/md/dm-stripe.c
drivers/md/dm-target.c
drivers/md/dm-writecache.c
drivers/md/dm.c
drivers/nvdimm/pmem.c
drivers/nvdimm/pmem.h
drivers/s390/block/dcssblk.c
drivers/vfio/pci/vfio_pci_core.c
fs/cramfs/inode.c
fs/dax.c
fs/ext4/file.c
fs/fuse/dax.c
fs/fuse/virtio_fs.c
fs/xfs/xfs_file.c
include/linux/dax.h
include/linux/device-mapper.h
include/linux/huge_mm.h
include/linux/mm.h
include/linux/pfn.h
include/linux/pfn_t.h [deleted file]
mm/debug_vm_pgtable.c
mm/huge_memory.c
mm/memory.c
mm/memremap.c
mm/migrate.c
tools/testing/nvdimm/pmem-dax.c
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit_test.h

index 2e7923844afeea412b8c1852deded57ab61fc768..c09284302dd3cd042ce02758efe5388bc4432fe6 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/debugfs.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/mm.h>
index 328231cfb02839dea78c9a92a3b6305b7d3962dc..2bb40a6060af3bdd9f8cf3fc08dc0546904ddd70 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/pagemap.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
@@ -73,7 +72,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
        return -1;
 }
 
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
                              unsigned long fault_size)
 {
        unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -89,7 +88,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
                        ALIGN_DOWN(vmf->address, fault_size));
 
        for (i = 0; i < nr_pages; i++) {
-               struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
+               struct folio *folio = pfn_folio(pfn + i);
 
                if (folio->mapping)
                        continue;
@@ -104,7 +103,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 {
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PAGE_SIZE;
 
        if (check_vma(dev_dax, vmf->vma, __func__))
@@ -125,11 +124,11 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);
 
        dax_set_mapping(vmf, pfn, fault_size);
 
-       return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+       return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
                                        vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -140,7 +139,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PMD_SIZE;
 
        if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +168,11 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);
 
        dax_set_mapping(vmf, pfn, fault_size);
 
-       return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+       return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
                                vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -185,7 +184,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PUD_SIZE;
 
 
@@ -215,11 +214,11 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);
 
        dax_set_mapping(vmf, pfn, fault_size);
 
-       return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+       return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
                                vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
index 5e7c53f18491622408adeab9d354ea869dbc71de..c18451a37e4f2102bffaa6c27c3b96455514def4 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/platform_device.h>
 #include <linux/memregion.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include <linux/dax.h>
 #include "../bus.h"
 
index 584c70a34b52e20d05b66848cd617e002cd92314..c036e4d0b610b2a3a8ab18d9a43cef57869d7b24 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/memory.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/fs.h>
index c8ebf4e281f2405034065014ecdb830afda66906..bee93066a849770d4f42c635242445e7fe9e73bf 100644 (file)
@@ -2,7 +2,6 @@
 /* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
 #include <linux/memremap.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include "../nvdimm/pfn.h"
 #include "../nvdimm/nd.h"
 #include "bus.h"
index e16d1d40d7738acaa89f9572ae62c3ab773e9e66..54c480e874cb304f354e6f48a50175c004d0ce1b 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
 #include <linux/magic.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -148,7 +147,7 @@ enum dax_device_flags {
  * pages accessible at the device relative @pgoff.
  */
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-               enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+               enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
 {
        long avail;
 
index d44401a695e203bd36b3b6678fdeb3572a91bfda..e3fbb45f37a27bb1dd92082fc727dd812e0eabdf 100644 (file)
@@ -7,7 +7,6 @@
 
 
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/shmem_fs.h>
 #include <linux/module.h>
 
index 109efdc96ac5a3879d62b5505c4fc5ca2d93b223..68b825fc056e50cb3f2ce667e7b173ddfe11d5fe 100644 (file)
@@ -6,7 +6,6 @@
  **************************************************************************/
 
 #include <linux/fb.h>
-#include <linux/pfn_t.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
@@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        for (i = 0; i < page_num; ++i) {
-               err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
+               err = vmf_insert_mixed(vma, address, pfn);
                if (unlikely(err & VM_FAULT_ERROR))
                        break;
                address += PAGE_SIZE;
index f6d37dff320d4a77516a1b10d394acba41ffc770..75f5b0e871ef7ad4fa4a60eb2620b1c50eb87dc9 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include <drm/drm_cache.h>
index 2995e80fec3ba98b9d161e04ce11571454dd7622..20bf31fe799b4bf4e4aaef522ff547b1a3978a13 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 
 #include <drm/drm_prime.h>
 #include <drm/drm_file.h>
index 9df05b2b7ba04056b1341f42bb05a375714cfb51..381552bfb40903e6474fad2ea7e52ee827302ccf 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>
 
 #include <drm/drm_prime.h>
@@ -371,7 +370,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
+       return vmf_insert_mixed(vma, vmf->address, pfn);
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -466,8 +465,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
                        pfn, pfn << PAGE_SHIFT);
 
        for (i = n; i > 0; i--) {
-               ret = vmf_insert_mixed(vma,
-                       vaddr, __pfn_to_pfn_t(pfn, 0));
+               ret = vmf_insert_mixed(vma, vaddr, pfn);
                if (ret & VM_FAULT_ERROR)
                        break;
                pfn += priv->usergart[fmt].stride_pfn;
index bb7815599435bf590e77bab7c78ee4fa251e9da2..c41476ddde686c4180569979ceef773b61dd4f70 100644 (file)
@@ -16,7 +16,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>
 
 #include "v3d_drv.h"
index 7163950eb3719ca10f3916deb09b1a624a87fd40..f3a13b300835d5308aa87b718586ea14e43be04a 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
-#include <linux/pfn_t.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -1618,7 +1617,7 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        get_page(page);
-       return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
+       return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn(page));
 }
 
 static const struct vm_operations_struct msc_mmap_ops = {
index 15538ec58f8e4dbc2780ffbb2202883a598dfcd6..73bf290af18124e122d4edf59eb20cae7956ee38 100644 (file)
@@ -170,7 +170,7 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
 
index d484e8e1d48a93dc67f24f7504acb20530815b86..679b07dee22942b224a9f9e3f5cd95b2b6c72a4c 100644 (file)
@@ -893,7 +893,7 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
 
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
 
index a7dc04bd55e5cb64d60d94fedcd8d534ab22709e..366f4615978582c598a848ae59a56a6a7d03d510 100644 (file)
@@ -316,7 +316,7 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
 
index 652627aea11b6188d2873ade3e8717e746dea6dd..2af5a9514c05e6770f03ffa8697918ac2bf26bdd 100644 (file)
@@ -255,7 +255,7 @@ static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        return -EIO;
 }
index a428e1cacf07fbdb388d3eec8a892e5c049dbdb0..d8de4a3076a17de270f0401eb9c11037ab66d9b1 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
 #include <linux/dax.h>
-#include <linux/pfn_t.h>
 #include <linux/libnvdimm.h>
 #include <linux/delay.h>
 #include "dm-io-tracker.h"
@@ -256,7 +255,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        int r;
        loff_t s;
        long p, da;
-       pfn_t pfn;
+       unsigned long pfn;
        int id;
        struct page **pages;
        sector_t offset;
@@ -290,7 +289,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                r = da;
                goto err2;
        }
-       if (!pfn_t_has_page(pfn)) {
+       if (!pfn_valid(pfn)) {
                wc->memory_map = NULL;
                r = -EOPNOTSUPP;
                goto err2;
@@ -314,13 +313,13 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                                r = daa ? daa : -EINVAL;
                                goto err3;
                        }
-                       if (!pfn_t_has_page(pfn)) {
+                       if (!pfn_valid(pfn)) {
                                r = -EOPNOTSUPP;
                                goto err3;
                        }
                        while (daa-- && i < p) {
-                               pages[i++] = pfn_t_to_page(pfn);
-                               pfn.val++;
+                               pages[i++] = pfn_to_page(pfn);
+                               pfn++;
                                if (!(i & 15))
                                        cond_resched();
                        }
index 1726f0f828cc948e5698645273c3b83f0381bb0d..4b9415f718e36156c0fc83678cf1a7aea1fea77b 100644 (file)
@@ -1218,7 +1218,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
index aa50006b7616933e6efce0db268557c014c23d1f..05785ff21a8bd40d06daa4d5d97083bf0bc10595 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kstrtox.h>
 #include <linux/vmalloc.h>
 #include <linux/blk-mq.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
@@ -242,7 +241,7 @@ static void pmem_submit_bio(struct bio *bio)
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
        sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
@@ -254,7 +253,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+               *pfn = PHYS_PFN(pmem->phys_addr + offset);
 
        if (bb->count &&
            badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
@@ -303,7 +302,7 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 
 static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
-               void **kaddr, pfn_t *pfn)
+               void **kaddr, unsigned long *pfn)
 {
        struct pmem_device *pmem = dax_get_private(dax_dev);
 
@@ -513,7 +512,6 @@ static int pmem_attach_disk(struct device *dev,
 
        pmem->disk = disk;
        pmem->pgmap.owner = pmem;
-       pmem->pfn_flags = 0;
        if (is_nd_pfn(dev)) {
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
index 392b0b38acb97c0a87d8464b0fbcf85b5a2fffa0..a48509f901968ea407dabcf69d837a41f1094ac7 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/badblocks.h>
 #include <linux/memremap.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/fs.h>
 
 enum dax_access_mode;
@@ -16,7 +15,6 @@ struct pmem_device {
        phys_addr_t             phys_addr;
        /* when non-zero this device is hosting a 'pfn' instance */
        phys_addr_t             data_offset;
-       u64                     pfn_flags;
        void                    *virt_addr;
        /* immutable base size of the namespace */
        size_t                  size;
@@ -31,7 +29,7 @@ struct pmem_device {
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);
 
 #ifdef CONFIG_MEMORY_FAILURE
 static inline bool test_and_clear_pmem_poison(struct page *page)
index 249ae403f69874cf5f56d730211ca8ef30ce2507..94fa5edecaddf8dfd88df711078eb92ea92d367a 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
-#include <linux/pfn_t.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
 #include <linux/io.h>
@@ -33,7 +32,7 @@ static void dcssblk_release(struct gendisk *disk);
 static void dcssblk_submit_bio(struct bio *bio);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -914,7 +913,7 @@ fail:
 
 static long
 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
-               long nr_pages, void **kaddr, pfn_t *pfn)
+               long nr_pages, void **kaddr, unsigned long *pfn)
 {
        resource_size_t offset = pgoff * PAGE_SIZE;
        unsigned long dev_sz;
@@ -923,7 +922,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
        if (kaddr)
                *kaddr = __va(dev_info->start + offset);
        if (pfn)
-               *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
+               *pfn = PFN_DOWN(dev_info->start + offset);
 
        return (dev_sz - offset) / PAGE_SIZE;
 }
@@ -931,7 +930,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 static long
 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
 
index 3f2ad5fb4c172840a47268d675fd5514cb88d5e9..31bdb9110cc0f1a4be7b34000396ce1760fe1a33 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
-#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1669,12 +1668,12 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
                break;
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
        case PMD_ORDER:
-               ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
+               ret = vmf_insert_pfn_pmd(vmf, pfn, false);
                break;
 #endif
 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
        case PUD_ORDER:
-               ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
+               ret = vmf_insert_pfn_pud(vmf, pfn, false);
                break;
 #endif
        default:
index 820a664cfec76b3323bc03d46af471ac74da649b..b002e9b734f99cf9aca3b639f1e35c7af2c68935 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/ramfs.h>
 #include <linux/init.h>
 #include <linux/string.h>
@@ -412,8 +411,8 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
                for (i = 0; i < pages && !ret; i++) {
                        vm_fault_t vmf;
                        unsigned long off = i * PAGE_SIZE;
-                       pfn_t pfn = phys_to_pfn_t(address + off, 0);
-                       vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+                       vmf = vmf_insert_mixed(vma, vma->vm_start + off,
+                                       PHYS_PFN(address + off));
                        if (vmf & VM_FAULT_ERROR)
                                ret = vm_fault_to_errno(vmf, 0);
                }
index f4ffb698227060f09d04e390e6fed65026ef458a..4229513806bea094904b5e50db141b10c2c0c915 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -20,7 +20,6 @@
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
 #include <linux/vmstat.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
@@ -76,9 +75,9 @@ static struct folio *dax_to_folio(void *entry)
        return page_folio(pfn_to_page(dax_to_pfn(entry)));
 }
 
-static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+static void *dax_make_entry(unsigned long pfn, unsigned long flags)
 {
-       return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+       return xa_mk_value(flags | (pfn << DAX_SHIFT));
 }
 
 static bool dax_is_locked(void *entry)
@@ -713,7 +712,7 @@ retry:
 
                if (order > 0)
                        flags |= DAX_PMD;
-               entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+               entry = dax_make_entry(0, flags);
                dax_lock_entry(xas, entry);
                if (xas_error(xas))
                        goto out_unlock;
@@ -1041,7 +1040,7 @@ static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
  * appropriate.
  */
 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
-               const struct iomap_iter *iter, void *entry, pfn_t pfn,
+               const struct iomap_iter *iter, void *entry, unsigned long pfn,
                unsigned long flags)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1239,7 +1238,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
-               size_t size, void **kaddr, pfn_t *pfnp)
+               size_t size, void **kaddr, unsigned long *pfnp)
 {
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        int id, rc = 0;
@@ -1257,7 +1256,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
-       if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+       if (*pfnp & (PHYS_PFN(size)-1))
                goto out;
 
        rc = 0;
@@ -1361,12 +1360,12 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct inode *inode = iter->inode;
        unsigned long vaddr = vmf->address;
-       pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+       unsigned long pfn = my_zero_pfn(vaddr);
        vm_fault_t ret;
 
        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
 
-       ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
+       ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
@@ -1383,14 +1382,14 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        struct folio *zero_folio;
        spinlock_t *ptl;
        pmd_t pmd_entry;
-       pfn_t pfn;
+       unsigned long pfn;
 
        zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
 
        if (unlikely(!zero_folio))
                goto fallback;
 
-       pfn = page_to_pfn_t(&zero_folio->page);
+       pfn = page_to_pfn(&zero_folio->page);
        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
                                  DAX_PMD | DAX_ZERO_PAGE);
 
@@ -1779,7 +1778,8 @@ static vm_fault_t dax_fault_return(int error)
  * insertion for now and return the pfn so that caller can insert it after the
  * fsync is done.
  */
-static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
+                                       unsigned long pfn)
 {
        if (WARN_ON_ONCE(!pfnp))
                return VM_FAULT_SIGBUS;
@@ -1827,7 +1827,7 @@ static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
  * @pmd:       distinguish whether it is a pmd fault
  */
 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
-               const struct iomap_iter *iter, pfn_t *pfnp,
+               const struct iomap_iter *iter, unsigned long *pfnp,
                struct xa_state *xas, void **entry, bool pmd)
 {
        const struct iomap *iomap = &iter->iomap;
@@ -1838,7 +1838,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
        unsigned long entry_flags = pmd ? DAX_PMD : 0;
        struct folio *folio;
        int ret, err = 0;
-       pfn_t pfn;
+       unsigned long pfn;
        void *kaddr;
 
        if (!pmd && vmf->cow_page)
@@ -1875,16 +1875,15 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
 
        folio_ref_inc(folio);
        if (pmd)
-               ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
-                                       write);
+               ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
        else
-               ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
+               ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
        folio_put(folio);
 
        return ret;
 }
 
-static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,
                               int *iomap_errp, const struct iomap_ops *ops)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1996,7 +1995,7 @@ static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
        return false;
 }
 
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
                               const struct iomap_ops *ops)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -2077,7 +2076,7 @@ out:
        return ret;
 }
 #else
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
                               const struct iomap_ops *ops)
 {
        return VM_FAULT_FALLBACK;
@@ -2098,7 +2097,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * successfully.
  */
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-                   pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
+                       unsigned long *pfnp, int *iomap_errp,
+                       const struct iomap_ops *ops)
 {
        if (order == 0)
                return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
@@ -2118,8 +2118,8 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
  * This function inserts a writeable PTE or PMD entry into the page tables
  * for an mmaped DAX file.  It also marks the page cache entry as dirty.
  */
-static vm_fault_t
-dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+                                       unsigned long pfn, unsigned int order)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
@@ -2141,7 +2141,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
        xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
        dax_lock_entry(&xas, entry);
        xas_unlock_irq(&xas);
-       folio = pfn_folio(pfn_t_to_pfn(pfn));
+       folio = pfn_folio(pfn);
        folio_ref_inc(folio);
        if (order == 0)
                ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
@@ -2168,7 +2168,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
  * table entry.
  */
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
-               pfn_t pfn)
+               unsigned long pfn)
 {
        int err;
        loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
index 21df81347147cc7ab3648590b6dbd40e31e2c20b..e6e96298231962f9b6f05e6997c0ab1fa6584b4a 100644 (file)
@@ -747,7 +747,7 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
-       pfn_t pfn;
+       unsigned long pfn;
 
        if (write) {
                sb_start_pagefault(sb);
index 0502bf3cdf6a121c62898398a1783a2084bb6867..ac6d4c1064cc66b39220bbd86e7a8646743624ad 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/dax.h>
 #include <linux/uio.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/iomap.h>
 #include <linux/interval_tree.h>
 
@@ -757,7 +756,7 @@ static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
        vm_fault_t ret;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
-       pfn_t pfn;
+       unsigned long pfn;
        int error = 0;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_conn_dax *fcd = fc->dax;
index 53c2626e90e723ad88f1aee69d7507b4f197ab13..aac914b2cd50d8b64951fe6fcc79ccef700abd97 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/group_cpus.h>
-#include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
@@ -1008,7 +1007,7 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
  */
 static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                    long nr_pages, enum dax_access_mode mode,
-                                   void **kaddr, pfn_t *pfn)
+                                   void **kaddr, unsigned long *pfn)
 {
        struct virtio_fs *fs = dax_get_private(dax_dev);
        phys_addr_t offset = PFN_PHYS(pgoff);
@@ -1017,7 +1016,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
        if (kaddr)
                *kaddr = fs->window_kaddr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0);
+               *pfn = fs->window_phys_addr + offset;
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
 }
 
index 0b41b18debf36d9edb405accdb1ab47a6a781654..314a9d9dd7db449a110cc7e2973d3b18c0050283 100644 (file)
@@ -1730,7 +1730,7 @@ xfs_dax_fault_locked(
        bool                    write_fault)
 {
        vm_fault_t              ret;
-       pfn_t                   pfn;
+       unsigned long           pfn;
 
        if (!IS_ENABLED(CONFIG_FS_DAX)) {
                ASSERT(0);
index dcc9fcdf14e4396659016d8d9f115ccb6edaf908..29eec755430b80f9c5e66ddbdc31287564b79692 100644 (file)
@@ -26,7 +26,7 @@ struct dax_operations {
         * number of pages available for DAX at that pfn.
         */
        long (*direct_access)(struct dax_device *, pgoff_t, long,
-                       enum dax_access_mode, void **, pfn_t *);
+                       enum dax_access_mode, void **, unsigned long *);
        /* zero_page_range: required operation. Zero page range   */
        int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
        /*
@@ -241,7 +241,7 @@ static inline void dax_break_layout_final(struct inode *inode)
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-               enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+               enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -255,9 +255,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-                   pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+                       unsigned long *pfnp, int *errp,
+                       const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
-               unsigned int order, pfn_t pfn);
+               unsigned int order, unsigned long pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_delete_mapping_range(struct address_space *mapping,
                                loff_t start, loff_t end);
index cb95951547abe7653ab3dff9020ad0b7d5242870..84fdc3a6a19a4c09d6974f69f3d8bdf8b9641aad 100644 (file)
@@ -156,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
  */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode node, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);
 
index 26607f2c65fb44c905cc1082165f8748557d61f8..8f1b15213f613105c8628716e2d77c8aa260f10c 100644 (file)
@@ -37,8 +37,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+                             bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+                             bool write);
 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
                                bool write);
 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
index 4d833f1599888e8d8ba8228e889e64dbf85f486b..dccebf0abf06e8350b9a490565634a45a5265c6d 100644 (file)
@@ -3522,9 +3522,9 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn);
+                       unsigned long pfn);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn);
+               unsigned long addr, unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
index 14bc053c53d8a4efb49dfca43c7f4116099b1f85..b90ca0b6c331e44e5971787f36c8f1fdd2a074ab 100644 (file)
@@ -4,15 +4,6 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page).  Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
-       u64 val;
-} pfn_t;
 #endif
 
 #define PFN_ALIGN(x)   (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
deleted file mode 100644 (file)
index 2c00293..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-
-#define PFN_FLAGS_TRACE \
-       { }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
-       pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
-       return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
-       return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
-       return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
-       return true;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
-       return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
-       if (pfn_t_has_page(pfn))
-               return pfn_to_page(pfn_t_to_pfn(pfn));
-       return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
-       return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
-       return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
-       return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#endif /* _LINUX_PFN_T_H_ */
index d84d0c49012f9d85593a5bcb55a6da7fe6a11c69..bd8f9317b02580a6c7b5749a876ce204a4875e8a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/mman.h>
 #include <linux/mm_types.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include <linux/printk.h>
 #include <linux/pgtable.h>
 #include <linux/random.h>
index cf808b2eea292421dedeb03974ac0a556d58acc3..ce130225a8e52dab1e1e179673ec64e8ec634ce2 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/mm_types.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
-#include <linux/pfn_t.h>
 #include <linux/mman.h>
 #include <linux/memremap.h>
 #include <linux/pagemap.h>
@@ -1375,7 +1374,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 struct folio_or_pfn {
        union {
                struct folio *folio;
-               pfn_t pfn;
+               unsigned long pfn;
        };
        bool is_folio;
 };
@@ -1391,7 +1390,7 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        if (!pmd_none(*pmd)) {
                const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
-                                         pfn_t_to_pfn(fop.pfn);
+                                         fop.pfn;
 
                if (write) {
                        if (pmd_pfn(*pmd) != pfn) {
@@ -1414,7 +1413,7 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
                folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
        } else {
-               entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
+               entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
                entry = pmd_mkspecial(entry);
        }
        if (write) {
@@ -1442,7 +1441,8 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+                             bool write)
 {
        unsigned long addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
@@ -1473,7 +1473,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
                        return VM_FAULT_OOM;
        }
 
-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
        ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
@@ -1539,7 +1539,7 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
 
        if (!pud_none(*pud)) {
                const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
-                                         pfn_t_to_pfn(fop.pfn);
+                                         fop.pfn;
 
                if (write) {
                        if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
@@ -1559,7 +1559,7 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
                folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
        } else {
-               entry = pud_mkhuge(pfn_t_pud(fop.pfn, prot));
+               entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
                entry = pud_mkspecial(entry);
        }
        if (write) {
@@ -1580,7 +1580,8 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+                             bool write)
 {
        unsigned long addr = vmf->address & PUD_MASK;
        struct vm_area_struct *vma = vmf->vma;
@@ -1603,7 +1604,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
 
-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
        ptl = pud_lock(vma->vm_mm, vmf->pud);
        insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
index e932a007af4ceeef43367f06cc1563c2a7b4d605..0f9b32a20e5b788645496ff69e9d6428d9140c28 100644 (file)
@@ -57,7 +57,6 @@
 #include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
-#include <linux/pfn_t.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -2435,7 +2434,7 @@ int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 EXPORT_SYMBOL(vm_map_pages_zero);
 
 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn, pgprot_t prot, bool mkwrite)
+                       unsigned long pfn, pgprot_t prot, bool mkwrite)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, entry;
@@ -2457,7 +2456,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                         * allocation and mapping invalidation so just skip the
                         * update.
                         */
-                       if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
+                       if (pte_pfn(entry) != pfn) {
                                WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
                                goto out_unlock;
                        }
@@ -2470,7 +2469,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        }
 
        /* Ok, finally just insert the thing.. */
-       entry = pte_mkspecial(pfn_t_pte(pfn, prot));
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
 
        if (mkwrite) {
                entry = pte_mkyoung(entry);
@@ -2541,8 +2540,7 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
        pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
-                       false);
+       return insert_pfn(vma, addr, pfn, pgprot, false);
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
@@ -2573,21 +2571,22 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vmf_insert_pfn);
 
-static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
+static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
+                       bool mkwrite)
 {
-       if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
+       if (unlikely(is_zero_pfn(pfn)) &&
            (mkwrite || !vm_mixed_zeropage_allowed(vma)))
                return false;
        /* these checks mirror the abort conditions in vm_normal_page */
        if (vma->vm_flags & VM_MIXEDMAP)
                return true;
-       if (is_zero_pfn(pfn_t_to_pfn(pfn)))
+       if (is_zero_pfn(pfn))
                return true;
        return false;
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn, bool mkwrite)
+               unsigned long addr, unsigned long pfn, bool mkwrite)
 {
        pgprot_t pgprot = vma->vm_page_prot;
        int err;
@@ -2598,9 +2597,9 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
 
-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-       if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+       if (!pfn_modify_allowed(pfn, pgprot))
                return VM_FAULT_SIGBUS;
 
        /*
@@ -2610,7 +2609,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would there be refcounted as a normal page.
         */
-       if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_t_valid(pfn)) {
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
                struct page *page;
 
                /*
@@ -2618,7 +2617,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
                 * regardless of whether the caller specified flags that
                 * result in pfn_t_has_page() == false.
                 */
-               page = pfn_to_page(pfn_t_to_pfn(pfn));
+               page = pfn_to_page(pfn);
                err = insert_page(vma, addr, page, pgprot, mkwrite);
        } else {
                return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
@@ -2653,7 +2652,7 @@ vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
 
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-               pfn_t pfn)
+               unsigned long pfn)
 {
        return __vm_insert_mixed(vma, addr, pfn, false);
 }
@@ -2665,7 +2664,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
  *  the same entry was actually inserted.
  */
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn)
+               unsigned long addr, unsigned long pfn)
 {
        return __vm_insert_mixed(vma, addr, pfn, true);
 }
index c17e0a69caceededc723797d29ce6ce0e2323ecc..044a4550671a924c30782fdc48197b7c4f244988 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/kasan.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memremap.h>
-#include <linux/pfn_t.h>
 #include <linux/swap.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
index 8cf0f9c9599d36948d10082834d1094baafa8820..ea8c74d996592371daf473a9cb6d6a05812b4208 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/compat.h>
 #include <linux/hugetlb.h>
 #include <linux/gfp.h>
-#include <linux/pfn_t.h>
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>
index c1ec099a3b1d0515f552d16a7934618f4fdcd944..05e763aab104c98483927c95e0d264fab37a40b1 100644 (file)
@@ -10,7 +10,7 @@
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 
@@ -29,7 +29,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                        *kaddr = pmem->virt_addr + offset;
                page = vmalloc_to_page(pmem->virt_addr + offset);
                if (pfn)
-                       *pfn = page_to_pfn_t(page);
+                       *pfn = page_to_pfn(page);
                pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
                                __func__, pmem, pgoff, page_to_pfn(page));
 
@@ -39,7 +39,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+               *pfn = PHYS_PFN(pmem->phys_addr + offset);
 
        /*
         * If badblocks are present, limit known good range to the
index ddceb04b4a9a78febacfc4a522be0dd8d4d49afe..f7e7bfe9bb85d6fd85a75ecb63cc17445f797c5c 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/mm.h>
@@ -135,12 +134,6 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 }
 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
 
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
-{
-        return phys_to_pfn_t(addr, flags);
-}
-EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
-
 void *__wrap_memremap(resource_size_t offset, size_t size,
                unsigned long flags)
 {
index b00583d1eace93c1aa4913ffa3550b0c21873e33..b9047fb8ea4a2913862de7121337363dfd7e38ee 100644 (file)
@@ -212,7 +212,6 @@ void __iomem *__wrap_devm_ioremap(struct device *dev,
 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags);
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 void *__wrap_memremap(resource_size_t offset, size_t size,
                unsigned long flags);
 void __wrap_devm_memunmap(struct device *dev, void *addr);