]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/huge_memory: remove pXd_devmap usage from insert_pXd_pfn()
authorAlistair Popple <apopple@nvidia.com>
Thu, 19 Jun 2025 08:57:58 +0000 (18:57 +1000)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 10 Jul 2025 05:42:17 +0000 (22:42 -0700)
Nothing uses PFN_DEV anymore so there is no need to create devmap pXd's when
mapping a PFN.  Instead, special mappings will be created, which ensures
vm_normal_page_pXd() will not return pages which don't have an associated
struct page.  This could change behaviour slightly on architectures where
pXd_devmap() does not imply pXd_special(), as the normal page checks would
have fallen through to checking VM_PFNMAP/MIXEDMAP instead, which in
theory at least could have returned a page.

However vm_normal_page_pXd() should never have been returning pages for
pXd_devmap() entries anyway, so anything relying on that would have been a
bug.

Link: https://lkml.kernel.org/r/cd8658f9ff10afcfffd8b145a39d98bf1c595ffa.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 7434d177b97c15f1aba5c05777b419cabc33db2b..54b5c37d9515128abbf6e12d36c6250191198fa5 100644 (file)
@@ -1415,11 +1415,7 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
        } else {
                entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
-
-               if (pfn_t_devmap(fop.pfn))
-                       entry = pmd_mkdevmap(entry);
-               else
-                       entry = pmd_mkspecial(entry);
+               entry = pmd_mkspecial(entry);
        }
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
@@ -1565,11 +1561,7 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
        } else {
                entry = pud_mkhuge(pfn_t_pud(fop.pfn, prot));
-
-               if (pfn_t_devmap(fop.pfn))
-                       entry = pud_mkdevmap(entry);
-               else
-                       entry = pud_mkspecial(entry);
+               entry = pud_mkspecial(entry);
        }
        if (write) {
                entry = pud_mkyoung(pud_mkdirty(entry));