git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: remove redundant pXd_devmap calls
Author: Alistair Popple <apopple@nvidia.com>
Date: Thu, 19 Jun 2025 08:57:59 +0000 (18:57 +1000)
Committer: Andrew Morton <akpm@linux-foundation.org>
Date: Thu, 10 Jul 2025 05:42:17 +0000 (22:42 -0700)
DAX was the only thing that created pmd_devmap and pud_devmap entries
however it no longer does as DAX pages are now refcounted normally and
pXd_trans_huge() returns true for those.  Therefore checking both
pXd_devmap and pXd_trans_huge() is redundant and the former can be removed
without changing behaviour as it will always be false.

Link: https://lkml.kernel.org/r/d58f089dc16b7feb7c6728164f37dea65d64a0d3.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
15 files changed:
fs/dax.c
include/linux/huge_mm.h
include/linux/pgtable.h
mm/hmm.c
mm/huge_memory.c
mm/mapping_dirty_helpers.c
mm/memory.c
mm/migrate_device.c
mm/mprotect.c
mm/mremap.c
mm/page_vma_mapped.c
mm/pagewalk.c
mm/pgtable-generic.c
mm/userfaultfd.c
mm/vmscan.c

index ea0c35794bf989c90110ca032966732a00dd0022..7d4ecb9d23af3deb3cfe82705f4460c9d88dde60 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1937,7 +1937,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
         * the PTE we need to set up.  If so just return and the fault will be
         * retried.
         */
-       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+       if (pmd_trans_huge(*vmf->pmd)) {
                ret = VM_FAULT_NOPAGE;
                goto unlock_entry;
        }
@@ -2060,8 +2060,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
         * the PMD we need to set up.  If so just return and the fault will be
         * retried.
         */
-       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
-                       !pmd_devmap(*vmf->pmd)) {
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
                ret = 0;
                goto unlock_entry;
        }
index a2df2308cb2c6f425cc1f89f66fb047daf282c20..26607f2c65fb44c905cc1082165f8748557d61f8 100644 (file)
@@ -400,8 +400,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 #define split_huge_pmd(__vma, __pmd, __address)                                \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
-               if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
-                                       || pmd_devmap(*____pmd))        \
+               if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd))  \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                         false);                        \
        }  while (0)
@@ -426,8 +425,7 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #define split_huge_pud(__vma, __pud, __address)                                \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
-               if (pud_trans_huge(*____pud)                            \
-                                       || pud_devmap(*____pud))        \
+               if (pud_trans_huge(*____pud))                           \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)
 
@@ -450,7 +448,7 @@ static inline int is_swap_pmd(pmd_t pmd)
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
 {
-       if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+       if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
@@ -458,7 +456,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
 {
-       if (pud_trans_huge(*pud) || pud_devmap(*pud))
+       if (pud_trans_huge(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
index d05e35a0facffcb28d3c86bbf34db9db3e2c8bef..ffcd966cf2d45910cf1f6847416a8ecfbd054e2e 100644 (file)
@@ -1672,7 +1672,7 @@ static inline int pud_trans_unstable(pud_t *pud)
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
        pud_t pudval = READ_ONCE(*pud);
 
-       if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+       if (pud_none(pudval) || pud_trans_huge(pudval))
                return 1;
        if (unlikely(pud_bad(pudval))) {
                pud_clear_bad(pud);
index 14914da98416d17650be0c198d26dd9e2171f255..62d3082dc55c42270d605bfe79cac10d738e2a83 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -360,7 +360,7 @@ again:
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }
 
-       if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
+       if (pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
@@ -371,7 +371,7 @@ again:
                 * values.
                 */
                pmd = pmdp_get_lockless(pmdp);
-               if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
+               if (!pmd_trans_huge(pmd))
                        goto again;
 
                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
index 54b5c37d9515128abbf6e12d36c6250191198fa5..cf808b2eea292421dedeb03974ac0a556d58acc3 100644 (file)
@@ -1459,8 +1459,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-                       !pfn_t_devmap(pfn));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1596,8 +1595,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-                       !pfn_t_devmap(pfn));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1815,7 +1813,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        ret = -EAGAIN;
        pud = *src_pud;
-       if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+       if (unlikely(!pud_trans_huge(pud)))
                goto out_unlock;
 
        /*
@@ -2677,8 +2675,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
        spinlock_t *ptl;
        ptl = pmd_lock(vma->vm_mm, pmd);
-       if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-                       pmd_devmap(*pmd)))
+       if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)))
                return ptl;
        spin_unlock(ptl);
        return NULL;
@@ -2695,7 +2692,7 @@ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
        spinlock_t *ptl;
 
        ptl = pud_lock(vma->vm_mm, pud);
-       if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+       if (likely(pud_trans_huge(*pud)))
                return ptl;
        spin_unlock(ptl);
        return NULL;
@@ -2747,7 +2744,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
        VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
        VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
-       VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
+       VM_BUG_ON(!pud_trans_huge(*pud));
 
        count_vm_event(THP_SPLIT_PUD);
 
@@ -2780,7 +2777,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                                (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
        ptl = pud_lock(vma->vm_mm, pud);
-       if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
+       if (unlikely(!pud_trans_huge(*pud)))
                goto out;
        __split_huge_pud_locked(vma, pud, range.start);
 
@@ -2853,8 +2850,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
        VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-       VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
-                               && !pmd_devmap(*pmd));
+       VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
 
        count_vm_event(THP_SPLIT_PMD);
 
@@ -3062,8 +3058,7 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmd, bool freeze)
 {
        VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
-       if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-           is_pmd_migration_entry(*pmd))
+       if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd))
                __split_huge_pmd_locked(vma, pmd, address, freeze);
 }
 
index dc1692ff9e583aff824d4d2ffc104830551ee750..c193de6cb23a1fde1427e40043f9f21aec128ce4 100644 (file)
@@ -129,7 +129,7 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
        pmd_t pmdval = pmdp_get_lockless(pmd);
 
        /* Do not split a huge pmd, present or migrated */
-       if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
+       if (pmd_trans_huge(pmdval)) {
                WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
                walk->action = ACTION_CONTINUE;
        }
@@ -152,7 +152,7 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
        pud_t pudval = READ_ONCE(*pud);
 
        /* Do not split a huge pud */
-       if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
+       if (pud_trans_huge(pudval)) {
                WARN_ON(pud_write(pudval) || pud_dirty(pudval));
                walk->action = ACTION_CONTINUE;
        }
index 01d51bd95197d5faaf4282a36f0fb64db52d1bc4..150bb62855b184efcd5a03f1ad6acb5a13199d5b 100644 (file)
@@ -675,8 +675,6 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       if (pmd_devmap(pmd))
-               return NULL;
        if (is_huge_zero_pmd(pmd))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
@@ -1240,8 +1238,7 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
-                       || pmd_devmap(*src_pmd)) {
+               if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
                        int err;
                        VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
                        err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -1277,7 +1274,7 @@ copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        src_pud = pud_offset(src_p4d, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+               if (pud_trans_huge(*src_pud)) {
                        int err;
 
                        VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
@@ -1791,7 +1788,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+               if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false);
                        else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
@@ -1833,7 +1830,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+               if (pud_trans_huge(*pud)) {
                        if (next - addr != HPAGE_PUD_SIZE) {
                                mmap_assert_locked(tlb->mm);
                                split_huge_pud(vma, pud, addr);
@@ -6136,7 +6133,7 @@ retry_pud:
                pud_t orig_pud = *vmf.pud;
 
                barrier();
-               if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
+               if (pud_trans_huge(orig_pud)) {
 
                        /*
                         * TODO once we support anonymous PUDs: NUMA case and
@@ -6177,7 +6174,7 @@ retry_pud:
                                pmd_migration_entry_wait(mm, vmf.pmd);
                        return 0;
                }
-               if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
+               if (pmd_trans_huge(vmf.orig_pmd)) {
                        if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf);
 
index 3158afe7eb2308f5b259c618a9452785e524574b..e05e14d6eacdb98daede9a735c00545363cacc7f 100644 (file)
@@ -615,7 +615,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
        pmdp = pmd_alloc(mm, pudp, addr);
        if (!pmdp)
                goto abort;
-       if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
+       if (pmd_trans_huge(*pmdp))
                goto abort;
        if (pte_alloc(mm, pmdp))
                goto abort;
index b873b98ab7052ce7ba81fe8c6c3f82240b5f5ed4..88709c01177bac4c8b9e05e2f9e6b962a6ccef6b 100644 (file)
@@ -376,7 +376,7 @@ again:
                        goto next;
 
                _pmd = pmdp_get_lockless(pmd);
-               if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
+               if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd)) {
                        if ((next - addr != HPAGE_PMD_SIZE) ||
                            pgtable_split_needed(vma, cp_flags)) {
                                __split_huge_pmd(vma, pmd, addr, false);
index 7e93d3344828f89f8f318271dc31dadd0558b581..36585041c760d1369cc2fe7428c705490437b7c9 100644 (file)
@@ -820,7 +820,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc)
                new_pud = alloc_new_pud(mm, pmc->new_addr);
                if (!new_pud)
                        break;
-               if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+               if (pud_trans_huge(*old_pud)) {
                        if (extent == HPAGE_PUD_SIZE) {
                                move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
                                /* We ignore and continue on error? */
@@ -839,8 +839,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc)
                if (!new_pmd)
                        break;
 again:
-               if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
-                   pmd_devmap(*old_pmd)) {
+               if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE &&
                            move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
                                continue;
index e463c3be934ae3785cac9ef876648fe580be9ce6..e981a1a292d25f303621f3213c69322d2c9e2a59 100644 (file)
@@ -246,8 +246,7 @@ restart:
                 */
                pmde = pmdp_get_lockless(pvmw->pmd);
 
-               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
-                   (pmd_present(pmde) && pmd_devmap(pmde))) {
+               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
@@ -262,7 +261,7 @@ restart:
                                        return not_found(pvmw);
                                return true;
                        }
-                       if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
+                       if (likely(pmd_trans_huge(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
index a214a2b40ab9faf65db97898d1ea359acfdffc68..648038247a8d288be3cb789d085bd5a0efb3e7e3 100644 (file)
@@ -143,8 +143,7 @@ again:
                         * We are ONLY installing, so avoid unnecessarily
                         * splitting a present huge page.
                         */
-                       if (pmd_present(*pmd) &&
-                           (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+                       if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
                                continue;
                }
 
@@ -210,8 +209,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                         * We are ONLY installing, so avoid unnecessarily
                         * splitting a present huge page.
                         */
-                       if (pud_present(*pud) &&
-                           (pud_trans_huge(*pud) || pud_devmap(*pud)))
+                       if (pud_present(*pud) && pud_trans_huge(*pud))
                                continue;
                }
 
@@ -908,7 +906,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
                 * TODO: FW_MIGRATION support for PUD migration entries
                 * once there are relevant users.
                 */
-               if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
+               if (!pud_present(pud) || pud_special(pud)) {
                        spin_unlock(ptl);
                        goto not_found;
                } else if (!pud_leaf(pud)) {
index 5a882f2b10f903891018ac4fb44a3b084658cea5..567e2d084071e3de09b45a96fc83695a1c9f1b43 100644 (file)
@@ -139,8 +139,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
-                          !pmd_devmap(*pmdp));
+       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
@@ -153,7 +152,7 @@ pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
        pud_t pud;
 
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
-       VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
+       VM_BUG_ON(!pud_trans_huge(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
@@ -293,7 +292,7 @@ pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
                *pmdvalp = pmdval;
        if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
                goto nomap;
-       if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
+       if (unlikely(pmd_trans_huge(pmdval)))
                goto nomap;
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);
index dd2a25fafb829d7e6dee199bbff7afaf1533e68b..cbed91b0964013568b24ce12c4e04c5058c4f82f 100644 (file)
@@ -795,8 +795,8 @@ retry:
                 * (This includes the case where the PMD used to be THP and
                 * changed back to none after __pte_alloc().)
                 */
-               if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
-                            pmd_devmap(dst_pmdval))) {
+               if (unlikely(!pmd_present(dst_pmdval) ||
+                               pmd_trans_huge(dst_pmdval))) {
                        err = -EEXIST;
                        break;
                }
index 6698fadf5d0472f93c3a9522e699a28a18c9ef92..c86a2495138a83976e2041d6771541645b216d3e 100644 (file)
@@ -3450,9 +3450,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
        if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
                return -1;
 
-       if (WARN_ON_ONCE(pmd_devmap(pmd)))
-               return -1;
-
        if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
                return -1;