mm: provide mm_struct and address to huge_ptep_get()
author     Christophe Leroy <christophe.leroy@csgroup.eu>
           Tue, 2 Jul 2024 13:51:20 +0000 (15:51 +0200)
committer  Andrew Morton <akpm@linux-foundation.org>
           Fri, 12 Jul 2024 22:52:15 +0000 (15:52 -0700)
On powerpc 8xx, huge_ptep_get() will need to know whether the given ptep is
a PTE entry or a PMD entry.  This cannot be determined from the entry
itself because there is no easy way to tell from its content.

So huge_ptep_get() will need to know either the size of the page or have
access to the pmd.

In order to be consistent with huge_ptep_get_and_clear(), give mm and
address to huge_ptep_get().
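
For illustration only (a sketch of the resulting interface change, not part
of the original commit message), the prototype goes from taking only the
ptep to also taking the mm and the address:

	/* before */
	pte_t huge_ptep_get(pte_t *ptep);

	/* after */
	pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

Callers forward the mm and address they already have at hand, e.g.
huge_ptep_get(vma->vm_mm, addr, ptep) or huge_ptep_get(walk->mm, addr, pte);
the generic fallback and the existing arch implementations can simply
ignore the new arguments.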

Link: https://lkml.kernel.org/r/cc00c70dd384298796a4e1b25d6c4eb306d3af85.1719928057.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
21 files changed:
arch/arm/include/asm/hugetlb-3level.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/mm/hugetlbpage.c
arch/riscv/include/asm/hugetlb.h
arch/riscv/mm/hugetlbpage.c
arch/s390/include/asm/hugetlb.h
arch/s390/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
fs/proc/task_mmu.c
fs/userfaultfd.c
include/asm-generic/hugetlb.h
include/linux/swapops.h
mm/damon/vaddr.c
mm/gup.c
mm/hmm.c
mm/hugetlb.c
mm/memory-failure.c
mm/mempolicy.c
mm/migrate.c
mm/mincore.c
mm/userfaultfd.c

index a30be550579399cbec2d02a51a0cb726b8fce044..87d48e2d90ad9cf37f87bfd5fc3663fea7118b78 100644
--- a/arch/arm/include/asm/hugetlb-3level.h
+++ b/arch/arm/include/asm/hugetlb-3level.h
 
 /*
  * If our huge pte is non-zero then mark the valid bit.
- * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * This allows pte_present(huge_ptep_get(mm,addr,ptep)) to return true for non-zero
  * ptes.
  * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
  */
 #define __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        pte_t retval = *ptep;
        if (pte_val(retval))
index 3954cbd2ff56bc56d086a1d502409bb2e148ebe8..293f880865e8d0a27b4251fe50fb0784240ebbcf 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -46,7 +46,7 @@ extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_GET
-extern pte_t huge_ptep_get(pte_t *ptep);
+extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
index 3f09ac73cce3b2213af39fcb83ad81e34b7a5985..5f1e2103888b76e50d589ffdfb70215db60f9a10 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -127,7 +127,7 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
        return contig_ptes;
 }
 
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        int ncontig, i;
        size_t pgsize;
index b1ce97a9dbfce7f7d24d5f31a6b07cb263476852..faf3624d80577c68cef1c3e5a6fd3b4ab615088b 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -44,7 +44,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               pte_t pte, int dirty);
 
 #define __HAVE_ARCH_HUGE_PTEP_GET
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 
 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
 #define arch_make_huge_pte arch_make_huge_pte
index 0ebd968b33c9979d5627ec54a506293428bc7708..42314f0939220a0fce9ed77fb7ce113ac163b997 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -3,7 +3,7 @@
 #include <linux/err.h>
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        unsigned long pte_num;
        int i;
index ce5f4fe8be4dce87b46918585d84f70a46046352..cf1b5d6fb1a6295076d148b1597dd31f0666aaf9 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -19,7 +19,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte, unsigned long sz);
 void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep);
 
@@ -64,7 +64,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
 {
-       int changed = !pte_same(huge_ptep_get(ptep), pte);
+       int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
        if (changed) {
                huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
                __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
index 2675aab4acc7008d830fdf6938554e8d00415374..1be481672f4ab46dc865ff771c6599a85a70beb0 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -169,7 +169,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
        __set_huge_pte_at(mm, addr, ptep, pte);
 }
 
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        return __rste_to_pte(pte_val(*ptep));
 }
@@ -177,7 +177,7 @@ pte_t huge_ptep_get(pte_t *ptep)
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
 {
-       pte_t pte = huge_ptep_get(ptep);
+       pte_t pte = huge_ptep_get(mm, addr, ptep);
        pmd_t *pmdp = (pmd_t *) ptep;
        pud_t *pudp = (pud_t *) ptep;
 
index ecad73a4f71350d7b6b4f634c3f048780933d4ca..a84832bd06c257256e8bcb2b541af604210a6a75 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -422,7 +422,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
        if (!ptep)
                return false;
 
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(vma->vm_mm, addr, ptep);
        if (huge_pte_none(pte) || !pte_present(pte))
                return false;
 
index 728693ed00e648d76e8d866b569fd9cf66f53792..775a2e8d600cde22b4d5e806f987c7da6eee861a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1013,7 +1013,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 {
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
-       pte_t ptent = huge_ptep_get(pte);
+       pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
        struct folio *folio = NULL;
        bool present = false;
 
@@ -1878,7 +1878,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;
 
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(walk->mm, addr, ptep);
        if (pte_present(pte)) {
                struct folio *folio = page_folio(pte_page(pte));
 
@@ -2567,7 +2567,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
        if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
                /* Go the short route when not write-protecting pages. */
 
-               pte = huge_ptep_get(ptep);
+               pte = huge_ptep_get(walk->mm, start, ptep);
                categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
 
                if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2579,7 +2579,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
        i_mmap_lock_write(vma->vm_file->f_mapping);
        ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
 
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(walk->mm, start, ptep);
        categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
 
        if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2975,7 +2975,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
-       pte_t huge_pte = huge_ptep_get(pte);
+       pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
        struct numa_maps *md;
        struct page *page;
 
index 17e409ceaa3366687d92080abe4a055e5d178482..27a3e9285fbf68a658dcc322880ce6dab0bcd3af 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -257,7 +257,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                goto out;
 
        ret = false;
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(vma->vm_mm, vmf->address, ptep);
 
        /*
         * Lockless access: we're in a wait_event so it's ok if it
index 6dcf4d576970c4b43ce8be9e0373bc39d10e3518..594d5905f6151281d17edd1e7cdd5b13deddc74c 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -144,7 +144,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #endif
 
 #ifndef __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        return ptep_get(ptep);
 }
index a5c560a2f8c25867e8e3e53588d551ea3b356feb..cb468e418ea11444ad0fe4be3b551787ba26947f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -334,7 +334,7 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
 #else  /* CONFIG_MIGRATION */
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {
@@ -359,7 +359,7 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address) { }
 static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
-                                       pte_t *pte) { }
+                                            unsigned long addr, pte_t *pte) { }
 static inline int is_writable_migration_entry(swp_entry_t entry)
 {
        return 0;
index 381559e4a1faba9e2e771d92303dc0503cd328fa..58829baf8b5d9efbfc059b28db9db8336dcb5017 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -339,7 +339,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
                                struct vm_area_struct *vma, unsigned long addr)
 {
        bool referenced = false;
-       pte_t entry = huge_ptep_get(pte);
+       pte_t entry = huge_ptep_get(mm, addr, pte);
        struct folio *folio = pfn_folio(pte_pfn(entry));
        unsigned long psize = huge_page_size(hstate_vma(vma));
 
@@ -373,7 +373,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
        pte_t entry;
 
        ptl = huge_pte_lock(h, walk->mm, pte);
-       entry = huge_ptep_get(pte);
+       entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;
 
@@ -509,7 +509,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
        pte_t entry;
 
        ptl = huge_pte_lock(h, walk->mm, pte);
-       entry = huge_ptep_get(pte);
+       entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;
 
index 85d45ec57f7c10dfa23a91edf4369cd14aecbcff..cdb81db7e366fef3d3be35a3eb9c2c3ca2fcbde5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -604,7 +604,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
        if (pte_end < end)
                end = pte_end;
 
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 
        if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                return 0;
index 93aebd9cc130ea4b71c94151db52739ba75d807b..7e0229ae4a5a6bfc06bc8fde06abf31f6198093c 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -480,7 +480,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
        pte_t entry;
 
        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
-       entry = huge_ptep_get(pte);
+       entry = huge_ptep_get(walk->mm, addr, pte);
 
        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
index f88ff03339456871b77779fb9c44f0a151f602fa..fd2050934b13351dec7379d7a03b1e378c5d7d55 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5287,7 +5287,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
        pte_t entry;
 
-       entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
+       entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
 }
@@ -5395,7 +5395,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-               entry = huge_ptep_get(src_pte);
+               entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
 again:
                if (huge_pte_none(entry)) {
                        /*
@@ -5433,7 +5433,7 @@ again:
                                set_huge_pte_at(dst, addr, dst_pte,
                                                make_pte_marker(marker), sz);
                } else {
-                       entry = huge_ptep_get(src_pte);
+                       entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
                        pte_folio = page_folio(pte_page(entry));
                        folio_get(pte_folio);
 
@@ -5474,7 +5474,7 @@ again:
                                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                                src_ptl = huge_pte_lockptr(h, src, src_pte);
                                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-                               entry = huge_ptep_get(src_pte);
+                               entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
                                if (!pte_same(src_pte_old, entry)) {
                                        restore_reserve_on_error(h, dst_vma, addr,
                                                                new_folio);
@@ -5584,7 +5584,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
                        new_addr |= last_addr_mask;
                        continue;
                }
-               if (huge_pte_none(huge_ptep_get(src_pte)))
+               if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
                        continue;
 
                if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
@@ -5657,7 +5657,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        continue;
                }
 
-               pte = huge_ptep_get(ptep);
+               pte = huge_ptep_get(mm, address, ptep);
                if (huge_pte_none(pte)) {
                        spin_unlock(ptl);
                        continue;
@@ -5906,7 +5906,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
        struct vm_area_struct *vma = vmf->vma;
        struct mm_struct *mm = vma->vm_mm;
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
-       pte_t pte = huge_ptep_get(vmf->pte);
+       pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
        struct hstate *h = hstate_vma(vma);
        struct folio *old_folio;
        struct folio *new_folio;
@@ -6027,7 +6027,7 @@ retry_avoidcopy:
                        vmf->pte = hugetlb_walk(vma, vmf->address,
                                        huge_page_size(h));
                        if (likely(vmf->pte &&
-                                  pte_same(huge_ptep_get(vmf->pte), pte)))
+                                  pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
                                goto retry_avoidcopy;
                        /*
                         * race occurs while re-acquiring page table
@@ -6065,7 +6065,7 @@ retry_avoidcopy:
         */
        spin_lock(vmf->ptl);
        vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
-       if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
+       if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
                pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
 
                /* Break COW or unshare */
@@ -6166,14 +6166,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
  * false if pte changed or is changing.
  */
-static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
+static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t old_pte)
 {
        spinlock_t *ptl;
        bool same;
 
        ptl = huge_pte_lock(h, mm, ptep);
-       same = pte_same(huge_ptep_get(ptep), old_pte);
+       same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
        spin_unlock(ptl);
 
        return same;
@@ -6234,7 +6234,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                         * never happen on the page after UFFDIO_COPY has
                         * correctly installed the page and returned.
                         */
-                       if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+                       if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
                                ret = 0;
                                goto out;
                        }
@@ -6263,7 +6263,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                         * here.  Before returning error, get ptl and make
                         * sure there really is no pte entry.
                         */
-                       if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
+                       if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
                                ret = vmf_error(PTR_ERR(folio));
                        else
                                ret = 0;
@@ -6312,7 +6312,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                        folio_unlock(folio);
                        folio_put(folio);
                        /* See comment in userfaultfd_missing() block above */
-                       if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+                       if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
                                ret = 0;
                                goto out;
                        }
@@ -6339,7 +6339,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
        vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
        ret = 0;
        /* If pte changed from under us, retry */
-       if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
+       if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
                goto backout;
 
        if (anon_rmap)
@@ -6460,7 +6460,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_OOM;
        }
 
-       vmf.orig_pte = huge_ptep_get(vmf.pte);
+       vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
        if (huge_pte_none_mostly(vmf.orig_pte)) {
                if (is_pte_marker(vmf.orig_pte)) {
                        pte_marker marker =
@@ -6501,7 +6501,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         * be released there.
                         */
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                       migration_entry_wait_huge(vma, vmf.pte);
+                       migration_entry_wait_huge(vma, vmf.address, vmf.pte);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
                        ret = VM_FAULT_HWPOISON_LARGE |
@@ -6534,11 +6534,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
 
        /* Check for a racing update before calling hugetlb_wp() */
-       if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
+       if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
                goto out_ptl;
 
        /* Handle userfault-wp first, before trying to lock more pages */
-       if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
+       if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
            (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
                if (!userfaultfd_wp_async(vma)) {
                        spin_unlock(vmf.ptl);
@@ -6666,7 +6666,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                ptl = huge_pte_lock(h, dst_mm, dst_pte);
 
                /* Don't overwrite any existing PTEs (even markers) */
-               if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+               if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
                        spin_unlock(ptl);
                        return -EEXIST;
                }
@@ -6802,7 +6802,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
         * page backing it, then access the page.
         */
        ret = -EEXIST;
-       if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
+       if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
                goto out_release_unlock;
 
        if (folio_in_pagecache)
@@ -6923,7 +6923,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                        address |= last_addr_mask;
                        continue;
                }
-               pte = huge_ptep_get(ptep);
+               pte = huge_ptep_get(mm, address, ptep);
                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
                        /* Nothing to do. */
                } else if (unlikely(is_hugetlb_entry_migration(pte))) {
index 0276cc299b03287d29d83ce79aeefedc7aebc870..16f8651436d5e71e82c4103c41777df94fe195a0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -835,7 +835,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
                            struct mm_walk *walk)
 {
        struct hwpoison_walk *hwp = walk->private;
-       pte_t pte = huge_ptep_get(ptep);
+       pte_t pte = huge_ptep_get(walk->mm, addr, ptep);
        struct hstate *h = hstate_vma(walk->vma);
 
        return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
index f73acb01ad45d077744fff063b6108acd2d6ef85..f8703feb68b7596319e4bce453af2f5ff113267a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -624,7 +624,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
        pte_t entry;
 
        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
-       entry = huge_ptep_get(pte);
+       entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry)) {
                if (unlikely(is_hugetlb_entry_migration(entry)))
                        qp->nr_failed++;
index abb3aa45bed97876ce6cbae950860808d98f9224..ff512c43fecb85b0ff84fb6be3f8263565ae0fb8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -338,14 +338,14 @@ out:
  *
  * This function will release the vma lock before returning.
  */
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
+void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
        pte_t pte;
 
        hugetlb_vma_assert_locked(vma);
        spin_lock(ptl);
-       pte = huge_ptep_get(ptep);
+       pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 
        if (unlikely(!is_hugetlb_entry_migration(pte))) {
                spin_unlock(ptl);
index e31cf1bde614122f9c474a878aac0e1d01b54ff0..d6bd19e520fcfb9389e6825eefd72d0b51ad2969 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
         * Hugepages under user process are always in RAM and never
         * swapped out, but theoretically it needs to be checked.
         */
-       present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+       present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
index 8dedaec0048634bc7deafe3402263c29d6d1b13d..e54e5c8907fa227936d790de8b057ca394a6d1a5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -587,7 +587,7 @@ retry:
                }
 
                if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
-                   !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
+                   !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
                        err = -EEXIST;
                        hugetlb_vma_unlock_read(dst_vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);