git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/hugetlb: add page_mask for hugetlb_follow_page_mask()
author: Peter Xu <peterx@redhat.com>
Wed, 28 Jun 2023 21:53:05 +0000 (17:53 -0400)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:03 +0000 (10:12 -0700)
follow_page() doesn't need it, but we'll start to need it when unifying
gup for hugetlb.

Link: https://lkml.kernel.org/r/20230628215310.73782-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A . Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/gup.c
mm/hugetlb.c

index ca3c8e10f24a0840774620533f43c121fca8ec20..9f282f370d964f1423cb0d8f4b1ff8b3079bf234 100644 (file)
@@ -131,7 +131,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-                               unsigned long address, unsigned int flags);
+                                     unsigned long address, unsigned int flags,
+                                     unsigned int *page_mask);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, unsigned long *, unsigned long *,
                         long, unsigned int, int *);
@@ -297,8 +298,9 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }
 
-static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-                               unsigned long address, unsigned int flags)
+static inline struct page *hugetlb_follow_page_mask(
+    struct vm_area_struct *vma, unsigned long address, unsigned int flags,
+    unsigned int *page_mask)
 {
        BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/
 }
index 9c62cfa7e486e09163108be9adabcb2ac53fbc1a..818d98b34decc7b81f0320237d4a0e02f8ed0971 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -824,7 +824,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
         * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
         */
        if (is_vm_hugetlb_page(vma))
-               return hugetlb_follow_page_mask(vma, address, flags);
+               return hugetlb_follow_page_mask(vma, address, flags,
+                                               &ctx->page_mask);
 
        pgd = pgd_offset(mm, address);
 
index cc87a51ce71a22596584fa1f51a93eb371e9600b..ab52214b5a75e97caaabe8278e8ae3725415f197 100644 (file)
@@ -6454,7 +6454,8 @@ static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
 }
 
 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-                               unsigned long address, unsigned int flags)
+                                     unsigned long address, unsigned int flags,
+                                     unsigned int *page_mask)
 {
        struct hstate *h = hstate_vma(vma);
        struct mm_struct *mm = vma->vm_mm;
@@ -6504,6 +6505,8 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
                        page = ERR_PTR(ret);
                        goto out;
                }
+
+               *page_mask = (1U << huge_page_order(h)) - 1;
        }
 out:
        spin_unlock(ptl);