git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: memory: rename pages_per_huge_page to nr_pages
author: Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 18 Jun 2024 09:12:42 +0000 (17:12 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 4 Jul 2024 02:30:20 +0000 (19:30 -0700)
Since the callers are converted to use nr_pages naming, use it inside too.

Link: https://lkml.kernel.org/r/20240618091242.2140164-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 8ebac8a70ab502df479274976a0666ab21221ae3..9e87932c30ec77f73180a442c77e090257c39b09 100644 (file)
@@ -6387,23 +6387,23 @@ EXPORT_SYMBOL(__might_fault);
  * cache lines hot.
  */
 static inline int process_huge_page(
-       unsigned long addr_hint, unsigned int pages_per_huge_page,
+       unsigned long addr_hint, unsigned int nr_pages,
        int (*process_subpage)(unsigned long addr, int idx, void *arg),
        void *arg)
 {
        int i, n, base, l, ret;
        unsigned long addr = addr_hint &
-               ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+               ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
 
        /* Process target subpage last to keep its cache lines hot */
        might_sleep();
        n = (addr_hint - addr) / PAGE_SIZE;
-       if (2 * n <= pages_per_huge_page) {
+       if (2 * n <= nr_pages) {
                /* If target subpage in first half of huge page */
                base = 0;
                l = n;
                /* Process subpages at the end of huge page */
-               for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
+               for (i = nr_pages - 1; i >= 2 * n; i--) {
                        cond_resched();
                        ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
                        if (ret)
@@ -6411,8 +6411,8 @@ static inline int process_huge_page(
                }
        } else {
                /* If target subpage in second half of huge page */
-               base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
-               l = pages_per_huge_page - n;
+               base = nr_pages - 2 * (nr_pages - n);
+               l = nr_pages - n;
                /* Process subpages at the begin of huge page */
                for (i = 0; i < base; i++) {
                        cond_resched();
@@ -6442,12 +6442,12 @@ static inline int process_huge_page(
 }
 
 static void clear_gigantic_page(struct folio *folio, unsigned long addr,
-                               unsigned int pages_per_huge_page)
+                               unsigned int nr_pages)
 {
        int i;
 
        might_sleep();
-       for (i = 0; i < pages_per_huge_page; i++) {
+       for (i = 0; i < nr_pages; i++) {
                cond_resched();
                clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
        }
@@ -6477,15 +6477,15 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-                                    unsigned long addr,
-                                    struct vm_area_struct *vma,
-                                    unsigned int pages_per_huge_page)
+                                  unsigned long addr,
+                                  struct vm_area_struct *vma,
+                                  unsigned int nr_pages)
 {
        int i;
        struct page *dst_page;
        struct page *src_page;
 
-       for (i = 0; i < pages_per_huge_page; i++) {
+       for (i = 0; i < nr_pages; i++) {
                dst_page = folio_page(dst, i);
                src_page = folio_page(src, i);