mm/khugepaged: convert alloc_charge_hpage() to use folios
author     Vishal Moola (Oracle) <vishal.moola@gmail.com>
           Fri, 20 Oct 2023 18:33:30 +0000 (11:33 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 25 Oct 2023 23:47:14 +0000 (16:47 -0700)

Also remove count_memcg_page_event now that its last caller no longer uses
it and rename hpage_collapse_alloc_page() to hpage_collapse_alloc_folio().

This removes 1 call to compound_head() and helps convert khugepaged to
use folios throughout.
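
For orientation, a minimal before/after sketch of the allocation path, condensed
from the mm/khugepaged.c hunks below (error handling omitted; that page_folio()
boils down to a compound_head() lookup is background knowledge, not something
this diff itself shows):

        /* Before: allocate a page, then derive its folio when charging it. */
        struct page *page = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
        struct folio *folio = page_folio(page);         /* compound_head() here */
        count_memcg_page_event(page, THP_COLLAPSE_ALLOC);

        /* After: allocate the folio directly; hand the head page back at the end. */
        struct folio *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
        count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
        *hpage = folio_page(folio, 0);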

Link: https://lkml.kernel.org/r/20231020183331.10770-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/khugepaged.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b3d2e3e60eed3e91d4590ae57dd24676ad46892c..7bdcf3020d7a3a987672e45141a03547282b508d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1087,15 +1087,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
        local_irq_restore(flags);
 }
 
-static inline void count_memcg_page_event(struct page *page,
-                                         enum vm_event_item idx)
-{
-       struct mem_cgroup *memcg = page_memcg(page);
-
-       if (memcg)
-               count_memcg_events(memcg, idx, 1);
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
 {
@@ -1598,11 +1589,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 {
 }
 
-static inline void count_memcg_page_event(struct page *page,
-                                         int idx)
-{
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
 {
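
The surviving helper, count_memcg_folio_events(), is only shown down to its
opening brace by the hunk context above. Its CONFIG_MEMCG body (paraphrased
from memcontrol.h of this era; see the file for the authoritative version) is
the folio counterpart of the helper being deleted, with an explicit event count:

static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        if (memcg)
                count_memcg_events(memcg, idx, nr);
}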
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9efd8ff68f063a84d749ad534ca79e1bd45ac3cc..6a7184cd291bdd23412a5de3ff4f281e6e388b28 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
                                      nodemask_t *nmask)
 {
-       *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
-       if (unlikely(!*hpage)) {
+       *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+       if (unlikely(!*folio)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return false;
        }
 
-       folio_prep_large_rmappable((struct folio *)*hpage);
        count_vm_event(THP_COLLAPSE_ALLOC);
        return true;
 }
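
The one non-obvious part of this hunk is that the explicit
folio_prep_large_rmappable() call goes away with no direct replacement. A rough
sketch of the allocator side that makes this safe, paraphrased from the baseline
tree's mm/page_alloc.c rather than taken from this diff:

/* Sketch only: __folio_alloc() forces a compound allocation, returns it as a
 * folio, and performs the large-rmappable prep itself for order > 1, so the
 * caller no longer casts the page or preps the folio by hand. */
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
                nodemask_t *nodemask)
{
        struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
                        preferred_nid, nodemask);
        struct folio *folio = (struct folio *)page;

        if (folio && order > 1)
                folio_prep_large_rmappable(folio);
        return folio;
}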
@@ -1064,17 +1064,20 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
        int node = hpage_collapse_find_target_node(cc);
        struct folio *folio;
 
-       if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+       if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
+               *hpage = NULL;
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
+       }
 
-       folio = page_folio(*hpage);
        if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                folio_put(folio);
                *hpage = NULL;
                return SCAN_CGROUP_CHARGE_FAIL;
        }
-       count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
 
+       count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+
+       *hpage = folio_page(folio, 0);
        return SCAN_SUCCEED;
 }
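
Pieced together from the hunk above, alloc_charge_hpage() now reads roughly as
follows. The gfp mask selection at the top of the function sits outside the
hunk and is untouched by this patch, so it is only stubbed here, and the second
parameter line (not shown by the hunk header) is inferred from the cc-> usages:

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
                              struct collapse_control *cc)
{
        gfp_t gfp;      /* set up by the mask-selection code above the hunk */
        int node = hpage_collapse_find_target_node(cc);
        struct folio *folio;

        if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
                *hpage = NULL;
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        }

        if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                folio_put(folio);
                *hpage = NULL;
                return SCAN_CGROUP_CHARGE_FAIL;
        }

        count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

        *hpage = folio_page(folio, 0);
        return SCAN_SUCCEED;
}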