khugepaged: inline hpage_collapse_alloc_folio()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Wed, 3 Apr 2024 17:18:30 +0000 (18:18 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 26 Apr 2024 03:56:32 +0000 (20:56 -0700)
Patch series "khugepaged folio conversions".

We've been kind of hacking piecemeal at converting khugepaged to use
folios instead of compound pages, and so this patchset is a little larger
than it should be as I undo some of our wrong moves in the past.  In
particular, collapse_file() now consistently uses 'new_folio' for the
freshly allocated folio and 'folio' for the one that's currently in use.
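As a rough illustration of that naming convention (this is a hypothetical sketch, not actual collapse_file() code; the identifiers gfp, node, nmask, mapping and index are assumed from context):

	/*
	 * Hypothetical sketch only: 'new_folio' names the freshly
	 * allocated destination folio, while 'folio' names whatever
	 * currently backs the file offset being collapsed.
	 */
	struct folio *new_folio, *folio;

	/* destination: a PMD-sized folio we are collapsing into */
	new_folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

	/* source: the folio already present in the page cache */
	folio = filemap_lock_folio(mapping, index);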

This patch (of 7):

This function has one caller, and the combined function is simpler to
read, reason about and modify.

Link: https://lkml.kernel.org/r/20240403171838.1445826-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240403171838.1445826-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 38830174608fba663ed416ad4e2661242e484c58..ad16dd8b26a8aef3d7ef681af92e78a98f3b38da 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -891,20 +891,6 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
-                                     nodemask_t *nmask)
-{
-       *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
-
-       if (unlikely(!*folio)) {
-               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-               return false;
-       }
-
-       count_vm_event(THP_COLLAPSE_ALLOC);
-       return true;
-}
-
 /*
  * If mmap_lock temporarily dropped, revalidate vma
  * before taking mmap_lock.
@@ -1067,11 +1053,14 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
        int node = hpage_collapse_find_target_node(cc);
        struct folio *folio;
 
-       if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
+       folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+       if (!folio) {
                *hpage = NULL;
+               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        }
 
+       count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                folio_put(folio);
                *hpage = NULL;
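
For reference, a sketch of how the allocation-and-charge path of alloc_charge_hpage() reads after this patch, reconstructed from the hunks above; the gfp selection at the top and the code past the charge-failure branch are assumed from surrounding context rather than shown in this diff:

	static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
				      struct collapse_control *cc)
	{
		/* gfp selection assumed from context; not part of this hunk */
		gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
			     GFP_TRANSHUGE);
		int node = hpage_collapse_find_target_node(cc);
		struct folio *folio;

		/* Allocation and THP vm-event accounting now sit inline here,
		 * absorbed from the removed hpage_collapse_alloc_folio(). */
		folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
		if (!folio) {
			*hpage = NULL;
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			return SCAN_ALLOC_HUGE_PAGE_FAIL;
		}

		count_vm_event(THP_COLLAPSE_ALLOC);
		if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
			folio_put(folio);
			*hpage = NULL;
			return SCAN_CGROUP_CHARGE_FAIL;
		}

		/* success path elided; it is unchanged by this patch */
		...
	}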