From d531fd2ccf6b5ad95b718b5748e086f8d4aacf56 Mon Sep 17 00:00:00 2001
From: Oscar Salvador
Date: Mon, 30 Jun 2025 16:42:10 +0200
Subject: [PATCH] mm,hugetlb: rename anon_rmap to new_anon_folio and make it
 boolean

anon_rmap is used to determine whether the newly allocated folio is
anonymous.  Rename it to something more meaningful, like new_anon_folio,
and make it boolean, since that is how it is used.

While we are at it, drop 'new_pagecache_folio', as 'new_anon_folio' is
enough to check whether we need to restore the consumed reservation.

Link: https://lkml.kernel.org/r/20250627102904.107202-4-osalvador@suse.de
Link: https://lkml.kernel.org/r/20250630144212.156938-4-osalvador@suse.de
Signed-off-by: Oscar Salvador
Acked-by: David Hildenbrand
Cc: Gavin Guo
Cc: Muchun Song
Cc: Peter Xu
Signed-off-by: Andrew Morton
---
 mm/hugetlb.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 68a260e4f4c77..3e5cefd5e1d90 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6406,17 +6406,16 @@ static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned
 static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 			struct vm_fault *vmf)
 {
+	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
+	bool new_folio, new_anon_folio = false;
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	struct hstate *h = hstate_vma(vma);
 	vm_fault_t ret = VM_FAULT_SIGBUS;
-	int anon_rmap = 0;
-	unsigned long size;
+	bool folio_locked = true;
 	struct folio *folio;
+	unsigned long size;
 	pte_t new_pte;
-	bool new_folio, new_pagecache_folio = false;
-	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
-	bool folio_locked = true;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -6515,10 +6514,9 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 				ret = VM_FAULT_SIGBUS;
 				goto out;
 			}
-			new_pagecache_folio = true;
 		} else {
+			new_anon_folio = true;
 			folio_lock(folio);
-			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -6567,7 +6565,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
 		goto backout;
 
-	if (anon_rmap)
+	if (new_anon_folio)
 		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
 	else
 		hugetlb_add_file_rmap(folio);
@@ -6586,7 +6584,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	 * No need to keep file folios locked. See comment in
 	 * hugetlb_fault().
 	 */
-	if (!anon_rmap) {
+	if (!new_anon_folio) {
 		folio_locked = false;
 		folio_unlock(folio);
 	}
@@ -6622,7 +6620,8 @@ out:
 backout:
 	spin_unlock(vmf->ptl);
 backout_unlocked:
-	if (new_folio && !new_pagecache_folio)
+	/* We only need to restore reservations for private mappings */
+	if (new_anon_folio)
 		restore_reserve_on_error(h, vma, vmf->address, folio);
 
 	folio_unlock(folio);
-- 
2.47.2