mm: kill mm_wr_locked from unmap_vmas() and unmap_single_vma()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 4 Nov 2025 08:57:09 +0000 (16:57 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 20 Nov 2025 21:43:57 +0000 (13:43 -0800)
Kill mm_wr_locked since commit f8e97613fed2 ("mm: convert VM_PFNMAP
tracking to pfnmap_track() + pfnmap_untrack()") removed its last user.
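
For reference, the declaration reduces to the following after this
patch (as updated in include/linux/mm.h below):

    void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                    struct vm_area_struct *start_vma, unsigned long start,
                    unsigned long end, unsigned long tree_end);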

Link: https://lkml.kernel.org/r/20251104085709.2688433-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/memory.c
mm/mmap.c
mm/vma.c
tools/testing/vma/vma_internal.h

index b636d12bb6519876dbc7711dbaa4da22e19be3b8..df9f258a017cff720d9dc5079e110905b522028e 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2480,7 +2480,7 @@ static inline void zap_vma_pages(struct vm_area_struct *vma)
 }
 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                struct vm_area_struct *start_vma, unsigned long start,
-               unsigned long end, unsigned long tree_end, bool mm_wr_locked);
+               unsigned long end, unsigned long tree_end);
 
 struct mmu_notifier_range;
 
index 8d8c36adafa865f0dd09bde834275dc9b9844aea..b09de6274da3146f6208e168123eb110cc9e5e1b 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2023,8 +2023,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr,
-               struct zap_details *details, bool mm_wr_locked)
+               unsigned long end_addr, struct zap_details *details)
 {
        unsigned long start = max(vma->vm_start, start_addr);
        unsigned long end;
@@ -2070,7 +2069,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
  * @tree_end: The maximum index to check
- * @mm_wr_locked: lock flag
  *
  * Unmap all pages in the vma list.
  *
@@ -2085,8 +2083,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long tree_end,
-               bool mm_wr_locked)
+               unsigned long end_addr, unsigned long tree_end)
 {
        struct mmu_notifier_range range;
        struct zap_details details = {
@@ -2102,8 +2099,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                unsigned long start = start_addr;
                unsigned long end = end_addr;
                hugetlb_zap_begin(vma, &start, &end);
-               unmap_single_vma(tlb, vma, start, end, &details,
-                                mm_wr_locked);
+               unmap_single_vma(tlb, vma, start, end, &details);
                hugetlb_zap_end(vma, &details);
                vma = mas_find(mas, tree_end - 1);
        } while (vma && likely(!xa_is_zero(vma)));
@@ -2139,7 +2135,7 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
         * unmap 'address-end' not 'range.start-range.end' as range
         * could have been expanded for hugetlb pmd sharing.
         */
-       unmap_single_vma(tlb, vma, address, end, details, false);
+       unmap_single_vma(tlb, vma, address, end, details);
        mmu_notifier_invalidate_range_end(&range);
        if (is_vm_hugetlb_page(vma)) {
                /*
index 644f02071a41b1ea24e69cf2ea4079dabc7267b7..4f51ca644903494475d6ac03f00bd05677dd8ded 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1274,7 +1274,7 @@ void exit_mmap(struct mm_struct *mm)
        tlb_gather_mmu_fullmm(&tlb, mm);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
-       unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
+       unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX);
        mmap_read_unlock(mm);
 
        /*
index 919d1fc63a5232a63bceb1642271dce47e6bbe62..0c5e391fe2e2f76fc559f11ae539cb2970506efa 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -483,8 +483,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 
        tlb_gather_mmu(&tlb, mm);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
-                  /* mm_wr_locked = */ true);
+       unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end);
        mas_set(mas, vma->vm_end);
        free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                      next ? next->vm_start : USER_PGTABLES_CEILING,
@@ -1228,7 +1227,7 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
        tlb_gather_mmu(&tlb, vms->vma->vm_mm);
        update_hiwater_rss(vms->vma->vm_mm);
        unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
-                  vms->vma_count, mm_wr_locked);
+                  vms->vma_count);
 
        mas_set(mas_detach, 1);
        /* start and end may be different if there is no prev or next vma. */
index d873667704e8f858a8af4ede7f0e372729f6053d..c68d382dac81ff9b9ceaca3c867875efa249fb9a 100644 (file)
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -848,8 +848,7 @@ static inline void update_hiwater_vm(struct mm_struct *mm)
 
 static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                      struct vm_area_struct *vma, unsigned long start_addr,
-                     unsigned long end_addr, unsigned long tree_end,
-                     bool mm_wr_locked)
+                     unsigned long end_addr, unsigned long tree_end)
 {
 }