mm: remove unnecessary calls to lru_add_drain
author		Rik van Riel <riel@surriel.com>
		Thu, 19 Dec 2024 20:32:53 +0000 (15:32 -0500)
committer	Andrew Morton <akpm@linux-foundation.org>
		Sun, 26 Jan 2025 04:22:21 +0000 (20:22 -0800)
There seem to be several categories of calls to lru_add_drain and
lru_add_drain_all.

The first category is code paths that recently allocated, swapped in, or
otherwise processed a batch of pages, and want them all on the LRU.  These
drain pages that were recently allocated, probably on the local CPU.
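
As a sketch of that first pattern (the helper name below is made up for
illustration; folio_add_lru() and lru_add_drain() are declared in
<linux/swap.h>), a batch of freshly allocated folios is queued on the
local CPU's folio batch and then drained so all of them land on the LRU:

    /* Hypothetical helper, not part of this patch. */
    static void add_batch_to_lru(struct folio **folios, int nr)
    {
            int i;

            /* Each folio goes onto this CPU's per-CPU folio batch. */
            for (i = 0; i < nr; i++)
                    folio_add_lru(folios[i]);

            /* Flush the local batch so the folios reach the LRU proper. */
            lru_add_drain();
    }

The drain here is cheap and useful: the batch was filled moments ago on
this very CPU.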

A second category is code paths that are actively trying to reclaim,
migrate, or offline memory.  These often use lru_add_drain_all to drain
the caches on all CPUs.
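
For that second pattern, a condensed sketch (the helper is hypothetical;
lru_add_drain_all() and folio_isolate_lru() are the real interfaces)
might look like:

    /* Hypothetical helper, not part of this patch. */
    static bool grab_folio_for_migration(struct folio *folio,
                                         struct list_head *pagelist)
    {
            /*
             * A folio still sitting in some CPU's folio batch is not
             * on an LRU list yet and cannot be isolated, so flush the
             * batches on every CPU.  Real callers typically do this
             * once up front rather than per folio.
             */
            lru_add_drain_all();

            if (!folio_isolate_lru(folio))
                    return false;

            list_add_tail(&folio->lru, pagelist);
            return true;
    }

The all-CPU drain is justified here because the folio may last have been
touched on any CPU.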

However, there also seem to be some other callers that are doing
neither.  They call lru_add_drain() despite operating on pages that may
have been allocated long ago, quite possibly on different CPUs.

Those calls are not likely to be effective at anything but creating
contention on the LRU locks.
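
Condensed, the removed pattern looked like the sketch below (modeled on
unmap_region() in mm/vma.c; the function name and the mm_wr_locked
value are illustrative):

    static void teardown_region_sketch(struct vm_area_struct *vma,
                                       struct ma_state *mas)
    {
            struct mm_struct *mm = vma->vm_mm;
            struct mmu_gather tlb;

            lru_add_drain();        /* the call being removed */
            tlb_gather_mmu(&tlb, mm);
            update_hiwater_rss(mm);
            unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end,
                       vma->vm_end, /* mm_wr_locked = */ false);
            tlb_finish_mmu(&tlb);
    }

The pages being unmapped were faulted in at some arbitrary point in the
past, quite possibly on other CPUs, so the odds that any of them still
sit in this CPU's folio batch are low; the drain mostly just takes the
LRU locks for nothing.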

Remove the lru_add_drain calls in the latter category.

For detailed reasoning, see [1] and [2].

Link: https://lkml.kernel.org/r/dca2824e8e88e826c6b260a831d79089b5b9c79d.camel@surriel.com
Link: https://lkml.kernel.org/r/xxfhcjaq2xxcl5adastz5omkytenq7izo2e5f4q7e3ns4z6lko@odigjjc7hqrg
Link: https://lkml.kernel.org/r/20241219153253.3da9e8aa@fangorn
Signed-off-by: Rik van Riel <riel@surriel.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c
mm/mmap.c
mm/swap_state.c
mm/vma.c

index 1cee7517d21f764c2e5a267eb3a62d95086b0434..9defa853dbd2980d9065ff7582f7e0f075c68052 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2004,7 +2004,6 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        struct mmu_notifier_range range;
        struct mmu_gather tlb;
 
-       lru_add_drain();
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address, end);
        hugetlb_zap_begin(vma, &range.start, &range.end);
index aef835984b1c9e934dcfb36175af6eab4c04c701..3cc8de07411d2fb8fbacf8bcc3f9491c8d221611 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1336,7 +1336,6 @@ void exit_mmap(struct mm_struct *mm)
                goto destroy;
        }
 
-       lru_add_drain();
        flush_cache_mm(mm);
        tlb_gather_mmu_fullmm(&tlb, mm);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
@@ -1779,7 +1778,6 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
                                       vma, new_start, length, false, true))
                return -ENOMEM;
 
-       lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
        next = vma_next(&vmi);
        if (new_end > old_start) {
index e0c0321b8ff71c3ab00044e1ca505a6f001680d0..ca42b2be64d9069c4768946bd5ac71aeda5040c9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -317,7 +317,6 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
        struct folio_batch folios;
        unsigned int refs[PAGEVEC_SIZE];
 
-       lru_add_drain();
        folio_batch_init(&folios);
        for (int i = 0; i < nr; i++) {
                struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
index 6fa240e5b0c51474852068a702512ea6cf75a020..0caaeea899a96ce71f79748f6fa2d09bf69f582c 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -430,7 +430,6 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
 
-       lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
@@ -1132,7 +1131,6 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
         * were isolated before we downgraded mmap_lock.
         */
        mas_set(mas_detach, 1);
-       lru_add_drain();
        tlb_gather_mmu(&tlb, vms->vma->vm_mm);
        update_hiwater_rss(vms->vma->vm_mm);
        unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,