From: Alexei Starovoitov
Date: Mon, 31 Mar 2025 00:28:09 +0000 (-0700)
Subject: mm/page_alloc: avoid second trylock of zone->lock
X-Git-Tag: v6.15-rc3~43^2~23

mm/page_alloc: avoid second trylock of zone->lock

spin_trylock followed by spin_lock will cause extra write cache access.
If the lock is contended, it may cause unnecessary cache line bouncing
and will execute a redundant irq restore/save pair. Therefore, check
alloc/fpi_flags first and use either spin_trylock or spin_lock.

Link: https://lkml.kernel.org/r/20250331002809.94758-1-alexei.starovoitov@gmail.com
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Signed-off-by: Alexei Starovoitov
Suggested-by: Linus Torvalds
Reviewed-by: Sebastian Andrzej Siewior
Acked-by: Michal Hocko
Acked-by: Vlastimil Babka
Reviewed-by: Harry Yoo
Reviewed-by: Shakeel Butt
Cc: Andrii Nakryiko
Cc: Daniel Borkmann
Cc: Martin KaFai Lau
Cc: Michal Hocko
Cc: Peter Zijlstra
Cc: Steven Rostedt
Signed-off-by: Andrew Morton
---

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd6b865cb1abf..9a219fe8e130b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1400,11 +1400,12 @@ static void free_one_page(struct zone *zone, struct page *page,
 	struct llist_head *llhead;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags)) {
 			add_page_to_zone_llist(zone, page, order);
 			return;
 		}
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 
@@ -2314,9 +2315,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	unsigned long flags;
 	int i;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags))
 			return 0;
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 	for (i = 0; i < count; ++i) {
@@ -2937,9 +2939,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
 	do {
 		page = NULL;
-		if (!spin_trylock_irqsave(&zone->lock, flags)) {
-			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+			if (!spin_trylock_irqsave(&zone->lock, flags))
 				return NULL;
+		} else {
 			spin_lock_irqsave(&zone->lock, flags);
 		}
 		if (alloc_flags & ALLOC_HIGHATOMIC)
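
[Editor's illustration, not part of the commit: a minimal userspace sketch
of the before/after locking pattern, using pthread spinlocks in place of
the kernel's spin_*_irqsave API. TRYLOCK_FLAG, acquire_old(), and
acquire_new() are made-up names standing in for FPI_TRYLOCK/ALLOC_TRYLOCK
and the surrounding kernel code; the irq save/restore half of the cost is
not modeled here.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TRYLOCK_FLAG 0x1	/* stand-in for FPI_TRYLOCK/ALLOC_TRYLOCK */

static pthread_spinlock_t lock;

/*
 * Old pattern: every caller trylocks first and falls back to a
 * blocking lock. On contention the non-trylock caller performs two
 * atomic operations on the lock word (extra write cache access, and
 * in the kernel a redundant irq restore/save pair in between).
 */
static bool acquire_old(unsigned int flags)
{
	if (pthread_spin_trylock(&lock)) {	/* non-zero => lock busy */
		if (flags & TRYLOCK_FLAG)
			return false;		/* opportunistic caller bails out */
		pthread_spin_lock(&lock);	/* second access to the lock word */
	}
	return true;
}

/*
 * New pattern: branch on the flag first, so each caller performs
 * exactly one lock operation on the lock word.
 */
static bool acquire_new(unsigned int flags)
{
	if (flags & TRYLOCK_FLAG) {
		if (pthread_spin_trylock(&lock))
			return false;
	} else {
		pthread_spin_lock(&lock);
	}
	return true;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	if (acquire_new(0)) {
		puts("locked via blocking path");
		pthread_spin_unlock(&lock);
	}
	if (acquire_new(TRYLOCK_FLAG)) {
		puts("locked via trylock path");
		pthread_spin_unlock(&lock);
	}

	pthread_spin_destroy(&lock);
	return 0;
}

Built with "gcc -pthread", both calls in main() succeed; the point of the
sketch is only that acquire_new() touches the lock word once per
acquisition, where acquire_old() can touch it twice on the contended
blocking path.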