mm/page_alloc: avoid second trylock of zone->lock
author	Alexei Starovoitov <ast@kernel.org>
	Mon, 31 Mar 2025 00:28:09 +0000 (17:28 -0700)
committer	Andrew Morton <akpm@linux-foundation.org>
	Sat, 12 Apr 2025 00:32:36 +0000 (17:32 -0700)
spin_trylock followed by spin_lock causes an extra write cache access.
If the lock is contended it may cause unnecessary cache line bouncing,
and it also executes a redundant irq restore/save pair.  Therefore, check
alloc/fpi_flags first and use either spin_trylock or spin_lock.

Link: https://lkml.kernel.org/r/20250331002809.94758-1-alexei.starovoitov@gmail.com
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Martin KaFai Lau <martin.lau@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
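
For illustration only, here is a minimal user-space sketch of the
flag-first locking pattern the patch adopts.  The pthread spinlock, the
ALLOC_TRYLOCK define, and do_locked_work() are hypothetical stand-ins,
not the kernel API; the real code branches on alloc/fpi_flags and uses
spin_trylock_irqsave()/spin_lock_irqsave() on zone->lock as shown in the
diff below.

/*
 * Sketch: check the caller's flag first so exactly one lock primitive
 * runs per call, instead of trylock followed by a fallback lock.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>

#define ALLOC_TRYLOCK	0x1	/* hypothetical opportunistic-caller flag */

static pthread_spinlock_t lock;

static bool do_locked_work(unsigned int flags)
{
	if (flags & ALLOC_TRYLOCK) {
		/* Opportunistic caller: give up if the lock is contended. */
		if (pthread_spin_trylock(&lock))
			return false;
	} else {
		/* Ordinary caller: take the lock unconditionally. */
		pthread_spin_lock(&lock);
	}

	/* ... critical section ... */

	pthread_spin_unlock(&lock);
	return true;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	do_locked_work(0);		/* always takes the lock */
	do_locked_work(ALLOC_TRYLOCK);	/* locks only if uncontended */
	pthread_spin_destroy(&lock);
	return 0;
}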
mm/page_alloc.c

index fd6b865cb1abfbd3d2ebd67cdaa5f86d92a62e14..9a219fe8e130bce1ac1a20d9da1ca7623d652923 100644
@@ -1400,11 +1400,12 @@ static void free_one_page(struct zone *zone, struct page *page,
        struct llist_head *llhead;
        unsigned long flags;
 
-       if (!spin_trylock_irqsave(&zone->lock, flags)) {
-               if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+       if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+               if (!spin_trylock_irqsave(&zone->lock, flags)) {
                        add_page_to_zone_llist(zone, page, order);
                        return;
                }
+       } else {
                spin_lock_irqsave(&zone->lock, flags);
        }
 
@@ -2314,9 +2315,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
        unsigned long flags;
        int i;
 
-       if (!spin_trylock_irqsave(&zone->lock, flags)) {
-               if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+       if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+               if (!spin_trylock_irqsave(&zone->lock, flags))
                        return 0;
+       } else {
                spin_lock_irqsave(&zone->lock, flags);
        }
        for (i = 0; i < count; ++i) {
@@ -2937,9 +2939,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
        do {
                page = NULL;
-               if (!spin_trylock_irqsave(&zone->lock, flags)) {
-                       if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+               if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+                       if (!spin_trylock_irqsave(&zone->lock, flags))
                                return NULL;
+               } else {
                        spin_lock_irqsave(&zone->lock, flags);
                }
                if (alloc_flags & ALLOC_HIGHATOMIC)