git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/page_alloc: remove pcpu_spin_* wrappers
authorVlastimil Babka <vbabka@kernel.org>
Fri, 27 Feb 2026 17:08:00 +0000 (18:08 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:12 +0000 (13:53 -0700)
We only ever use pcpu_spin_trylock()/unlock() with struct per_cpu_pages, so
refactor the helpers to remove the generic layer.

No functional change intended.

Link: https://lkml.kernel.org/r/20260227-b4-pcp-locking-cleanup-v1-3-f7e22e603447@kernel.org
Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index be367516c59b7fe2782ec5e7c666c4ad57400022..f11f38ba2e1240eaee1372d14c213f81d599f946 100644 (file)
@@ -111,35 +111,29 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
 #endif
 
 /*
- * Generic helper to lookup and a per-cpu variable with an embedded spinlock.
- * Return value should be used with equivalent unlock helper.
+ * A helper to lookup and trylock pcp with embedded spinlock.
+ * The return value should be used with the unlock helper.
+ * NULL return value means the trylock failed.
  */
-#define pcpu_spin_trylock(type, member, ptr)                           \
+#ifdef CONFIG_SMP
+#define pcp_spin_trylock(ptr)                                          \
 ({                                                                     \
-       type *_ret;                                                     \
+       struct per_cpu_pages *_ret;                                     \
        pcpu_task_pin();                                                \
        _ret = this_cpu_ptr(ptr);                                       \
-       if (!spin_trylock(&_ret->member)) {                             \
+       if (!spin_trylock(&_ret->lock)) {                               \
                pcpu_task_unpin();                                      \
                _ret = NULL;                                            \
        }                                                               \
        _ret;                                                           \
 })
 
-#define pcpu_spin_unlock(member, ptr)                                  \
+#define pcp_spin_unlock(ptr)                                           \
 ({                                                                     \
-       spin_unlock(&ptr->member);                                      \
+       spin_unlock(&ptr->lock);                                        \
        pcpu_task_unpin();                                              \
 })
 
-/* struct per_cpu_pages specific helpers.*/
-#ifdef CONFIG_SMP
-#define pcp_spin_trylock(ptr)                                          \
-               pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
-
-#define pcp_spin_unlock(ptr)                                           \
-               pcpu_spin_unlock(lock, ptr)
-
 /*
  * On CONFIG_SMP=n the UP implementation of spin_trylock() never fails and thus
  * is not compatible with our locking scheme. However we do not need pcp for