--- /dev/null
+From fefc075182275057ce607effaa3daa9e6e3bdc73 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 6 May 2025 16:32:07 +0300
+Subject: mm/page_alloc: fix race condition in unaccepted memory handling
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit fefc075182275057ce607effaa3daa9e6e3bdc73 upstream.
+
+The page allocator tracks the number of zones that have unaccepted memory
+using static_branch_inc/dec() and uses that static branch in hot paths to
+determine if it needs to deal with unaccepted memory.
+
+Borislav and Thomas pointed out that the tracking is racy: operations on
+static_branch are not serialized against adding/removing unaccepted pages
+to/from the zone.
+
+Sanity checks inside static_branch machinery detect it:
+
+WARNING: CPU: 0 PID: 10 at kernel/jump_label.c:276 __static_key_slow_dec_cpuslocked+0x8e/0xa0
+
+The comment around the WARN() explains the problem:
+
+ /*
+ * Warn about the '-1' case though; since that means a
+ * decrement is concurrent with a first (0->1) increment. IOW
+ * people are trying to disable something that wasn't yet fully
+ * enabled. This suggests an ordering problem on the user side.
+ */
+
+The effect of this static_branch optimization is only visible in
+microbenchmarks.
+
+Instead of adding more complexity around it, remove it altogether.
+
+Link: https://lkml.kernel.org/r/20250506133207.1009676-1-kirill.shutemov@linux.intel.com
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Fixes: dcdfdd40fa82 ("mm: Add support for unaccepted memory")
+Link: https://lore.kernel.org/all/20250506092445.GBaBnVXXyvnazly6iF@fat_crate.local
+Reported-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reported-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: <stable@vger.kernel.org> [6.5+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 27 ---------------------------
+ 1 file changed, 27 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -303,7 +303,6 @@ EXPORT_SYMBOL(nr_online_nodes);
+ static bool page_contains_unaccepted(struct page *page, unsigned int order);
+ static void accept_page(struct page *page, unsigned int order);
+ static bool cond_accept_memory(struct zone *zone, unsigned int order);
+-static inline bool has_unaccepted_memory(void);
+ static bool __free_unaccepted(struct page *page);
+
+ int page_group_by_mobility_disabled __read_mostly;
+@@ -6586,9 +6585,6 @@ bool has_managed_dma(void)
+
+ #ifdef CONFIG_UNACCEPTED_MEMORY
+
+-/* Counts number of zones with unaccepted pages. */
+-static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
+-
+ static bool lazy_accept = true;
+
+ static int __init accept_memory_parse(char *p)
+@@ -6624,7 +6620,6 @@ static bool try_to_accept_memory_one(str
+ {
+ unsigned long flags;
+ struct page *page;
+- bool last;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ page = list_first_entry_or_null(&zone->unaccepted_pages,
+@@ -6635,7 +6630,6 @@ static bool try_to_accept_memory_one(str
+ }
+
+ list_del(&page->lru);
+- last = list_empty(&zone->unaccepted_pages);
+
+ __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+@@ -6645,9 +6639,6 @@ static bool try_to_accept_memory_one(str
+
+ __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+
+- if (last)
+- static_branch_dec(&zones_with_unaccepted_pages);
+-
+ return true;
+ }
+
+@@ -6656,9 +6647,6 @@ static bool cond_accept_memory(struct zo
+ long to_accept, wmark;
+ bool ret = false;
+
+- if (!has_unaccepted_memory())
+- return false;
+-
+ if (list_empty(&zone->unaccepted_pages))
+ return false;
+
+@@ -6688,30 +6676,20 @@ static bool cond_accept_memory(struct zo
+ return ret;
+ }
+
+-static inline bool has_unaccepted_memory(void)
+-{
+- return static_branch_unlikely(&zones_with_unaccepted_pages);
+-}
+-
+ static bool __free_unaccepted(struct page *page)
+ {
+ struct zone *zone = page_zone(page);
+ unsigned long flags;
+- bool first = false;
+
+ if (!lazy_accept)
+ return false;
+
+ spin_lock_irqsave(&zone->lock, flags);
+- first = list_empty(&zone->unaccepted_pages);
+ list_add_tail(&page->lru, &zone->unaccepted_pages);
+ __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+- if (first)
+- static_branch_inc(&zones_with_unaccepted_pages);
+-
+ return true;
+ }
+
+@@ -6730,11 +6708,6 @@ static bool cond_accept_memory(struct zo
+ {
+ return false;
+ }
+-
+-static inline bool has_unaccepted_memory(void)
+-{
+- return false;
+-}
+
+ static bool __free_unaccepted(struct page *page)
+ {