--- /dev/null
+From 93cf4e537ed0c5bd9ba6cbdb2c33864547c1442f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alexis=20Lothor=C3=A9=20=28eBPF=20Foundation=29?=
+ <alexis.lothore@bootlin.com>
+Date: Thu, 27 Feb 2025 15:08:23 +0100
+Subject: bpf/selftests: test_select_reuseport_kern: Remove unused header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
+
+commit 93cf4e537ed0c5bd9ba6cbdb2c33864547c1442f upstream.
+
+test_select_reuseport_kern.c is currently including <stdlib.h>, but it
+does not use any definition from there.
+
+Remove stdlib.h inclusion from test_select_reuseport_kern.c
+
+Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20250227-remove_wrong_header-v1-1-bc94eb4e2f73@bootlin.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+[shung-hsi.yu: Fix compilation error mentioned in footer of Alexis'
+patch with newer glibc header:
+
+ [...]
+ CLNG-BPF [test_progs-cpuv4] test_select_reuseport_kern.bpf.o
+ In file included from progs/test_select_reuseport_kern.c:4:
+ /usr/include/bits/floatn.h:83:52: error: unsupported machine mode
+ '__TC__'
+ 83 | typedef _Complex float __cfloat128 __attribute__ ((__mode__
+ (__TC__)));
+ | ^
+ /usr/include/bits/floatn.h:97:9: error: __float128 is not supported on
+ this target
+ 97 | typedef __float128 _Float128;
+
+I'm not certain when the problem starts to occur, but I'm quite sure
+test_select_reuseport_kern.c was not meant to be using the C standard
+library in the first place.]
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright (c) 2018 Facebook */
+
+-#include <stdlib.h>
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
--- /dev/null
+From stable+bounces-213376-greg=kroah.com@vger.kernel.org Wed Feb 4 13:46:38 2026
+From: Wentao Guan <guanwentao@uniontech.com>
+Date: Wed, 4 Feb 2026 20:43:45 +0800
+Subject: drm/amd/display: use udelay rather than fsleep
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, sashal@kernel.org, Alex Deucher <alexander.deucher@amd.com>, Wen Chen <Wen.Chen3@amd.com>, Fangzhi Zuo <jerry.zuo@amd.com>, Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>, Harry Wentland <harry.wentland@amd.com>, Wentao Guan <guanwentao@uniontech.com>
+Message-ID: <20260204124345.1299227-1-guanwentao@uniontech.com>
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 27e4dc2c0543fd1808cc52bd888ee1e0533c4a2e upstream.
+
+This function can be called from an atomic context so we can't use
+fsleep().
+
+Fixes: 01f60348d8fb ("drm/amd/display: Fix 'failed to blank crtc!'")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4549
+Cc: Wen Chen <Wen.Chen3@amd.com>
+Cc: Fangzhi Zuo <jerry.zuo@amd.com>
+Cc: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ Backport: adjusted for a changed file path ]
+Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -781,7 +781,7 @@ enum dc_status dcn20_enable_stream_timin
+ return DC_ERROR_UNEXPECTED;
+ }
+
+- fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
++ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+
+ params.vertical_total_min = stream->adjust.v_total_min;
+ params.vertical_total_max = stream->adjust.v_total_max;
--- /dev/null
+From stable+bounces-213375-greg=kroah.com@vger.kernel.org Wed Feb 4 13:02:21 2026
+From: alvalan9@foxmail.com
+Date: Wed, 4 Feb 2026 11:55:59 +0000
+Subject: ptr_ring: do not block hard interrupts in ptr_ring_resize_multiple()
+To: stable@vger.kernel.org
+Cc: Eric Dumazet <edumazet@google.com>, syzbot+f56a5c5eac2b28439810@syzkaller.appspotmail.com, "Michael S . Tsirkin" <mst@redhat.com>, Jason Wang <jasowang@redhat.com>, Jakub Kicinski <kuba@kernel.org>, Alva Lan <alvalan9@foxmail.com>
+Message-ID: <tencent_C601CC6B4333AD570DF747A9A49AB5058E06@qq.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a126061c80d5efb4baef4bcf346094139cd81df6 ]
+
+Jakub added a lockdep_assert_no_hardirq() check in __page_pool_put_page()
+to increase test coverage.
+
+syzbot found a splat caused by hard irq blocking in
+ptr_ring_resize_multiple() [1]
+
+As current users of ptr_ring_resize_multiple() do not require
+hard irqs being masked, replace it to only block BH.
+
+Rename helpers to better reflect they are safe against BH only.
+
+- ptr_ring_resize_multiple() to ptr_ring_resize_multiple_bh()
+- skb_array_resize_multiple() to skb_array_resize_multiple_bh()
+
+[1]
+
+WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 __page_pool_put_page net/core/page_pool.c:709 [inline]
+WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
+Modules linked in:
+CPU: 1 UID: 0 PID: 9150 Comm: syz.1.1052 Not tainted 6.11.0-rc3-syzkaller-00202-gf8669d7b5f5d #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/06/2024
+RIP: 0010:__page_pool_put_page net/core/page_pool.c:709 [inline]
+RIP: 0010:page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
+Code: 74 0e e8 7c aa fb f7 eb 43 e8 75 aa fb f7 eb 3c 65 8b 1d 38 a8 6a 76 31 ff 89 de e8 a3 ae fb f7 85 db 74 0b e8 5a aa fb f7 90 <0f> 0b 90 eb 1d 65 8b 1d 15 a8 6a 76 31 ff 89 de e8 84 ae fb f7 85
+RSP: 0018:ffffc9000bda6b58 EFLAGS: 00010083
+RAX: ffffffff8997e523 RBX: 0000000000000000 RCX: 0000000000040000
+RDX: ffffc9000fbd0000 RSI: 0000000000001842 RDI: 0000000000001843
+RBP: 0000000000000000 R08: ffffffff8997df2c R09: 1ffffd40003a000d
+R10: dffffc0000000000 R11: fffff940003a000e R12: ffffea0001d00040
+R13: ffff88802e8a4000 R14: dffffc0000000000 R15: 00000000ffffffff
+FS: 00007fb7aaf716c0(0000) GS:ffff8880b9300000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fa15a0d4b72 CR3: 00000000561b0000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ tun_ptr_free drivers/net/tun.c:617 [inline]
+ __ptr_ring_swap_queue include/linux/ptr_ring.h:571 [inline]
+ ptr_ring_resize_multiple_noprof include/linux/ptr_ring.h:643 [inline]
+ tun_queue_resize drivers/net/tun.c:3694 [inline]
+ tun_device_event+0xaaf/0x1080 drivers/net/tun.c:3714
+ notifier_call_chain+0x19f/0x3e0 kernel/notifier.c:93
+ call_netdevice_notifiers_extack net/core/dev.c:2032 [inline]
+ call_netdevice_notifiers net/core/dev.c:2046 [inline]
+ dev_change_tx_queue_len+0x158/0x2a0 net/core/dev.c:9024
+ do_setlink+0xff6/0x41f0 net/core/rtnetlink.c:2923
+ rtnl_setlink+0x40d/0x5a0 net/core/rtnetlink.c:3201
+ rtnetlink_rcv_msg+0x73f/0xcf0 net/core/rtnetlink.c:6647
+ netlink_rcv_skb+0x1e3/0x430 net/netlink/af_netlink.c:2550
+
+Fixes: ff4e538c8c3e ("page_pool: add a lockdep check for recycling in hardirq")
+Reported-by: syzbot+f56a5c5eac2b28439810@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/671e10df.050a0220.2b8c0f.01cf.GAE@google.com/T/
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Link: https://patch.msgid.link/20241217135121.326370-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ 2c321f3f70bc ("mm: change inlined allocation helpers to account at the call site")
+ is not ported to Linux-6.6.y. So remove the suffix "_noprof". ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tap.c | 6 +++---
+ drivers/net/tun.c | 6 +++---
+ include/linux/ptr_ring.h | 17 ++++++++---------
+ include/linux/skb_array.h | 14 ++++++++------
+ net/sched/sch_generic.c | 4 ++--
+ 5 files changed, 24 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1330,9 +1330,9 @@ int tap_queue_resize(struct tap_dev *tap
+ list_for_each_entry(q, &tap->queue_list, next)
+ rings[i++] = &q->ring;
+
+- ret = ptr_ring_resize_multiple(rings, n,
+- dev->tx_queue_len, GFP_KERNEL,
+- __skb_array_destroy_skb);
++ ret = ptr_ring_resize_multiple_bh(rings, n,
++ dev->tx_queue_len, GFP_KERNEL,
++ __skb_array_destroy_skb);
+
+ kfree(rings);
+ return ret;
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3682,9 +3682,9 @@ static int tun_queue_resize(struct tun_s
+ list_for_each_entry(tfile, &tun->disabled, next)
+ rings[i++] = &tfile->tx_ring;
+
+- ret = ptr_ring_resize_multiple(rings, n,
+- dev->tx_queue_len, GFP_KERNEL,
+- tun_ptr_free);
++ ret = ptr_ring_resize_multiple_bh(rings, n,
++ dev->tx_queue_len, GFP_KERNEL,
++ tun_ptr_free);
+
+ kfree(rings);
+ return ret;
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -613,15 +613,14 @@ static inline int ptr_ring_resize(struct
+ /*
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+- * In particular if you consume ring in interrupt or BH context, you must
+- * disable interrupts/BH when doing so.
++ * In particular if you consume ring in BH context, you must
++ * disable BH when doing so.
+ */
+-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+- unsigned int nrings,
+- int size,
+- gfp_t gfp, void (*destroy)(void *))
++static inline int ptr_ring_resize_multiple_bh(struct ptr_ring **rings,
++ unsigned int nrings,
++ int size, gfp_t gfp,
++ void (*destroy)(void *))
+ {
+- unsigned long flags;
+ void ***queues;
+ int i;
+
+@@ -636,12 +635,12 @@ static inline int ptr_ring_resize_multip
+ }
+
+ for (i = 0; i < nrings; ++i) {
+- spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
++ spin_lock_bh(&(rings[i])->consumer_lock);
+ spin_lock(&(rings[i])->producer_lock);
+ queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
+ size, gfp, destroy);
+ spin_unlock(&(rings[i])->producer_lock);
+- spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
++ spin_unlock_bh(&(rings[i])->consumer_lock);
+ }
+
+ for (i = 0; i < nrings; ++i)
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -198,16 +198,18 @@ static inline int skb_array_resize(struc
+ return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
+ }
+
+-static inline int skb_array_resize_multiple(struct skb_array **rings,
+- int nrings, unsigned int size,
+- gfp_t gfp)
++static inline int skb_array_resize_multiple_bh(struct skb_array **rings,
++ int nrings,
++ unsigned int size,
++ gfp_t gfp)
+ {
+ BUILD_BUG_ON(offsetof(struct skb_array, ring));
+- return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+- nrings, size, gfp,
+- __skb_array_destroy_skb);
++ return ptr_ring_resize_multiple_bh((struct ptr_ring **)rings,
++ nrings, size, gfp,
++ __skb_array_destroy_skb);
+ }
+
++
+ static inline void skb_array_cleanup(struct skb_array *a)
+ {
+ ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -910,8 +910,8 @@ static int pfifo_fast_change_tx_queue_le
+ bands[prio] = q;
+ }
+
+- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
+- GFP_KERNEL);
++ return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
++ GFP_KERNEL);
+ }
+
+ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
--- /dev/null
+From 10e735115f4cdbe3342b0079ad9c980f17cb02fa Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 4 Feb 2026 14:49:18 +0100
+Subject: Revert "net: Allow to use SMP threads for backlog NAPI."
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit f3652768a89cfdaedbe2c9384299eea7ec435fef which is
+commit dad6b97702639fba27a2bd3e986982ad6f0db3a7 upstream.
+
+It is only for issues around PREEMPT_RT, which is not in the 6.6.y tree,
+so revert this for now.
+
+Link: https://lore.kernel.org/r/20260120103833.4kssDD1Y@linutronix.de
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Wen Yang <wen.yang@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 152 +++++++++++++--------------------------------------------
+ 1 file changed, 37 insertions(+), 115 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -78,7 +78,6 @@
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+-#include <linux/smpboot.h>
+ #include <linux/mutex.h>
+ #include <linux/rwsem.h>
+ #include <linux/string.h>
+@@ -218,31 +217,6 @@ static inline struct hlist_head *dev_ind
+ return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
+ }
+
+-#ifndef CONFIG_PREEMPT_RT
+-
+-static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
+-
+-static int __init setup_backlog_napi_threads(char *arg)
+-{
+- static_branch_enable(&use_backlog_threads_key);
+- return 0;
+-}
+-early_param("thread_backlog_napi", setup_backlog_napi_threads);
+-
+-static bool use_backlog_threads(void)
+-{
+- return static_branch_unlikely(&use_backlog_threads_key);
+-}
+-
+-#else
+-
+-static bool use_backlog_threads(void)
+-{
+- return true;
+-}
+-
+-#endif
+-
+ static inline void rps_lock_irqsave(struct softnet_data *sd,
+ unsigned long *flags)
+ {
+@@ -4533,7 +4507,6 @@ EXPORT_SYMBOL(__dev_direct_xmit);
+ /*************************************************************************
+ * Receiver routines
+ *************************************************************************/
+-static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
+
+ int netdev_max_backlog __read_mostly = 1000;
+ EXPORT_SYMBOL(netdev_max_backlog);
+@@ -4566,16 +4539,12 @@ static inline void ____napi_schedule(str
+ */
+ thread = READ_ONCE(napi->thread);
+ if (thread) {
+- if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
+- goto use_local_napi;
+-
+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ wake_up_process(thread);
+ return;
+ }
+ }
+
+-use_local_napi:
+ list_add_tail(&napi->poll_list, &sd->poll_list);
+ WRITE_ONCE(napi->list_owner, smp_processor_id());
+ /* If not called from net_rx_action()
+@@ -4821,11 +4790,6 @@ static void napi_schedule_rps(struct sof
+
+ #ifdef CONFIG_RPS
+ if (sd != mysd) {
+- if (use_backlog_threads()) {
+- __napi_schedule_irqoff(&sd->backlog);
+- return;
+- }
+-
+ sd->rps_ipi_next = mysd->rps_ipi_list;
+ mysd->rps_ipi_list = sd;
+
+@@ -6049,7 +6013,7 @@ static void net_rps_action_and_irq_enabl
+ #ifdef CONFIG_RPS
+ struct softnet_data *remsd = sd->rps_ipi_list;
+
+- if (!use_backlog_threads() && remsd) {
++ if (remsd) {
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
+@@ -6064,7 +6028,7 @@ static void net_rps_action_and_irq_enabl
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- return !use_backlog_threads() && sd->rps_ipi_list;
++ return sd->rps_ipi_list != NULL;
+ #else
+ return false;
+ #endif
+@@ -6108,7 +6072,7 @@ static int process_backlog(struct napi_s
+ * We can use a plain write instead of clear_bit(),
+ * and we dont need an smp_mb() memory barrier.
+ */
+- napi->state &= NAPIF_STATE_THREADED;
++ napi->state = 0;
+ again = false;
+ } else {
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+@@ -6774,48 +6738,43 @@ static void skb_defer_free_flush(struct
+ }
+ }
+
+-static void napi_threaded_poll_loop(struct napi_struct *napi)
++static int napi_threaded_poll(void *data)
+ {
++ struct napi_struct *napi = data;
+ struct softnet_data *sd;
+- unsigned long last_qs = jiffies;
++ void *have;
+
+- for (;;) {
+- bool repoll = false;
+- void *have;
++ while (!napi_thread_wait(napi)) {
++ unsigned long last_qs = jiffies;
+
+- local_bh_disable();
+- sd = this_cpu_ptr(&softnet_data);
+- sd->in_napi_threaded_poll = true;
++ for (;;) {
++ bool repoll = false;
+
+- have = netpoll_poll_lock(napi);
+- __napi_poll(napi, &repoll);
+- netpoll_poll_unlock(have);
+-
+- sd->in_napi_threaded_poll = false;
+- barrier();
+-
+- if (sd_has_rps_ipi_waiting(sd)) {
+- local_irq_disable();
+- net_rps_action_and_irq_enable(sd);
+- }
+- skb_defer_free_flush(sd);
+- local_bh_enable();
++ local_bh_disable();
++ sd = this_cpu_ptr(&softnet_data);
++ sd->in_napi_threaded_poll = true;
++
++ have = netpoll_poll_lock(napi);
++ __napi_poll(napi, &repoll);
++ netpoll_poll_unlock(have);
++
++ sd->in_napi_threaded_poll = false;
++ barrier();
++
++ if (sd_has_rps_ipi_waiting(sd)) {
++ local_irq_disable();
++ net_rps_action_and_irq_enable(sd);
++ }
++ skb_defer_free_flush(sd);
++ local_bh_enable();
+
+- if (!repoll)
+- break;
++ if (!repoll)
++ break;
+
+- rcu_softirq_qs_periodic(last_qs);
+- cond_resched();
++ rcu_softirq_qs_periodic(last_qs);
++ cond_resched();
++ }
+ }
+-}
+-
+-static int napi_threaded_poll(void *data)
+-{
+- struct napi_struct *napi = data;
+-
+- while (!napi_thread_wait(napi))
+- napi_threaded_poll_loop(napi);
+-
+ return 0;
+ }
+
+@@ -11400,7 +11359,7 @@ static int dev_cpu_dead(unsigned int old
+
+ list_del_init(&napi->poll_list);
+ if (napi->poll == process_backlog)
+- napi->state &= NAPIF_STATE_THREADED;
++ napi->state = 0;
+ else
+ ____napi_schedule(sd, napi);
+ }
+@@ -11408,14 +11367,12 @@ static int dev_cpu_dead(unsigned int old
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
+
+- if (!use_backlog_threads()) {
+ #ifdef CONFIG_RPS
+- remsd = oldsd->rps_ipi_list;
+- oldsd->rps_ipi_list = NULL;
++ remsd = oldsd->rps_ipi_list;
++ oldsd->rps_ipi_list = NULL;
+ #endif
+- /* send out pending IPI's on offline CPU */
+- net_rps_send_ipi(remsd);
+- }
++ /* send out pending IPI's on offline CPU */
++ net_rps_send_ipi(remsd);
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
+@@ -11678,38 +11635,6 @@ static struct pernet_operations __net_in
+ *
+ */
+
+-static int backlog_napi_should_run(unsigned int cpu)
+-{
+- struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+- struct napi_struct *napi = &sd->backlog;
+-
+- return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+-}
+-
+-static void run_backlog_napi(unsigned int cpu)
+-{
+- struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+-
+- napi_threaded_poll_loop(&sd->backlog);
+-}
+-
+-static void backlog_napi_setup(unsigned int cpu)
+-{
+- struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+- struct napi_struct *napi = &sd->backlog;
+-
+- napi->thread = this_cpu_read(backlog_napi);
+- set_bit(NAPI_STATE_THREADED, &napi->state);
+-}
+-
+-static struct smp_hotplug_thread backlog_threads = {
+- .store = &backlog_napi,
+- .thread_should_run = backlog_napi_should_run,
+- .thread_fn = run_backlog_napi,
+- .thread_comm = "backlog_napi/%u",
+- .setup = backlog_napi_setup,
+-};
+-
+ /*
+ * This is called single threaded during boot, so no need
+ * to take the rtnl semaphore.
+@@ -11760,10 +11685,7 @@ static int __init net_dev_init(void)
+ init_gro_hash(&sd->backlog);
+ sd->backlog.poll = process_backlog;
+ sd->backlog.weight = weight_p;
+- INIT_LIST_HEAD(&sd->backlog.poll_list);
+ }
+- if (use_backlog_threads())
+- smpboot_register_percpu_thread(&backlog_threads);
+
+ dev_boot_phase = 0;
+
--- /dev/null
+From b935d636f036aa47ed7f6344422a2321a20a60fb Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 4 Feb 2026 14:51:26 +0100
+Subject: Revert "net: Remove conditional threaded-NAPI wakeup based on task state."
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 03765d5c18084eab40351fda09bc6fc1a343cd07 which is
+commit 56364c910691f6d10ba88c964c9041b9ab777bd6 upstream.
+
+It is only for issues around PREEMPT_RT, which is not in the 6.6.y tree,
+so revert this for now.
+
+Link: https://lore.kernel.org/r/20260120103833.4kssDD1Y@linutronix.de
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Wen Yang <wen.yang@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4539,7 +4539,13 @@ static inline void ____napi_schedule(str
+ */
+ thread = READ_ONCE(napi->thread);
+ if (thread) {
+- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
++ /* Avoid doing set_bit() if the thread is in
++ * INTERRUPTIBLE state, cause napi_thread_wait()
++ * makes sure to proceed with napi polling
++ * if the thread is explicitly woken from here.
++ */
++ if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
++ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ wake_up_process(thread);
+ return;
+ }
+@@ -6695,6 +6701,8 @@ static int napi_poll(struct napi_struct
+
+ static int napi_thread_wait(struct napi_struct *napi)
+ {
++ bool woken = false;
++
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!kthread_should_stop()) {
+@@ -6703,13 +6711,15 @@ static int napi_thread_wait(struct napi_
+ * Testing SCHED bit is not enough because SCHED bit might be
+ * set by some other busy poll thread or by napi_disable().
+ */
+- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
++ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
+ WARN_ON(!list_empty(&napi->poll_list));
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ schedule();
++ /* woken being true indicates this thread owns this napi. */
++ woken = true;
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
drm-amdgpu-fix-null-pointer-dereference-in-amdgpu_gmc_filter_faults_remove.patch
xsk-fix-race-condition-in-af_xdp-generic-rx-path.patch
ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch
+ptr_ring-do-not-block-hard-interrupts-in-ptr_ring_resize_multiple.patch
+drm-amd-display-use-udelay-rather-than-fsleep.patch
+revert-net-allow-to-use-smp-threads-for-backlog-napi.patch
+revert-net-remove-conditional-threaded-napi-wakeup-based-on-task-state.patch
+bpf-selftests-test_select_reuseport_kern-remove-unused-header.patch