--- /dev/null
+From 359554d1e525b6b2e2be2eabd21c84239fc13816 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() only
+when J1939_SOCK_BOUND is already set, but the error handling path of
+j1939_sk_bind() does not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo
+priv->ents[sa].nusers++ itself when it returns an error.
+
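+Roughly, the caller-side pairing that makes the undo necessary looks
+like this (a simplified sketch of net/can/j1939/socket.c, not the
+literal code):
+
+    /* j1939_sk_bind() */
+    ret = j1939_local_ecu_get(priv, name, sa); /* bumps nusers */
+    if (ret)
+        goto out;               /* J1939_SOCK_BOUND is never set */
+    jsk->state |= J1939_SOCK_BOUND;
+
+    /* j1939_sk_release() */
+    if (jsk->state & J1939_SOCK_BOUND)
+        j1939_local_ecu_put(priv, name, sa); /* skipped after a failed bind */
+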
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
--- /dev/null
+From 60f35868810bc9c5bf07faca336630bd6a36fb5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
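+With this fix, the error path of j1939_sk_bind() becomes (see the diff
+below; clearing jsk->priv first keeps j1939_sk_sock_destruct() from
+dropping the same reference again, and synchronize_rcu() lets any
+concurrent reader of jsk->priv finish before the put):
+
+    j1939_netdev_stop(priv);
+    jsk->priv = NULL;
+    synchronize_rcu();
+    j1939_priv_put(priv);
+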
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 45ae7a235dbff..34cd4792d5d41 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8dfe874ec413b5b630d1fd97ba7cabb20a1bf09a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features in Clang-built
+kernels.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
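+For illustration, with s8 (*queue_priority_map)[2] the two variants
+allocate:
+
+    /* before: (num_tc + 1) * 1 byte -- too small */
+    devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), GFP_KERNEL);
+
+    /* after: (num_tc + 1) * 2 bytes -- one s8[2] row per queue */
+    devm_kcalloc(dev, ecc->num_tc + 1, sizeof(*queue_priority_map),
+                 GFP_KERNEL);
+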
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 560fe658b8942..c555b0991ad10 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2121,8 +2121,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
--- /dev/null
+From a3d23d9a32950eb4c14698a2601285f0465298af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Dec 2020 18:36:53 +0800
+Subject: genirq/affinity: Add irq_update_affinity_desc()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit 1d3aec89286254487df7641c30f1b14ad1d127a5 ]
+
+Add a function to allow the affinity of an interrupt to be switched to
+managed, such that interrupts allocated for platform devices may be
+managed.
+
+This new interface has certain limitations, and attempts to use it in the
+following circumstances will fail:
+- The kernel is configured for generic IRQ reservation mode (config
+  GENERIC_IRQ_RESERVATION_MODE), because it could conflict with managed
+  vs. non-managed interrupt accounting.
+- The interrupt is already started, which should not be the case during
+  init.
+- The interrupt is already configured as managed, which means double init.
+
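+A minimal usage sketch (hypothetical caller, not part of this patch):
+
+    struct irq_affinity_desc affinity = { .is_managed = 1 };
+
+    cpumask_copy(&affinity.mask, cpumask_of(cpu));
+    /* must run before the interrupt is started */
+    ret = irq_update_affinity_desc(irq, &affinity);
+    if (ret)    /* -EOPNOTSUPP, -EINVAL or -EBUSY, see above */
+        return ret;
+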
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/1606905417-183214-2-git-send-email-john.garry@huawei.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 8 +++++
+ kernel/irq/manage.c | 70 +++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 78 insertions(+)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 71d3fa7f02655..d6f833b403d59 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -356,6 +356,8 @@ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
++extern int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity);
+
+ extern int
+ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+@@ -391,6 +393,12 @@ static inline int irq_set_affinity_hint(unsigned int irq,
+ return -EINVAL;
+ }
+
++static inline int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity)
++{
++ return -EINVAL;
++}
++
+ static inline int
+ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+ {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index c7f4f948f17e4..4ca4ab8ef2a5f 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -386,6 +386,76 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ return ret;
+ }
+
++/**
++ * irq_update_affinity_desc - Update affinity management for an interrupt
++ * @irq: The interrupt number to update
++ * @affinity: Pointer to the affinity descriptor
++ *
++ * This interface can be used to configure the affinity management of
++ * interrupts which have been allocated already.
++ *
++ * There are certain limitations on when it may be used - attempts to use it
++ * for when the kernel is configured for generic IRQ reservation mode (in
++ * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
++ * managed/non-managed interrupt accounting. In addition, attempts to use it on
++ * an interrupt which is already started or which has already been configured
++ * as managed will also fail, as these mean invalid init state or double init.
++ */
++int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity)
++{
++ struct irq_desc *desc;
++ unsigned long flags;
++ bool activated;
++ int ret = 0;
++
++ /*
++ * Supporting this with the reservation scheme used by x86 needs
++ * some more thought. Fail it for now.
++ */
++ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
++ return -EOPNOTSUPP;
++
++ desc = irq_get_desc_buslock(irq, &flags, 0);
++ if (!desc)
++ return -EINVAL;
++
++ /* Requires the interrupt to be shut down */
++ if (irqd_is_started(&desc->irq_data)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ /* Interrupts which are already managed cannot be modified */
++ if (irqd_affinity_is_managed(&desc->irq_data)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ /*
++ * Deactivate the interrupt. That's required to undo
++ * anything an earlier activation has established.
++ */
++ activated = irqd_is_activated(&desc->irq_data);
++ if (activated)
++ irq_domain_deactivate_irq(&desc->irq_data);
++
++ if (affinity->is_managed) {
++ irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
++ irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
++ }
++
++ cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
++
++ /* Restore the activation state */
++ if (activated)
++ irq_domain_activate_irq(&desc->irq_data, false);
++
++out_unlock:
++ irq_put_desc_busunlock(desc, flags);
++ return ret;
++}
++
+ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+--
+2.51.0
+
--- /dev/null
+From 4b7ce171b3d8dd872bbc96043ff8c3de1de30a16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 May 2021 11:17:26 +0200
+Subject: genirq: Export affinity setter for modules
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 4d80d6ca5d77fde9880da8466e5b64f250e5bf82 ]
+
+Perf modules abuse irq_set_affinity_hint() to set the affinity of system
+PMU interrupts just because irq_set_affinity() was not exported.
+
+The fact that irq_set_affinity_hint() actually sets the affinity is an
+undocumented side effect, and the name clearly says it is only a hint.
+
+To clean this up, export the real affinity setter.
+
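+The intended replacement in such modules (illustrative):
+
+    /* before: relied on the undocumented side effect */
+    irq_set_affinity_hint(irq, cpumask_of(cpu));
+
+    /* after: use the real, now exported, setter */
+    irq_set_affinity(irq, cpumask_of(cpu));
+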
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20210518093117.968251441@linutronix.de
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 35 ++---------------------------------
+ kernel/irq/manage.c | 33 ++++++++++++++++++++++++++++++++-
+ 2 files changed, 34 insertions(+), 34 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index d6f833b403d59..9c52e263ef399 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -318,39 +318,8 @@ struct irq_affinity_desc {
+
+ extern cpumask_var_t irq_default_affinity;
+
+-/* Internal implementation. Use the helpers below */
+-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+- bool force);
+-
+-/**
+- * irq_set_affinity - Set the irq affinity of a given irq
+- * @irq: Interrupt to set affinity
+- * @cpumask: cpumask
+- *
+- * Fails if cpumask does not contain an online CPU
+- */
+-static inline int
+-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+-{
+- return __irq_set_affinity(irq, cpumask, false);
+-}
+-
+-/**
+- * irq_force_affinity - Force the irq affinity of a given irq
+- * @irq: Interrupt to set affinity
+- * @cpumask: cpumask
+- *
+- * Same as irq_set_affinity, but without checking the mask against
+- * online cpus.
+- *
+- * Solely for low level cpu hotplug code, where we need to make per
+- * cpu interrupts affine before the cpu becomes online.
+- */
+-static inline int
+-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+-{
+- return __irq_set_affinity(irq, cpumask, true);
+-}
++extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
++extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 4ca4ab8ef2a5f..5cf1a32b74e24 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -456,7 +456,8 @@ int irq_update_affinity_desc(unsigned int irq,
+ return ret;
+ }
+
+-int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
++static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
++ bool force)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+@@ -471,6 +472,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+ return ret;
+ }
+
++/**
++ * irq_set_affinity - Set the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
++ * @cpumask: cpumask
++ *
++ * Fails if cpumask does not contain an online CPU
++ */
++int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, false);
++}
++EXPORT_SYMBOL_GPL(irq_set_affinity);
++
++/**
++ * irq_force_affinity - Force the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
++ * @cpumask: cpumask
++ *
++ * Same as irq_set_affinity, but without checking the mask against
++ * online cpus.
++ *
++ * Solely for low level cpu hotplug code, where we need to make per
++ * cpu interrupts affine before the cpu becomes online.
++ */
++int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, true);
++}
++EXPORT_SYMBOL_GPL(irq_force_affinity);
++
+ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ {
+ unsigned long flags;
+--
+2.51.0
+
--- /dev/null
+From b7dae128f31c85fac89394337f0d3f3ab595724b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:17 -0400
+Subject: genirq: Provide new interfaces for affinity hints
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 65c7cdedeb3026fabcc967a7aae2f755ad4d0783 ]
+
+The discussion about removing the side effect of irq_set_affinity_hint() of
+actually applying the cpumask (if not NULL) as affinity to the interrupt,
+unearthed a few unpleasantries:
+
+ 1) The modular perf drivers rely on the current behaviour for the very
+ wrong reasons.
+
+ 2) While none of the other drivers prevents user space from changing
+    the affinity, a cursory inspection shows that at least some
+    drivers have expectations about it.
+
+#1 needs to be cleaned up anyway, so that's not a problem
+
+#2 might result in subtle regressions especially when irqbalanced (which
+ nowadays ignores the affinity hint) is disabled.
+
+Provide new interfaces:
+
+ irq_update_affinity_hint() - Only sets the affinity hint pointer
+ irq_set_affinity_and_hint() - Set the pointer and apply the affinity to
+ the interrupt
+
+Make irq_set_affinity_hint() a wrapper around irq_set_affinity_and_hint() and
+document it to be phased out.
+
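+How existing users are expected to migrate (illustrative):
+
+    /* publish the hint only; the affinity is left alone */
+    irq_update_affinity_hint(irq, mask);
+
+    /* publish the hint and also apply it as the affinity */
+    irq_set_affinity_and_hint(irq, mask);
+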
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20210501021832.743094-1-jesse.brandeburg@intel.com
+Link: https://lore.kernel.org/r/20210903152430.244937-2-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 53 ++++++++++++++++++++++++++++++++++++++-
+ kernel/irq/manage.c | 8 +++---
+ 2 files changed, 56 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 9c52e263ef399..4994465ad1d9c 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -324,7 +324,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
++extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity);
++
++/**
++ * irq_update_affinity_hint - Update the affinity hint
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint, but does not change the affinity of the interrupt.
++ */
++static inline int
++irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, false);
++}
++
++/**
++ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
++ * cpumask to the interrupt
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint and if @m is not NULL it applies it as the
++ * affinity of that interrupt.
++ */
++static inline int
++irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, true);
++}
++
++/*
++ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
++ * instead.
++ */
++static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return irq_set_affinity_and_hint(irq, m);
++}
++
+ extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
+
+@@ -356,6 +395,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
+
+ static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
++static inline int irq_update_affinity_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
++static inline int irq_set_affinity_and_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
+ static inline int irq_set_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+ {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 5cf1a32b74e24..4998e8a561564 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -502,7 +502,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+ }
+ EXPORT_SYMBOL_GPL(irq_force_affinity);
+
+-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity)
+ {
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+@@ -511,12 +512,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ return -EINVAL;
+ desc->affinity_hint = m;
+ irq_put_desc_unlock(desc, flags);
+- /* set the initial affinity to prevent every interrupt being on CPU0 */
+- if (m)
++ if (m && setaffinity)
+ __irq_set_affinity(irq, m, false);
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
++EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
+
+ static void irq_affinity_notify(struct work_struct *work)
+ {
+--
+2.51.0
+
--- /dev/null
+From 6130b00d8e048a0477918a29ed4799dc0be34466 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this by inserting code that fails intentionally.
+
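+Since vsi->q_vectors[] is an array of pointers, the dev_id cookie for
+free_irq() must be the pointer itself, matching what was passed to
+request_irq() (simplified):
+
+    request_irq(irq_num, ..., vsi->q_vectors[vector]);
+    ...
+    free_irq(irq_num, vsi->q_vectors[vector]); /* not &vsi->q_vectors[vector] */
+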
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 0b0f6bf5affe0..f11cb3176cab4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3984,7 +3984,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From 56008e4e7f7fcf5ff1a38e0cec7410b90d0d4fb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:19 -0400
+Subject: i40e: Use irq_update_affinity_hint()
+
+From: Nitesh Narayan Lal <nitesh@redhat.com>
+
+[ Upstream commit d34c54d1739c2cdf2e4437b74e6da269147f4987 ]
+
+The driver uses irq_set_affinity_hint() for two purposes:
+
+ - To set the affinity_hint which is consumed by the userspace for
+ distributing the interrupts
+
+ - To apply an affinity that it provides for the i40e interrupts
+
+The latter is done to ensure that all the interrupts are evenly spread
+across all available CPUs. However, since commit a0c9259dc4e1 ("irq/matrix:
+Spread interrupts on allocation") the spreading of interrupts is
+dynamically performed at the time of allocation. Hence, there is no need
+for the drivers to enforce their own affinity for the spreading of
+interrupts.
+
+Also, irq_set_affinity_hint() applying the provided cpumask as an affinity
+for the interrupt is an undocumented side effect. To remove this side
+effect irq_set_affinity_hint() has been marked as deprecated and new
+interfaces have been introduced. Hence, replace the irq_set_affinity_hint()
+with the new interface irq_update_affinity_hint() that only sets the
+pointer for the affinity_hint.
+
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Link: https://lore.kernel.org/r/20210903152430.244937-4-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index aa24d1808c981..0b0f6bf5affe0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3969,10 +3969,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+- * irq_set_affinity_hint without making a copy.
++ * irq_update_affinity_hint without making a copy.
+ */
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
++ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ vsi->irqs_ready = true;
+@@ -3983,7 +3983,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ vector--;
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &vsi->q_vectors[vector]);
+ }
+ return err;
+@@ -4801,7 +4801,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+ /* remove our suggested affinity mask for this IRQ */
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ free_irq(irq_num, vsi->q_vectors[i]);
+
+--
+2.51.0
+
--- /dev/null
+From e60e17b99219cab2186e9198e5717f4f2ca18d17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice,
+ixgbe), which correctly test the physical link state regardless of the
+admin state.
+Remove the if_running check to ensure the link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 2d1d9090f2cbf..d472e01c2c996 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From edd9787903b5517812c5e86fd9e329fd181154fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index adf70a1650f4d..9905e65621004 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1986,7 +1986,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
mtd-nand-raw-atmel-fix-comment-in-timings-preparation.patch
mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+genirq-affinity-add-irq_update_affinity_desc.patch
+genirq-export-affinity-setter-for-modules.patch
+genirq-provide-new-interfaces-for-affinity-hints.patch
+i40e-use-irq_update_affinity_hint.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
--- /dev/null
+From ef100cfec5da7d02bc7f5aeb4c1678304cbdc679 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and if Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such a case the ICMP packet
+might go through the segmentation logic (and be dropped) later if it
+reaches a path where the GSO status is checked and segmentation is
+required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example on re-transmission the skb won't be a
+ GSO one as it would be segmented (len > MSS) and the ICMP packet
+ should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
+
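+The reset itself is small (see the diff below); skb_gso_reset() clears
+the gso_size/gso_segs/gso_type metadata, so skb_is_gso() no longer
+matches the reused skb:
+
+    if (skb_is_gso(skb))
+        skb_gso_reset(skb);
+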
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 01d362b5b8826..3cdb546dbc8d7 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
--- /dev/null
+From f3e85d2ce9e3a75fcad835a7ef2a079ef06efcb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() only
+when J1939_SOCK_BOUND is already set, but the error handling path of
+j1939_sk_bind() does not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo
+priv->ents[sa].nusers++ itself when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
--- /dev/null
+From 57c9c3ddf4e05b7340f58770547a50e659323dc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index d8ba84828f234..ec2927566cf3e 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c13f93930cd3f427c394f5507610c3d7e622b59c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 12:50:02 +0300
+Subject: can: xilinx_can: xcan_write_frame(): fix use-after-free of
+ transmitted SKB
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+[ Upstream commit ef79f00be72bd81d2e1e6f060d83cf7e425deee4 ]
+
+can_put_echo_skb() takes ownership of the SKB and it may be freed
+during or after the call.
+
+However, xilinx_can xcan_write_frame() keeps using SKB after the call.
+
+Fix that by only calling can_put_echo_skb() after the code is done
+touching the SKB.
+
+The tx_lock is held for the entire xcan_write_frame() execution and
+also on the can_get_echo_skb() side so the order of operations does not
+matter.
+
+An earlier fix commit 3d3c817c3a40 ("can: xilinx_can: Fix usage of skb
+memory") did not move the can_put_echo_skb() call far enough.
+
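+In outline, the ordering established by this patch is (simplified from
+the diff below):
+
+    /* 1. program ID/DLC/data registers from the skb */
+    priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+    ...
+    /* 2. only now hand the skb over; it may be freed from here on */
+    can_put_echo_skb(skb, ndev, ..., 0);
+    priv->tx_head++;
+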
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: 1598efe57b3e ("can: xilinx_can: refactor code in preparation for CAN FD support")
+Link: https://patch.msgid.link/20250822095002.168389-1-anssi.hannula@bitwise.fi
+[mkl: add "commit" in front of sha1 in patch description]
+[mkl: fix indention]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/xilinx_can.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index cb48598e32ad8..ac63e89397774 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -590,14 +590,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
+
+- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+- else
+- can_put_echo_skb(skb, ndev, 0, 0);
+-
+- priv->tx_head++;
+-
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+@@ -630,6 +622,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ data[1]);
+ }
+ }
++
++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++ else
++ can_put_echo_skb(skb, ndev, 0, 0);
++
++ priv->tx_head++;
+ }
+
+ /**
+--
+2.51.0
+
--- /dev/null
+From 8832316c01fef5c2accbc81932ec7112940609ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features in Clang-built
+kernels.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 560fe658b8942..c555b0991ad10 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2121,8 +2121,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
--- /dev/null
+From 3dd00441843d333d80b92841344c39bc1639b3b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:17 -0400
+Subject: genirq: Provide new interfaces for affinity hints
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 65c7cdedeb3026fabcc967a7aae2f755ad4d0783 ]
+
+The discussion about removing the side effect of irq_set_affinity_hint() of
+actually applying the cpumask (if not NULL) as affinity to the interrupt,
+unearthed a few unpleasantries:
+
+ 1) The modular perf drivers rely on the current behaviour for the very
+ wrong reasons.
+
+ 2) While none of the other drivers prevents user space from changing
+    the affinity, a cursory inspection shows that at least some
+    drivers have expectations about it.
+
+#1 needs to be cleaned up anyway, so that's not a problem
+
+#2 might result in subtle regressions especially when irqbalanced (which
+ nowadays ignores the affinity hint) is disabled.
+
+Provide new interfaces:
+
+ irq_update_affinity_hint() - Only sets the affinity hint pointer
+ irq_set_affinity_and_hint() - Set the pointer and apply the affinity to
+ the interrupt
+
+Make irq_set_affinity_hint() a wrapper around irq_set_affinity_and_hint() and
+document it to be phased out.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20210501021832.743094-1-jesse.brandeburg@intel.com
+Link: https://lore.kernel.org/r/20210903152430.244937-2-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 53 ++++++++++++++++++++++++++++++++++++++-
+ kernel/irq/manage.c | 8 +++---
+ 2 files changed, 56 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 976bca44bae0c..5c4ba2ee582a9 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -329,7 +329,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
++extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity);
++
++/**
++ * irq_update_affinity_hint - Update the affinity hint
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint, but does not change the affinity of the interrupt.
++ */
++static inline int
++irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, false);
++}
++
++/**
++ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
++ * cpumask to the interrupt
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint and if @m is not NULL it applies it as the
++ * affinity of that interrupt.
++ */
++static inline int
++irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, true);
++}
++
++/*
++ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
++ * instead.
++ */
++static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return irq_set_affinity_and_hint(irq, m);
++}
++
+ extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
+
+@@ -361,6 +400,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
+
+ static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
++static inline int irq_update_affinity_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
++static inline int irq_set_affinity_and_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
+ static inline int irq_set_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+ {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index b46fbfbb929f1..ce0433446a8ed 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -501,7 +501,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+ }
+ EXPORT_SYMBOL_GPL(irq_force_affinity);
+
+-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity)
+ {
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+@@ -510,12 +511,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ return -EINVAL;
+ desc->affinity_hint = m;
+ irq_put_desc_unlock(desc, flags);
+- /* set the initial affinity to prevent every interrupt being on CPU0 */
+- if (m)
++ if (m && setaffinity)
+ __irq_set_affinity(irq, m, false);
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
++EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
+
+ static void irq_affinity_notify(struct work_struct *work)
+ {
+--
+2.51.0
+
--- /dev/null
+From 07e1e15d20b5e265f6be39d53b0030e6b0650210 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 15:04:41 +0800
+Subject: hrtimer: Remove unused function
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit 82ccdf062a64f3c4ac575c16179ce68edbbbe8e4 ]
+
+The function is defined, but not called anywhere:
+
+ kernel/time/hrtimer.c:1880:20: warning: unused function '__hrtimer_peek_ahead_timers'.
+
+Remove it.
+
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240322070441.29646-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8611
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 20 +-------------------
+ 1 file changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 2e4b63f3c6dda..a8fbf4b1ea197 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1873,25 +1873,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ tick_program_event(expires_next, 1);
+ pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
+ }
+-
+-/* called with interrupts disabled */
+-static inline void __hrtimer_peek_ahead_timers(void)
+-{
+- struct tick_device *td;
+-
+- if (!hrtimer_hres_active())
+- return;
+-
+- td = this_cpu_ptr(&tick_cpu_device);
+- if (td && td->evtdev)
+- hrtimer_interrupt(td->evtdev);
+-}
+-
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif /* !CONFIG_HIGH_RES_TIMERS */
++#endif /* !CONFIG_HIGH_RES_TIMERS */
+
+ /*
+ * Called from run_local_timers in hardirq context every jiffy
+--
+2.51.0
+
--- /dev/null
+From a7a121ceda802ac58666435fd30196e77a468ede Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2024 10:30:00 +0800
+Subject: hrtimer: Rename __hrtimer_hres_active() to hrtimer_hres_active()
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit b7c8e1f8a7b4352c1d0b4310686385e3cf6c104a ]
+
+The function hrtimer_hres_active() is defined in the hrtimer.c file, but
+no longer called elsewhere, so rename __hrtimer_hres_active() to
+hrtimer_hres_active() and remove the old hrtimer_hres_active() function.
+
+kernel/time/hrtimer.c:653:19: warning: unused function 'hrtimer_hres_active'.
+
+Fixes: 82ccdf062a64 ("hrtimer: Remove unused function")
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/r/20240418023000.130324-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8778
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 21 ++++++++-------------
+ 1 file changed, 8 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index a8fbf4b1ea197..74a71b3a064dc 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -631,17 +631,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ /*
+ * Is the high resolution mode active ?
+ */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+ {
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ cpu_base->hres_active : 0;
+ }
+
+-static inline int hrtimer_hres_active(void)
+-{
+- return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *next_timer,
+ ktime_t expires_next)
+@@ -665,7 +660,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+- if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
++ if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ tick_program_event(expires_next, 1);
+@@ -776,12 +771,12 @@ static void retrigger_next_event(void *arg)
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
+ */
+- if (!__hrtimer_hres_active(base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(base) && !tick_nohz_active)
+ return;
+
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+- if (__hrtimer_hres_active(base))
++ if (hrtimer_hres_active(base))
+ hrtimer_force_reprogram(base, 0);
+ else
+ hrtimer_update_next_event(base);
+@@ -938,7 +933,7 @@ void clock_was_set(unsigned int bases)
+ cpumask_var_t mask;
+ int cpu;
+
+- if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+ goto out_timerfd;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1489,7 +1484,7 @@ u64 hrtimer_get_next_event(void)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (!__hrtimer_hres_active(cpu_base))
++ if (!hrtimer_hres_active(cpu_base))
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+@@ -1512,7 +1507,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (__hrtimer_hres_active(cpu_base)) {
++ if (hrtimer_hres_active(cpu_base)) {
+ unsigned int active;
+
+ if (!cpu_base->softirq_activated) {
+@@ -1884,7 +1879,7 @@ void hrtimer_run_queues(void)
+ unsigned long flags;
+ ktime_t now;
+
+- if (__hrtimer_hres_active(cpu_base))
++ if (hrtimer_hres_active(cpu_base))
+ return;
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 9f6dd0d88391962562f749fafe91975a7e48eedd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 16:10:25 +0800
+Subject: hrtimers: Unconditionally update target CPU base after offline timer
+ migration
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit e895f8e29119c8c966ea794af9e9100b10becb88 ]
+
+When testing softirq based hrtimers on an ARM32 board, with high resolution
+mode and NOHZ inactive, softirq based hrtimers fail to expire after being
+moved away from an offline CPU:
+
+CPU0                                CPU1
+                                    hrtimer_start(..., HRTIMER_MODE_SOFT);
+cpu_down(CPU1)                      ...
+                                    hrtimers_cpu_dying()
+                                      // Migrate timers to CPU0
+                                      smp_call_function_single(CPU0, retrigger_next_event);
+retrigger_next_event()
+  if (!highres && !nohz)
+    return;
+
+As retrigger_next_event() is a NOOP when both high resolution timers and
+NOHZ are inactive, CPU0's hrtimer_cpu_base::softirq_expires_next is not
+updated and the migrated softirq timers never expire unless there is a
+softirq based hrtimer queued on CPU0 later.
+
+Fix this by removing the hrtimer_hres_active() and tick_nohz_active() check
+in retrigger_next_event(), which enforces a full update of the CPU base.
+As this is not a fast path the extra cost does not matter.
+
+[ tglx: Massaged change log ]
+
+Fixes: 5c0930ccaad5 ("hrtimers: Push pending hrtimers away from outgoing CPU earlier")
+Co-developed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20250805081025.54235-1-wangxiongfeng2@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 74a71b3a064dc..7e2ed34e9803b 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -770,10 +770,10 @@ static void retrigger_next_event(void *arg)
+ * of the next expiring timer is enough. The return from the SMP
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
++ *
++ * In periodic low resolution mode, the next softirq expiration
++ * must also be updated.
+ */
+- if (!hrtimer_hres_active(base) && !tick_nohz_active)
+- return;
+-
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+ if (hrtimer_hres_active(base))
+@@ -2229,11 +2229,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ &new_base->clock_base[i]);
+ }
+
+- /*
+- * The migration might have changed the first expiring softirq
+- * timer on this CPU. Update it.
+- */
+- __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+ /* Tell the other CPU to retrigger the next event */
+ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+--
+2.51.0
+
--- /dev/null
+From 7526ffc56e32d0103a5c854dc42bceca03ca9339 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:32 +0000
+Subject: hsr: use hsr_for_each_port_rtnl in hsr_port_get_hsr
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 393c841fe4333cdd856d0ca37b066d72746cfaa6 ]
+
+hsr_port_get_hsr() iterates over ports using hsr_for_each_port(),
+but many of its callers do not hold the required RCU lock.
+
+Switch to hsr_for_each_port_rtnl(), since most callers already hold
+the rtnl lock. After review, all callers are covered by either the rtnl
+lock or the RCU lock, except hsr_dev_xmit(). Fix this by adding an
+RCU read lock there.
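+
+The datapath pattern after the fix is roughly (sketch based on the hunk
+below, not the complete function):
+
+  rcu_read_lock();
+  master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+  if (master) {
+      skb->dev = master->dev;
+      /* ... transmit via the master port ... */
+  }
+  rcu_read_unlock();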
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 3 +++
+ net/hsr/hsr_main.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index ff27935a29523..503f2064e7323 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -231,6 +231,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
++ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+@@ -243,6 +244,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ atomic_long_inc(&dev->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
++ rcu_read_unlock();
++
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index c325ddad539a7..76a1958609e29 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 04b94cdaedfe6b4d21f676b5ce205a319aeedbb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:31 +0000
+Subject: hsr: use rtnl lock when iterating over ports
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 8884c693991333ae065830554b9b0c96590b1bb2 ]
+
+hsr_for_each_port is called in many places without holding the RCU read
+lock; this may trigger warnings on debug kernels. Most of the callers
+actually hold the rtnl lock. So add a new helper hsr_for_each_port_rtnl
+to allow callers in suitable contexts to iterate ports safely without
+explicit RCU locking.
+
+This patch only fixes the callers that hold the rtnl lock. Other callers
+will be fixed in later patches.
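+
+The intended split between the two iterators, as a sketch (use() stands
+in for any per-port access):
+
+  /* control path: RTNL already held */
+  ASSERT_RTNL();
+  hsr_for_each_port_rtnl(hsr, port)
+      use(port);
+
+  /* datapath: take the RCU read lock */
+  rcu_read_lock();
+  hsr_for_each_port(hsr, port)
+      use(port);
+  rcu_read_unlock();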
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 18 +++++++++---------
+ net/hsr/hsr_main.c | 2 +-
+ net/hsr/hsr_main.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 7755bf2ce162c..ff27935a29523 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+
+ ASSERT_RTNL();
+
+- hsr_for_each_port(master->hsr, port) {
++ hsr_for_each_port_rtnl(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+@@ -112,7 +112,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+@@ -147,7 +147,7 @@ static int hsr_dev_open(struct net_device *dev)
+ hsr = netdev_priv(dev);
+ designation = '\0';
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -177,7 +177,7 @@ static int hsr_dev_close(struct net_device *dev)
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -210,7 +210,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+@@ -428,7 +428,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -450,7 +450,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -478,7 +478,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+@@ -524,7 +524,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 257b50124cee5..c325ddad539a7 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 4188516cde5da..5c0e5f6d1eda1 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -225,6 +225,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
++#define hsr_for_each_port_rtnl(hsr, port) \
++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+ /* Caller must ensure skb is a valid HSR frame */
+--
+2.51.0
+
--- /dev/null
+From 9f3ce6d8a602bb2e1ebec1319493a721f1d8c0b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
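+
+The pairing rule, sketched (handler and name stand in for the driver's
+actual arguments; q_vectors is an array of pointers, so the stored
+pointer value, not its address, is the cookie):
+
+  err = request_irq(irq_num, handler, 0, name,
+                    vsi->q_vectors[vector]);  /* dev_id: the pointer */
+  ...
+  free_irq(irq_num, vsi->q_vectors[vector]);  /* must pass the same value */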
+
+I tested this by inserting code that fails intentionally.
+
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 9fb598f56be4a..4c50e18707c7f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4167,7 +4167,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From b06319a8f2d74b6544a4f159c440162d4a624899 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:19 -0400
+Subject: i40e: Use irq_update_affinity_hint()
+
+From: Nitesh Narayan Lal <nitesh@redhat.com>
+
+[ Upstream commit d34c54d1739c2cdf2e4437b74e6da269147f4987 ]
+
+The driver uses irq_set_affinity_hint() for two purposes:
+
+ - To set the affinity_hint which is consumed by the userspace for
+ distributing the interrupts
+
+ - To apply an affinity that it provides for the i40e interrupts
+
+The latter is done to ensure that all the interrupts are evenly spread
+across all available CPUs. However, since commit a0c9259dc4e1 ("irq/matrix:
+Spread interrupts on allocation") the spreading of interrupts is
+dynamically performed at the time of allocation. Hence, there is no need
+for the drivers to enforce their own affinity for the spreading of
+interrupts.
+
+Also, irq_set_affinity_hint() applying the provided cpumask as an affinity
+for the interrupt is an undocumented side effect. To remove this side
+effect irq_set_affinity_hint() has been marked as deprecated and new
+interfaces have been introduced. Hence, replace the irq_set_affinity_hint()
+with the new interface irq_update_affinity_hint() that only sets the
+pointer for the affinity_hint.
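+
+In short, the mechanical replacement is (sketch):
+
+  cpu = cpumask_local_spread(q_vector->v_idx, -1);
+  /* old: also applied the mask as the affinity (side effect) */
+  irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+  /* new: only publishes the hint for userspace */
+  irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));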
+
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Link: https://lore.kernel.org/r/20210903152430.244937-4-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 2a3b8dd72686d..9fb598f56be4a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4152,10 +4152,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+- * irq_set_affinity_hint without making a copy.
++ * irq_update_affinity_hint without making a copy.
+ */
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
++ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ vsi->irqs_ready = true;
+@@ -4166,7 +4166,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ vector--;
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &vsi->q_vectors[vector]);
+ }
+ return err;
+@@ -4987,7 +4987,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+ /* remove our suggested affinity mask for this IRQ */
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ free_irq(irq_num, vsi->q_vectors[i]);
+
+--
+2.51.0
+
--- /dev/null
+From 611030c174b46694ce6c32c797a86fef73a73cc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice and
+ixgbe), which correctly test the physical link state regardless of the
+admin state.
+Remove the if_running check to ensure link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 39c7bdf8c0e2d..a35e4a54b6e3e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2077,11 +2077,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From 18162f2c0dbc12cddd9c0321bbebb9c6380f2091 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
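+
+The guarded pattern, as a sketch (this assumes, as the fix implies, that
+phy_reset_after_clk_enable() itself tolerates a NULL argument, while
+put_device(&phy_dev->mdio.dev) would dereference it):
+
+  phy_dev = of_phy_find_device(fep->phy_node);  /* may be NULL */
+  phy_reset_after_clk_enable(phy_dev);
+  if (phy_dev)
+      put_device(&phy_dev->mdio.dev);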
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 437e72110ab54..d457af64f8357 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2033,7 +2033,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 186e9ac5a3a8f2023603f0d4d626bc22d57c2a3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 11:07:53 +0530
+Subject: net: hsr: Add support for MC filtering at the slave device
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 36b20fcdd9663ced36d3aef96f0eff8eb79de4b8 ]
+
+When MC (multicast) list is updated by the networking layer due to a
+user command and as well as when allmulti flag is set, it needs to be
+passed to the enslaved Ethernet devices. This patch allows this
+to happen by implementing ndo_change_rx_flags() and ndo_set_rx_mode()
+API calls that in turns pass it to the slave devices using
+existing API calls.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 67 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 66 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 4967dc22824c7..5b7bca9e7e5ae 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -173,7 +173,24 @@ static int hsr_dev_open(struct net_device *dev)
+
+ static int hsr_dev_close(struct net_device *dev)
+ {
+- /* Nothing to do here. */
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_uc_unsync(port->dev, dev);
++ dev_mc_unsync(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -404,12 +421,60 @@ void hsr_del_ports(struct hsr_priv *hsr)
+ hsr_del_port(port);
+ }
+
++static void hsr_set_rx_mode(struct net_device *dev)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_mc_sync_multiple(port->dev, dev);
++ dev_uc_sync_multiple(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++static void hsr_change_rx_flags(struct net_device *dev, int change)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(port->dev,
++ dev->flags &
++ IFF_ALLMULTI ? 1 : -1);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
++ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
++ .ndo_set_rx_mode = hsr_set_rx_mode,
+ };
+
+ static struct device_type hsr_type = {
+--
+2.51.0
+
--- /dev/null
+From ca955aa929ad31baa41ea068df4f1c90a9db5eab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2024 14:47:08 +0530
+Subject: net: hsr: Add VLAN CTAG filter support
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 1a8a63a5305e95519de6f941922dfcd8179f82e5 ]
+
+This patch adds support for VLAN ctag based filtering at slave devices.
+The slave ethernet device may be capable of filtering ethernet packets
+based on VLAN ID. This requires that when the VLAN interface is created
+over an HSR/PRP interface, it passes the VID information to the
+associated slave ethernet devices so that they update the hardware
+filters to filter ethernet frames based on VID. This patch adds the
+required functions to propagate the vid information to the slave
+devices.
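+
+The add path has to stay balanced across both slaves; simplified sketch
+(other_port and other_added are illustrative names, see the real hunk
+below for the exact bookkeeping):
+
+  ret = vlan_vid_add(port->dev, proto, vid);
+  if (ret) {
+      /* roll back the slave that already succeeded */
+      if (other_added)
+          vlan_vid_del(other_port->dev, proto, vid);
+      return ret;
+  }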
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20241106091710.3308519-3-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 80 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 79 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 5b7bca9e7e5ae..7755bf2ce162c 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -467,6 +467,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ }
+ }
+
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ bool is_slave_a_added = false;
++ bool is_slave_b_added = false;
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++ int ret = 0;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER ||
++ port->type == HSR_PT_INTERLINK)
++ continue;
++
++ ret = vlan_vid_add(port->dev, proto, vid);
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ if (ret) {
++ /* clean up Slave-B */
++ netdev_err(dev, "add vid failed for Slave-A\n");
++ if (is_slave_b_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_a_added = true;
++ break;
++
++ case HSR_PT_SLAVE_B:
++ if (ret) {
++ /* clean up Slave-A */
++ netdev_err(dev, "add vid failed for Slave-B\n");
++ if (is_slave_a_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_b_added = true;
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ vlan_vid_del(port->dev, proto, vid);
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+@@ -475,6 +546,8 @@ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
+ .ndo_set_rx_mode = hsr_set_rx_mode,
++ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+
+ static struct device_type hsr_type = {
+@@ -515,7 +588,8 @@ void hsr_dev_setup(struct net_device *dev)
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+- NETIF_F_HW_VLAN_CTAG_TX;
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ dev->features = dev->hw_features;
+
+@@ -602,6 +676,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ (slave[1]->features & NETIF_F_HW_HSR_FWD))
+ hsr->fwd_offloaded = true;
+
++ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+--
+2.51.0
+
--- /dev/null
+From 6e976baff4a03e4f04b4c2305f0b6d74a4d1df8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 17:17:10 +0530
+Subject: net: hsr: Disable promiscuous mode in offload mode
+
+From: Ravi Gunasekaran <r-gunasekaran@ti.com>
+
+[ Upstream commit e748d0fd66abc4b1c136022e4e053004fce2b792 ]
+
+When port-to-port forwarding for interfaces in an HSR node is enabled,
+disable promiscuous mode since L2 frame forwarding happens in the
+offloaded hardware.
+
+Signed-off-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230614114710.31400-1-r-gunasekaran@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 5 +++++
+ net/hsr/hsr_main.h | 1 +
+ net/hsr/hsr_slave.c | 15 +++++++++++----
+ 3 files changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 0ffb28406fdc0..4967dc22824c7 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -532,6 +532,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ if (res)
+ goto err_add_master;
+
++ /* HSR forwarding offload supported in lower device? */
++ if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
++ (slave[1]->features & NETIF_F_HW_HSR_FWD))
++ hsr->fwd_offloaded = true;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 53d1f7a824630..4188516cde5da 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -212,6 +212,7 @@ struct hsr_priv {
+ u8 net_id; /* for PRP, it occupies most significant 3 bits
+ * of lan_id
+ */
++ bool fwd_offloaded; /* Forwarding offloaded to HW */
+ unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+ /* Align to u16 boundary to avoid unaligned access
+ * in ether_addr_equal
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index 0e6daee488b4f..52302a0546133 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -137,9 +137,14 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ struct hsr_port *master;
+ int res;
+
+- res = dev_set_promiscuity(dev, 1);
+- if (res)
+- return res;
++ /* Don't use promiscuous mode for offload since L2 frame forward
++ * happens at the offloaded hardware.
++ */
++ if (!port->hsr->fwd_offloaded) {
++ res = dev_set_promiscuity(dev, 1);
++ if (res)
++ return res;
++ }
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr_dev = master->dev;
+@@ -158,7 +163,9 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ fail_rx_handler:
+ netdev_upper_dev_unlink(dev, hsr_dev);
+ fail_upper_dev_link:
+- dev_set_promiscuity(dev, -1);
++ if (!port->hsr->fwd_offloaded)
++ dev_set_promiscuity(dev, -1);
++
+ return res;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1c8a213b1746f04f424d644a3c861e7df607fcda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:09:13 +0200
+Subject: regulator: sy7636a: fix lifecycle of power good gpio
+
+From: Andreas Kemnade <akemnade@kernel.org>
+
+[ Upstream commit c05d0b32eebadc8be6e53196e99c64cf2bed1d99 ]
+
+Attach the power good GPIO to the regulator device's devres instead of
+the parent device to fix problems if probe is run multiple times
+(rmmod/insmod or probe deferral). Resources requested via devm_* on the
+parent are only released when the parent unbinds, so a re-probe of the
+regulator would try to claim a GPIO that is still held.
+
+Fixes: 8c485bedfb785 ("regulator: sy7636a: Initial commit")
+Signed-off-by: Andreas Kemnade <akemnade@kernel.org>
+Reviewed-by: Alistair Francis <alistair@alistair23.me>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Message-ID: <20250906-sy7636-rsrc-v1-2-e2886a9763a7@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/sy7636a-regulator.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index 8360b3947eada..e29ea02f65424 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -80,9 +80,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+@@ -102,7 +104,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ }
+
+ config.dev = &pdev->dev;
+- config.dev->of_node = pdev->dev.parent->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+--
+2.51.0
+
mtd-rawnand-stm32_fmc2-fix-dma_map_sg-error-check.patch
mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+genirq-provide-new-interfaces-for-affinity-hints.patch
+i40e-use-irq_update_affinity_hint.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+can-xilinx_can-xcan_write_frame-fix-use-after-free-o.patch
+net-hsr-disable-promiscuous-mode-in-offload-mode.patch
+net-hsr-add-support-for-mc-filtering-at-the-slave-de.patch
+net-hsr-add-vlan-ctag-filter-support.patch
+hsr-use-rtnl-lock-when-iterating-over-ports.patch
+hsr-use-hsr_for_each_port_rtnl-in-hsr_port_get_hsr.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
+regulator-sy7636a-fix-lifecycle-of-power-good-gpio.patch
+hrtimer-remove-unused-function.patch
+hrtimer-rename-__hrtimer_hres_active-to-hrtimer_hres.patch
+hrtimers-unconditionally-update-target-cpu-base-afte.patch
--- /dev/null
+From d7dcc0459c60025e07200c4145bc2a510c76f5ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and if Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such a case the ICMP packet might
+go through the segmentation logic (and be dropped) later if it reaches a
+path where the GSO status is checked and segmentation is required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example on re-transmission the skb won't be a
+ GSO one as it would be segmented (len > MSS) and the ICMP packet
+ should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
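+
+For reference, skb_gso_reset() simply clears the shared-info GSO fields,
+roughly (sketch of the helper in include/linux/skbuff.h):
+
+  static inline void skb_gso_reset(struct sk_buff *skb)
+  {
+      skb_shinfo(skb)->gso_size = 0;
+      skb_shinfo(skb)->gso_segs = 0;
+      skb_shinfo(skb)->gso_type = 0;
+  }
+
+With gso_size zeroed, skb_is_gso() returns false and the reused skb no
+longer enters the segmentation path.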
+
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 35189f1b361ea..3737188ba4e1e 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
--- /dev/null
+From a13250fbd318eb1b6520bbc84967a11dee46cc0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() when
+J1939_SOCK_BOUND was already set, but the error handling path for
+j1939_sk_bind() will not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo
+priv->ents[sa].nusers++ when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
--- /dev/null
+From df179129a9e9b60c16c5e040d04b81f7323a2a7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to
+call j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 29f9a343b0dbf..f477add424bc9 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From bd8e460b43ae3a9e9f611a874d9098d4c61176e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features on Clang.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
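+
+The arithmetic, spelled out (sketch):
+
+  s8 (*queue_priority_map)[2];   /* pointer to an array of 2 s8 */
+
+  /* sizeof(s8)                  == 1: one byte per element  */
+  /* sizeof(*queue_priority_map) == 2: one [2] row per queue */
+
+  queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+                                    sizeof(*queue_priority_map), GFP_KERNEL);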
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 01089e5c565f3..7a0f188fb43c5 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2029,8 +2029,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
--- /dev/null
+From 470bb461bcde4a4da6a55b88621ac7825ef36ee9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Dec 2020 18:36:53 +0800
+Subject: genirq/affinity: Add irq_update_affinity_desc()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit 1d3aec89286254487df7641c30f1b14ad1d127a5 ]
+
+Add a function to allow the affinity of an interrupt to be switched to
+managed, such that interrupts allocated for platform devices may be
+managed.
+
+This new interface has certain limitations, and attempts to use it in the
+following circumstances will fail (see the usage sketch after the list):
+- For when the kernel is configured for generic IRQ reservation mode (in
+ config GENERIC_IRQ_RESERVATION_MODE). The reason being that it could
+ conflict with managed vs. non-managed interrupt accounting.
+- The interrupt is already started, which should not be the case during
+ init
+- The interrupt is already configured as managed, which means double init
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/1606905417-183214-2-git-send-email-john.garry@huawei.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 8 +++++
+ kernel/irq/manage.c | 70 +++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 78 insertions(+)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 7b8bdc468492e..a601715279f50 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -341,6 +341,8 @@ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
++extern int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity);
+
+ extern int
+ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+@@ -376,6 +378,12 @@ static inline int irq_set_affinity_hint(unsigned int irq,
+ return -EINVAL;
+ }
+
++static inline int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity)
++{
++ return -EINVAL;
++}
++
+ static inline int
+ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+ {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 2a8a5e1779c9c..44d77e834c229 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -332,6 +332,76 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ return ret;
+ }
+
++/**
++ * irq_update_affinity_desc - Update affinity management for an interrupt
++ * @irq: The interrupt number to update
++ * @affinity: Pointer to the affinity descriptor
++ *
++ * This interface can be used to configure the affinity management of
++ * interrupts which have been allocated already.
++ *
++ * There are certain limitations on when it may be used - attempts to use it
++ * for when the kernel is configured for generic IRQ reservation mode (in
++ * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
++ * managed/non-managed interrupt accounting. In addition, attempts to use it on
++ * an interrupt which is already started or which has already been configured
++ * as managed will also fail, as these mean invalid init state or double init.
++ */
++int irq_update_affinity_desc(unsigned int irq,
++ struct irq_affinity_desc *affinity)
++{
++ struct irq_desc *desc;
++ unsigned long flags;
++ bool activated;
++ int ret = 0;
++
++ /*
++ * Supporting this with the reservation scheme used by x86 needs
++ * some more thought. Fail it for now.
++ */
++ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
++ return -EOPNOTSUPP;
++
++ desc = irq_get_desc_buslock(irq, &flags, 0);
++ if (!desc)
++ return -EINVAL;
++
++ /* Requires the interrupt to be shut down */
++ if (irqd_is_started(&desc->irq_data)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ /* Interrupts which are already managed cannot be modified */
++ if (irqd_affinity_is_managed(&desc->irq_data)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ /*
++ * Deactivate the interrupt. That's required to undo
++ * anything an earlier activation has established.
++ */
++ activated = irqd_is_activated(&desc->irq_data);
++ if (activated)
++ irq_domain_deactivate_irq(&desc->irq_data);
++
++ if (affinity->is_managed) {
++ irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
++ irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
++ }
++
++ cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
++
++ /* Restore the activation state */
++ if (activated)
++ irq_domain_activate_irq(&desc->irq_data, false);
++
++out_unlock:
++ irq_put_desc_busunlock(desc, flags);
++ return ret;
++}
++
+ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+--
+2.51.0
+
--- /dev/null
+From f5e5e97c4712ee3c1dfbbd1387ec35a94ba64969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 May 2021 11:17:26 +0200
+Subject: genirq: Export affinity setter for modules
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 4d80d6ca5d77fde9880da8466e5b64f250e5bf82 ]
+
+Perf modules abuse irq_set_affinity_hint() to set the affinity of system
+PMU interrupts just because irq_set_affinity() was not exported.
+
+The fact that irq_set_affinity_hint() actually sets the affinity is a
+non-documented side effect and the name is clearly saying it's a hint.
+
+To clean this up, export the real affinity setter.
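+
+A perf driver module can then do the obvious thing (sketch; irq and cpu
+are placeholders):
+
+  err = irq_set_affinity(irq, cpumask_of(cpu));
+  if (err)
+      return err;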
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20210518093117.968251441@linutronix.de
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 35 ++---------------------------------
+ kernel/irq/manage.c | 33 ++++++++++++++++++++++++++++++++-
+ 2 files changed, 34 insertions(+), 34 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index a601715279f50..85a5d13a7dc9c 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -303,39 +303,8 @@ struct irq_affinity_desc {
+
+ extern cpumask_var_t irq_default_affinity;
+
+-/* Internal implementation. Use the helpers below */
+-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+- bool force);
+-
+-/**
+- * irq_set_affinity - Set the irq affinity of a given irq
+- * @irq: Interrupt to set affinity
+- * @cpumask: cpumask
+- *
+- * Fails if cpumask does not contain an online CPU
+- */
+-static inline int
+-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+-{
+- return __irq_set_affinity(irq, cpumask, false);
+-}
+-
+-/**
+- * irq_force_affinity - Force the irq affinity of a given irq
+- * @irq: Interrupt to set affinity
+- * @cpumask: cpumask
+- *
+- * Same as irq_set_affinity, but without checking the mask against
+- * online cpus.
+- *
+- * Solely for low level cpu hotplug code, where we need to make per
+- * cpu interrupts affine before the cpu becomes online.
+- */
+-static inline int
+-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+-{
+- return __irq_set_affinity(irq, cpumask, true);
+-}
++extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
++extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 44d77e834c229..05601bcd30118 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -402,7 +402,8 @@ int irq_update_affinity_desc(unsigned int irq,
+ return ret;
+ }
+
+-int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
++static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
++ bool force)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+@@ -417,6 +418,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+ return ret;
+ }
+
++/**
++ * irq_set_affinity - Set the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
++ * @cpumask: cpumask
++ *
++ * Fails if cpumask does not contain an online CPU
++ */
++int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, false);
++}
++EXPORT_SYMBOL_GPL(irq_set_affinity);
++
++/**
++ * irq_force_affinity - Force the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
++ * @cpumask: cpumask
++ *
++ * Same as irq_set_affinity, but without checking the mask against
++ * online cpus.
++ *
++ * Solely for low level cpu hotplug code, where we need to make per
++ * cpu interrupts affine before the cpu becomes online.
++ */
++int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, true);
++}
++EXPORT_SYMBOL_GPL(irq_force_affinity);
++
+ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ {
+ unsigned long flags;
+--
+2.51.0
+
--- /dev/null
+From 2b1a5e928ba1a8de84aa4911b2dcc9fc0a40af07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:17 -0400
+Subject: genirq: Provide new interfaces for affinity hints
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 65c7cdedeb3026fabcc967a7aae2f755ad4d0783 ]
+
+The discussion about removing the side effect of irq_set_affinity_hint(),
+which actually applies the cpumask (if not NULL) as the affinity of the
+interrupt, unearthed a few unpleasantries:
+
+ 1) The modular perf drivers rely on the current behaviour for the very
+ wrong reasons.
+
+ 2) While none of the other drivers prevents user space from changing
+ the affinity, a cursory inspection shows that there are at least
+ expectations in some drivers.
+
+#1 needs to be cleaned up anyway, so that's not a problem
+
+#2 might result in subtle regressions, especially when irqbalanced (which
+ nowadays ignores the affinity hint) is disabled.
+
+Provide new interfaces:
+
+ irq_update_affinity_hint() - Only sets the affinity hint pointer
+ irq_set_affinity_and_hint() - Set the pointer and apply the affinity to
+ the interrupt
+
+Make irq_set_affinity_hint() a wrapper around irq_set_affinity_and_hint() and
+document it to be phased out.
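+
+Callers then pick the semantics they actually want (sketch):
+
+  /* publish the hint only; do not touch the affinity */
+  irq_update_affinity_hint(irq, mask);
+
+  /* publish the hint and apply @mask as the affinity */
+  irq_set_affinity_and_hint(irq, mask);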
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20210501021832.743094-1-jesse.brandeburg@intel.com
+Link: https://lore.kernel.org/r/20210903152430.244937-2-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/interrupt.h | 53 ++++++++++++++++++++++++++++++++++++++-
+ kernel/irq/manage.c | 8 +++---
+ 2 files changed, 56 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 85a5d13a7dc9c..7c06e33ad05ec 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -309,7 +309,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
++extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity);
++
++/**
++ * irq_update_affinity_hint - Update the affinity hint
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint, but does not change the affinity of the interrupt.
++ */
++static inline int
++irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, false);
++}
++
++/**
++ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
++ * cpumask to the interrupt
++ * @irq: Interrupt to update
++ * @m: cpumask pointer (NULL to clear the hint)
++ *
++ * Updates the affinity hint and if @m is not NULL it applies it as the
++ * affinity of that interrupt.
++ */
++static inline int
++irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
++{
++ return __irq_apply_affinity_hint(irq, m, true);
++}
++
++/*
++ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
++ * instead.
++ */
++static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++{
++ return irq_set_affinity_and_hint(irq, m);
++}
++
+ extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
+
+@@ -341,6 +380,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
+
+ static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
++static inline int irq_update_affinity_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
++static inline int irq_set_affinity_and_hint(unsigned int irq,
++ const struct cpumask *m)
++{
++ return -EINVAL;
++}
++
+ static inline int irq_set_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+ {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 05601bcd30118..32513a2e26eb0 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -448,7 +448,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+ }
+ EXPORT_SYMBOL_GPL(irq_force_affinity);
+
+-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
++int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
++ bool setaffinity)
+ {
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+@@ -457,12 +458,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ return -EINVAL;
+ desc->affinity_hint = m;
+ irq_put_desc_unlock(desc, flags);
+- /* set the initial affinity to prevent every interrupt being on CPU0 */
+- if (m)
++ if (m && setaffinity)
+ __irq_set_affinity(irq, m, false);
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
++EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
+
+ static void irq_affinity_notify(struct work_struct *work)
+ {
+--
+2.51.0
+
--- /dev/null
+From 0527d57bff20c5e550c712b0b5eecdf4d7f0e6e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this by inserting code that fails intentionally.
+
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 13d92c378957f..e37e8a31b35d4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3897,7 +3897,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
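
The one-character bug fixed above is easy to reproduce in isolation:
vsi->q_vectors is an array of pointers, so &vsi->q_vectors[vector] is the
address of the array slot rather than the q_vector that was registered as
dev_id, and free_irq() then fails the lookup. A minimal userspace analogue
(names are illustrative, not taken from the driver):

    #include <stdio.h>

    struct q_vector { int id; };

    int main(void)
    {
            struct q_vector a = { 0 }, b = { 1 };
            struct q_vector *q_vectors[] = { &a, &b }; /* array of pointers */

            /* request_irq() was handed q_vectors[i] as dev_id ... */
            void *registered = q_vectors[1];

            /* ... so free_irq() must be handed the same value. */
            void *right = q_vectors[1];   /* the element: matches */
            void *wrong = &q_vectors[1];  /* address of the slot: does not */

            printf("match: %d, mismatch: %d\n",
                   registered == right, registered == wrong);
            return 0;
    }
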
--- /dev/null
+From 65cb5a96f6da46e3ccf631e5c5bf2e1eb3627507 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 11:24:19 -0400
+Subject: i40e: Use irq_update_affinity_hint()
+
+From: Nitesh Narayan Lal <nitesh@redhat.com>
+
+[ Upstream commit d34c54d1739c2cdf2e4437b74e6da269147f4987 ]
+
+The driver uses irq_set_affinity_hint() for two purposes:
+
+ - To set the affinity_hint which is consumed by the userspace for
+ distributing the interrupts
+
+ - To apply an affinity that it provides for the i40e interrupts
+
+The latter is done to ensure that all the interrupts are evenly spread
+across all available CPUs. However, since commit a0c9259dc4e1 ("irq/matrix:
+Spread interrupts on allocation") the spreading of interrupts is
+dynamically performed at the time of allocation. Hence, there is no need
+for the drivers to enforce their own affinity for the spreading of
+interrupts.
+
+Also, irq_set_affinity_hint() applying the provided cpumask as an affinity
+for the interrupt is an undocumented side effect. To remove this side
+effect, irq_set_affinity_hint() has been marked as deprecated and new
+interfaces have been introduced. Hence, replace irq_set_affinity_hint()
+with the new interface irq_update_affinity_hint(), which only sets the
+pointer for the affinity_hint.
+
+Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Link: https://lore.kernel.org/r/20210903152430.244937-4-nitesh@redhat.com
+Stable-dep-of: 915470e1b44e ("i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index dfa06737ff05e..13d92c378957f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3882,10 +3882,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+- * irq_set_affinity_hint without making a copy.
++ * irq_update_affinity_hint without making a copy.
+ */
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
++ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ vsi->irqs_ready = true;
+@@ -3896,7 +3896,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ vector--;
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &vsi->q_vectors[vector]);
+ }
+ return err;
+@@ -4714,7 +4714,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+ /* remove our suggested affinity mask for this IRQ */
+- irq_set_affinity_hint(irq_num, NULL);
++ irq_update_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ free_irq(irq_num, vsi->q_vectors[i]);
+
+--
+2.51.0
+
--- /dev/null
+From e365c949f3741b60c22bf14a91aadafb16978d4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice, ixgbe,
+etc.) which correctly test the physical link state regardless of admin
+state.
+Remove the if_running check to ensure link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index de2c39436fe0a..8166fb619db45 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2070,11 +2070,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From 270f69abf6a2d9eb9f271966a21dd23746d046a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index f8a0966332805..7b2ab0cc562cc 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2018,7 +2018,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
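
It is worth noting why the unpatched code crashes even though put_device()
itself tolerates a NULL argument: evaluating &phy_dev->mdio.dev with
phy_dev == NULL computes an offset from a NULL base, which is undefined
behaviour and in practice yields a small non-NULL pointer that put_device()
then dereferences. A userspace sketch of the trap and the fix (the structs
are reduced, illustrative stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    struct device { int refcount; };
    struct mdio_device { int pad; struct device dev; };
    struct phy_device { int pad[4]; struct mdio_device mdio; };

    int main(void)
    {
            struct phy_device *phy_dev = NULL;  /* lookup failed */

            /* Taking &phy_dev->mdio.dev here would add a nonzero offset
             * to a NULL base, producing a bogus non-NULL pointer that a
             * NULL check in the callee cannot catch. Check the base
             * pointer first, as the fix does.
             */
            struct device *d = phy_dev ? &phy_dev->mdio.dev : NULL;

            printf("member offset: %zu, pointer: %p\n",
                   offsetof(struct phy_device, mdio.dev), (void *)d);
            return 0;
    }
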
mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
mtd-nand-raw-atmel-fix-comment-in-timings-preparation.patch
mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+genirq-affinity-add-irq_update_affinity_desc.patch
+genirq-export-affinity-setter-for-modules.patch
+genirq-provide-new-interfaces-for-affinity-hints.patch
+i40e-use-irq_update_affinity_hint.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
--- /dev/null
+From 0991f9b50dd2383660348b74637f6ffaec0dc52f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() when
+J1939_SOCK_BOUND was already set, but the error handling path of
+j1939_sk_bind() will not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo the
+priv->ents[sa].nusers++ increment when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
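
The rule the fix applies is general: a counter bumped early in a function
must be dropped on every error path that bails out before the matching put.
A compact userspace model of the corrected control flow (names are
illustrative, not the j1939 code):

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    static int nusers;

    /* Models j1939_local_ecu_get(): bump first, create second. */
    static int local_ecu_get(bool unicast, bool create_fails)
    {
            if (unicast)
                    nusers++;          /* taken early ... */

            if (create_fails) {
                    if (unicast)
                            nusers--;  /* ... so undo it on this path */
                    return -ENOMEM;
            }
            return 0;
    }

    int main(void)
    {
            local_ecu_get(true, true);   /* error path: no net change */
            local_ecu_get(true, false);  /* success path: one user */
            printf("nusers after one failure and one success: %d\n", nusers);
            return 0;
    }
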
--- /dev/null
+From 915d6cb117bae1b723e584a372af09bae6ffd513 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 0a4267a24263b..502975fd5f97e 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
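
The three added lines follow the usual RCU retract-then-release discipline:
clear the pointer so new readers stop finding the object, wait out readers
that may still hold the old value, and only then drop the reference. A
hedged kernel-style sketch of the pattern in its general form (struct obj
and obj_put() are placeholders, not the j1939 code):

    static void retract_and_put(struct obj __rcu **slot, struct obj *o)
    {
            /* New RCU readers can no longer find the object. */
            rcu_assign_pointer(*slot, NULL);
            /* Wait for pre-existing readers to finish. */
            synchronize_rcu();
            /* No reader can reach it now; drop the reference. */
            obj_put(o);
    }
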
--- /dev/null
+From 7f1da0c0e61e0d518c0a831579496d2a7a88b99e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 12:50:02 +0300
+Subject: can: xilinx_can: xcan_write_frame(): fix use-after-free of
+ transmitted SKB
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+[ Upstream commit ef79f00be72bd81d2e1e6f060d83cf7e425deee4 ]
+
+can_put_echo_skb() takes ownership of the SKB and it may be freed
+during or after the call.
+
+However, xilinx_can xcan_write_frame() keeps using SKB after the call.
+
+Fix that by only calling can_put_echo_skb() after the code is done
+touching the SKB.
+
+The tx_lock is held for the entire xcan_write_frame() execution and
+also on the can_get_echo_skb() side so the order of operations does not
+matter.
+
+An earlier fix commit 3d3c817c3a40 ("can: xilinx_can: Fix usage of skb
+memory") did not move the can_put_echo_skb() call far enough.
+
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: 1598efe57b3e ("can: xilinx_can: refactor code in preparation for CAN FD support")
+Link: https://patch.msgid.link/20250822095002.168389-1-anssi.hannula@bitwise.fi
+[mkl: add "commit" in front of sha1 in patch description]
+[mkl: fix indention]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/xilinx_can.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 43c812ea1de02..7d8dc36c9bbd8 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -622,14 +622,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
+
+- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+- else
+- can_put_echo_skb(skb, ndev, 0, 0);
+-
+- priv->tx_head++;
+-
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+@@ -662,6 +654,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ data[1]);
+ }
+ }
++
++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++ else
++ can_put_echo_skb(skb, ndev, 0, 0);
++
++ priv->tx_head++;
+ }
+
+ /**
+--
+2.51.0
+
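
The invariant being restored is that an skb handed to can_put_echo_skb() is
owned by the echo stack and may be freed during or right after the call, so
the hand-off has to be the last use. A userspace model of the ordering
(free() stands in for the echo stack releasing the buffer):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Takes ownership of buf and may free it, like can_put_echo_skb(). */
    static void put_echo(char *buf)
    {
            free(buf);
    }

    int main(void)
    {
            char *skb = strdup("frame payload");
            char id[8], data[8];

            if (!skb)
                    return 1;

            /* Finish every read of skb first ... */
            memcpy(id, skb, sizeof(id));
            memcpy(data, skb + 5, sizeof(data));

            /* ... and transfer ownership only as the final step. */
            put_echo(skb);
            /* skb must not be touched from here on. */

            printf("%.8s / %.8s\n", id, data);
            return 0;
    }
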
--- /dev/null
+From 5e59187827b8eaabbd242e2d6c3061278b6403b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 13:43:39 +0300
+Subject: dmaengine: idxd: Fix double free in idxd_setup_wqs()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 39aaa337449e71a41d4813be0226a722827ba606 ]
+
+The cleanup in idxd_setup_wqs() has had a couple of bugs because the error
+handling is a bit subtle. It's simpler to just rewrite it in a cleaner
+way. The issues here are:
+
+1) If "idxd->max_wqs" is <= 0 then we call put_device(conf_dev) when
+ "conf_dev" hasn't been initialized.
+2) If kzalloc_node() fails then again "conf_dev" is invalid. It's
+ either uninitialized or it points to the "conf_dev" from the
+ previous iteration so it leads to a double free.
+
+It's better to free partial loop iterations within the loop, so that the
+unwinding at the end only has to handle whole loop iterations. I also
+renamed the labels to describe what the goto does and not where the goto
+was located.
+
+Fixes: 3fd2f4bc010c ("dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs")
+Reported-by: Colin Ian King <colin.i.king@gmail.com>
+Closes: https://lore.kernel.org/all/20250811095836.1642093-1-colin.i.king@gmail.com/
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/aJnJW3iYTDDCj9sk@stanley.mountain
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 33 +++++++++++++++++----------------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index ea651d5cf332d..127a6a302a5bb 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -175,27 +175,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ rc = -ENOMEM;
+- goto err_bitmap;
++ goto err_free_wqs;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
+ wq->id = i;
+ wq->idxd = idxd;
+- device_initialize(wq_confdev(wq));
++ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+- if (rc < 0)
+- goto err;
++ if (rc < 0) {
++ put_device(conf_dev);
++ kfree(wq);
++ goto err_unwind;
++ }
+
+ mutex_init(&wq->wq_lock);
+ init_waitqueue_head(&wq->err_queue);
+@@ -206,15 +209,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
++ kfree(wq->wqcfg);
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err_opcap_bmap;
++ goto err_unwind;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
+@@ -225,13 +233,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+
+ return 0;
+
+-err_opcap_bmap:
+- kfree(wq->wqcfg);
+-
+-err:
+- put_device(conf_dev);
+- kfree(wq);
+-
++err_unwind:
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+@@ -240,11 +242,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+-
+ }
+ bitmap_free(idxd->wq_enable_map);
+
+-err_bitmap:
++err_free_wqs:
+ kfree(idxd->wqs);
+
+ return rc;
+--
+2.51.0
+
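
The convention the rewrite adopts is worth spelling out: on error inside the
loop, only the partially built iteration i is freed in place, so the shared
unwind label only ever tears down whole iterations 0..i-1. A userspace
skeleton of that shape (allocation details are illustrative):

    #include <stdlib.h>

    struct wq { char *cfg; };

    static int setup(struct wq **wqs, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct wq *wq = calloc(1, sizeof(*wq));

                    if (!wq)
                            goto err_unwind;
                    wq->cfg = calloc(1, 64);
                    if (!wq->cfg) {
                            free(wq);    /* partial iteration: free here */
                            goto err_unwind;
                    }
                    wqs[i] = wq;         /* iteration i is now whole */
            }
            return 0;

    err_unwind:
            while (--i >= 0) {           /* whole iterations only */
                    free(wqs[i]->cfg);
                    free(wqs[i]);
            }
            return -1;
    }

    int main(void)
    {
            struct wq *wqs[4];
            int i;

            if (setup(wqs, 4))
                    return 1;
            for (i = 0; i < 4; i++) {
                    free(wqs[i]->cfg);
                    free(wqs[i]);
            }
            return 0;
    }
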
--- /dev/null
+From df95f2e3b54a5578b5a8d389deda378583c98e03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features in Clang builds.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 89e06c87a258b..f24a685da2a22 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2072,8 +2072,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
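
The idiom the fix switches to is the standard guard against exactly this
bug: sizeof(*ptr) follows the pointee type even when that type is an array,
whereas a hand-written element type can silently go stale. A compilable
illustration (signed char stands in for the kernel's s8):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            signed char (*queue_priority_map)[2];
            size_t n = 3;  /* plays the role of ecc->num_tc + 1 */

            /* Buggy size: one byte per entry. */
            printf("sizeof(signed char)         = %zu\n",
                   sizeof(signed char));
            /* Fixed size: one two-byte row per entry. */
            printf("sizeof(*queue_priority_map) = %zu\n",
                   sizeof(*queue_priority_map));

            queue_priority_map = calloc(n, sizeof(*queue_priority_map));
            if (!queue_priority_map)
                    return 1;
            queue_priority_map[n - 1][1] = -1;  /* in bounds with the fix */
            free(queue_priority_map);
            return 0;
    }
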
--- /dev/null
+From cfaeb7b8b98f7d63e0b2435302a2fae7cf2c59c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 20:17:09 -0700
+Subject: docs: networking: can: change bcm_msg_head frames member to support
+ flexible array
+
+From: Alex Tran <alex.t.tran@gmail.com>
+
+[ Upstream commit 641427d5bf90af0625081bf27555418b101274cd ]
+
+The documentation of the 'bcm_msg_head' struct does not match how
+it is defined in 'bcm.h'. Change the frames member to a flexible array,
+matching the definition in the header file.
+
+See commit 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with
+flexible-array members").
+
+Signed-off-by: Alex Tran <alex.t.tran@gmail.com>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250904031709.1426895-1-alex.t.tran@gmail.com
+Fixes: 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with flexible-array members")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217783
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/can.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index ebc822e605f55..9ac6d3973ad58 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -740,7 +740,7 @@ The broadcast manager sends responses to user space in the same form:
+ struct timeval ival1, ival2; /* count and subsequent interval */
+ canid_t can_id; /* unique can_id for task */
+ __u32 nframes; /* number of can_frames following */
+- struct can_frame frames[0];
++ struct can_frame frames[];
+ };
+
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+--
+2.51.0
+
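
For code written against the documented struct, the flexible-array form
changes nothing on the wire but does change how the allocation size is
expressed: the header and the trailing frames are sized separately. A
userspace sketch (the structs are reduced to the members relevant here; the
real bcm_msg_head and can_frame carry more fields):

    #include <stdio.h>
    #include <stdlib.h>

    struct can_frame { unsigned int can_id; unsigned char data[8]; };

    struct bcm_msg_head {
            unsigned int nframes;
            struct can_frame frames[];  /* flexible array member */
    };

    int main(void)
    {
            unsigned int n = 4;
            struct bcm_msg_head *msg;

            /* sizeof(*msg) covers the head only; add the frames. */
            msg = malloc(sizeof(*msg) + n * sizeof(struct can_frame));
            if (!msg)
                    return 1;
            msg->nframes = n;
            msg->frames[n - 1].can_id = 0x123;  /* last frame, in bounds */

            printf("head %zu bytes + %u frames of %zu bytes\n",
                   sizeof(*msg), n, sizeof(struct can_frame));
            free(msg);
            return 0;
    }
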
--- /dev/null
+From 30adec0a7dd84b6aa28bffafefac510d48a8b16d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 15:04:41 +0800
+Subject: hrtimer: Remove unused function
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit 82ccdf062a64f3c4ac575c16179ce68edbbbe8e4 ]
+
+The function is defined, but not called anywhere:
+
+ kernel/time/hrtimer.c:1880:20: warning: unused function '__hrtimer_peek_ahead_timers'.
+
+Remove it.
+
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240322070441.29646-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8611
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 20 +-------------------
+ 1 file changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index b3860ec12450e..00eecd81ce3a6 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1935,25 +1935,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ tick_program_event(expires_next, 1);
+ pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
+ }
+-
+-/* called with interrupts disabled */
+-static inline void __hrtimer_peek_ahead_timers(void)
+-{
+- struct tick_device *td;
+-
+- if (!hrtimer_hres_active())
+- return;
+-
+- td = this_cpu_ptr(&tick_cpu_device);
+- if (td && td->evtdev)
+- hrtimer_interrupt(td->evtdev);
+-}
+-
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif /* !CONFIG_HIGH_RES_TIMERS */
++#endif /* !CONFIG_HIGH_RES_TIMERS */
+
+ /*
+ * Called from run_local_timers in hardirq context every jiffy
+--
+2.51.0
+
--- /dev/null
+From 96f0b8e186ce42cc28fa85b7f58405b53e6776a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2024 10:30:00 +0800
+Subject: hrtimer: Rename __hrtimer_hres_active() to hrtimer_hres_active()
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit b7c8e1f8a7b4352c1d0b4310686385e3cf6c104a ]
+
+The function hrtimer_hres_active() is defined in the hrtimer.c file, but
+not called elsewhere, so rename __hrtimer_hres_active() to
+hrtimer_hres_active() and remove the old hrtimer_hres_active() function.
+
+kernel/time/hrtimer.c:653:19: warning: unused function 'hrtimer_hres_active'.
+
+Fixes: 82ccdf062a64 ("hrtimer: Remove unused function")
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/r/20240418023000.130324-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8778
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 21 ++++++++-------------
+ 1 file changed, 8 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 00eecd81ce3a6..8971014df5b51 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -669,17 +669,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ /*
+ * Is the high resolution mode active ?
+ */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+ {
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ cpu_base->hres_active : 0;
+ }
+
+-static inline int hrtimer_hres_active(void)
+-{
+- return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *next_timer,
+ ktime_t expires_next)
+@@ -703,7 +698,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+- if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
++ if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ tick_program_event(expires_next, 1);
+@@ -812,12 +807,12 @@ static void retrigger_next_event(void *arg)
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
+ */
+- if (!__hrtimer_hres_active(base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(base) && !tick_nohz_active)
+ return;
+
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+- if (__hrtimer_hres_active(base))
++ if (hrtimer_hres_active(base))
+ hrtimer_force_reprogram(base, 0);
+ else
+ hrtimer_update_next_event(base);
+@@ -974,7 +969,7 @@ void clock_was_set(unsigned int bases)
+ cpumask_var_t mask;
+ int cpu;
+
+- if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+ goto out_timerfd;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1551,7 +1546,7 @@ u64 hrtimer_get_next_event(void)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (!__hrtimer_hres_active(cpu_base))
++ if (!hrtimer_hres_active(cpu_base))
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+@@ -1574,7 +1569,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (__hrtimer_hres_active(cpu_base)) {
++ if (hrtimer_hres_active(cpu_base)) {
+ unsigned int active;
+
+ if (!cpu_base->softirq_activated) {
+@@ -1946,7 +1941,7 @@ void hrtimer_run_queues(void)
+ unsigned long flags;
+ ktime_t now;
+
+- if (__hrtimer_hres_active(cpu_base))
++ if (hrtimer_hres_active(cpu_base))
+ return;
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 49186c35ef2b112355e2127802f225c95f05d365 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 16:10:25 +0800
+Subject: hrtimers: Unconditionally update target CPU base after offline timer
+ migration
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit e895f8e29119c8c966ea794af9e9100b10becb88 ]
+
+When testing softirq based hrtimers on an ARM32 board, with high resolution
+mode and NOHZ inactive, softirq based hrtimers fail to expire after being
+moved away from an offline CPU:
+
+CPU0                                            CPU1
+                                                hrtimer_start(..., HRTIMER_MODE_SOFT);
+cpu_down(CPU1)                                  ...
+                                                hrtimers_cpu_dying()
+                                                  // Migrate timers to CPU0
+                                                  smp_call_function_single(CPU0, retrigger_next_event);
+retrigger_next_event()
+  if (!highres && !nohz)
+        return;
+
+As retrigger_next_event() is a NOOP when both high resolution timers and
+NOHZ are inactive, CPU0's hrtimer_cpu_base::softirq_expires_next is not
+updated and the migrated softirq timers never expire unless there is a
+softirq based hrtimer queued on CPU0 later.
+
+Fix this by removing the hrtimer_hres_active() and tick_nohz_active() check
+in retrigger_next_event(), which enforces a full update of the CPU base.
+As this is not a fast path the extra cost does not matter.
+
+[ tglx: Massaged change log ]
+
+Fixes: 5c0930ccaad5 ("hrtimers: Push pending hrtimers away from outgoing CPU earlier")
+Co-developed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20250805081025.54235-1-wangxiongfeng2@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 8971014df5b51..8aa7ede57e718 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -806,10 +806,10 @@ static void retrigger_next_event(void *arg)
+ * of the next expiring timer is enough. The return from the SMP
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
++ *
++ * In periodic low resolution mode, the next softirq expiration
++ * must also be updated.
+ */
+- if (!hrtimer_hres_active(base) && !tick_nohz_active)
+- return;
+-
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+ if (hrtimer_hres_active(base))
+@@ -2286,11 +2286,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ &new_base->clock_base[i]);
+ }
+
+- /*
+- * The migration might have changed the first expiring softirq
+- * timer on this CPU. Update it.
+- */
+- __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+ /* Tell the other CPU to retrigger the next event */
+ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+--
+2.51.0
+
--- /dev/null
+From 70c0a0910d8576f311de3ef067c91e0fdc068bea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:32 +0000
+Subject: hsr: use hsr_for_each_port_rtnl in hsr_port_get_hsr
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 393c841fe4333cdd856d0ca37b066d72746cfaa6 ]
+
+hsr_port_get_hsr() iterates over ports using hsr_for_each_port(),
+but many of its callers do not hold the required RCU lock.
+
+Switch to hsr_for_each_port_rtnl(), since most callers already hold
+the rtnl lock. After review, all callers are covered by either the rtnl
+lock or the RCU lock, except hsr_dev_xmit(). Fix this by adding an
+RCU read lock there.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 3 +++
+ net/hsr/hsr_main.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 8b98b3f3b71d9..0b23d52b8d87a 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -228,6 +228,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
++ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+@@ -240,6 +241,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+ }
++ rcu_read_unlock();
++
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index c325ddad539a7..76a1958609e29 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From ace4ce5b5c897ec74acbe536ad9f0387c5abd0b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:31 +0000
+Subject: hsr: use rtnl lock when iterating over ports
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 8884c693991333ae065830554b9b0c96590b1bb2 ]
+
+hsr_for_each_port is called in many places without holding the RCU read
+lock, which may trigger warnings on debug kernels. Most of the callers
+actually hold the rtnl lock. So add a new helper hsr_for_each_port_rtnl
+to allow callers in suitable contexts to iterate ports safely without
+explicit RCU locking.
+
+This patch only fixes the callers that hold the rtnl lock. Other callers
+will be fixed in later patches.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 18 +++++++++---------
+ net/hsr/hsr_main.c | 2 +-
+ net/hsr/hsr_main.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index cb4787327c66b..8b98b3f3b71d9 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+
+ ASSERT_RTNL();
+
+- hsr_for_each_port(master->hsr, port) {
++ hsr_for_each_port_rtnl(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+@@ -109,7 +109,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+@@ -144,7 +144,7 @@ static int hsr_dev_open(struct net_device *dev)
+ hsr = netdev_priv(dev);
+ designation = '\0';
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -174,7 +174,7 @@ static int hsr_dev_close(struct net_device *dev)
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -207,7 +207,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+@@ -425,7 +425,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -447,7 +447,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -475,7 +475,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+@@ -521,7 +521,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 257b50124cee5..c325ddad539a7 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 044e0b456fcfb..8c011b113411f 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -215,6 +215,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
++#define hsr_for_each_port_rtnl(hsr, port) \
++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+ /* Caller must ensure skb is a valid HSR frame */
+--
+2.51.0
+
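
The new macro relies on the optional fourth argument that
list_for_each_entry_rcu() accepts for exactly this situation: a lockdep
expression naming the lock that makes the traversal safe when
rcu_read_lock() is not held. The same idiom works for any rtnl-protected
RCU list (a sketch with illustrative names):

    /* Iterate an RCU-managed list from a path that holds rtnl instead of
     * rcu_read_lock(); lockdep_rtnl_is_held() keeps debug kernels from
     * flagging the traversal as an RCU-usage error.
     */
    #define for_each_foo_rtnl(foo, head) \
            list_for_each_entry_rcu((foo), (head), list, \
                                    lockdep_rtnl_is_held())
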
--- /dev/null
+From a147190d6e296eba9f4ab60050e74ecdef91883d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this by inserting code to fail intentionally.
+
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 37d83b4bca7fd..e01eab03971fa 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4179,7 +4179,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From 6993b30c5c786c8aeb868133b76ff4db69a582b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice, ixgbe,
+etc.) which correctly test the physical link state regardless of admin
+state.
+Remove the if_running check to ensure link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ceff537d9d22d..ba067c3860a51 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From 1106423ddd4d196fca2025f84ff9232548abb0fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index d10db5d6d226a..ca271d7a388b4 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2137,7 +2137,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 612929b43d33fb880f9cc5e43356b290cf24cbec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 11:07:53 +0530
+Subject: net: hsr: Add support for MC filtering at the slave device
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 36b20fcdd9663ced36d3aef96f0eff8eb79de4b8 ]
+
+When the MC (multicast) list is updated by the networking layer due to a
+user command, as well as when the allmulti flag is set, it needs to be
+passed to the enslaved Ethernet devices. This patch allows this
+to happen by implementing the ndo_change_rx_flags() and ndo_set_rx_mode()
+API calls, which in turn pass it to the slave devices using
+existing API calls.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 67 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 66 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 511407df49151..e9a1a5cf120dd 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -170,7 +170,24 @@ static int hsr_dev_open(struct net_device *dev)
+
+ static int hsr_dev_close(struct net_device *dev)
+ {
+- /* Nothing to do here. */
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_uc_unsync(port->dev, dev);
++ dev_mc_unsync(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -401,12 +418,60 @@ void hsr_del_ports(struct hsr_priv *hsr)
+ hsr_del_port(port);
+ }
+
++static void hsr_set_rx_mode(struct net_device *dev)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_mc_sync_multiple(port->dev, dev);
++ dev_uc_sync_multiple(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++static void hsr_change_rx_flags(struct net_device *dev, int change)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(port->dev,
++ dev->flags &
++ IFF_ALLMULTI ? 1 : -1);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
++ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
++ .ndo_set_rx_mode = hsr_set_rx_mode,
+ };
+
+ static struct device_type hsr_type = {
+--
+2.51.0
+
--- /dev/null
+From 16ddd4bdd7025d48848f1889acace2c1c47f0da6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2024 14:47:08 +0530
+Subject: net: hsr: Add VLAN CTAG filter support
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 1a8a63a5305e95519de6f941922dfcd8179f82e5 ]
+
+This patch adds support for VLAN ctag based filtering at slave devices.
+The slave ethernet device may be capable of filtering ethernet packets
+based on VLAN ID. This requires that when the VLAN interface is created
+over an HSR/PRP interface, it passes the VID information to the
+associated slave ethernet devices so that it updates the hardware
+filters to filter ethernet frames based on VID. This patch adds the
+required functions to propagate the vid information to the slave
+devices.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20241106091710.3308519-3-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 80 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 79 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index e9a1a5cf120dd..cb4787327c66b 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -464,6 +464,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ }
+ }
+
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ bool is_slave_a_added = false;
++ bool is_slave_b_added = false;
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++ int ret = 0;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER ||
++ port->type == HSR_PT_INTERLINK)
++ continue;
++
++ ret = vlan_vid_add(port->dev, proto, vid);
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ if (ret) {
++ /* clean up Slave-B */
++ netdev_err(dev, "add vid failed for Slave-A\n");
++ if (is_slave_b_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_a_added = true;
++ break;
++
++ case HSR_PT_SLAVE_B:
++ if (ret) {
++ /* clean up Slave-A */
++ netdev_err(dev, "add vid failed for Slave-B\n");
++ if (is_slave_a_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_b_added = true;
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ vlan_vid_del(port->dev, proto, vid);
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+@@ -472,6 +543,8 @@ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
+ .ndo_set_rx_mode = hsr_set_rx_mode,
++ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+
+ static struct device_type hsr_type = {
+@@ -512,7 +585,8 @@ void hsr_dev_setup(struct net_device *dev)
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+- NETIF_F_HW_VLAN_CTAG_TX;
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ dev->features = dev->hw_features;
+
+@@ -599,6 +673,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ (slave[1]->features & NETIF_F_HW_HSR_FWD))
+ hsr->fwd_offloaded = true;
+
++ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+--
+2.51.0
+
--- /dev/null
+From 73ea12f7113c7080ae55aa60f8e9cad789234361 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 17:17:10 +0530
+Subject: net: hsr: Disable promiscuous mode in offload mode
+
+From: Ravi Gunasekaran <r-gunasekaran@ti.com>
+
+[ Upstream commit e748d0fd66abc4b1c136022e4e053004fce2b792 ]
+
+When port-to-port forwarding for interfaces in HSR node is enabled,
+disable promiscuous mode since L2 frame forward happens at the
+offloaded hardware.
+
+Signed-off-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230614114710.31400-1-r-gunasekaran@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 5 +++++
+ net/hsr/hsr_main.h | 1 +
+ net/hsr/hsr_slave.c | 15 +++++++++++----
+ 3 files changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 6e434af189bc0..511407df49151 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -529,6 +529,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ if (res)
+ goto err_add_master;
+
++ /* HSR forwarding offload supported in lower device? */
++ if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
++ (slave[1]->features & NETIF_F_HW_HSR_FWD))
++ hsr->fwd_offloaded = true;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 58a5a8b3891ff..044e0b456fcfb 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -202,6 +202,7 @@ struct hsr_priv {
+ u8 net_id; /* for PRP, it occupies most significant 3 bits
+ * of lan_id
+ */
++ bool fwd_offloaded; /* Forwarding offloaded to HW */
+ unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+ /* Align to u16 boundary to avoid unaligned access
+ * in ether_addr_equal
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index 0e6daee488b4f..52302a0546133 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -137,9 +137,14 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ struct hsr_port *master;
+ int res;
+
+- res = dev_set_promiscuity(dev, 1);
+- if (res)
+- return res;
++ /* Don't use promiscuous mode for offload since L2 frame forward
++ * happens at the offloaded hardware.
++ */
++ if (!port->hsr->fwd_offloaded) {
++ res = dev_set_promiscuity(dev, 1);
++ if (res)
++ return res;
++ }
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr_dev = master->dev;
+@@ -158,7 +163,9 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ fail_rx_handler:
+ netdev_upper_dev_unlink(dev, hsr_dev);
+ fail_upper_dev_link:
+- dev_set_promiscuity(dev, -1);
++ if (!port->hsr->fwd_offloaded)
++ dev_set_promiscuity(dev, -1);
++
+ return res;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 744bcabffd11bf3c468f816b077b89f8b5d92d8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:09:13 +0200
+Subject: regulator: sy7636a: fix lifecycle of power good gpio
+
+From: Andreas Kemnade <akemnade@kernel.org>
+
+[ Upstream commit c05d0b32eebadc8be6e53196e99c64cf2bed1d99 ]
+
+Attach the power good gpio to the regulator device devres instead of the
+parent device to fix problems if probe is run multiple times
+(rmmod/insmod or some deferral).
+
+Fixes: 8c485bedfb785 ("regulator: sy7636a: Initial commit")
+Signed-off-by: Andreas Kemnade <akemnade@kernel.org>
+Reviewed-by: Alistair Francis <alistair@alistair23.me>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Message-ID: <20250906-sy7636-rsrc-v1-2-e2886a9763a7@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/sy7636a-regulator.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index 29fc27c2cda0b..dd3b0137d902c 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ }
+
+ config.dev = &pdev->dev;
+- config.dev->of_node = pdev->dev.parent->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+--
+2.51.0
+
input-iqs7222-avoid-enabling-unused-interrupts.patch
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
revert-net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+docs-networking-can-change-bcm_msg_head-frames-membe.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+can-xilinx_can-xcan_write_frame-fix-use-after-free-o.patch
+net-hsr-disable-promiscuous-mode-in-offload-mode.patch
+net-hsr-add-support-for-mc-filtering-at-the-slave-de.patch
+net-hsr-add-vlan-ctag-filter-support.patch
+hsr-use-rtnl-lock-when-iterating-over-ports.patch
+hsr-use-hsr_for_each_port_rtnl-in-hsr_port_get_hsr.patch
+dmaengine-idxd-fix-double-free-in-idxd_setup_wqs.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
+regulator-sy7636a-fix-lifecycle-of-power-good-gpio.patch
+hrtimer-remove-unused-function.patch
+hrtimer-rename-__hrtimer_hres_active-to-hrtimer_hres.patch
+hrtimers-unconditionally-update-target-cpu-base-afte.patch
--- /dev/null
+From da4adb48c6ad038c67c1dbab16cad94f499a725c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and if Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such case the ICMP packet might
+go through the segmentation logic (and dropped) later if it reaches a
+path were the GSO status is checked and segmentation is required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example on re-transmission the skb won't be a
+ GSO one as it would be segmented (len > MSS) and the ICMP packet
+ should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
+
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index deb08cab44640..75e3d7501752d 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
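The helper relied on here clears the GSO bookkeeping in the skb's shared
info so the reused skb is no longer treated as segmentable. Roughly, per
its definition in include/linux/skbuff.h:

	static inline void skb_gso_reset(struct sk_buff *skb)
	{
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_segs = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

With gso_size zeroed, skb_is_gso() returns false and the ICMP reply no
longer enters the segmentation path described in step 4 above.
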
--- /dev/null
+From 05d01d62ea1d51007f0c416fce7042e4ec0db651 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() when
+J1939_SOCK_BOUND was already set. However, the error handling path of
+j1939_sk_bind() will not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails, so j1939_local_ecu_get() needs to undo priv->ents[sa].nusers++
+when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
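The invariant the fix restores is that j1939_local_ecu_get() either
succeeds with priv->ents[sa].nusers incremented or fails with no side
effects, since callers only pair a successful get with
j1939_local_ecu_put(). A minimal sketch of that shape, with the locking
and ECU lookup from net/can/j1939/bus.c elided and lookup_or_create()
as a hypothetical stand-in:

	int example_local_ecu_get(struct j1939_priv *priv, u8 sa)
	{
		int err;

		if (j1939_address_is_unicast(sa))
			priv->ents[sa].nusers++;	/* taken before the lookup */

		err = lookup_or_create(priv);		/* hypothetical stand-in */
		if (err) {
			if (j1939_address_is_unicast(sa))
				priv->ents[sa].nusers--; /* undo: no put() will follow */
			return err;
		}
		return 0;
	}
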
--- /dev/null
+From d00fe2705f8e4e3959e03a1918b930de265a9d57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 17226b2341d03..75c2b9b233901 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
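The fix follows the usual unpublish-then-release ordering for a pointer
that readers dereference under RCU: clear jsk->priv first so no new
reader can pick it up, wait out readers already in flight, then drop the
reference. Schematically (a sketch, not the full error path):

	jsk->priv = NULL;	/* unpublish the priv pointer */
	synchronize_rcu();	/* wait for in-flight RCU readers */
	j1939_priv_put(priv);	/* now safe to drop the reference */

Without the put, a failed bind followed by a retry takes a second
reference that j1939_sk_sock_destruct() can never balance.
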
--- /dev/null
+From 849f5f5c7291f32efe979b58ce3931e6b9215ff9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 12:50:02 +0300
+Subject: can: xilinx_can: xcan_write_frame(): fix use-after-free of
+ transmitted SKB
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+[ Upstream commit ef79f00be72bd81d2e1e6f060d83cf7e425deee4 ]
+
+can_put_echo_skb() takes ownership of the SKB and it may be freed
+during or after the call.
+
+However, xilinx_can xcan_write_frame() keeps using SKB after the call.
+
+Fix that by only calling can_put_echo_skb() after the code is done
+touching the SKB.
+
+The tx_lock is held for the entire xcan_write_frame() execution and
+also on the can_get_echo_skb() side so the order of operations does not
+matter.
+
+An earlier fix commit 3d3c817c3a40 ("can: xilinx_can: Fix usage of skb
+memory") did not move the can_put_echo_skb() call far enough.
+
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: 1598efe57b3e ("can: xilinx_can: refactor code in preparation for CAN FD support")
+Link: https://patch.msgid.link/20250822095002.168389-1-anssi.hannula@bitwise.fi
+[mkl: add "commit" in front of sha1 in patch description]
+[mkl: fix indention]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/xilinx_can.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 436c0e4b0344c..91382225f1140 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
+
+- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+- else
+- can_put_echo_skb(skb, ndev, 0, 0);
+-
+- priv->tx_head++;
+-
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ data[1]);
+ }
+ }
++
++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++ else
++ can_put_echo_skb(skb, ndev, 0, 0);
++
++ priv->tx_head++;
+ }
+
+ /**
+--
+2.51.0
+
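The ownership rule at work: once can_put_echo_skb() is called, the skb
belongs to the echo machinery and may be freed at any point, so every
read of the frame data must happen first. A condensed sketch of the
corrected ordering, with write_regs_from_frame() standing in for the
register writes in xcan_write_frame():

	static void example_write_frame(struct net_device *ndev,
					struct sk_buff *skb)
	{
		struct can_frame *cf = (struct can_frame *)skb->data;

		write_regs_from_frame(cf);	/* last access to skb data */

		/* ownership transfers here; skb may be freed during
		 * or after the call
		 */
		can_put_echo_skb(skb, ndev, 0, 0);
	}
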
--- /dev/null
+From 7d85773eb4e5b12d7401bc930554c3914d2a3e49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Apr 2025 10:00:04 -0700
+Subject: Disable SLUB_TINY for build testing
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 6f110a5e4f9977c31ce76fefbfef6fd4eab6bfb7 ]
+
+... and don't error out so hard on missing module descriptions.
+
+Before commit 6c6c1fc09de3 ("modpost: require a MODULE_DESCRIPTION()")
+we used to warn about missing module descriptions, but only when
+building with extra warnings (i.e. 'W=1').
+
+After that commit the warning became an unconditional hard error.
+
+And it turns out not all modules have been converted despite the claims
+to the contrary. As reported by Damian Tometzki, the slub KUnit test
+didn't have a module description, and apparently nobody ever really
+noticed.
+
+The reason nobody noticed seems to be that the slub KUnit tests get
+disabled by SLUB_TINY, which also ends up disabling a lot of other code,
+both in tests and in slub itself. And so anybody doing full build tests
+didn't actually see this failure.
+
+So let's disable SLUB_TINY for build-only tests, since it clearly ends
+up limiting build coverage. Also turn the missing module descriptions
+error back into a warning, but let's keep it around for non-'W=1'
+builds.
+
+Reported-by: Damian Tometzki <damian@riscv-rocks.de>
+Link: https://lore.kernel.org/all/01070196099fd059-e8463438-7b1b-4ec8-816d-173874be9966-000000@eu-central-1.amazonses.com/
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Fixes: 6c6c1fc09de3 ("modpost: require a MODULE_DESCRIPTION()")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 33fa51d608dc5..59c36bb9ce6b0 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -244,7 +244,7 @@ config SLUB
+
+ config SLUB_TINY
+ bool "Configure for minimal memory footprint"
+- depends on EXPERT
++ depends on EXPERT && !COMPILE_TEST
+ select SLAB_MERGE_DEFAULT
+ help
+ Configures the slab allocator in a way to achieve minimal memory
+--
+2.51.0
+
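For reference, the tag modpost complains about is a single line of
module metadata; the slub KUnit test was missing something like the
following (the description wording is illustrative):

	#include <linux/module.h>

	MODULE_DESCRIPTION("KUnit tests for the slub allocator");
	MODULE_LICENSE("GPL");
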
--- /dev/null
+From 7d77affd978fbdd1130919a26e014223e89615c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 13:43:39 +0300
+Subject: dmaengine: idxd: Fix double free in idxd_setup_wqs()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 39aaa337449e71a41d4813be0226a722827ba606 ]
+
+The cleanup in idxd_setup_wqs() has had a couple of bugs because the error
+handling is a bit subtle. It's simpler to just rewrite it in a cleaner
+way. The issues here are:
+
+1) If "idxd->max_wqs" is <= 0 then we call put_device(conf_dev) when
+ "conf_dev" hasn't been initialized.
+2) If kzalloc_node() fails then again "conf_dev" is invalid. It's
+ either uninitialized or it points to the "conf_dev" from the
+ previous iteration so it leads to a double free.
+
+It's better to free partial loop iterations within the loop and then
+the unwinding at the end can handle whole loop iterations. I also
+renamed the labels to describe what the goto does and not where the goto
+was located.
+
+Fixes: 3fd2f4bc010c ("dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs")
+Reported-by: Colin Ian King <colin.i.king@gmail.com>
+Closes: https://lore.kernel.org/all/20250811095836.1642093-1-colin.i.king@gmail.com/
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/aJnJW3iYTDDCj9sk@stanley.mountain
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 33 +++++++++++++++++----------------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index a7344aac8dd98..74a83203181d6 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -187,27 +187,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ rc = -ENOMEM;
+- goto err_bitmap;
++ goto err_free_wqs;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
+ wq->id = i;
+ wq->idxd = idxd;
+- device_initialize(wq_confdev(wq));
++ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+- if (rc < 0)
+- goto err;
++ if (rc < 0) {
++ put_device(conf_dev);
++ kfree(wq);
++ goto err_unwind;
++ }
+
+ mutex_init(&wq->wq_lock);
+ init_waitqueue_head(&wq->err_queue);
+@@ -218,15 +221,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
++ kfree(wq->wqcfg);
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err_opcap_bmap;
++ goto err_unwind;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
+@@ -237,13 +245,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+
+ return 0;
+
+-err_opcap_bmap:
+- kfree(wq->wqcfg);
+-
+-err:
+- put_device(conf_dev);
+- kfree(wq);
+-
++err_unwind:
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+@@ -252,11 +254,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+-
+ }
+ bitmap_free(idxd->wq_enable_map);
+
+-err_bitmap:
++err_free_wqs:
+ kfree(idxd->wqs);
+
+ return rc;
+--
+2.51.0
+
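The pattern adopted by the rewrite generalizes: free whatever the
current, partially constructed iteration owns at the failure site inside
the loop, and let the single error label unwind only the fully
constructed iterations 0..i-1. A sketch with hypothetical helpers:

	for (i = 0; i < n; i++) {
		item = alloc_item();		/* hypothetical */
		if (!item)
			goto err_unwind;	/* nothing partial to free */
		if (setup_item(item)) {		/* hypothetical */
			free_item(item);	/* partial iteration freed here */
			goto err_unwind;
		}
		items[i] = item;
	}
	return 0;

err_unwind:
	while (--i >= 0)
		free_item(items[i]);		/* whole iterations only */
	return -ENOMEM;

The error label then never sees a half-initialized item, which is exactly
what went wrong with conf_dev above.
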
--- /dev/null
+From 25af345c9187793641ce343d57b6e1099d4a8df8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:13 +0800
+Subject: dmaengine: idxd: Fix refcount underflow on module unload
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit b7cb9a034305d52222433fad10c3de10204f29e7 ]
+
+A recent refactor introduced a misplaced put_device() call, resulting in a
+reference count underflow during module unload.
+
+There is no need to add additional put_device() calls for idxd groups,
+engines, or workqueues. Although the commit claims "Note, this also
+fixes the missing put_device() for idxd groups, engines, and wqs.",
+it appears no such omission actually existed. The required cleanup is
+already handled by the call chain:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+Extend idxd_cleanup() to handle the remaining necessary cleanup and
+remove idxd_cleanup_internals(), which duplicates deallocation logic
+for idxd, engines, groups, and workqueues. Memory management is also
+properly handled through the Linux device model.
+
+Fixes: a409e919ca32 ("dmaengine: idxd: Refactor remove call with idxd_cleanup() helper")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-3-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index d6308adc4eeb9..a7344aac8dd98 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -918,7 +918,10 @@ static void idxd_remove(struct pci_dev *pdev)
+ device_unregister(idxd_confdev(idxd));
+ idxd_shutdown(pdev);
+ idxd_device_remove_debugfs(idxd);
+- idxd_cleanup(idxd);
++ perfmon_pmu_remove(idxd);
++ idxd_cleanup_interrupts(idxd);
++ if (device_pasid_enabled(idxd))
++ idxd_disable_system_pasid(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+ pci_disable_device(pdev);
+--
+2.51.0
+
--- /dev/null
+From f4a2d209bf1aa15315b71d9b81305a3379cd3baf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:12 +0800
+Subject: dmaengine: idxd: Remove improper idxd_free
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit f41c538881eec4dcf5961a242097d447f848cda6 ]
+
+The call to idxd_free() introduces a duplicate put_device() leading to a
+reference count underflow:
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 15 PID: 4428 at lib/refcount.c:28 refcount_warn_saturate+0xbe/0x110
+...
+Call Trace:
+ <TASK>
+ idxd_remove+0xe4/0x120 [idxd]
+ pci_device_remove+0x3f/0xb0
+ device_release_driver_internal+0x197/0x200
+ driver_detach+0x48/0x90
+ bus_remove_driver+0x74/0xf0
+ pci_unregister_driver+0x2e/0xb0
+ idxd_exit_module+0x34/0x7a0 [idxd]
+ __do_sys_delete_module.constprop.0+0x183/0x280
+ do_syscall_64+0x54/0xd70
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The idxd_unregister_devices() which is invoked at the very beginning of
+idxd_remove(), already takes care of the necessary put_device() through the
+following call path:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+In addition, when CONFIG_DEBUG_KOBJECT_RELEASE is enabled, put_device() may
+trigger asynchronous cleanup via schedule_delayed_work(). If idxd_free() is
+called immediately after, it can result in a use-after-free.
+
+Remove the improper idxd_free() to avoid both the refcount underflow and
+potential memory corruption during module unload.
+
+Fixes: d5449ff1b04d ("dmaengine: idxd: Add missing idxd cleanup to fix memory leak in remove call")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-2-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 18997f80bdc97..d6308adc4eeb9 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -921,7 +921,6 @@ static void idxd_remove(struct pci_dev *pdev)
+ idxd_cleanup(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+- idxd_free(idxd);
+ pci_disable_device(pdev);
+ }
+
+--
+2.51.0
+
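Both idxd regressions reduce to the same driver-model rule:
device_unregister() already performs the final put_device() for the
reference taken at device_initialize()/device_add() time, so any extra
put on that path underflows the refcount. Schematically (a sketch):

	device_initialize(dev);		/* refcount starts at 1 */
	rc = device_add(dev);		/* device is published */
	...
	device_unregister(dev);		/* device_del() + put_device() */
	/* put_device(dev); */		/* one put too many: underflow WARN */
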
--- /dev/null
+From 9424adb030d49fe34398332e44cba736000a5b58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features on Clang.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 7f861fb07cb83..6333426b4c96c 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2063,8 +2063,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
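The allocation idiom is easy to get wrong with pointer-to-array types,
which is why sizeof(*ptr) is preferred: it always tracks the element
type. Condensed from the fix:

	s8 (*queue_priority_map)[2];	/* element type s8[2]: 2 bytes */

	/* wrong: n + 1 elements of sizeof(s8) == 1 byte each */
	queue_priority_map = devm_kcalloc(dev, n + 1, sizeof(s8), GFP_KERNEL);

	/* right: n + 1 elements of sizeof(s8[2]) == 2 bytes each */
	queue_priority_map = devm_kcalloc(dev, n + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
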
--- /dev/null
+From a6715084d8a16d83e70de419091ad750d5d64ef1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 20:17:09 -0700
+Subject: docs: networking: can: change bcm_msg_head frames member to support
+ flexible array
+
+From: Alex Tran <alex.t.tran@gmail.com>
+
+[ Upstream commit 641427d5bf90af0625081bf27555418b101274cd ]
+
+The documentation of the 'bcm_msg_head' struct does not match how
+it is defined in 'bcm.h'. Changed the frames member to a flexible array,
+matching the definition in the header file.
+
+See commit 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with
+flexible-array members")
+
+Signed-off-by: Alex Tran <alex.t.tran@gmail.com>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250904031709.1426895-1-alex.t.tran@gmail.com
+Fixes: 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with flexible-array members")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217783
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/can.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index 62519d38c58ba..58cc609e8669b 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -742,7 +742,7 @@ The broadcast manager sends responses to user space in the same form:
+ struct timeval ival1, ival2; /* count and subsequent interval */
+ canid_t can_id; /* unique can_id for task */
+ __u32 nframes; /* number of can_frames following */
+- struct can_frame frames[0];
++ struct can_frame frames[];
+ };
+
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+--
+2.51.0
+
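With the flexible-array form, sizeof(struct bcm_msg_head) contributes no
frame storage, so user space must size its buffer explicitly. A minimal
sketch of allocating a BCM message for nframes frames:

	struct bcm_msg_head *msg;

	msg = malloc(sizeof(*msg) + nframes * sizeof(struct can_frame));
	if (msg)
		msg->nframes = nframes;	/* frames[] entries follow the head */

This is also why frames[0] was replaced treewide: zero-length arrays
predate C99 flexible array members and defeat compiler bounds checking.
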
--- /dev/null
+From 00b4c611347e6faf37e575598c174031aeafb5cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 09:11:12 -0400
+Subject: drm/amd/display: use udelay rather than fsleep
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 1d66c3f2b8c0b5c51f3f4fe29b362c9851190c5a ]
+
+This function can be called from an atomic context so we can't use
+fsleep().
+
+Fixes: 01f60348d8fb ("drm/amd/display: Fix 'failed to blank crtc!'")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4549
+Cc: Wen Chen <Wen.Chen3@amd.com>
+Cc: Fangzhi Zuo <jerry.zuo@amd.com>
+Cc: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 27e4dc2c0543fd1808cc52bd888ee1e0533c4a2e)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index 08fc2a2c399f6..d96f52a551940 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -945,7 +945,7 @@ enum dc_status dcn20_enable_stream_timing(
+ return DC_ERROR_UNEXPECTED;
+ }
+
+- fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
++ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+
+ params.vertical_total_min = stream->adjust.v_total_min;
+ params.vertical_total_max = stream->adjust.v_total_max;
+--
+2.51.0
+
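The context rule applied here: fsleep() may schedule and is therefore
only legal where sleeping is allowed, while udelay() busy-waits and is
safe in atomic context. Illustrated as a sketch:

	spin_lock_irqsave(&lock, flags);
	udelay(10);		/* ok: busy-wait, never schedules */
	/* fsleep(10); */	/* bug: may sleep while atomic */
	spin_unlock_irqrestore(&lock, flags);

The trade-off is that the CPU spins for the full delay, which is why
fsleep() remains preferable wherever the calling context allows it.
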
--- /dev/null
+From 9c81141a13a628c206215e9e95a1abc6501f722b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 12:21:33 -0700
+Subject: drm/panthor: validate group queue count
+
+From: Chia-I Wu <olvaffe@gmail.com>
+
+[ Upstream commit a00f2015acdbd8a4b3d2382eaeebe11db1925fad ]
+
+A panthor group can have at most MAX_CS_PER_CSG panthor queues.
+
+Fixes: 4bdca11507928 ("drm/panthor: Add the driver frontend block")
+Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> # v1
+Reviewed-by: Steven Price <steven.price@arm.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Link: https://lore.kernel.org/r/20250903192133.288477-1-olvaffe@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panthor/panthor_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
+index c520f156e2d73..03eb7d52209a2 100644
+--- a/drivers/gpu/drm/panthor/panthor_drv.c
++++ b/drivers/gpu/drm/panthor/panthor_drv.c
+@@ -1023,7 +1023,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ struct drm_panthor_queue_create *queue_args;
+ int ret;
+
+- if (!args->queues.count)
++ if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
+--
+2.51.0
+
--- /dev/null
+From a414c6c98b332045a0ac0ecf24683fd86a36dc8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 06:57:27 -0700
+Subject: genetlink: fix genl_bind() invoking bind() after -EPERM
+
+From: Alok Tiwari <alok.a.tiwari@oracle.com>
+
+[ Upstream commit 1dbfb0363224f6da56f6655d596dc5097308d6f5 ]
+
+Per family bind/unbind callbacks were introduced to allow families
+to track multicast group consumer presence, e.g. to start or stop
+producing events depending on listeners.
+
+However, in genl_bind() the bind() callback was invoked even if
+capability checks failed and ret was set to -EPERM. This means that
+callbacks could run on behalf of unauthorized callers while the
+syscall still returned failure to user space.
+
+Fix this by only invoking bind() after the "if (ret) break;" check,
+i.e. after permission checks have succeeded.
+
+Fixes: 3de21a8990d3 ("genetlink: Add per family bind/unbind callbacks")
+Signed-off-by: Alok Tiwari <alok.a.tiwari@oracle.com>
+Link: https://patch.msgid.link/20250905135731.3026965-1-alok.a.tiwari@oracle.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netlink/genetlink.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 07ad65774fe29..3327d84518141 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
+ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+ ret = -EPERM;
+
++ if (ret)
++ break;
++
+ if (family->bind)
+ family->bind(i);
+
+--
+2.51.0
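The shape of the fix is the general check-then-side-effect ordering: no
family callback may run on behalf of a caller about to be refused. A
sketch of the corrected flow, with capability_checks() as a hypothetical
stand-in for the CAP_NET_ADMIN/CAP_SYS_ADMIN tests:

	ret = capability_checks(net, family, i);	/* hypothetical */
	if (ret)
		break;			/* -EPERM: no callback runs */

	if (family->bind)
		family->bind(i);	/* only authorized binds are counted */

Before the fix, a family could start producing events for a group whose
join was about to be rejected.
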
+
--- /dev/null
+From d06d23686624248b6e83d434886053c83725b7e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:32 +0000
+Subject: hsr: use hsr_for_each_port_rtnl in hsr_port_get_hsr
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 393c841fe4333cdd856d0ca37b066d72746cfaa6 ]
+
+hsr_port_get_hsr() iterates over ports using hsr_for_each_port(),
+but many of its callers do not hold the required RCU lock.
+
+Switch to hsr_for_each_port_rtnl(), since most callers already hold
+the rtnl lock. After review, all callers are covered by either the rtnl
+lock or the RCU lock, except hsr_dev_xmit(). Fix this by adding an
+RCU read lock there.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 3 +++
+ net/hsr/hsr_main.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 56b49d477bec0..d2ae9fbed9e30 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -226,6 +226,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
++ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+@@ -238,6 +239,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+ }
++ rcu_read_unlock();
++
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index e1afa54b3fba8..9e4ce1ccc0229 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 071ca3e2753b035299353f8fd29131dd53fe0fb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:31 +0000
+Subject: hsr: use rtnl lock when iterating over ports
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 8884c693991333ae065830554b9b0c96590b1bb2 ]
+
+hsr_for_each_port is called in many places without holding the RCU read
+lock, which may trigger warnings on debug kernels. Most of the callers
+actually hold the rtnl lock. So add a new helper hsr_for_each_port_rtnl
+to allow callers in suitable contexts to iterate ports safely without
+explicit RCU locking.
+
+This patch only fixes the callers that hold the rtnl lock. Other callers
+will be fixed in later patches.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 18 +++++++++---------
+ net/hsr/hsr_main.c | 2 +-
+ net/hsr/hsr_main.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 9f1106bdd4f09..56b49d477bec0 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -49,7 +49,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+
+ ASSERT_RTNL();
+
+- hsr_for_each_port(master->hsr, port) {
++ hsr_for_each_port_rtnl(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+@@ -105,7 +105,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+@@ -139,7 +139,7 @@ static int hsr_dev_open(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -172,7 +172,7 @@ static int hsr_dev_close(struct net_device *dev)
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -205,7 +205,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+@@ -483,7 +483,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -505,7 +505,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -533,7 +533,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+@@ -579,7 +579,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index d7ae32473c41a..e1afa54b3fba8 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index e26244456f639..f066c9c401c60 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -231,6 +231,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
++#define hsr_for_each_port_rtnl(hsr, port) \
++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+ /* Caller must ensure skb is a valid HSR frame */
+--
+2.51.0
+
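The new macro relies on the optional fourth argument of
list_for_each_entry_rcu(), which names an alternative lockdep condition
to rcu_read_lock(). Sketch of the pattern:

	/* legal under rtnl_lock() without an RCU read-side section */
	list_for_each_entry_rcu(port, &hsr->ports, port_list,
				lockdep_rtnl_is_held()) {
		...
	}

Since the port list is only mutated under rtnl, holding rtnl is as safe
for readers as an RCU critical section; the annotation teaches lockdep
exactly that.
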
--- /dev/null
+From afbcbdb069228ef712bd58f90523406cad8d184b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this with inserting code to fail intentionally.
+
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 55fb362eb5081..037c1a0cbd6a8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4206,7 +4206,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
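free_irq() identifies the handler to remove by the (irq, dev_id) pair,
so the dev_id cookie must be bit-identical to the one given to
request_irq(). A condensed sketch of the pairing:

	rc = request_irq(irq_num, handler, 0, name,
			 vsi->q_vectors[vector]);	/* cookie: the pointer */
	...
	free_irq(irq_num, vsi->q_vectors[vector]);	/* same pointer: ok */

	/* free_irq(irq_num, &vsi->q_vectors[vector]) would pass the address
	 * of the array slot instead, which matches no registered handler and
	 * triggers the "Trying to free already-free IRQ" warning.
	 */
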
--- /dev/null
+From df8b4dcce6a9f8aa6fd7942ab67cf5844dbfcda8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice, ixgbe,
+etc.) which correctly test the physical link state regardless of admin
+state.
+Remove the if_running check to ensure link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ca6ccbc139548..6412c84e2d17d 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From 7847cc111003996d86c1333b8bc3598eb404ce7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 13:12:33 +0200
+Subject: net: bridge: Bounce invalid boolopts
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit 8625f5748fea960d2af4f3c3e9891ee8f6f80906 ]
+
+The bridge driver currently tolerates options that it does not recognize.
+Instead, it should bounce them.
+
+Fixes: a428afe82f98 ("net: bridge: add support for user-controlled bool options")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/e6fdca3b5a8d54183fbda075daffef38bdd7ddce.1757070067.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index 2cab878e0a39c..ed08717541fe7 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -312,6 +312,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
+ int err = 0;
+ int opt_id;
+
++ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
++ if (opt_id != BITS_PER_LONG) {
++ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
++ opt_id);
++ return -EINVAL;
++ }
++
+ for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+ bool on = !!(bm->optval & BIT(opt_id));
+
+--
+2.51.0
+
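find_next_bit() makes the validation a one-liner: scan the supplied mask
starting at the first option id the kernel does not implement; any hit is
an unknown option. The check the patch adds, in isolation:

	unsigned long bitmap = bm->optmask;
	int opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);

	if (opt_id != BITS_PER_LONG)
		return -EINVAL;	/* bounce instead of silently ignoring */

The copy into a local unsigned long matters: find_next_bit() takes an
unsigned long pointer, while the optmask field is fixed-width.
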
--- /dev/null
+From 1602ef3c8213cec962b251674221bc8194af96d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a1cc338cf20f3..0bd814251d56e 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2356,7 +2356,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 22bfe99d0678d643742ab4df63e12fcdfda1557a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2024 14:47:08 +0530
+Subject: net: hsr: Add VLAN CTAG filter support
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 1a8a63a5305e95519de6f941922dfcd8179f82e5 ]
+
+This patch adds support for VLAN ctag based filtering at slave devices.
+The slave ethernet device may be capable of filtering ethernet packets
+based on VLAN ID. This requires that when the VLAN interface is created
+over an HSR/PRP interface, it passes the VID information to the
+associated slave ethernet devices so that they update their hardware
+filters to filter ethernet frames based on VID. This patch adds the
+required functions to propagate the vid information to the slave
+devices.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20241106091710.3308519-3-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 80 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 79 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 9d0754b3642fd..9f1106bdd4f09 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -522,6 +522,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ }
+ }
+
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ bool is_slave_a_added = false;
++ bool is_slave_b_added = false;
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++ int ret = 0;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER ||
++ port->type == HSR_PT_INTERLINK)
++ continue;
++
++ ret = vlan_vid_add(port->dev, proto, vid);
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ if (ret) {
++ /* clean up Slave-B */
++ netdev_err(dev, "add vid failed for Slave-A\n");
++ if (is_slave_b_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_a_added = true;
++ break;
++
++ case HSR_PT_SLAVE_B:
++ if (ret) {
++ /* clean up Slave-A */
++ netdev_err(dev, "add vid failed for Slave-B\n");
++ if (is_slave_a_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_b_added = true;
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ vlan_vid_del(port->dev, proto, vid);
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+@@ -530,6 +601,8 @@ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
+ .ndo_set_rx_mode = hsr_set_rx_mode,
++ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+
+ static const struct device_type hsr_type = {
+@@ -578,7 +651,8 @@ void hsr_dev_setup(struct net_device *dev)
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+- NETIF_F_HW_VLAN_CTAG_TX;
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ dev->features = dev->hw_features;
+
+@@ -661,6 +735,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ (slave[1]->features & NETIF_F_HW_HSR_FWD))
+ hsr->fwd_offloaded = true;
+
++ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+--
+2.51.0
+
--- /dev/null
+From c0c767c326d93d2930ee9478aaa8cb839a32eaad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:21 +0200
+Subject: netfilter: nf_tables: make nft_set_do_lookup available
+ unconditionally
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 11fe5a82e53ac3581a80c88e0e35fb8a80e15f48 ]
+
+This function was added for retpoline mitigation and is replaced by a
+static inline helper if mitigations are not enabled.
+
+Enable this helper function unconditionally so the next patch can add a
+lookup restart mechanism to fix possible false negatives while
+transactions are in progress.
+
+Adding lookup restarts in nft_lookup_eval doesn't work as nft_objref would
+then need the same copy-pasted loop.
+
+This patch is separate to ease review of the actual bug fix.
+
+Suggested-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_core.h | 10 ++--------
+ net/netfilter/nft_lookup.c | 17 ++++++++++++-----
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 6a52fb97b8443..04699eac5b524 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -109,17 +109,11 @@ nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
+ const struct nft_set_ext *
+ nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
++#endif
++
+ const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+-#else
+-static inline const struct nft_set_ext *
+-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
+-{
+- return set->ops->lookup(net, set, key);
+-}
+-#endif
+
+ /* called from nft_pipapo_avx2.c */
+ const struct nft_set_ext *
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 40c602ffbcba7..2c6909bf1b407 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -24,11 +24,11 @@ struct nft_lookup {
+ struct nft_set_binding binding;
+ };
+
+-#ifdef CONFIG_MITIGATION_RETPOLINE
+-const struct nft_set_ext *
+-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
++static const struct nft_set_ext *
++__nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
++#ifdef CONFIG_MITIGATION_RETPOLINE
+ if (set->ops == &nft_set_hash_fast_type.ops)
+ return nft_hash_lookup_fast(net, set, key);
+ if (set->ops == &nft_set_hash_type.ops)
+@@ -51,10 +51,17 @@ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ return nft_rbtree_lookup(net, set, key);
+
+ WARN_ON_ONCE(1);
++#endif
+ return set->ops->lookup(net, set, key);
+ }
++
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
++{
++ return __nft_set_do_lookup(net, set, key);
++}
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+-#endif
+
+ void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+--
+2.51.0
+
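The pattern being made unconditional is the standard retpoline-avoidance
trick: compare the ops pointer against each known implementation so the
common case compiles to a direct call, leaving the indirect call as a
fallback only. In outline:

	if (set->ops == &nft_set_hash_fast_type.ops)
		return nft_hash_lookup_fast(net, set, key);	/* direct */
	if (set->ops == &nft_set_hash_type.ops)
		return nft_hash_lookup(net, set, key);		/* direct */
	...
	return set->ops->lookup(net, set, key);		/* indirect fallback */

With retpolines enabled, every avoided indirect call skips a retpoline
thunk; without them the pointer compares are cheap, which is why exposing
the helper unconditionally costs little.
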
--- /dev/null
+From 7367b2f81856a6a4f782ee832cf2128473afff13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:20 +0200
+Subject: netfilter: nf_tables: place base_seq in struct net
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 64102d9bbc3d41dac5188b8fba75b1344c438970 ]
+
+This will soon be read from packet path around same time as the gencursor.
+
+Both gencursor and base_seq get incremented almost at the same time, so
+it makes sense to place them in the same structure.
+
+This doesn't increase struct net size on 64bit due to padding.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 1 -
+ include/net/netns/nftables.h | 1 +
+ net/netfilter/nf_tables_api.c | 65 ++++++++++++++++---------------
+ 3 files changed, 34 insertions(+), 33 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index bad0c6f7ed53d..ee550229d4ffa 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1909,7 +1909,6 @@ struct nftables_pernet {
+ struct mutex commit_mutex;
+ u64 table_handle;
+ u64 tstamp;
+- unsigned int base_seq;
+ unsigned int gc_seq;
+ u8 validate_state;
+ struct work_struct destroy_work;
+diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
+index cc8060c017d5f..99dd166c5d07c 100644
+--- a/include/net/netns/nftables.h
++++ b/include/net/netns/nftables.h
+@@ -3,6 +3,7 @@
+ #define _NETNS_NFTABLES_H_
+
+ struct netns_nftables {
++ unsigned int base_seq;
+ u8 gencursor;
+ };
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4430bfa34a993..9422b54ab2c25 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -995,11 +995,14 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
+ return ERR_PTR(-ENOENT);
+ }
+
+-static __be16 nft_base_seq(const struct net *net)
++static unsigned int nft_base_seq(const struct net *net)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
++ return READ_ONCE(net->nft.base_seq);
++}
+
+- return htons(nft_net->base_seq & 0xffff);
++static __be16 nft_base_seq_be16(const struct net *net)
++{
++ return htons(nft_base_seq(net) & 0xffff);
+ }
+
+ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+@@ -1019,7 +1022,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1112,7 +1115,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -1880,7 +1883,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1983,7 +1986,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -3480,7 +3483,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+ u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+
+ nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
+- nft_base_seq(net));
++ nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -3648,7 +3651,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -3859,7 +3862,7 @@ static int nf_tables_getrule_reset(struct sk_buff *skb,
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_RULE_TABLE]),
+ (char *)nla_data(nla[NFTA_RULE_TABLE]),
+- nft_net->base_seq);
++ nft_base_seq(net));
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
+ kfree(buf);
+@@ -4670,7 +4673,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, ctx->family, NFNETLINK_V0,
+- nft_base_seq(ctx->net));
++ nft_base_seq_be16(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -4812,7 +4815,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (ctx->family != NFPROTO_UNSPEC &&
+@@ -5988,7 +5991,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
+@@ -6017,7 +6020,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ seq = cb->nlh->nlmsg_seq;
+
+ nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
+- table->family, NFNETLINK_V0, nft_base_seq(net));
++ table->family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -6110,7 +6113,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
+
+ event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+- NFNETLINK_V0, nft_base_seq(ctx->net));
++ NFNETLINK_V0, nft_base_seq_be16(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -6409,7 +6412,7 @@ static int nf_tables_getsetelem_reset(struct sk_buff *skb,
+ }
+ nelems++;
+ }
+- audit_log_nft_set_reset(dump_ctx.ctx.table, nft_net->base_seq, nelems);
++ audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(info->net), nelems);
+
+ out_unlock:
+ rcu_read_unlock();
+@@ -8012,7 +8015,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -8077,7 +8080,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -8111,7 +8114,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ idx++;
+ }
+ if (ctx->reset && entries)
+- audit_log_obj_reset(table, nft_net->base_seq, entries);
++ audit_log_obj_reset(table, nft_base_seq(net), entries);
+ if (rc < 0)
+ break;
+ }
+@@ -8280,7 +8283,7 @@ static int nf_tables_getobj_reset(struct sk_buff *skb,
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_OBJ_TABLE]),
+ (char *)nla_data(nla[NFTA_OBJ_TABLE]),
+- nft_net->base_seq);
++ nft_base_seq(net));
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+@@ -8385,9 +8388,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq, int event,
+ u16 flags, int family, int report, gfp_t gfp)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
+ char *buf = kasprintf(gfp, "%s:%u",
+- table->name, nft_net->base_seq);
++ table->name, nft_base_seq(net));
+
+ audit_log_nfcfg(buf,
+ family,
+@@ -9036,7 +9038,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -9104,7 +9106,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -9289,17 +9291,16 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nlmsghdr *nlh;
+ char buf[TASK_COMM_LEN];
+ int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
+
+ nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
+- NFNETLINK_V0, nft_base_seq(net));
++ NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+- if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
++ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_base_seq(net))) ||
+ nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
+ nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
+ goto nla_put_failure;
+@@ -10462,11 +10463,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ * Bump generation counter, invalidate any dump in progress.
+ * Cannot fail after this point.
+ */
+- base_seq = READ_ONCE(nft_net->base_seq);
++ base_seq = nft_base_seq(net);
+ while (++base_seq == 0)
+ ;
+
+- WRITE_ONCE(nft_net->base_seq, base_seq);
++ WRITE_ONCE(net->nft.base_seq, base_seq);
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+@@ -10698,7 +10699,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+
+ nft_commit_notify(net, NETLINK_CB(skb).portid);
+ nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+- nf_tables_commit_audit_log(&adl, nft_net->base_seq);
++ nf_tables_commit_audit_log(&adl, nft_base_seq(net));
+
+ nft_gc_seq_end(nft_net, gc_seq);
+ nft_net->validate_state = NFT_VALIDATE_SKIP;
+@@ -11032,7 +11033,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
+ mutex_lock(&nft_net->commit_mutex);
+ nft_net->tstamp = get_jiffies_64();
+
+- genid_ok = genid == 0 || nft_net->base_seq == genid;
++ genid_ok = genid == 0 || nft_base_seq(net) == genid;
+ if (!genid_ok)
+ mutex_unlock(&nft_net->commit_mutex);
+
+@@ -11710,7 +11711,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ INIT_LIST_HEAD(&nft_net->module_list);
+ INIT_LIST_HEAD(&nft_net->notify_list);
+ mutex_init(&nft_net->commit_mutex);
+- nft_net->base_seq = 1;
++ net->nft.base_seq = 1;
+ nft_net->gc_seq = 0;
+ nft_net->validate_state = NFT_VALIDATE_SKIP;
+ INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
+--
+2.51.0
+
--- /dev/null
+From b12bb4f6babe4778b658cc0b6aca603804b62969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Jun 2025 15:37:03 +0200
+Subject: netfilter: nf_tables: Reintroduce shortened deletion notifications
+
+From: Phil Sutter <phil@nwl.cc>
+
+[ Upstream commit a1050dd071682d2c9d8d6d5c96119f8f401b62f0 ]
+
+Restore commit 28339b21a365 ("netfilter: nf_tables: do not send complete
+notification of deletions") and fix it:
+
+- Avoid upfront modification of the 'event' variable so the conditionals
+  become effective.
+- Always include the NFTA_OBJ_TYPE attribute in object notifications;
+  user space requires it for proper deserialisation.
+- Catch DESTROY events, too.
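+
+Condensed, the resulting control flow in the fill helpers looks like this
+(a sketch mirroring the table-case hunk below, not standalone code):
+
+  /* emit only what user space needs to identify the table ... */
+  if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
+      nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
+                   NFTA_TABLE_PAD))
+          goto nla_put_failure;
+
+  /* ... then stop early for shortened deletion notifications */
+  if (event == NFT_MSG_DELTABLE || event == NFT_MSG_DESTROYTABLE) {
+          nlmsg_end(skb, nlh);
+          return 0;
+  }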
+
+Signed-off-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 67 ++++++++++++++++++++++++++---------
+ 1 file changed, 50 insertions(+), 17 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3743e4249dc8c..4430bfa34a993 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1017,9 +1017,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1029,6 +1029,12 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ NFTA_TABLE_PAD))
+ goto nla_put_failure;
+
++ if (event == NFT_MSG_DELTABLE ||
++ event == NFT_MSG_DESTROYTABLE) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ htonl(table->flags & NFT_TABLE_F_MASK)))
+ goto nla_put_failure;
+@@ -1872,9 +1878,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1884,6 +1890,13 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ NFTA_CHAIN_PAD))
+ goto nla_put_failure;
+
++ if (!hook_list &&
++ (event == NFT_MSG_DELCHAIN ||
++ event == NFT_MSG_DESTROYCHAIN)) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nft_is_base_chain(chain)) {
+ const struct nft_base_chain *basechain = nft_base_chain(chain);
+ struct nft_stats __percpu *stats;
+@@ -4654,9 +4667,10 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ u32 seq = ctx->seq;
+ int i;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+- NFNETLINK_V0, nft_base_seq(ctx->net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, ctx->family, NFNETLINK_V0,
++ nft_base_seq(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -4668,6 +4682,12 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ NFTA_SET_PAD))
+ goto nla_put_failure;
+
++ if (event == NFT_MSG_DELSET ||
++ event == NFT_MSG_DESTROYSET) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (set->flags != 0)
+ if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+ goto nla_put_failure;
+@@ -7990,20 +8010,26 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+ if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
+ nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
++ nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+ nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
+ NFTA_OBJ_PAD))
+ goto nla_put_failure;
+
+- if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+- nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
++ if (event == NFT_MSG_DELOBJ ||
++ event == NFT_MSG_DESTROYOBJ) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
++ if (nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
+ nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+ goto nla_put_failure;
+
+@@ -9008,9 +9034,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ struct nft_hook *hook;
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -9020,6 +9046,13 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ NFTA_FLOWTABLE_PAD))
+ goto nla_put_failure;
+
++ if (!hook_list &&
++ (event == NFT_MSG_DELFLOWTABLE ||
++ event == NFT_MSG_DESTROYFLOWTABLE)) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
+ goto nla_put_failure;
+--
+2.51.0
+
--- /dev/null
+From 3114226e0062dde003daf14b410aad8f1814d9c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:22 +0200
+Subject: netfilter: nf_tables: restart set lookup on base_seq change
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit b2f742c846cab9afc5953a5d8f17b54922dcc723 ]
+
+The hash, hash_fast, rhash and bitwise sets may indicate no result even
+though a matching element exists, during a short time window while
+another cpu is finalizing the transaction.
+
+This happens when the hash lookup/bitwise lookup function has picked up
+the old genbit, right before it was toggled by nf_tables_commit(), but
+then the committing cpu managed to unlink the matching old element from
+the hash table:
+
+cpu0 cpu1
+ has added new elements to clone
+ has marked elements as being
+ inactive in new generation
+ perform lookup in the set
+ enters commit phase:
+ A) observes old genbit
+ increments base_seq
+I) increments the genbit
+II) removes old element from the set
+ B) finds matching element
+ C) returns no match: found
+ element is not valid in old
+ generation
+
+ Next lookup observes new genbit and
+ finds matching e2.
+
+Consider a packet P matching elements e1 and e2.
+
+cpu0 processes the following transaction:
+1. remove e1
+2. add e2, which has the same key as e1.
+
+P matches both e1 and e2. Therefore, cpu1 should always find a match
+for P. Due to the above race, this is not the case:
+
+cpu1 observed the old genbit. e2 will be ignored even if it is found,
+because it is not yet active in the old generation. The element e1 is
+not found anymore if cpu0 managed to unlink it from the hlist before
+cpu1 reached it during list traversal.
+
+The situation only occurs for a brief time period; lookups happening
+after I) observe the new genbit and return e2.
+
+This problem exists in all set types except nft_set_pipapo, so fix it
+once in nft_lookup rather than in each set type's lookup function
+individually.
+
+Sample the base sequence counter, which gets incremented right before the
+genbit is changed.
+
+Then, if no match is found, retry the lookup if the base sequence was
+altered in between.
+
+If the base sequence hasn't changed, one of the following holds:
+ - No update took place: a no-match result is expected.
+   This is the common case. Or:
+ - nf_tables_commit() hasn't progressed to the genbit update yet.
+   Old elements were still visible, so a no-match result is expected. Or:
+ - nf_tables_commit() updated the genbit:
+   We picked up the new base_seq, so the lookup function also picked
+   up the new genbit; a no-match result is expected.
+
+If the old genbit was observed, then nft_lookup also picked up the old
+base_seq: nft_lookup_should_retry() returns true and the lookup is
+retried in the new generation.
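+
+A self-contained userspace model of this retry scheme (illustrative names;
+the kernel pairs smp_store_release() with smp_load_acquire() instead of
+C11 atomics, as in the hunks below):
+
+  #include <stdatomic.h>
+
+  static _Atomic unsigned int base_seq = 1;
+
+  static unsigned int read_base_seq(void)
+  {
+          return atomic_load_explicit(&base_seq, memory_order_acquire);
+  }
+
+  static const void *lookup_retry(const void *(*do_lookup)(void))
+  {
+          const void *ext;
+          unsigned int seq;
+
+          do {
+                  seq = read_base_seq();
+                  ext = do_lookup();  /* may race with a concurrent commit */
+                  if (ext)
+                          break;
+                  /* no match: retry if a commit bumped base_seq meanwhile */
+          } while (seq != read_base_seq());
+
+          return ext;
+  }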
+
+This problem was introduced when the unconditional synchronize_rcu() call
+that followed the current/next generation bit toggle was removed.
+
+Thanks to Pablo Neira Ayuso for reviewing an earlier version of this
+patchset and for suggesting the re-use of the existing base_seq and the
+placement of the restart loop in nft_set_do_lookup().
+
+Fixes: 0cbc06b3faba ("netfilter: nf_tables: remove synchronize_rcu in commit phase")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 ++-
+ net/netfilter/nft_lookup.c | 31 ++++++++++++++++++++++++++++++-
+ 2 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 9422b54ab2c25..3028d388b2933 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10467,7 +10467,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ while (++base_seq == 0)
+ ;
+
+- WRITE_ONCE(net->nft.base_seq, base_seq);
++ /* pairs with smp_load_acquire in nft_lookup_eval */
++ smp_store_release(&net->nft.base_seq, base_seq);
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 2c6909bf1b407..58c5b14889c47 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -55,11 +55,40 @@ __nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ return set->ops->lookup(net, set, key);
+ }
+
++static unsigned int nft_base_seq(const struct net *net)
++{
++ /* pairs with smp_store_release() in nf_tables_commit() */
++ return smp_load_acquire(&net->nft.base_seq);
++}
++
++static bool nft_lookup_should_retry(const struct net *net, unsigned int seq)
++{
++ return unlikely(seq != nft_base_seq(net));
++}
++
+ const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+- return __nft_set_do_lookup(net, set, key);
++ const struct nft_set_ext *ext;
++ unsigned int base_seq;
++
++ do {
++ base_seq = nft_base_seq(net);
++
++ ext = __nft_set_do_lookup(net, set, key);
++ if (ext)
++ break;
++ /* No match? There is a small chance that lookup was
++ * performed in the old generation, but nf_tables_commit()
++ * already unlinked a (matching) element.
++ *
++ * We need to repeat the lookup to make sure that we didn't
++ * miss a matching element in the new generation.
++ */
++ } while (nft_lookup_should_retry(net, base_seq));
++
++ return ext;
+ }
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+
+--
+2.51.0
+
--- /dev/null
+From 427c6901c697ab1866b6595fdf9b1377d7817727 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:13 +0200
+Subject: netfilter: nft_set: remove one argument from lookup and update
+ functions
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 17a20e09f086f2c574ac87f3cf6e14c4377f65f6 ]
+
+Return the extension pointer instead of passing it as a function
+argument to be filled in by the callee.
+
+As-is, whenever false is returned, the extension pointer is not used.
+
+For all set types, when true is returned, the extension pointer was set
+to the matching element.
+
+Only exception: nft_set_bitmap doesn't support extensions.
+Return a pointer to a static const empty element extension container.
+
+return false -> return NULL
+return true -> return the element's extension pointer.
+
+This saves one function argument.
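+
+A minimal illustration of the calling-convention change (hypothetical
+helper names, not the kernel symbols):
+
+  #include <stdbool.h>
+
+  struct ext { int id; };
+
+  /* before: result returned via out-argument, bool signals success */
+  bool lookup_old(int key, const struct ext **ext);
+
+  /* after: NULL means "no match", anything else is the extension */
+  const struct ext *lookup_new(int key);
+
+  /* callers turn
+   *         if (lookup_old(key, &ext))
+   *                 use(ext);
+   * into
+   *         ext = lookup_new(key);
+   *         if (ext)
+   *                 use(ext);
+   */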
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 10 ++---
+ include/net/netfilter/nf_tables_core.h | 47 ++++++++++++----------
+ net/netfilter/nft_dynset.c | 5 ++-
+ net/netfilter/nft_lookup.c | 27 ++++++-------
+ net/netfilter/nft_objref.c | 5 +--
+ net/netfilter/nft_set_bitmap.c | 11 ++++--
+ net/netfilter/nft_set_hash.c | 54 ++++++++++++--------------
+ net/netfilter/nft_set_pipapo.c | 19 +++++----
+ net/netfilter/nft_set_pipapo_avx2.c | 25 ++++++------
+ net/netfilter/nft_set_rbtree.c | 40 +++++++++----------
+ 10 files changed, 126 insertions(+), 117 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 757abcb54d117..bad0c6f7ed53d 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -459,19 +459,17 @@ struct nft_set_ext;
+ * control plane functions.
+ */
+ struct nft_set_ops {
+- bool (*lookup)(const struct net *net,
++ const struct nft_set_ext * (*lookup)(const struct net *net,
+ const struct nft_set *set,
+- const u32 *key,
+- const struct nft_set_ext **ext);
+- bool (*update)(struct nft_set *set,
++ const u32 *key);
++ const struct nft_set_ext * (*update)(struct nft_set *set,
+ const u32 *key,
+ struct nft_elem_priv *
+ (*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_set_ext **ext);
++ struct nft_regs *regs);
+ bool (*delete)(const struct nft_set *set,
+ const u32 *key);
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 03b6165756fc5..6a52fb97b8443 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -94,34 +94,41 @@ extern const struct nft_set_type nft_set_pipapo_type;
+ extern const struct nft_set_type nft_set_pipapo_avx2_type;
+
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup_fast(const struct net *net,
+- const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+ #else
+-static inline bool
++static inline const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++ const u32 *key)
+ {
+- return set->ops->lookup(net, set, key, ext);
++ return set->ops->lookup(net, set, key);
+ }
+ #endif
+
+ /* called from nft_pipapo_avx2.c */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+ /* called from nft_set_pipapo.c */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+
+ void nft_counter_init_seqcount(void);
+
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 88922e0e8e837..e24493d9e7761 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -91,8 +91,9 @@ void nft_dynset_eval(const struct nft_expr *expr,
+ return;
+ }
+
+- if (set->ops->update(set, ®s->data[priv->sreg_key], nft_dynset_new,
+- expr, regs, &ext)) {
++ ext = set->ops->update(set, ®s->data[priv->sreg_key], nft_dynset_new,
++ expr, regs);
++ if (ext) {
+ if (priv->op == NFT_DYNSET_OP_UPDATE &&
+ nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) != 0) {
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 63ef832b8aa71..40c602ffbcba7 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -25,32 +25,33 @@ struct nft_lookup {
+ };
+
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ if (set->ops == &nft_set_hash_fast_type.ops)
+- return nft_hash_lookup_fast(net, set, key, ext);
++ return nft_hash_lookup_fast(net, set, key);
+ if (set->ops == &nft_set_hash_type.ops)
+- return nft_hash_lookup(net, set, key, ext);
++ return nft_hash_lookup(net, set, key);
+
+ if (set->ops == &nft_set_rhash_type.ops)
+- return nft_rhash_lookup(net, set, key, ext);
++ return nft_rhash_lookup(net, set, key);
+
+ if (set->ops == &nft_set_bitmap_type.ops)
+- return nft_bitmap_lookup(net, set, key, ext);
++ return nft_bitmap_lookup(net, set, key);
+
+ if (set->ops == &nft_set_pipapo_type.ops)
+- return nft_pipapo_lookup(net, set, key, ext);
++ return nft_pipapo_lookup(net, set, key);
+ #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+ if (set->ops == &nft_set_pipapo_avx2_type.ops)
+- return nft_pipapo_avx2_lookup(net, set, key, ext);
++ return nft_pipapo_avx2_lookup(net, set, key);
+ #endif
+
+ if (set->ops == &nft_set_rbtree_type.ops)
+- return nft_rbtree_lookup(net, set, key, ext);
++ return nft_rbtree_lookup(net, set, key);
+
+ WARN_ON_ONCE(1);
+- return set->ops->lookup(net, set, key, ext);
++ return set->ops->lookup(net, set, key);
+ }
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+ #endif
+@@ -61,12 +62,12 @@ void nft_lookup_eval(const struct nft_expr *expr,
+ {
+ const struct nft_lookup *priv = nft_expr_priv(expr);
+ const struct nft_set *set = priv->set;
+- const struct nft_set_ext *ext = NULL;
+ const struct net *net = nft_net(pkt);
++ const struct nft_set_ext *ext;
+ bool found;
+
+- found = nft_set_do_lookup(net, set, ®s->data[priv->sreg], &ext) ^
+- priv->invert;
++ ext = nft_set_do_lookup(net, set, ®s->data[priv->sreg]);
++ found = !!ext ^ priv->invert;
+ if (!found) {
+ ext = nft_set_catchall_lookup(net, set);
+ if (!ext) {
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 09da7a3f9f967..8ee66a86c3bc7 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -111,10 +111,9 @@ void nft_objref_map_eval(const struct nft_expr *expr,
+ struct net *net = nft_net(pkt);
+ const struct nft_set_ext *ext;
+ struct nft_object *obj;
+- bool found;
+
+- found = nft_set_do_lookup(net, set, ®s->data[priv->sreg], &ext);
+- if (!found) {
++ ext = nft_set_do_lookup(net, set, ®s->data[priv->sreg]);
++ if (!ext) {
+ ext = nft_set_catchall_lookup(net, set);
+ if (!ext) {
+ regs->verdict.code = NFT_BREAK;
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 1caa04619dc6d..b4765fb92d727 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -75,16 +75,21 @@ nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ const struct nft_bitmap *priv = nft_set_priv(set);
++ static const struct nft_set_ext found;
+ u8 genmask = nft_genmask_cur(net);
+ u32 idx, off;
+
+ nft_bitmap_location(set, key, &idx, &off);
+
+- return nft_bitmap_active(priv->bitmap, idx, off, genmask);
++ if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
++ return &found;
++
++ return NULL;
+ }
+
+ static struct nft_bitmap_elem *
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 4b3452dff2ec0..900eddb93dcc8 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -81,8 +81,9 @@ static const struct rhashtable_params nft_rhash_params = {
+ };
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_rhash *priv = nft_set_priv(set);
+ const struct nft_rhash_elem *he;
+@@ -95,9 +96,9 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+ if (he != NULL)
+- *ext = &he->ext;
++ return &he->ext;
+
+- return !!he;
++ return NULL;
+ }
+
+ static struct nft_elem_priv *
+@@ -120,14 +121,11 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
+ return ERR_PTR(-ENOENT);
+ }
+
+-static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+- struct nft_elem_priv *
+- (*new)(struct nft_set *,
+- const struct nft_expr *,
+- struct nft_regs *regs),
+- const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_set_ext **ext)
++static const struct nft_set_ext *
++nft_rhash_update(struct nft_set *set, const u32 *key,
++ struct nft_elem_priv *(*new)(struct nft_set *, const struct nft_expr *,
++ struct nft_regs *regs),
++ const struct nft_expr *expr, struct nft_regs *regs)
+ {
+ struct nft_rhash *priv = nft_set_priv(set);
+ struct nft_rhash_elem *he, *prev;
+@@ -161,14 +159,13 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+ }
+
+ out:
+- *ext = &he->ext;
+- return true;
++ return &he->ext;
+
+ err2:
+ nft_set_elem_destroy(set, &he->priv, true);
+ atomic_dec(&set->nelems);
+ err1:
+- return false;
++ return NULL;
+ }
+
+ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
+@@ -507,8 +504,9 @@ struct nft_hash_elem {
+ };
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_hash *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_cur(net);
+@@ -519,12 +517,10 @@ bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ hash = reciprocal_scale(hash, priv->buckets);
+ hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
+- nft_set_elem_active(&he->ext, genmask)) {
+- *ext = &he->ext;
+- return true;
+- }
++ nft_set_elem_active(&he->ext, genmask))
++ return &he->ext;
+ }
+- return false;
++ return NULL;
+ }
+
+ static struct nft_elem_priv *
+@@ -547,9 +543,9 @@ nft_hash_get(const struct net *net, const struct nft_set *set,
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup_fast(const struct net *net,
+- const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_hash *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_cur(net);
+@@ -562,12 +558,10 @@ bool nft_hash_lookup_fast(const struct net *net,
+ hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
+ if (k1 == k2 &&
+- nft_set_elem_active(&he->ext, genmask)) {
+- *ext = &he->ext;
+- return true;
+- }
++ nft_set_elem_active(&he->ext, genmask))
++ return &he->ext;
+ }
+- return false;
++ return NULL;
+ }
+
+ static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9ac48e6b4332c..a844b33fa6002 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -407,8 +407,9 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ *
+ * Return: true on match, false otherwise.
+ */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+@@ -465,13 +466,15 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ scratch->map_index = map_index;
+ local_bh_enable();
+
+- return false;
++ return NULL;
+ }
+
+ if (last) {
+- *ext = &f->mt[b].e->ext;
+- if (unlikely(nft_set_elem_expired(*ext) ||
+- !nft_set_elem_active(*ext, genmask)))
++ const struct nft_set_ext *ext;
++
++ ext = &f->mt[b].e->ext;
++ if (unlikely(nft_set_elem_expired(ext) ||
++ !nft_set_elem_active(ext, genmask)))
+ goto next_match;
+
+ /* Last field: we're just returning the key without
+@@ -482,7 +485,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ scratch->map_index = map_index;
+ local_bh_enable();
+
+- return true;
++ return ext;
+ }
+
+ /* Swap bitmap indices: res_map is the initial bitmap for the
+@@ -497,7 +500,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+
+ out:
+ local_bh_enable();
+- return false;
++ return NULL;
+ }
+
+ /**
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index be7c16c79f711..6c441e2dc8af3 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1146,8 +1146,9 @@ static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, uns
+ *
+ * Return: true on match, false otherwise.
+ */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+@@ -1155,17 +1156,18 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
++ const struct nft_set_ext *ext;
+ unsigned long *res, *fill;
+ bool map_index;
+- int i, ret = 0;
++ int i;
+
+ local_bh_disable();
+
+ if (unlikely(!irq_fpu_usable())) {
+- bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
++ ext = nft_pipapo_lookup(net, set, key);
+
+ local_bh_enable();
+- return fallback_res;
++ return ext;
+ }
+
+ m = rcu_dereference(priv->match);
+@@ -1182,7 +1184,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ if (unlikely(!scratch)) {
+ kernel_fpu_end();
+ local_bh_enable();
+- return false;
++ return NULL;
+ }
+
+ map_index = scratch->map_index;
+@@ -1197,6 +1199,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ next_match:
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1, first = !i;
++ int ret = 0;
+
+ #define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n) \
+ (ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f, \
+@@ -1244,10 +1247,10 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ goto out;
+
+ if (last) {
+- *ext = &f->mt[ret].e->ext;
+- if (unlikely(nft_set_elem_expired(*ext) ||
+- !nft_set_elem_active(*ext, genmask))) {
+- ret = 0;
++ ext = &f->mt[ret].e->ext;
++ if (unlikely(nft_set_elem_expired(ext) ||
++ !nft_set_elem_active(ext, genmask))) {
++ ext = NULL;
+ goto next_match;
+ }
+
+@@ -1264,5 +1267,5 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ kernel_fpu_end();
+ local_bh_enable();
+
+- return ret >= 0;
++ return ext;
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2e8ef16ff191d..938a257c069e2 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -52,9 +52,9 @@ static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
+ return nft_set_elem_expired(&rbe->ext);
+ }
+
+-static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext,
+- unsigned int seq)
++static const struct nft_set_ext *
++__nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key, unsigned int seq)
+ {
+ struct nft_rbtree *priv = nft_set_priv(set);
+ const struct nft_rbtree_elem *rbe, *interval = NULL;
+@@ -65,7 +65,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ parent = rcu_dereference_raw(priv->root.rb_node);
+ while (parent != NULL) {
+ if (read_seqcount_retry(&priv->count, seq))
+- return false;
++ return NULL;
+
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+@@ -87,50 +87,48 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ }
+
+ if (nft_rbtree_elem_expired(rbe))
+- return false;
++ return NULL;
+
+ if (nft_rbtree_interval_end(rbe)) {
+ if (nft_set_is_anonymous(set))
+- return false;
++ return NULL;
+ parent = rcu_dereference_raw(parent->rb_left);
+ interval = NULL;
+ continue;
+ }
+
+- *ext = &rbe->ext;
+- return true;
++ return &rbe->ext;
+ }
+ }
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
+ !nft_rbtree_elem_expired(interval) &&
+- nft_rbtree_interval_start(interval)) {
+- *ext = &interval->ext;
+- return true;
+- }
++ nft_rbtree_interval_start(interval))
++ return &interval->ext;
+
+- return false;
++ return NULL;
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_rbtree *priv = nft_set_priv(set);
+ unsigned int seq = read_seqcount_begin(&priv->count);
+- bool ret;
++ const struct nft_set_ext *ext;
+
+- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
+- if (ret || !read_seqcount_retry(&priv->count, seq))
+- return ret;
++ ext = __nft_rbtree_lookup(net, set, key, seq);
++ if (ext || !read_seqcount_retry(&priv->count, seq))
++ return ext;
+
+ read_lock_bh(&priv->lock);
+ seq = read_seqcount_begin(&priv->count);
+- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
++ ext = __nft_rbtree_lookup(net, set, key, seq);
+ read_unlock_bh(&priv->lock);
+
+- return ret;
++ return ext;
+ }
+
+ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
+--
+2.51.0
+
--- /dev/null
+From 19f67c56957376ea8e7f52c1237eff0dbbd5d4f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:18 +0200
+Subject: netfilter: nft_set_pipapo: don't check genbit from packetpath lookups
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit c4eaca2e1052adfd67bed0a36a9d4b8e515666e4 ]
+
+The pipapo set type is special in that it has two copies of its
+data structure: one live copy containing only valid elements and one
+on-demand clone used during transactions, where adds/deletes happen.
+
+This clone is not visible to the datapath.
+
+This is unlike all other set types in nftables; those all link new
+elements into their live hlist/tree.
+
+For those sets, the lookup functions must skip the new elements while the
+transaction is ongoing to ensure consistency.
+
+As the clone is shallow, removal does have an effect on the packet path:
+once the transaction enters the commit phase, the 'gencursor' bit that
+determines which elements are active and which elements should be ignored
+(because they are no longer valid) is flipped.
+
+This causes the datapath lookup to ignore these elements if they are found
+during lookup.
+
+This opens up a small race window where pipapo has an inconsistent view of
+the dataset from when the transaction-cpu flipped the genbit until the
+transaction-cpu calls nft_pipapo_commit() to swap live/clone pointers:
+
+cpu0 cpu1
+ has added new elements to clone
+ has marked elements as being
+ inactive in new generation
+ perform lookup in the set
+ enters commit phase:
+
+I) increments the genbit
+ A) observes new genbit
+ removes elements from the clone so
+ they won't be found anymore
+ B) lookup in datastructure
+ can't see new elements yet,
+ but old elements are ignored
+ -> Only matches elements that
+ were not changed in the
+ transaction
+II) calls nft_pipapo_commit(), clone
+ and live pointers are swapped.
+ C New nft_lookup happening now
+ will find matching elements.
+
+Consider a packet P matching range r1-r2:
+
+cpu0 processes the following transaction:
+1. remove r1-r2
+2. add r1-r3
+
+P is contained in both ranges. Therefore, cpu1 should always find a match
+for P. Due to the above race, this is not the case:
+
+cpu1 does find r1-r2, but then ignores it due to the genbit indicating
+the range has been removed.
+
+At the same time, r1-r3 is not visible yet, because it can only be found
+in the clone.
+
+The situation persists for all lookups until after cpu0 hits II).
+
+The fix is easy: Don't check the genbit from pipapo lookup functions.
+This is possible because unlike the other set types, the new elements are
+not reachable from the live copy of the dataset.
+
+The clone/live pointer swap is enough to avoid matching on old elements
+while at the same time all new elements are exposed in one go.
+
+After this change, step B above returns a match in r1-r2.
+This is fine: r1-r2 only becomes truly invalid the moment it is freed.
+This happens only after a synchronize_rcu() call, and the rcu read lock
+is held via netfilter hook traversal (nf_hook_slow()).
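+
+Condensed, the datapath side of the change is (a sketch of the hunks
+below, not standalone code):
+
+  /* nft_pipapo_lookup(): no genmask filtering on the packet path */
+  e = pipapo_get(m, (const u8 *)key, NFT_GENMASK_ANY, get_jiffies_64());
+
+  /* nft_pipapo_avx2_lookup(): only expiry is checked, not the genbit */
+  if (unlikely(nft_set_elem_expired(e)))
+          goto next_match;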
+
+Cc: Stefano Brivio <sbrivio@redhat.com>
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 20 ++++++++++++++++++--
+ net/netfilter/nft_set_pipapo_avx2.c | 4 +---
+ 2 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 1a19649c28511..fa6741b3205a6 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -511,6 +511,23 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ *
+ * This function is called from the data path. It will search for
+ * an element matching the given key in the current active copy.
++ * Unlike other set types, this uses NFT_GENMASK_ANY instead of
++ * nft_genmask_cur().
++ *
++ * This is because new (future) elements are not reachable from
++ * priv->match, they get added to priv->clone instead.
++ * When the commit phase flips the generation bitmask, the
++ * 'now old' entries are skipped but without the 'now current'
++ * elements becoming visible. Using nft_genmask_cur() thus creates
++ * inconsistent state: matching old entries get skipped but the
++ * newly matching entries are unreachable.
++ *
++ * NFT_GENMASK_ANY will still find the 'now old' entries, which ensures
++ * a consistent priv->match view.
++ *
++ * nft_pipapo_commit swaps ->clone and ->match shortly after the
++ * genbit flip. As ->clone doesn't contain the old entries in the first
++ * place, lookup will only find the now-current ones.
+ *
++ * Return: nftables API extension pointer or NULL if no match.
+ */
+@@ -519,12 +536,11 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+- u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_elem *e;
+
+ m = rcu_dereference(priv->match);
+- e = pipapo_get(m, (const u8 *)key, genmask, get_jiffies_64());
++ e = pipapo_get(m, (const u8 *)key, NFT_GENMASK_ANY, get_jiffies_64());
+
+ return e ? &e->ext : NULL;
+ }
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 2155c7f345c21..39e356c9687a9 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1153,7 +1153,6 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_set_ext *ext = NULL;
+ struct nft_pipapo_scratch *scratch;
+- u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+@@ -1249,8 +1248,7 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ if (last) {
+ const struct nft_set_ext *e = &f->mt[ret].e->ext;
+
+- if (unlikely(nft_set_elem_expired(e) ||
+- !nft_set_elem_active(e, genmask)))
++ if (unlikely(nft_set_elem_expired(e)))
+ goto next_match;
+
+ ext = e;
+--
+2.51.0
+
--- /dev/null
+From 341ef8e1a55209d630d3d1d0ec61c51cae62ce21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 12:10:41 +0200
+Subject: netfilter: nft_set_pipapo: don't return bogus extension pointer
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit c8a7c2c608180f3b4e51dc958b3861242dcdd76d ]
+
+Dan Carpenter says:
+Commit 17a20e09f086 ("netfilter: nft_set: remove one argument from
+lookup and update functions") [..] leads to the following Smatch
+static checker warning:
+
+ net/netfilter/nft_set_pipapo_avx2.c:1269 nft_pipapo_avx2_lookup()
+ error: uninitialized symbol 'ext'.
+
+Fix this by initializing ext to NULL and setting it only once we've
+found a match.
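+
+Reduced to its essence, the fix follows this pattern (an illustrative
+userspace sketch, not the kernel code):
+
+  #include <stddef.h>
+
+  const int *find_first_positive(const int *arr, size_t n)
+  {
+          const int *match = NULL;  /* "no match" unless proven otherwise */
+          size_t i;
+
+          for (i = 0; i < n; i++) {
+                  if (arr[i] > 0) {
+                          match = &arr[i];  /* set only on a real match */
+                          break;
+                  }
+          }
+          return match;  /* never an indeterminate value */
+  }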
+
+Fixes: 17a20e09f086 ("netfilter: nft_set: remove one argument from lookup and update functions")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/netfilter-devel/aJBzc3V5wk-yPOnH@stanley.mountain/
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo_avx2.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 6c441e2dc8af3..2155c7f345c21 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1151,12 +1151,12 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
++ const struct nft_set_ext *ext = NULL;
+ struct nft_pipapo_scratch *scratch;
+ u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+- const struct nft_set_ext *ext;
+ unsigned long *res, *fill;
+ bool map_index;
+ int i;
+@@ -1247,13 +1247,13 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ goto out;
+
+ if (last) {
+- ext = &f->mt[ret].e->ext;
+- if (unlikely(nft_set_elem_expired(ext) ||
+- !nft_set_elem_active(ext, genmask))) {
+- ext = NULL;
++ const struct nft_set_ext *e = &f->mt[ret].e->ext;
++
++ if (unlikely(nft_set_elem_expired(e) ||
++ !nft_set_elem_active(e, genmask)))
+ goto next_match;
+- }
+
++ ext = e;
+ goto out;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From aecfcd09507d9ca18eacfca62131eb54d3181b5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:15 +0200
+Subject: netfilter: nft_set_pipapo: merge pipapo_get/lookup
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit d8d871a35ca9ee4881d34995444ed1cb826d01db ]
+
+The matching algorithm has been implemented three times:
+1. data path lookup, generic version
+2. data path lookup, avx2 version
+3. control plane lookup
+
+Merge 1 and 3 by refactoring pipapo_get as a common helper, then make
+nft_pipapo_lookup and nft_pipapo_get both call the common helper.
+
+Aside from the code savings, this has the benefit that we no longer allocate
+temporary scratch maps for each control plane get and insertion operation.
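+
+After the merge, both entry points are thin wrappers around the shared
+helper (condensed from the hunks below):
+
+  /* datapath: nft_pipapo_lookup() */
+  m = rcu_dereference(priv->match);
+  e = pipapo_get(m, (const u8 *)key, genmask, get_jiffies_64());
+  return e ? &e->ext : NULL;
+
+  /* control plane: nft_pipapo_get() */
+  e = pipapo_get(m, (const u8 *)elem->key.val.data,
+                 nft_genmask_cur(net), get_jiffies_64());
+  if (!e)
+          return ERR_PTR(-ENOENT);
+  return &e->priv;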
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 188 ++++++++++-----------------------
+ 1 file changed, 58 insertions(+), 130 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index a844b33fa6002..1a19649c28511 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -397,35 +397,36 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ }
+
+ /**
+- * nft_pipapo_lookup() - Lookup function
+- * @net: Network namespace
+- * @set: nftables API set representation
+- * @key: nftables API element representation containing key data
+- * @ext: nftables API extension pointer, filled with matching reference
++ * pipapo_get() - Get matching element reference given key data
++ * @m: storage containing the set elements
++ * @data: Key data to be matched against existing elements
++ * @genmask: If set, check that element is active in given genmask
++ * @tstamp: timestamp to check for expired elements
+ *
+ * For more details, see DOC: Theory of Operation.
+ *
+- * Return: true on match, false otherwise.
++ * This is the main lookup function. It matches key data against either
++ * the working match set or the uncommitted copy, depending on what the
++ * caller passed to us.
++ * nft_pipapo_get (lookup from userspace/control plane) and nft_pipapo_lookup
++ * (datapath lookup) pass the active copy.
++ * The insertion path will pass the uncommitted working copy.
++ *
++ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
+ */
+-const struct nft_set_ext *
+-nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
++static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
++ const u8 *data, u8 genmask,
++ u64 tstamp)
+ {
+- struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+ unsigned long *res_map, *fill_map;
+- u8 genmask = nft_genmask_cur(net);
+- const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+- const u8 *rp = (const u8 *)key;
+ bool map_index;
+ int i;
+
+ local_bh_disable();
+
+- m = rcu_dereference(priv->match);
+-
+- if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
++ if (unlikely(!raw_cpu_ptr(m->scratch)))
+ goto out;
+
+ scratch = *raw_cpu_ptr(m->scratch);
+@@ -445,12 +446,12 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ * packet bytes value, then AND bucket value
+ */
+ if (likely(f->bb == 8))
+- pipapo_and_field_buckets_8bit(f, res_map, rp);
++ pipapo_and_field_buckets_8bit(f, res_map, data);
+ else
+- pipapo_and_field_buckets_4bit(f, res_map, rp);
++ pipapo_and_field_buckets_4bit(f, res_map, data);
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+
+- rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
++ data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+
+ /* Now populate the bitmap for the next field, unless this is
+ * the last field, in which case return the matched 'ext'
+@@ -470,11 +471,11 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ if (last) {
+- const struct nft_set_ext *ext;
++ struct nft_pipapo_elem *e;
+
+- ext = &f->mt[b].e->ext;
+- if (unlikely(nft_set_elem_expired(ext) ||
+- !nft_set_elem_active(ext, genmask)))
++ e = f->mt[b].e;
++ if (unlikely(__nft_set_elem_expired(&e->ext, tstamp) ||
++ !nft_set_elem_active(&e->ext, genmask)))
+ goto next_match;
+
+ /* Last field: we're just returning the key without
+@@ -484,8 +485,7 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ */
+ scratch->map_index = map_index;
+ local_bh_enable();
+-
+- return ext;
++ return e;
+ }
+
+ /* Swap bitmap indices: res_map is the initial bitmap for the
+@@ -495,7 +495,7 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ map_index = !map_index;
+ swap(res_map, fill_map);
+
+- rp += NFT_PIPAPO_GROUPS_PADDING(f);
++ data += NFT_PIPAPO_GROUPS_PADDING(f);
+ }
+
+ out:
+@@ -504,99 +504,29 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ /**
+- * pipapo_get() - Get matching element reference given key data
+- * @m: storage containing active/existing elements
+- * @data: Key data to be matched against existing elements
+- * @genmask: If set, check that element is active in given genmask
+- * @tstamp: timestamp to check for expired elements
+- * @gfp: the type of memory to allocate (see kmalloc).
++ * nft_pipapo_lookup() - Dataplane front-end for the main lookup function
++ * @net: Network namespace
++ * @set: nftables API set representation
++ * @key: pointer to nft registers containing key data
+ *
+- * This is essentially the same as the lookup function, except that it matches
+- * key data against the uncommitted copy and doesn't use preallocated maps for
+- * bitmap results.
++ * This function is called from the data path. It will search for
++ * an element matching the given key in the current active copy.
+ *
+- * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
++ * Return: nftables API extension pointer or NULL if no match.
+ */
+-static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+- const u8 *data, u8 genmask,
+- u64 tstamp, gfp_t gfp)
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+- struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+- unsigned long *res_map, *fill_map = NULL;
+- const struct nft_pipapo_field *f;
+- int i;
+-
+- if (m->bsize_max == 0)
+- return ret;
+-
+- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
+- if (!res_map) {
+- ret = ERR_PTR(-ENOMEM);
+- goto out;
+- }
+-
+- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
+- if (!fill_map) {
+- ret = ERR_PTR(-ENOMEM);
+- goto out;
+- }
+-
+- pipapo_resmap_init(m, res_map);
+-
+- nft_pipapo_for_each_field(f, i, m) {
+- bool last = i == m->field_count - 1;
+- int b;
+-
+- /* For each bit group: select lookup table bucket depending on
+- * packet bytes value, then AND bucket value
+- */
+- if (f->bb == 8)
+- pipapo_and_field_buckets_8bit(f, res_map, data);
+- else if (f->bb == 4)
+- pipapo_and_field_buckets_4bit(f, res_map, data);
+- else
+- BUG();
+-
+- data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+-
+- /* Now populate the bitmap for the next field, unless this is
+- * the last field, in which case return the matched 'ext'
+- * pointer if any.
+- *
+- * Now res_map contains the matching bitmap, and fill_map is the
+- * bitmap for the next field.
+- */
+-next_match:
+- b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+- last);
+- if (b < 0)
+- goto out;
+-
+- if (last) {
+- if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
+- goto next_match;
+- if ((genmask &&
+- !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+- goto next_match;
+-
+- ret = f->mt[b].e;
+- goto out;
+- }
+-
+- data += NFT_PIPAPO_GROUPS_PADDING(f);
++ struct nft_pipapo *priv = nft_set_priv(set);
++ u8 genmask = nft_genmask_cur(net);
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_elem *e;
+
+- /* Swap bitmap indices: fill_map will be the initial bitmap for
+- * the next field (i.e. the new res_map), and res_map is
+- * guaranteed to be all-zeroes at this point, ready to be filled
+- * according to the next mapping table.
+- */
+- swap(res_map, fill_map);
+- }
++ m = rcu_dereference(priv->match);
++ e = pipapo_get(m, (const u8 *)key, genmask, get_jiffies_64());
+
+-out:
+- kfree(fill_map);
+- kfree(res_map);
+- return ret;
++ return e ? &e->ext : NULL;
+ }
+
+ /**
+@@ -605,6 +535,11 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ * @flags: Unused
++ *
++ * This function is called from the control plane path under
++ * RCU read lock.
++ *
++ * Return: set element private pointer or ERR_PTR(-ENOENT).
+ */
+ static struct nft_elem_priv *
+ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+@@ -615,10 +550,9 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo_elem *e;
+
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+- nft_genmask_cur(net), get_jiffies_64(),
+- GFP_ATOMIC);
+- if (IS_ERR(e))
+- return ERR_CAST(e);
++ nft_genmask_cur(net), get_jiffies_64());
++ if (!e)
++ return ERR_PTR(-ENOENT);
+
+ return &e->priv;
+ }
+@@ -1343,8 +1277,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ else
+ end = start;
+
+- dup = pipapo_get(m, start, genmask, tstamp, GFP_KERNEL);
+- if (!IS_ERR(dup)) {
++ dup = pipapo_get(m, start, genmask, tstamp);
++ if (dup) {
+ /* Check if we already have the same exact entry */
+ const struct nft_data *dup_key, *dup_end;
+
+@@ -1363,15 +1297,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ return -ENOTEMPTY;
+ }
+
+- if (PTR_ERR(dup) == -ENOENT) {
+- /* Look for partially overlapping entries */
+- dup = pipapo_get(m, end, nft_genmask_next(net), tstamp,
+- GFP_KERNEL);
+- }
+-
+- if (PTR_ERR(dup) != -ENOENT) {
+- if (IS_ERR(dup))
+- return PTR_ERR(dup);
++ /* Look for partially overlapping entries */
++ dup = pipapo_get(m, end, nft_genmask_next(net), tstamp);
++ if (dup) {
+ *elem_priv = &dup->priv;
+ return -ENOTEMPTY;
+ }
+@@ -1913,8 +1841,8 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ return NULL;
+
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+- nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
+- if (IS_ERR(e))
++ nft_genmask_next(net), nft_net_tstamp(net));
++ if (!e)
+ return NULL;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+--
+2.51.0
+
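The merge above also changes pipapo_get()'s contract: instead of ERR_PTR(-ENOENT)
or ERR_PTR(-ENOMEM) it now returns either a valid element or NULL, which is what
lets the callers drop their IS_ERR()/PTR_ERR() handling. Below is a standalone C
sketch of the two return conventions; the ERR_PTR()/IS_ERR() helpers are
simplified stand-ins for the kernel's <linux/err.h>, and lookup_errptr(),
lookup_null() and the table are invented for illustration:

 #include <stdio.h>
 #include <stdint.h>

 /* Simplified userspace stand-ins for the kernel's <linux/err.h>. */
 #define MAX_ERRNO 4095
 static void *ERR_PTR(long error) { return (void *)error; }
 static long PTR_ERR(const void *ptr) { return (long)ptr; }
 static int IS_ERR(const void *ptr)
 {
 	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
 }

 static int table[] = { 10, 20, 30 };

 /* Old convention: an error pointer encodes why the lookup failed. */
 static int *lookup_errptr(int idx)
 {
 	if (idx < 0 || idx > 2)
 		return ERR_PTR(-2);	/* i.e. -ENOENT */
 	return &table[idx];
 }

 /* New convention: NULL simply means "no match". */
 static int *lookup_null(int idx)
 {
 	return (idx >= 0 && idx <= 2) ? &table[idx] : NULL;
 }

 int main(void)
 {
 	int *p = lookup_errptr(7);

 	if (IS_ERR(p))
 		printf("errptr convention: error %ld\n", PTR_ERR(p));
 	if (!lookup_null(7))
 		printf("NULL convention: no match\n");
 	return 0;
 }

In the patch, nft_pipapo_lookup() forwards the NULL as-is to the datapath, while
nft_pipapo_get() maps it back to ERR_PTR(-ENOENT) for the control plane.
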
--- /dev/null
+From b9c81354db13b0629bb224273cd446f73ac9808d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:12 +0200
+Subject: netfilter: nft_set_pipapo: remove unused arguments
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 7792c1e03054440c60d4bce0c06a31c134601997 ]
+
+They are not used anymore, so remove them.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9e4e25f2458f9..9ac48e6b4332c 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -502,8 +502,6 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+
+ /**
+ * pipapo_get() - Get matching element reference given key data
+- * @net: Network namespace
+- * @set: nftables API set representation
+ * @m: storage containing active/existing elements
+ * @data: Key data to be matched against existing elements
+ * @genmask: If set, check that element is active in given genmask
+@@ -516,9 +514,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ *
+ * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
+ */
+-static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+- const struct nft_set *set,
+- const struct nft_pipapo_match *m,
++static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ const u8 *data, u8 genmask,
+ u64 tstamp, gfp_t gfp)
+ {
+@@ -615,7 +611,7 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo_match *m = rcu_dereference(priv->match);
+ struct nft_pipapo_elem *e;
+
+- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
++ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
+ if (IS_ERR(e))
+@@ -1344,7 +1340,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ else
+ end = start;
+
+- dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
++ dup = pipapo_get(m, start, genmask, tstamp, GFP_KERNEL);
+ if (!IS_ERR(dup)) {
+ /* Check if we already have the same exact entry */
+ const struct nft_data *dup_key, *dup_end;
+@@ -1366,7 +1362,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+
+ if (PTR_ERR(dup) == -ENOENT) {
+ /* Look for partially overlapping entries */
+- dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
++ dup = pipapo_get(m, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
+ }
+
+@@ -1913,7 +1909,7 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ if (!m)
+ return NULL;
+
+- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
++ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
+ if (IS_ERR(e))
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 6ffcd0ee97b753053736b0bf7c3a28aa6074a3f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:19 +0200
+Subject: netfilter: nft_set_rbtree: continue traversal if element is inactive
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit a60f7bf4a1524d8896b76ba89623080aebf44272 ]
+
+When the rbtree lookup function finds a match in the rbtree, it sets the
+range start interval to a potentially inactive element.
+
+Then, after tree lookup, if the matching element is inactive, it returns
+NULL and suppresses a matching result.
+
+This is wrong and leads to false negative matches when a transaction has
+already entered the commit phase.
+
+cpu0 cpu1
+ has added new elements to clone
+ has marked elements as being
+ inactive in new generation
+ perform lookup in the set
+ enters commit phase:
+I) increments the genbit
+ A) observes new genbit
+ B) finds matching range
+ C) returns no match: found
+ range invalid in new generation
+II) removes old elements from the tree
+ C New nft_lookup happening now
+ will find matching element,
+ because it is no longer
+ obscured by old, inactive one.
+
+Consider a packet P matching range r1-r2:
+
+cpu0 processes following transaction:
+1. remove r1-r2
+2. add r1-r3
+
+P is contained in both ranges. Therefore, cpu1 should always find a match
+for P. Due to above race, this is not the case:
+
+cpu1 does find r1-r2, but then ignores it due to the genbit indicating
+the range has been removed. It does NOT test for further matches.
+
+The situation persists for all lookups until after cpu0 hits II) after
+which r1-r3 range start node is tested for the first time.
+
+Move the "interval start is valid" check ahead so that tree traversal
+continues if the starting interval is not valid in this generation.
+
+Thanks to Stefan Hanreich for providing an initial reproducer for this
+bug.
+
+Reported-by: Stefan Hanreich <s.hanreich@proxmox.com>
+Fixes: c1eda3c6394f ("netfilter: nft_rbtree: ignore inactive matching element with no descendants")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_rbtree.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 938a257c069e2..b1f04168ec937 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -77,7 +77,9 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ nft_rbtree_interval_end(rbe) &&
+ nft_rbtree_interval_start(interval))
+ continue;
+- interval = rbe;
++ if (nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_rbtree_elem_expired(rbe))
++ interval = rbe;
+ } else if (d > 0)
+ parent = rcu_dereference_raw(parent->rb_right);
+ else {
+@@ -102,8 +104,6 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+- nft_set_elem_active(&interval->ext, genmask) &&
+- !nft_rbtree_elem_expired(interval) &&
+ nft_rbtree_interval_start(interval))
+ return &interval->ext;
+
+--
+2.51.0
+
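The crux of the fix: whether a candidate is active in the current generation must
be checked while the traversal is still running, so that an inactive duplicate
cannot terminate the search and shadow an older, still-active element. A
flat-array analogue of that rule follows (not the actual rbtree descent; the
struct and values are invented):

 #include <stdio.h>
 #include <stdbool.h>

 struct elem { int start; bool active; };

 /* Starts sorted ascending; find the matching *active* start for key.
  * The old behaviour latched the last start <= key unconditionally and
  * only checked activity afterwards, so an inactive duplicate could
  * turn a real match into a miss.
  */
 static const struct elem *find_start(const struct elem *v, int n, int key)
 {
 	const struct elem *candidate = NULL;

 	for (int i = 0; i < n && v[i].start <= key; i++)
 		if (v[i].active)	/* validity checked during the walk */
 			candidate = &v[i];
 	return candidate;
 }

 int main(void)
 {
 	/* r1-r2 was re-added as r1-r3; the old copy of the r1 start is
 	 * already inactive in the new generation.
 	 */
 	const struct elem set[] = {
 		{ .start = 1, .active = true },
 		{ .start = 1, .active = false },
 	};

 	printf("match: %s\n", find_start(set, 2, 5) ? "yes" : "no");
 	return 0;
 }
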
--- /dev/null
+From 664c3e38f9df802ea45df7ea9dbc9734f4fff688 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 17:39:56 +0800
+Subject: phy: qualcomm: phy-qcom-eusb2-repeater: fix override properties
+
+From: Pengyu Luo <mitltlatltl@gmail.com>
+
+[ Upstream commit 942e47ab228c7dd27c2ae043c17e7aab2028082c ]
+
+property "qcom,tune-usb2-preem" is for EUSB2_TUNE_USB2_PREEM
+property "qcom,tune-usb2-amplitude" is for EUSB2_TUNE_IUSB2
+
+The downstream correspondence is as follows:
+EUSB2_TUNE_USB2_PREEM: Tx pre-emphasis tuning
+EUSB2_TUNE_IUSB2: HS transmit amplitude
+EUSB2_TUNE_SQUELCH_U: Squelch detection threshold
+EUSB2_TUNE_HSDISC: HS disconnect threshold
+EUSB2_TUNE_EUSB_SLEW: slew rate
+
+Fixes: 31bc94de7602 ("phy: qualcomm: phy-qcom-eusb2-repeater: Don't zero-out registers")
+Signed-off-by: Pengyu Luo <mitltlatltl@gmail.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Reviewed-by: Luca Weiss <luca.weiss@fairphone.com>
+Link: https://lore.kernel.org/r/20250812093957.32235-1-mitltlatltl@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 163950e16dbe1..c173c6244d9e5 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -127,13 +127,13 @@ static int eusb2_repeater_init(struct phy *phy)
+ rptr->cfg->init_tbl[i].value);
+
+ /* Override registers from devicetree values */
+- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
++ if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
+
+- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
++ if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+
+ /* Wait for status OK */
+--
+2.51.0
+
--- /dev/null
+From 63e7b46b6b34bcc5427f239f93be1507bdaa00c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:09:13 +0200
+Subject: regulator: sy7636a: fix lifecycle of power good gpio
+
+From: Andreas Kemnade <akemnade@kernel.org>
+
+[ Upstream commit c05d0b32eebadc8be6e53196e99c64cf2bed1d99 ]
+
+Attach the power good GPIO to the regulator device's devres instead of
+the parent device to fix problems if probe is run multiple times
+(rmmod/insmod or some deferral).
+
+Fixes: 8c485bedfb785 ("regulator: sy7636a: Initial commit")
+Signed-off-by: Andreas Kemnade <akemnade@kernel.org>
+Reviewed-by: Alistair Francis <alistair@alistair23.me>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Message-ID: <20250906-sy7636-rsrc-v1-2-e2886a9763a7@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/sy7636a-regulator.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index d1e7ba1fb3e1a..27e3d939b7bb9 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ }
+
+ config.dev = &pdev->dev;
+- config.dev->of_node = pdev->dev.parent->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+--
+2.51.0
+
--- /dev/null
+From d63c53439f79d2082ae05d622f038a248784d4a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 06:32:50 -0800
+Subject: RISC-V: Remove unnecessary include from compat.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Palmer Dabbelt <palmer@rivosinc.com>
+
+[ Upstream commit 8d4f1e05ff821a5d59116ab8c3a30fcae81d8597 ]
+
+Without this I get a bunch of build errors like
+
+ In file included from ./include/linux/sched/task_stack.h:12,
+ from ./arch/riscv/include/asm/compat.h:12,
+ from ./arch/riscv/include/asm/pgtable.h:115,
+ from ./include/linux/pgtable.h:6,
+ from ./include/linux/mm.h:30,
+ from arch/riscv/kernel/asm-offsets.c:8:
+ ./include/linux/kasan.h:50:37: error: ‘MAX_PTRS_PER_PTE’ undeclared here (not in a function); did you mean ‘PTRS_PER_PTE’?
+ 50 | extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PTE
+ ./include/linux/kasan.h:51:8: error: unknown type name ‘pmd_t’; did you mean ‘pgd_t’?
+ 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:51:37: error: ‘MAX_PTRS_PER_PMD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+ ./include/linux/kasan.h:52:8: error: unknown type name ‘pud_t’; did you mean ‘pgd_t’?
+ 52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:52:37: error: ‘MAX_PTRS_PER_PUD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+ ./include/linux/kasan.h:53:8: error: unknown type name ‘p4d_t’; did you mean ‘pgd_t’?
+ 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:53:37: error: ‘MAX_PTRS_PER_P4D’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+
+Link: https://lore.kernel.org/r/20241126143250.29708-1-palmer@rivosinc.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/compat.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
+index aa103530a5c83..6081327e55f5b 100644
+--- a/arch/riscv/include/asm/compat.h
++++ b/arch/riscv/include/asm/compat.h
+@@ -9,7 +9,6 @@
+ */
+ #include <linux/types.h>
+ #include <linux/sched.h>
+-#include <linux/sched/task_stack.h>
+ #include <asm-generic/compat.h>
+
+ static inline int is_compat_task(void)
+--
+2.51.0
+
input-iqs7222-avoid-enabling-unused-interrupts.patch
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
revert-net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+disable-slub_tiny-for-build-testing.patch
+drm-panthor-validate-group-queue-count.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+genetlink-fix-genl_bind-invoking-bind-after-eperm.patch
+net-bridge-bounce-invalid-boolopts.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+docs-networking-can-change-bcm_msg_head-frames-membe.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+drm-amd-display-use-udelay-rather-than-fsleep.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+can-xilinx_can-xcan_write_frame-fix-use-after-free-o.patch
+netfilter-nft_set_pipapo-remove-unused-arguments.patch
+netfilter-nft_set-remove-one-argument-from-lookup-an.patch
+netfilter-nft_set_pipapo-merge-pipapo_get-lookup.patch
+netfilter-nft_set_pipapo-don-t-return-bogus-extensio.patch
+netfilter-nft_set_pipapo-don-t-check-genbit-from-pac.patch
+netfilter-nft_set_rbtree-continue-traversal-if-eleme.patch
+netfilter-nf_tables-reintroduce-shortened-deletion-n.patch
+netfilter-nf_tables-place-base_seq-in-struct-net.patch
+netfilter-nf_tables-make-nft_set_do_lookup-available.patch
+netfilter-nf_tables-restart-set-lookup-on-base_seq-c.patch
+net-hsr-add-vlan-ctag-filter-support.patch
+hsr-use-rtnl-lock-when-iterating-over-ports.patch
+hsr-use-hsr_for_each_port_rtnl-in-hsr_port_get_hsr.patch
+phy-qualcomm-phy-qcom-eusb2-repeater-fix-override-pr.patch
+dmaengine-idxd-remove-improper-idxd_free.patch
+dmaengine-idxd-fix-refcount-underflow-on-module-unlo.patch
+dmaengine-idxd-fix-double-free-in-idxd_setup_wqs.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
+regulator-sy7636a-fix-lifecycle-of-power-good-gpio.patch
+risc-v-remove-unnecessary-include-from-compat.h.patch
--- /dev/null
+From 0a20574245d19ae1a9f4eac63df8ca27004d2ebe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and if Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such case the ICMP packet might
+go through the segmentation logic (and dropped) later if it reaches a
+path were the GSO status is checked and segmentation is required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example on re-transmission the skb won't be a
+ GSO one as it would be segmented (len > MSS) and the ICMP packet
+ should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
+
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index f65d2f7273813..8392d304a72eb 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -204,6 +204,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -298,6 +301,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
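The pattern behind the fix: when a buffer that described one packet is rebuilt in
place into a different packet, the metadata describing the old payload must be
reset along with the payload. A toy standalone analogue (struct pkt and its
fields are invented; the real code resets the skb state with skb_gso_reset()):

 #include <stdio.h>

 /* Toy stand-in for an skb: gso_segs describes the old payload. */
 struct pkt {
 	int gso_segs;
 	char data[64];
 };

 /* Rebuild an error reply in place, the way iptunnel_pmtud_build_icmp()
  * reuses the original skb. Metadata about the old payload must be
  * reset, or later code still treats the small reply as a GSO packet.
  */
 static void build_reply(struct pkt *p)
 {
 	p->gso_segs = 0;	/* the fix: clear stale GSO metadata */
 	snprintf(p->data, sizeof(p->data), "ICMP frag-needed");
 }

 int main(void)
 {
 	struct pkt p = { .gso_segs = 4, .data = "big TCP segment" };

 	build_reply(&p);
 	printf("gso_segs=%d data=%s\n", p.gso_segs, p.data);
 	return 0;
 }
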
--- /dev/null
+From 84a07b9e7a229dac84b18831580259f435cd3c47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 23:07:24 +0900
+Subject: can: j1939: implement NETDEV_UNREGISTER notification handler
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 7fcbe5b2c6a4b5407bf2241fdb71e0a390f6ab9a ]
+
+syzbot is reporting
+
+ unregister_netdevice: waiting for vcan0 to become free. Usage count = 2
+
+problem, because the j1939 protocol did not have a NETDEV_UNREGISTER
+notification handler for undoing the changes made by j1939_sk_bind().
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But we need to call
+j1939_priv_put() against the extra ref held by the j1939_sk_bind() call
+(as part of undoing the changes made by j1939_sk_bind()) as soon as the
+NETDEV_UNREGISTER notification fires (i.e. before j1939_sk_sock_destruct()
+is called via j1939_sk_release()). Otherwise, the extra ref on "struct
+j1939_priv" held by the j1939_sk_bind() call prevents "struct net_device"
+from dropping the usage count to 1, making it impossible for
+unregister_netdevice() to continue.
+
+Reported-by: syzbot <syzbot+881d65229ca4f9ae8c84@syzkaller.appspotmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=881d65229ca4f9ae8c84
+Tested-by: syzbot <syzbot+881d65229ca4f9ae8c84@syzkaller.appspotmail.com>
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/ac9db9a4-6c30-416e-8b94-96e6559d55b2@I-love.SAKURA.ne.jp
+[mkl: remove space in front of label]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/j1939-priv.h | 1 +
+ net/can/j1939/main.c | 3 +++
+ net/can/j1939/socket.c | 49 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 53 insertions(+)
+
+diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
+index 31a93cae5111b..81f58924b4acd 100644
+--- a/net/can/j1939/j1939-priv.h
++++ b/net/can/j1939/j1939-priv.h
+@@ -212,6 +212,7 @@ void j1939_priv_get(struct j1939_priv *priv);
+
+ /* notify/alert all j1939 sockets bound to ifindex */
+ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
++void j1939_sk_netdev_event_unregister(struct j1939_priv *priv);
+ int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
+ void j1939_tp_init(struct j1939_priv *priv);
+
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index 7e8a20f2fc42b..3706a872ecafd 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -377,6 +377,9 @@ static int j1939_netdev_notify(struct notifier_block *nb,
+ j1939_sk_netdev_event_netdown(priv);
+ j1939_ecu_unmap_all(priv);
+ break;
++ case NETDEV_UNREGISTER:
++ j1939_sk_netdev_event_unregister(priv);
++ break;
+ }
+
+ j1939_priv_put(priv);
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 6fefe7a687611..b3a45aa70cf2f 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -1299,6 +1299,55 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
+ read_unlock_bh(&priv->j1939_socks_lock);
+ }
+
++void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
++{
++ struct sock *sk;
++ struct j1939_sock *jsk;
++ bool wait_rcu = false;
++
++rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
++ read_lock_bh(&priv->j1939_socks_lock);
++ list_for_each_entry(jsk, &priv->j1939_socks, list) {
++ /* Skip if j1939_jsk_add() is not called on this socket. */
++ if (!(jsk->state & J1939_SOCK_BOUND))
++ continue;
++ sk = &jsk->sk;
++ sock_hold(sk);
++ read_unlock_bh(&priv->j1939_socks_lock);
++ /* Check if j1939_jsk_del() is not yet called on this socket after holding
++ * socket's lock, for both j1939_sk_bind() and j1939_sk_release() call
++ * j1939_jsk_del() with socket's lock held.
++ */
++ lock_sock(sk);
++ if (jsk->state & J1939_SOCK_BOUND) {
++ /* Neither j1939_sk_bind() nor j1939_sk_release() called j1939_jsk_del().
++ * Make this socket no longer bound, by pretending as if j1939_sk_bind()
++ * dropped old references but did not get new references.
++ */
++ j1939_jsk_del(priv, jsk);
++ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
++ j1939_netdev_stop(priv);
++ /* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
++ * calling the corresponding j1939_priv_put().
++ *
++ * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
++ * an RCU grace period. But since the caller is holding a ref on this
++ * "priv", we can defer synchronize_rcu() until immediately before
++ * the caller calls j1939_priv_put().
++ */
++ j1939_priv_put(priv);
++ jsk->priv = NULL;
++ wait_rcu = true;
++ }
++ release_sock(sk);
++ sock_put(sk);
++ goto rescan;
++ }
++ read_unlock_bh(&priv->j1939_socks_lock);
++ if (wait_rcu)
++ synchronize_rcu();
++}
++
+ static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+ {
+--
+2.51.0
+
--- /dev/null
+From da195a99ae7dd54287fe1bd7e11e75a288bde250 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() only
+when J1939_SOCK_BOUND was already set, but the error handling path of
+j1939_sk_bind() will not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo
+priv->ents[sa].nusers++ when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 39844f14eed86..797719cb227ec 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
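The rule this patch enforces: a counter bumped early in a get-style function must
be rolled back on its own error paths, because a caller that sees the error will
never call the matching put(). A standalone sketch of the idiom, with all names
invented:

 #include <stdio.h>

 static int users;

 static int create_ecu(int fail) { return fail ? -1 : 0; }

 /* Mirrors the shape of j1939_local_ecu_get(): the counter is bumped
  * up front, so every error exit must roll it back.
  */
 static int get(int fail)
 {
 	users++;
 	if (create_ecu(fail)) {
 		users--;	/* the fix: undo the increment on error */
 		return -1;
 	}
 	return 0;
 }

 static void put(void) { users--; }

 int main(void)
 {
 	if (get(1))
 		printf("failed get, users=%d\n", users);	/* balanced: 0 */
 	if (!get(0)) {
 		put();
 		printf("paired get/put, users=%d\n", users);	/* 0 */
 	}
 	return 0;
 }
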
--- /dev/null
+From 9743cb0007b0d61989daba6535fb272741f50e68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index b3a45aa70cf2f..785b883a1319d 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 52cf48ff356a523e17edb9f92497be732db0bf8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 12:50:02 +0300
+Subject: can: xilinx_can: xcan_write_frame(): fix use-after-free of
+ transmitted SKB
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+[ Upstream commit ef79f00be72bd81d2e1e6f060d83cf7e425deee4 ]
+
+can_put_echo_skb() takes ownership of the SKB and it may be freed
+during or after the call.
+
+However, xilinx_can xcan_write_frame() keeps using SKB after the call.
+
+Fix that by only calling can_put_echo_skb() after the code is done
+touching the SKB.
+
+The tx_lock is held for the entire xcan_write_frame() execution and
+also on the can_get_echo_skb() side so the order of operations does not
+matter.
+
+An earlier fix commit 3d3c817c3a40 ("can: xilinx_can: Fix usage of skb
+memory") did not move the can_put_echo_skb() call far enough.
+
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: 1598efe57b3e ("can: xilinx_can: refactor code in preparation for CAN FD support")
+Link: https://patch.msgid.link/20250822095002.168389-1-anssi.hannula@bitwise.fi
+[mkl: add "commit" in front of sha1 in patch description]
+[mkl: fix indention]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/xilinx_can.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 3f2e378199abb..5abe4af61655c 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
+
+- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+- else
+- can_put_echo_skb(skb, ndev, 0, 0);
+-
+- priv->tx_head++;
+-
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ data[1]);
+ }
+ }
++
++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++ else
++ can_put_echo_skb(skb, ndev, 0, 0);
++
++ priv->tx_head++;
+ }
+
+ /**
+--
+2.51.0
+
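The general rule illustrated by the move: once a callee takes ownership of an
object, as can_put_echo_skb() does with the skb, every access by the caller has
to happen before the hand-off. A standalone sketch with invented names:

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

 /* Takes ownership of buf, like can_put_echo_skb() takes the skb:
  * it may be freed during or right after this call.
  */
 static void hand_off(char *buf)
 {
 	printf("queued: %s\n", buf);
 	free(buf);
 }

 int main(void)
 {
 	char *buf = malloc(16);
 	size_t len;

 	if (!buf)
 		return 1;
 	strcpy(buf, "frame");

 	len = strlen(buf);	/* read everything needed *before*... */
 	hand_off(buf);		/* ...ownership is transferred */
 	/* touching buf past this point would be a use-after-free */
 	printf("len was %zu\n", len);
 	return 0;
 }
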
--- /dev/null
+From 5f8a3bae415d4435d896452f676e7ca9ac89f113 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 13:43:39 +0300
+Subject: dmaengine: idxd: Fix double free in idxd_setup_wqs()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 39aaa337449e71a41d4813be0226a722827ba606 ]
+
+The cleanup in idxd_setup_wqs() has had a couple of bugs because the error
+handling is a bit subtle. It's simpler to just rewrite it in a cleaner
+way. The issues here are:
+
+1) If "idxd->max_wqs" is <= 0 then we call put_device(conf_dev) when
+ "conf_dev" hasn't been initialized.
+2) If kzalloc_node() fails then again "conf_dev" is invalid. It's
+ either uninitialized or it points to the "conf_dev" from the
+ previous iteration so it leads to a double free.
+
+It's better to free partial loop iterations within the loop and then
+the unwinding at the end can handle whole loop iterations. I also
+renamed the labels to describe what the goto does and not where the goto
+was located.
+
+Fixes: 3fd2f4bc010c ("dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs")
+Reported-by: Colin Ian King <colin.i.king@gmail.com>
+Closes: https://lore.kernel.org/all/20250811095836.1642093-1-colin.i.king@gmail.com/
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/aJnJW3iYTDDCj9sk@stanley.mountain
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 33 +++++++++++++++++----------------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 40f4bf4467638..b559b0e18809e 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -189,27 +189,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ rc = -ENOMEM;
+- goto err_bitmap;
++ goto err_free_wqs;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
+ wq->id = i;
+ wq->idxd = idxd;
+- device_initialize(wq_confdev(wq));
++ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+- if (rc < 0)
+- goto err;
++ if (rc < 0) {
++ put_device(conf_dev);
++ kfree(wq);
++ goto err_unwind;
++ }
+
+ mutex_init(&wq->wq_lock);
+ init_waitqueue_head(&wq->err_queue);
+@@ -220,15 +223,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
++ kfree(wq->wqcfg);
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err_opcap_bmap;
++ goto err_unwind;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
+@@ -239,13 +247,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+
+ return 0;
+
+-err_opcap_bmap:
+- kfree(wq->wqcfg);
+-
+-err:
+- put_device(conf_dev);
+- kfree(wq);
+-
++err_unwind:
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+@@ -254,11 +256,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+-
+ }
+ bitmap_free(idxd->wq_enable_map);
+
+-err_bitmap:
++err_free_wqs:
+ kfree(idxd->wqs);
+
+ return rc;
+--
+2.51.0
+
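The rewritten error handling follows a common idiom: resources of the iteration
that failed are released right at the failure site, and the unwind label then
only has to undo fully completed iterations. A standalone sketch of that shape
(names invented):

 #include <stdio.h>
 #include <stdlib.h>

 /* Allocate n boxed ints. The failing iteration cleans up after itself
  * at the failure site; err_unwind then only undoes whole iterations.
  */
 static int setup(int ***out, int n)
 {
 	int **v = calloc(n, sizeof(*v));
 	int i;

 	if (!v)
 		return -1;

 	for (i = 0; i < n; i++) {
 		v[i] = malloc(sizeof(**v));
 		if (!v[i])
 			goto err_unwind;	/* nothing partial left over */
 		*v[i] = i;
 	}
 	*out = v;
 	return 0;

 err_unwind:
 	while (--i >= 0)	/* free completed iterations only */
 		free(v[i]);
 	free(v);
 	return -1;
 }

 int main(void)
 {
 	int **v;
 	int i;

 	if (setup(&v, 4))
 		return 1;
 	for (i = 0; i < 4; i++)
 		free(v[i]);
 	free(v);
 	puts("ok");
 	return 0;
 }
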
--- /dev/null
+From 5cee26d7f92c3e5702e3365d25e7375f2a341902 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:13 +0800
+Subject: dmaengine: idxd: Fix refcount underflow on module unload
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit b7cb9a034305d52222433fad10c3de10204f29e7 ]
+
+A recent refactor introduced a misplaced put_device() call, resulting in a
+reference count underflow during module unload.
+
+There is no need to add additional put_device() calls for idxd groups,
+engines, or workqueues. Although the commit claims: "Note, this also
+fixes the missing put_device() for idxd groups, engines, and wqs."
+
+It appears no such omission actually existed. The required cleanup is
+already handled by the call chain:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+Extend idxd_cleanup() to handle the remaining necessary cleanup and
+remove idxd_cleanup_internals(), which duplicates deallocation logic
+for idxd, engines, groups, and workqueues. Memory management is also
+properly handled through the Linux device model.
+
+Fixes: a409e919ca32 ("dmaengine: idxd: Refactor remove call with idxd_cleanup() helper")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-3-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 40cc9c070081f..40f4bf4467638 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -1292,7 +1292,10 @@ static void idxd_remove(struct pci_dev *pdev)
+ device_unregister(idxd_confdev(idxd));
+ idxd_shutdown(pdev);
+ idxd_device_remove_debugfs(idxd);
+- idxd_cleanup(idxd);
++ perfmon_pmu_remove(idxd);
++ idxd_cleanup_interrupts(idxd);
++ if (device_pasid_enabled(idxd))
++ idxd_disable_system_pasid(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+ pci_disable_device(pdev);
+--
+2.51.0
+
--- /dev/null
+From 50baf2d1695dc9efb6c6ee48c0a6f5d8d95ea11c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:12 +0800
+Subject: dmaengine: idxd: Remove improper idxd_free
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit f41c538881eec4dcf5961a242097d447f848cda6 ]
+
+The call to idxd_free() introduces a duplicate put_device() leading to a
+reference count underflow:
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 15 PID: 4428 at lib/refcount.c:28 refcount_warn_saturate+0xbe/0x110
+...
+Call Trace:
+ <TASK>
+ idxd_remove+0xe4/0x120 [idxd]
+ pci_device_remove+0x3f/0xb0
+ device_release_driver_internal+0x197/0x200
+ driver_detach+0x48/0x90
+ bus_remove_driver+0x74/0xf0
+ pci_unregister_driver+0x2e/0xb0
+ idxd_exit_module+0x34/0x7a0 [idxd]
+ __do_sys_delete_module.constprop.0+0x183/0x280
+ do_syscall_64+0x54/0xd70
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The idxd_unregister_devices() which is invoked at the very beginning of
+idxd_remove(), already takes care of the necessary put_device() through the
+following call path:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+In addition, when CONFIG_DEBUG_KOBJECT_RELEASE is enabled, put_device() may
+trigger asynchronous cleanup via schedule_delayed_work(). If idxd_free() is
+called immediately after, it can result in a use-after-free.
+
+Remove the improper idxd_free() to avoid both the refcount underflow and
+potential memory corruption during module unload.
+
+Fixes: d5449ff1b04d ("dmaengine: idxd: Add missing idxd cleanup to fix memory leak in remove call")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-2-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 80355d03004db..40cc9c070081f 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -1295,7 +1295,6 @@ static void idxd_remove(struct pci_dev *pdev)
+ idxd_cleanup(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+- idxd_free(idxd);
+ pci_disable_device(pdev);
+ }
+
+--
+2.51.0
+
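The failure mode here is the generic one: an extra put for a reference that was
already dropped elsewhere. A standalone analogue with an invented refcount type;
the assert plays the role of refcount_warn_saturate():

 #include <assert.h>
 #include <stdio.h>

 struct obj { int refs; };

 static void get(struct obj *o) { o->refs++; }

 static void put(struct obj *o)
 {
 	assert(o->refs > 0 && "refcount underflow");
 	if (--o->refs == 0)
 		printf("released\n");
 }

 int main(void)
 {
 	struct obj o = { .refs = 1 };

 	get(&o);
 	put(&o);	/* pairs the get() above */
 	put(&o);	/* drops the initial ref: prints "released" */
 	/* one more put(&o) here would trip the assert, the userspace
 	 * analogue of refcount_warn_saturate()
 	 */
 	return 0;
 }
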
--- /dev/null
+From b38776368bd6cc8ffbbede69dd8a70791b8bca6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features in Clang builds.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 3ed406f08c442..552be71db6c47 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2064,8 +2064,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
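The allocation-size pitfall in isolation: for a pointer to a two-element array
such as s8 (*)[2] (s8 is the kernel's typedef for signed char), each element is
two bytes, so sizing the allocation with sizeof(s8) provides only half the
needed memory. A standalone demonstration:

 #include <stdio.h>

 int main(void)
 {
 	/* s8 in the kernel is a typedef for signed char */
 	signed char (*queue_priority_map)[2];

 	printf("per-element size used by the fix: %zu\n",
 	       sizeof(*queue_priority_map));	/* 2 */
 	printf("per-element size used by the bug: %zu\n",
 	       sizeof(signed char));		/* 1 */
 	return 0;
 }
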
--- /dev/null
+From f2407e1b78b3fbcb6ae35c5f1cf46d7081eb9f6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 20:17:09 -0700
+Subject: docs: networking: can: change bcm_msg_head frames member to support
+ flexible array
+
+From: Alex Tran <alex.t.tran@gmail.com>
+
+[ Upstream commit 641427d5bf90af0625081bf27555418b101274cd ]
+
+The documentation of the 'bcm_msg_head' struct does not match how
+it is defined in 'bcm.h'. Changed the frames member to a flexible array,
+matching the definition in the header file.
+
+See commit 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with
+flexible-array members")
+
+Signed-off-by: Alex Tran <alex.t.tran@gmail.com>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250904031709.1426895-1-alex.t.tran@gmail.com
+Fixes: 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with flexible-array members")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217783
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/can.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index b018ce3463926..515a3876f58cf 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -742,7 +742,7 @@ The broadcast manager sends responses to user space in the same form:
+ struct timeval ival1, ival2; /* count and subsequent interval */
+ canid_t can_id; /* unique can_id for task */
+ __u32 nframes; /* number of can_frames following */
+- struct can_frame frames[0];
++ struct can_frame frames[];
+ };
+
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+--
+2.51.0
+
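For reference, this is how a flexible array member like frames[] is sized: it
contributes nothing to the sizeof of the struct, and the payload is allocated
explicitly behind the header. The types below are cut-down, invented stand-ins
for the uapi definitions, not the real linux/can/bcm.h layout:

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

 /* Cut-down stand-ins for the uapi types, for illustration only. */
 typedef unsigned int canid_t;
 struct can_frame {
 	canid_t can_id;
 	unsigned char len;
 	unsigned char data[8];
 };

 struct msg_head {
 	unsigned int nframes;		/* number of frames following */
 	struct can_frame frames[];	/* flexible array member */
 };

 int main(void)
 {
 	unsigned int n = 3;
 	struct msg_head *msg =
 		malloc(sizeof(*msg) + n * sizeof(msg->frames[0]));

 	if (!msg)
 		return 1;
 	msg->nframes = n;
 	memset(msg->frames, 0, n * sizeof(msg->frames[0]));
 	printf("header: %zu bytes, frames: %zu bytes each\n",
 	       sizeof(*msg), sizeof(msg->frames[0]));
 	free(msg);
 	return 0;
 }
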
--- /dev/null
+From fa580c8e4b4d796cd231d0b7d57439402efa1bbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 09:11:12 -0400
+Subject: drm/amd/display: use udelay rather than fsleep
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 1d66c3f2b8c0b5c51f3f4fe29b362c9851190c5a ]
+
+This function can be called from an atomic context so we can't use
+fsleep().
+
+Fixes: 01f60348d8fb ("drm/amd/display: Fix 'failed to blank crtc!'")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4549
+Cc: Wen Chen <Wen.Chen3@amd.com>
+Cc: Fangzhi Zuo <jerry.zuo@amd.com>
+Cc: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 27e4dc2c0543fd1808cc52bd888ee1e0533c4a2e)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index cdb8685ae7d71..454e362ff096a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -955,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing(
+ return DC_ERROR_UNEXPECTED;
+ }
+
+- fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
++ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+
+ params.vertical_total_min = stream->adjust.v_total_min;
+ params.vertical_total_max = stream->adjust.v_total_max;
+--
+2.51.0
+
--- /dev/null
+From 4b118c51c709fa820c7d381ba175687e81218953 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 12:21:33 -0700
+Subject: drm/panthor: validate group queue count
+
+From: Chia-I Wu <olvaffe@gmail.com>
+
+[ Upstream commit a00f2015acdbd8a4b3d2382eaeebe11db1925fad ]
+
+A panthor group can have at most MAX_CS_PER_CSG panthor queues.
+
+Fixes: 4bdca11507928 ("drm/panthor: Add the driver frontend block")
+Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> # v1
+Reviewed-by: Steven Price <steven.price@arm.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Link: https://lore.kernel.org/r/20250903192133.288477-1-olvaffe@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panthor/panthor_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
+index 6200cad22563a..0f4ab9e5ef95c 100644
+--- a/drivers/gpu/drm/panthor/panthor_drv.c
++++ b/drivers/gpu/drm/panthor/panthor_drv.c
+@@ -1093,7 +1093,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ struct drm_panthor_queue_create *queue_args;
+ int ret;
+
+- if (!args->queues.count)
++ if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
+--
+2.51.0
+
--- /dev/null
+From a336264115f3b83685f6e96848d60a4c6301cff5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 12:35:21 +0200
+Subject: drm/xe/configfs: Don't touch survivability_mode on fini
+
+From: Michal Wajdeczko <michal.wajdeczko@intel.com>
+
+[ Upstream commit 7934fdc25ad642ab3dbc16d734ab58638520ea60 ]
+
+This is a user controlled configfs attribute, we should not
+modify that outside the configfs attr.store() implementation.
+
+Fixes: bc417e54e24b ("drm/xe: Enable configfs support for survivability mode")
+Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: Riana Tauro <riana.tauro@intel.com>
+Reviewed-by: Stuart Summers <stuart.summers@intel.com>
+Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Link: https://lore.kernel.org/r/20250904103521.7130-1-michal.wajdeczko@intel.com
+(cherry picked from commit 079a5c83dbd23db7a6eed8f558cf75e264d8a17b)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_survivability_mode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
+index 1f710b3fc599b..5ae3d70e45167 100644
+--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
++++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
+@@ -40,6 +40,8 @@
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
++ * It is the responsibility of the user to clear the mode once firmware flash is complete.
++ *
+ * Refer :ref:`xe_configfs` for more details on how to use configfs
+ *
+ * Survivability mode is indicated by the below admin-only readable sysfs which provides additional
+@@ -146,7 +148,6 @@ static void xe_survivability_mode_fini(void *arg)
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct device *dev = &pdev->dev;
+
+- xe_configfs_clear_survivability_mode(pdev);
+ sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8a6a517b8058468c8fb138de0a3994255a4f92fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 23:11:57 +0800
+Subject: erofs: fix invalid algorithm for encoded extents
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 131897c65e2b86cf14bec7379f44aa8fbb407526 ]
+
+The current algorithm sanity checks do not properly apply to new
+encoded extents.
+
+Unify the algorithm check with Z_EROFS_COMPRESSION(_RUNTIME)_MAX
+and ensure consistency with sbi->available_compr_algs.
+
+Reported-and-tested-by: syzbot+5a398eb460ddaa6f242f@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/68a8bd20.050a0220.37038e.005a.GAE@google.com
+Fixes: 1d191b4ca51d ("erofs: implement encoded extent metadata")
+Thanks-to: Edward Adam Davis <eadavis@qq.com>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zmap.c | 67 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 30 deletions(-)
+
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index cd33a5d29406c..14d01474ad9dd 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -403,10 +403,10 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
+ .inode = inode,
+ .map = map,
+ };
+- int err = 0;
+- unsigned int endoff, afmt;
++ unsigned int endoff;
+ unsigned long initial_lcn;
+ unsigned long long ofs, end;
++ int err;
+
+ ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
+ if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
+@@ -502,20 +502,15 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+- afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
+- Z_EROFS_COMPRESSION_INTERLACED :
+- Z_EROFS_COMPRESSION_SHIFTED;
++ if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
++ map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
++ else
++ map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
++ } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
++ map->m_algorithmformat = vi->z_algorithmtype[1];
+ } else {
+- afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+- vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+- if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+- erofs_err(sb, "inconsistent algorithmtype %u for nid %llu",
+- afmt, vi->nid);
+- err = -EFSCORRUPTED;
+- goto unmap_out;
+- }
++ map->m_algorithmformat = vi->z_algorithmtype[0];
+ }
+- map->m_algorithmformat = afmt;
+
+ if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
+ ((flags & EROFS_GET_BLOCKS_READMORE) &&
+@@ -645,9 +640,9 @@ static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+ {
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+- int err, headnr;
+- erofs_off_t pos;
+ struct z_erofs_map_header *h;
++ erofs_off_t pos;
++ int err = 0;
+
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+ /*
+@@ -661,7 +656,6 @@ static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+- err = 0;
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+@@ -698,15 +692,6 @@ static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+ else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
+ vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+
+- headnr = 0;
+- if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
+- vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
+- erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
+- headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
+- err = -EOPNOTSUPP;
+- goto out_unlock;
+- }
+-
+ if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
+ vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
+ Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
+@@ -745,6 +730,30 @@ static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+ return err;
+ }
+
++static int z_erofs_map_sanity_check(struct inode *inode,
++ struct erofs_map_blocks *map)
++{
++ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
++
++ if (!(map->m_flags & EROFS_MAP_ENCODED))
++ return 0;
++ if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
++ erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
++ map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
++ return -EOPNOTSUPP;
++ }
++ if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
++ !(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
++ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
++ map->m_algorithmformat, EROFS_I(inode)->nid);
++ return -EFSCORRUPTED;
++ }
++ if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
++ map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
++ return -EOPNOTSUPP;
++ return 0;
++}
++
+ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ int flags)
+ {
+@@ -765,10 +774,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ else
+ err = z_erofs_map_blocks_fo(inode, map, flags);
+ }
+- if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
+- unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
+- map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
+- err = -EOPNOTSUPP;
++ if (!err)
++ err = z_erofs_map_sanity_check(inode, map);
+ if (err)
+ map->m_llen = 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From b539542f0b2e50179c97463c15d35462bb4341de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 13:33:40 +0800
+Subject: erofs: fix runtime warning on truncate_folio_batch_exceptionals()
+
+From: Yuezhang Mo <Yuezhang.Mo@sony.com>
+
+[ Upstream commit 181993bb0d626cf88cc803f4356ce5c5abe86278 ]
+
+Commit 0e2f80afcfa6("fs/dax: ensure all pages are idle prior to
+filesystem unmount") introduced the WARN_ON_ONCE to capture whether
+the filesystem has removed all DAX entries or not and applied the
+fix to xfs and ext4.
+
+Apply the missed fix on erofs to fix the runtime warning:
+
+[ 5.266254] ------------[ cut here ]------------
+[ 5.266274] WARNING: CPU: 6 PID: 3109 at mm/truncate.c:89 truncate_folio_batch_exceptionals+0xff/0x260
+[ 5.266294] Modules linked in:
+[ 5.266999] CPU: 6 UID: 0 PID: 3109 Comm: umount Tainted: G S 6.16.0+ #6 PREEMPT(voluntary)
+[ 5.267012] Tainted: [S]=CPU_OUT_OF_SPEC
+[ 5.267017] Hardware name: Dell Inc. OptiPlex 5000/05WXFV, BIOS 1.5.1 08/24/2022
+[ 5.267024] RIP: 0010:truncate_folio_batch_exceptionals+0xff/0x260
+[ 5.267076] Code: 00 00 41 39 df 7f 11 eb 78 83 c3 01 49 83 c4 08 41 39 df 74 6c 48 63 f3 48 83 fe 1f 0f 83 3c 01 00 00 43 f6 44 26 08 01 74 df <0f> 0b 4a 8b 34 22 4c 89 ef 48 89 55 90 e8 ff 54 1f 00 48 8b 55 90
+[ 5.267083] RSP: 0018:ffffc900013f36c8 EFLAGS: 00010202
+[ 5.267095] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[ 5.267101] RDX: ffffc900013f3790 RSI: 0000000000000000 RDI: ffff8882a1407898
+[ 5.267108] RBP: ffffc900013f3740 R08: 0000000000000000 R09: 0000000000000000
+[ 5.267113] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
+[ 5.267119] R13: ffff8882a1407ab8 R14: ffffc900013f3888 R15: 0000000000000001
+[ 5.267125] FS: 00007aaa8b437800(0000) GS:ffff88850025b000(0000) knlGS:0000000000000000
+[ 5.267132] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 5.267138] CR2: 00007aaa8b3aac10 CR3: 000000024f764000 CR4: 0000000000f52ef0
+[ 5.267144] PKRU: 55555554
+[ 5.267150] Call Trace:
+[ 5.267154] <TASK>
+[ 5.267181] truncate_inode_pages_range+0x118/0x5e0
+[ 5.267193] ? save_trace+0x54/0x390
+[ 5.267296] truncate_inode_pages_final+0x43/0x60
+[ 5.267309] evict+0x2a4/0x2c0
+[ 5.267339] dispose_list+0x39/0x80
+[ 5.267352] evict_inodes+0x150/0x1b0
+[ 5.267376] generic_shutdown_super+0x41/0x180
+[ 5.267390] kill_block_super+0x1b/0x50
+[ 5.267402] erofs_kill_sb+0x81/0x90 [erofs]
+[ 5.267436] deactivate_locked_super+0x32/0xb0
+[ 5.267450] deactivate_super+0x46/0x60
+[ 5.267460] cleanup_mnt+0xc3/0x170
+[ 5.267475] __cleanup_mnt+0x12/0x20
+[ 5.267485] task_work_run+0x5d/0xb0
+[ 5.267499] exit_to_user_mode_loop+0x144/0x170
+[ 5.267512] do_syscall_64+0x2b9/0x7c0
+[ 5.267523] ? __lock_acquire+0x665/0x2ce0
+[ 5.267535] ? __lock_acquire+0x665/0x2ce0
+[ 5.267560] ? lock_acquire+0xcd/0x300
+[ 5.267573] ? find_held_lock+0x31/0x90
+[ 5.267582] ? mntput_no_expire+0x97/0x4e0
+[ 5.267606] ? mntput_no_expire+0xa1/0x4e0
+[ 5.267625] ? mntput+0x24/0x50
+[ 5.267634] ? path_put+0x1e/0x30
+[ 5.267647] ? do_faccessat+0x120/0x2f0
+[ 5.267677] ? do_syscall_64+0x1a2/0x7c0
+[ 5.267686] ? from_kgid_munged+0x17/0x30
+[ 5.267703] ? from_kuid_munged+0x13/0x30
+[ 5.267711] ? __do_sys_getuid+0x3d/0x50
+[ 5.267724] ? do_syscall_64+0x1a2/0x7c0
+[ 5.267732] ? irqentry_exit+0x77/0xb0
+[ 5.267743] ? clear_bhb_loop+0x30/0x80
+[ 5.267752] ? clear_bhb_loop+0x30/0x80
+[ 5.267765] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 5.267772] RIP: 0033:0x7aaa8b32a9fb
+[ 5.267781] Code: c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 f3 0f 1e fa 31 f6 e9 05 00 00 00 0f 1f 44 00 00 f3 0f 1e fa b8 a6 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 05 c3 0f 1f 40 00 48 8b 15 e9 83 0d 00 f7 d8
+[ 5.267787] RSP: 002b:00007ffd7c4c9468 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6
+[ 5.267796] RAX: 0000000000000000 RBX: 00005a61592a8b00 RCX: 00007aaa8b32a9fb
+[ 5.267802] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00005a61592b2080
+[ 5.267806] RBP: 00007ffd7c4c9540 R08: 00007aaa8b403b20 R09: 0000000000000020
+[ 5.267812] R10: 0000000000000001 R11: 0000000000000246 R12: 00005a61592a8c00
+[ 5.267817] R13: 0000000000000000 R14: 00005a61592b2080 R15: 00005a61592a8f10
+[ 5.267849] </TASK>
+[ 5.267854] irq event stamp: 4721
+[ 5.267859] hardirqs last enabled at (4727): [<ffffffff814abf50>] __up_console_sem+0x90/0xa0
+[ 5.267873] hardirqs last disabled at (4732): [<ffffffff814abf35>] __up_console_sem+0x75/0xa0
+[ 5.267884] softirqs last enabled at (3044): [<ffffffff8132adb3>] kernel_fpu_end+0x53/0x70
+[ 5.267895] softirqs last disabled at (3042): [<ffffffff8132b5f4>] kernel_fpu_begin_mask+0xc4/0x120
+[ 5.267905] ---[ end trace 0000000000000000 ]---
+
+Fixes: bde708f1a65d ("fs/dax: always remove DAX page-cache entries when breaking layouts")
+Signed-off-by: Yuezhang Mo <Yuezhang.Mo@sony.com>
+Reviewed-by: Friendy Su <friendy.su@sony.com>
+Reviewed-by: Daniel Palmer <daniel.palmer@sony.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/super.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 7cc74ef4be031..06c8981eea7f8 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -999,10 +999,22 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+ return 0;
+ }
+
++static void erofs_evict_inode(struct inode *inode)
++{
++#ifdef CONFIG_FS_DAX
++ if (IS_DAX(inode))
++ dax_break_layout_final(inode);
++#endif
++
++ truncate_inode_pages_final(&inode->i_data);
++ clear_inode(inode);
++}
++
+ const struct super_operations erofs_sops = {
+ .put_super = erofs_put_super,
+ .alloc_inode = erofs_alloc_inode,
+ .free_inode = erofs_free_inode,
++ .evict_inode = erofs_evict_inode,
+ .statfs = erofs_statfs,
+ .show_options = erofs_show_options,
+ };
+--
+2.51.0
+
--- /dev/null
+From 9bdd4a06ae0e9d633541775e8b385d60f90bc24c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jun 2025 16:54:59 +0800
+Subject: erofs: get rid of {get,put}_page() for ztailpacking data
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 96debe8c27ee2494bbd78abf3744745a84a745f1 ]
+
+The compressed data for the ztailpacking feature is fetched from
+the metadata inode (e.g., bd_inode), which is folio-based.
+
+Therefore, the folio interface should be used instead.
+
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250626085459.339830-1-hsiangkao@linux.alibaba.com
+Stable-dep-of: 131897c65e2b ("erofs: fix invalid algorithm for encoded extents")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zdata.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 9bb53f00c2c62..33c61f3b667c3 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -805,6 +805,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
+ struct erofs_map_blocks *map = &fe->map;
+ struct super_block *sb = fe->inode->i_sb;
+ struct z_erofs_pcluster *pcl = NULL;
++ void *ptr;
+ int ret;
+
+ DBG_BUGON(fe->pcl);
+@@ -854,15 +855,13 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
+ /* bind cache first when cached decompression is preferred */
+ z_erofs_bind_cache(fe);
+ } else {
+- void *mptr;
+-
+- mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, false);
+- if (IS_ERR(mptr)) {
+- ret = PTR_ERR(mptr);
++ ptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, false);
++ if (IS_ERR(ptr)) {
++ ret = PTR_ERR(ptr);
+ erofs_err(sb, "failed to get inline data %d", ret);
+ return ret;
+ }
+- get_page(map->buf.page);
++ folio_get(page_folio(map->buf.page));
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+ fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+@@ -1325,9 +1324,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
+
+ /* must handle all compressed pages before actual file pages */
+ if (pcl->from_meta) {
+- page = pcl->compressed_bvecs[0].page;
++ folio_put(page_folio(pcl->compressed_bvecs[0].page));
+ WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
+- put_page(page);
+ } else {
+ /* managed folios are still left in compressed_bvecs[] */
+ for (i = 0; i < pclusterpages; ++i) {
+--
+2.51.0
+
--- /dev/null
+From 6d91e3d1f3598e1e2b96c433328c2fd74751de5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 17:09:06 +0800
+Subject: erofs: remove need_kmap in erofs_read_metabuf()
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 5e744cb61536bb4e37caca9c5e84feef638782be ]
+
+ - need_kmap is always true except for a ztailpacking case; thus, just
+ open-code that one;
+
+ - The upcoming metadata compression will add a new boolean, so simplify
+ this first.
+
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20250714090907.4095645-1-hsiangkao@linux.alibaba.com
+Stable-dep-of: 131897c65e2b ("erofs: fix invalid algorithm for encoded extents")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/data.c | 8 ++++----
+ fs/erofs/fileio.c | 2 +-
+ fs/erofs/fscache.c | 2 +-
+ fs/erofs/inode.c | 8 ++++----
+ fs/erofs/internal.h | 2 +-
+ fs/erofs/super.c | 4 ++--
+ fs/erofs/zdata.c | 5 +++--
+ fs/erofs/zmap.c | 12 ++++++------
+ 8 files changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index 16e4a6bd9b973..dd7d86809c188 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -65,10 +65,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
+ }
+
+ void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
+- erofs_off_t offset, bool need_kmap)
++ erofs_off_t offset)
+ {
+ erofs_init_metabuf(buf, sb);
+- return erofs_bread(buf, offset, need_kmap);
++ return erofs_bread(buf, offset, true);
+ }
+
+ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
+@@ -118,7 +118,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
+ pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
+ vi->xattr_isize, unit) + unit * chunknr;
+
+- idx = erofs_read_metabuf(&buf, sb, pos, true);
++ idx = erofs_read_metabuf(&buf, sb, pos);
+ if (IS_ERR(idx)) {
+ err = PTR_ERR(idx);
+ goto out;
+@@ -299,7 +299,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+
+ iomap->type = IOMAP_INLINE;
+- ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, true);
++ ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ iomap->inline_data = ptr;
+diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
+index 91781718199e2..3ee082476c8c5 100644
+--- a/fs/erofs/fileio.c
++++ b/fs/erofs/fileio.c
+@@ -115,7 +115,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+ void *src;
+
+ src = erofs_read_metabuf(&buf, inode->i_sb,
+- map->m_pa + ofs, true);
++ map->m_pa + ofs);
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ break;
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index 34517ca9df915..9a8ee646e51d9 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -274,7 +274,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
+ size_t size = map.m_llen;
+ void *src;
+
+- src = erofs_read_metabuf(&buf, sb, map.m_pa, true);
++ src = erofs_read_metabuf(&buf, sb, map.m_pa);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index a0ae0b4f7b012..47215c5e33855 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -39,10 +39,10 @@ static int erofs_read_inode(struct inode *inode)
+ void *ptr;
+ int err = 0;
+
+- ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), true);
++ ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr));
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+- erofs_err(sb, "failed to get inode (nid: %llu) page, err %d",
++ erofs_err(sb, "failed to read inode meta block (nid: %llu): %d",
+ vi->nid, err);
+ goto err_out;
+ }
+@@ -78,10 +78,10 @@ static int erofs_read_inode(struct inode *inode)
+
+ memcpy(&copied, dic, gotten);
+ ptr = erofs_read_metabuf(&buf, sb,
+- erofs_pos(sb, blkaddr + 1), true);
++ erofs_pos(sb, blkaddr + 1));
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+- erofs_err(sb, "failed to get inode payload block (nid: %llu), err %d",
++ erofs_err(sb, "failed to read inode payload block (nid: %llu): %d",
+ vi->nid, err);
+ goto err_out;
+ }
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 06b867d2fc3b7..a7699114f6fe6 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -385,7 +385,7 @@ void erofs_put_metabuf(struct erofs_buf *buf);
+ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap);
+ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
+ void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
+- erofs_off_t offset, bool need_kmap);
++ erofs_off_t offset);
+ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
+ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index cad87e4d66943..7cc74ef4be031 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -141,7 +141,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ struct erofs_deviceslot *dis;
+ struct file *file;
+
+- dis = erofs_read_metabuf(buf, sb, *pos, true);
++ dis = erofs_read_metabuf(buf, sb, *pos);
+ if (IS_ERR(dis))
+ return PTR_ERR(dis);
+
+@@ -268,7 +268,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ void *data;
+ int ret;
+
+- data = erofs_read_metabuf(&buf, sb, 0, true);
++ data = erofs_read_metabuf(&buf, sb, 0);
+ if (IS_ERR(data)) {
+ erofs_err(sb, "cannot read erofs superblock");
+ return PTR_ERR(data);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 33c61f3b667c3..e8f30eee29b44 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -855,10 +855,11 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
+ /* bind cache first when cached decompression is preferred */
+ z_erofs_bind_cache(fe);
+ } else {
+- ptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, false);
++ erofs_init_metabuf(&map->buf, sb);
++ ptr = erofs_bread(&map->buf, map->m_pa, false);
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+- erofs_err(sb, "failed to get inline data %d", ret);
++ erofs_err(sb, "failed to get inline folio %d", ret);
+ return ret;
+ }
+ folio_get(page_folio(map->buf.page));
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index f1a15ff22147b..301afd9be4e30 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -31,7 +31,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
+ struct z_erofs_lcluster_index *di;
+ unsigned int advise;
+
+- di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, true);
++ di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos);
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+ m->lcn = lcn;
+@@ -146,7 +146,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ else
+ return -EOPNOTSUPP;
+
+- in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, true);
++ in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos);
+ if (IS_ERR(in))
+ return PTR_ERR(in);
+
+@@ -551,7 +551,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
+ map->m_flags = 0;
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
+- ext = erofs_read_metabuf(&map->buf, sb, pos, true);
++ ext = erofs_read_metabuf(&map->buf, sb, pos);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ pa = le64_to_cpu(*(__le64 *)ext);
+@@ -564,7 +564,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
+ }
+
+ for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
+- ext = erofs_read_metabuf(&map->buf, sb, pos, true);
++ ext = erofs_read_metabuf(&map->buf, sb, pos);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ map->m_plen = le32_to_cpu(ext->plen);
+@@ -584,7 +584,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
+ for (l = 0, r = vi->z_extents; l < r; ) {
+ mid = l + (r - l) / 2;
+ ext = erofs_read_metabuf(&map->buf, sb,
+- pos + mid * recsz, true);
++ pos + mid * recsz);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+
+@@ -667,7 +667,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ goto out_unlock;
+
+ pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+- h = erofs_read_metabuf(&buf, sb, pos, true);
++ h = erofs_read_metabuf(&buf, sb, pos);
+ if (IS_ERR(h)) {
+ err = PTR_ERR(h);
+ goto out_unlock;
+--
+2.51.0
+
--- /dev/null
+From 89910a4540693e4d3825d9c83b40ec044fa9d9a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Jul 2025 14:41:52 +0800
+Subject: erofs: unify meta buffers in z_erofs_fill_inode()
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit df50848bcd9f17e4e60e6d5823d0e8fe8982bbab ]
+
+There is no need to keep additional local metabufs since we already
+have one in `struct erofs_map_blocks`.
+
+This was actually a leftover when applying meta buffers to zmap
+operations, see commit 09c543798c3c ("erofs: use meta buffers for
+zmap operations").
+
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250716064152.3537457-1-hsiangkao@linux.alibaba.com
+Stable-dep-of: 131897c65e2b ("erofs: fix invalid algorithm for encoded extents")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zmap.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 301afd9be4e30..cd33a5d29406c 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -641,13 +641,12 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
+ return 0;
+ }
+
+-static int z_erofs_fill_inode_lazy(struct inode *inode)
++static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+ {
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ int err, headnr;
+ erofs_off_t pos;
+- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ struct z_erofs_map_header *h;
+
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+@@ -667,7 +666,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ goto out_unlock;
+
+ pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+- h = erofs_read_metabuf(&buf, sb, pos);
++ h = erofs_read_metabuf(&map->buf, sb, pos);
+ if (IS_ERR(h)) {
+ err = PTR_ERR(h);
+ goto out_unlock;
+@@ -705,7 +704,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
+ headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
+ err = -EOPNOTSUPP;
+- goto out_put_metabuf;
++ goto out_unlock;
+ }
+
+ if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
+@@ -714,7 +713,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+- goto out_put_metabuf;
++ goto out_unlock;
+ }
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
+ !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
+@@ -722,27 +721,25 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+- goto out_put_metabuf;
++ goto out_unlock;
+ }
+
+ if (vi->z_idata_size ||
+ (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+- struct erofs_map_blocks map = {
++ struct erofs_map_blocks tm = {
+ .buf = __EROFS_BUF_INITIALIZER
+ };
+
+- err = z_erofs_map_blocks_fo(inode, &map,
++ err = z_erofs_map_blocks_fo(inode, &tm,
+ EROFS_GET_BLOCKS_FINDTAIL);
+- erofs_put_metabuf(&map.buf);
++ erofs_put_metabuf(&tm.buf);
+ if (err < 0)
+- goto out_put_metabuf;
++ goto out_unlock;
+ }
+ done:
+ /* paired with smp_mb() at the beginning of the function */
+ smp_mb();
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+-out_put_metabuf:
+- erofs_put_metabuf(&buf);
+ out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
+ return err;
+@@ -760,7 +757,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ map->m_la = inode->i_size;
+ map->m_flags = 0;
+ } else {
+- err = z_erofs_fill_inode_lazy(inode);
++ err = z_erofs_fill_inode(inode, map);
+ if (!err) {
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+ (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
+--
+2.51.0
+
--- /dev/null
+From 40403ca0b01c06957b1dec9f5579506b5760f5f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 06:57:27 -0700
+Subject: genetlink: fix genl_bind() invoking bind() after -EPERM
+
+From: Alok Tiwari <alok.a.tiwari@oracle.com>
+
+[ Upstream commit 1dbfb0363224f6da56f6655d596dc5097308d6f5 ]
+
+Per family bind/unbind callbacks were introduced to allow families
+to track multicast group consumer presence, e.g. to start or stop
+producing events depending on listeners.
+
+However, in genl_bind() the bind() callback was invoked even if
+capability checks failed and ret was set to -EPERM. This means that
+callbacks could run on behalf of unauthorized callers while the
+syscall still returned failure to user space.
+
+Fix this by only invoking bind() after the "if (ret) break;" check,
+i.e. after the permission checks have succeeded.
+
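+The shape of the fix, reduced to a standalone sketch (illustrative
+names, not the genetlink code): bail out of the loop before any
+per-family callback once a permission check has failed.
+
+  #include <stdio.h>
+
+  static int check_perm(int id)  { return id == 2 ? -1 : 0; /* -EPERM */ }
+  static void bind_cb(int id)    { printf("bind(%d)\n", id); }
+
+  int main(void)
+  {
+          int ret = 0;
+
+          for (int id = 0; id < 4; id++) {
+                  ret = check_perm(id);
+                  if (ret)
+                          break;          /* callback never runs */
+                  bind_cb(id);            /* only for authorized ids */
+          }
+          return ret;
+  }
+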
+Fixes: 3de21a8990d3 ("genetlink: Add per family bind/unbind callbacks")
+Signed-off-by: Alok Tiwari <alok.a.tiwari@oracle.com>
+Link: https://patch.msgid.link/20250905135731.3026965-1-alok.a.tiwari@oracle.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netlink/genetlink.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 104732d345434..978c129c60950 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
+ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+ ret = -EPERM;
+
++ if (ret)
++ break;
++
+ if (family->bind)
+ family->bind(i);
+
+--
+2.51.0
+
--- /dev/null
+From 6cfb5733e8ca24a128a4a45062105296cd15481a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:33 +0000
+Subject: hsr: hold rcu and dev lock for hsr_get_port_ndev
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 847748fc66d08a89135a74e29362a66ba4e3ab15 ]
+
+hsr_get_port_ndev() calls hsr_for_each_port(), which needs to be called
+under the RCU read lock. On the other hand, before returning the port
+device, we need to hold a reference to it to avoid a use-after-free in
+the caller.
+
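+The resulting pattern, reduced to a plain C sketch with made-up names
+(the kernel primitives are rcu_read_lock()/rcu_read_unlock() and
+dev_hold()/dev_put()): take the reference while the lookup is still
+protected, so the pointer remains valid after the lock is dropped.
+
+  #include <stdio.h>
+
+  struct dev { int refcnt; };
+
+  static struct dev table[2] = { { 1 }, { 1 } };
+
+  static struct dev *get_dev(int i)
+  {
+          /* rcu_read_lock() would go here */
+          struct dev *d = &table[i];
+          d->refcnt++;            /* dev_hold() analogue */
+          /* rcu_read_unlock() here; d is still safe to use */
+          return d;
+  }
+
+  static void put_dev(struct dev *d)
+  {
+          d->refcnt--;            /* dev_put() analogue */
+  }
+
+  int main(void)
+  {
+          struct dev *d = get_dev(1);
+          printf("refcnt=%d\n", d->refcnt);       /* 2 */
+          put_dev(d);
+          return 0;
+  }
+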
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Fixes: 9c10dd8eed74 ("net: hsr: Create and export hsr_get_port_ndev()")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-4-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 20 ++++++++++++++------
+ net/hsr/hsr_device.c | 7 ++++++-
+ 2 files changed, 20 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index f436d7cf565a1..1a9cc8206430b 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -691,7 +691,7 @@ static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+
+ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+ {
+- struct net_device *real_dev;
++ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+@@ -700,11 +700,15 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+- if (!emac)
++ port_dev = hsr_get_port_ndev(real_dev, i);
++ emac = netdev_priv(port_dev);
++ if (!emac) {
++ dev_put(port_dev);
+ return -EINVAL;
++ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
++ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+@@ -716,7 +720,7 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+
+ static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
+ {
+- struct net_device *real_dev;
++ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+@@ -725,11 +729,15 @@ static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+- if (!emac)
++ port_dev = hsr_get_port_ndev(real_dev, i);
++ emac = netdev_priv(port_dev);
++ if (!emac) {
++ dev_put(port_dev);
+ return -EINVAL;
++ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
++ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 702da1f9aaa90..fbbc3ccf9df64 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -675,9 +675,14 @@ struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ struct hsr_priv *hsr = netdev_priv(ndev);
+ struct hsr_port *port;
+
++ rcu_read_lock();
+ hsr_for_each_port(hsr, port)
+- if (port->type == pt)
++ if (port->type == pt) {
++ dev_hold(port->dev);
++ rcu_read_unlock();
+ return port->dev;
++ }
++ rcu_read_unlock();
+ return NULL;
+ }
+ EXPORT_SYMBOL(hsr_get_port_ndev);
+--
+2.51.0
+
--- /dev/null
+From d2ac7211fcdc96a131c465d1b8206ba985fbab14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:32 +0000
+Subject: hsr: use hsr_for_each_port_rtnl in hsr_port_get_hsr
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 393c841fe4333cdd856d0ca37b066d72746cfaa6 ]
+
+hsr_port_get_hsr() iterates over ports using hsr_for_each_port(),
+but many of its callers do not hold the required RCU lock.
+
+Switch to hsr_for_each_port_rtnl(), since most callers already hold
+the rtnl lock. After review, all callers are covered by either the rtnl
+lock or the RCU lock, except hsr_dev_xmit(). Fix this by adding an
+RCU read lock there.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 3 +++
+ net/hsr/hsr_main.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index bce7b4061ce08..702da1f9aaa90 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -226,6 +226,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
++ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+@@ -238,6 +239,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+ }
++ rcu_read_unlock();
++
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index ac1eb1db1a52b..bc94b07101d80 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -134,7 +134,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 5272840ed3b335b90e92b230e5940fb79ef63df2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:31 +0000
+Subject: hsr: use rtnl lock when iterating over ports
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 8884c693991333ae065830554b9b0c96590b1bb2 ]
+
+hsr_for_each_port is called in many places without holding the RCU read
+lock; this may trigger warnings on debug kernels. Most of the callers
+actually hold the rtnl lock. So add a new helper hsr_for_each_port_rtnl
+to allow callers in suitable contexts to iterate ports safely without
+explicit RCU locking.
+
+This patch only fixes the callers that hold the rtnl lock. The remaining
+callers will be fixed in later patches.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 18 +++++++++---------
+ net/hsr/hsr_main.c | 2 +-
+ net/hsr/hsr_main.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 88657255fec12..bce7b4061ce08 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -49,7 +49,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+
+ ASSERT_RTNL();
+
+- hsr_for_each_port(master->hsr, port) {
++ hsr_for_each_port_rtnl(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+@@ -105,7 +105,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+@@ -139,7 +139,7 @@ static int hsr_dev_open(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -172,7 +172,7 @@ static int hsr_dev_close(struct net_device *dev)
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -205,7 +205,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+@@ -484,7 +484,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -506,7 +506,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -534,7 +534,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+@@ -580,7 +580,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 192893c3f2ec7..ac1eb1db1a52b 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 135ec5fce0196..33b0d2460c9bc 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -224,6 +224,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
++#define hsr_for_each_port_rtnl(hsr, port) \
++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+ /* Caller must ensure skb is a valid HSR frame */
+--
+2.51.0
+
--- /dev/null
+From 11bc49c24829d0f218f1802aa51ff2b92fd102b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this with inserting code to fail intentionally.
+
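+As an aside, the bug is a classic address-of confusion with an array of
+pointers. A minimal standalone C sketch (illustrative names, not the
+driver code) shows why the two cookies can never match:
+
+  #include <stdio.h>
+
+  struct q_vector { int id; };
+
+  int main(void)
+  {
+          struct q_vector v0 = { 0 }, v1 = { 1 };
+          struct q_vector *q_vectors[2] = { &v0, &v1 };
+
+          /* dev_id passed to request_irq(): the stored pointer */
+          void *requested = q_vectors[1];
+          /* buggy dev_id passed to free_irq(): the slot's address */
+          void *freed = &q_vectors[1];
+
+          printf("match: %s\n", requested == freed ? "yes" : "no");
+          return 0;                       /* output: "match: no" */
+  }
+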
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index f1c9e575703ea..26dcdceae741e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4182,7 +4182,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
--- /dev/null
+From cfe40ae18a12fae715101bf3feece27e3e49f277 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice,
+ixgbe), which correctly test the physical link state regardless of
+admin state.
+Remove the if_running check to ensure the link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ca6ccbc139548..6412c84e2d17d 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From 4ac1dd27e6b7f1534e4d5f950fcbef007af1c27e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 21:10:56 +0800
+Subject: igb: Fix NULL pointer dereference in ethtool loopback test
+
+From: Tianyu Xu <tianyxu@cisco.com>
+
+[ Upstream commit 75871a525a596ff4d16c4aebc0018f8d0923c9b1 ]
+
+The igb driver currently causes a NULL pointer dereference when executing
+the ethtool loopback test. This occurs because there is no associated
+q_vector for the test ring when it is set up, as interrupts are typically
+not added to the test rings.
+
+Since commit 5ef44b3cb43b removed the napi_id assignment in
+__xdp_rxq_info_reg(), there is no longer a need to pass a napi_id to it.
+Therefore, simply use 0 as the last parameter.
+
+Fixes: 2c6196013f84 ("igb: Add AF_XDP zero-copy Rx support")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Joe Damato <joe@dama.to>
+Signed-off-by: Tianyu Xu <tianyxu@cisco.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b76a154e635e0..d87438bef6fba 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4451,8 +4451,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+- rx_ring->queue_index,
+- rx_ring->q_vector->napi.napi_id);
++ rx_ring->queue_index, 0);
+ if (res < 0) {
+ dev_err(dev, "Failed to register xdp_rxq index %u\n",
+ rx_ring->queue_index);
+--
+2.51.0
+
--- /dev/null
+From 0f27b880bf7f54ba5b7401a7e44256ee03e9217a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 10:36:14 -0700
+Subject: macsec: sync features on RTM_NEWLINK
+
+From: Stanislav Fomichev <sdf@fomichev.me>
+
+[ Upstream commit 0f82c3ba66c6b2e3cde0f255156a753b108ee9dc ]
+
+Syzkaller managed to lock the lower device via ETHTOOL_SFEATURES:
+
+ netdev_lock include/linux/netdevice.h:2761 [inline]
+ netdev_lock_ops include/net/netdev_lock.h:42 [inline]
+ netdev_sync_lower_features net/core/dev.c:10649 [inline]
+ __netdev_update_features+0xcb1/0x1be0 net/core/dev.c:10819
+ netdev_update_features+0x6d/0xe0 net/core/dev.c:10876
+ macsec_notify+0x2f5/0x660 drivers/net/macsec.c:4533
+ notifier_call_chain+0x1b3/0x3e0 kernel/notifier.c:85
+ call_netdevice_notifiers_extack net/core/dev.c:2267 [inline]
+ call_netdevice_notifiers net/core/dev.c:2281 [inline]
+ netdev_features_change+0x85/0xc0 net/core/dev.c:1570
+ __dev_ethtool net/ethtool/ioctl.c:3469 [inline]
+ dev_ethtool+0x1536/0x19b0 net/ethtool/ioctl.c:3502
+ dev_ioctl+0x392/0x1150 net/core/dev_ioctl.c:759
+
+It happens because lower features are out of sync with the upper:
+
+ __dev_ethtool (real_dev)
+ netdev_lock_ops(real_dev)
+ ETHTOOL_SFEATURES
+ __netdev_features_change
+ netdev_sync_upper_features
+ disable LRO on the lower
+ if (old_features != dev->features)
+ netdev_features_change
+ fires NETDEV_FEAT_CHANGE
+ macsec_notify
+ NETDEV_FEAT_CHANGE
+ netdev_update_features (for each macsec dev)
+ netdev_sync_lower_features
+ if (upper_features != lower_features)
+ netdev_lock_ops(lower) # lower == real_dev
+ stuck
+ ...
+
+ netdev_unlock_ops(real_dev)
+
+Per commit af5f54b0ef9e ("net: Lock lower level devices when updating
+features"), we elide the lock/unlock when the upper and lower features
+are synced. Make sure the lower (real_dev) has proper features after
+the macsec link has been created, so that we never hit the situation
+where we need to sync upper features down to the lower device.
+
+Reported-by: syzbot+7e0f89fb6cae5d002de0@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=7e0f89fb6cae5d002de0
+Fixes: 7e4d784f5810 ("net: hold netdev instance lock during rtnetlink operations")
+Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Link: https://patch.msgid.link/20250908173614.3358264-1-sdf@fomichev.me
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/macsec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 01329fe7451a1..0eca96eeed58a 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -4286,6 +4286,7 @@ static int macsec_newlink(struct net_device *dev,
+ if (err < 0)
+ goto del_dev;
+
++ netdev_update_features(dev);
+ netif_stacked_transfer_operstate(real_dev, dev);
+ linkwatch_fire_event(dev);
+
+--
+2.51.0
+
--- /dev/null
+From 0c4324f7d06fdfa4e24b73536d70788b187b6c0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 21:47:46 +0200
+Subject: mtd: rawnand: nuvoton: Fix an error handling path in
+ ma35_nand_chips_init()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 1eae113dd5ff5192cfd3e11b6ab7b96193b42c01 ]
+
+If a ma35_nand_chip_init() call fails, then a reference to 'nand_np' still
+needs to be released.
+
+Use for_each_child_of_node_scoped() to fix the issue.
+
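+For context, the scoped iterator works by attaching a cleanup handler
+to the loop variable, so the reference is dropped on every exit path,
+including early returns. The same mechanism can be sketched in plain C
+with the compiler's cleanup attribute (illustrative code, not the
+driver):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  /* runs automatically when the annotated variable leaves scope */
+  static void drop_ref(char **p)
+  {
+          free(*p);
+          printf("reference dropped\n");
+  }
+
+  int main(void)
+  {
+          for (int i = 0; i < 3; i++) {
+                  char *child __attribute__((cleanup(drop_ref))) =
+                          strdup("nand child node");
+
+                  if (i == 1)
+                          return 1;       /* cleanup still runs here */
+          }
+          return 0;
+  }
+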
+Fixes: 5abb5d414d55 ("mtd: rawnand: nuvoton: add new driver for the Nuvoton MA35 SoC")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
+index c23b537948d5e..1a285cd8fad62 100644
+--- a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
++++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
+@@ -935,10 +935,10 @@ static void ma35_chips_cleanup(struct ma35_nand_info *nand)
+
+ static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
+ {
+- struct device_node *np = dev->of_node, *nand_np;
++ struct device_node *np = dev->of_node;
+ int ret;
+
+- for_each_child_of_node(np, nand_np) {
++ for_each_child_of_node_scoped(np, nand_np) {
+ ret = ma35_nand_chip_init(dev, nand, nand_np);
+ if (ret) {
+ ma35_chips_cleanup(nand);
+--
+2.51.0
+
--- /dev/null
+From 3cb9e323b4e80a33a4e3ede87661f517a562d288 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 13:12:33 +0200
+Subject: net: bridge: Bounce invalid boolopts
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit 8625f5748fea960d2af4f3c3e9891ee8f6f80906 ]
+
+The bridge driver currently tolerates options that it does not recognize.
+Instead, it should bounce them.
+
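+The check itself is simple bit arithmetic: any bit set at or above
+BR_BOOLOPT_MAX is unknown and must be rejected. A standalone sketch of
+the idea (the constant here is illustrative, not the bridge code):
+
+  #include <stdio.h>
+
+  #define BOOLOPT_MAX 3   /* first invalid option bit (illustrative) */
+
+  int main(void)
+  {
+          unsigned long bitmap = (1UL << 1) | (1UL << 5);
+
+          /* mirrors the find_next_bit() rejection in the patch */
+          if (bitmap >> BOOLOPT_MAX) {
+                  fprintf(stderr, "unknown boolean option\n");
+                  return 1;
+          }
+          return 0;
+  }
+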
+Fixes: a428afe82f98 ("net: bridge: add support for user-controlled bool options")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/e6fdca3b5a8d54183fbda075daffef38bdd7ddce.1757070067.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index 0adeafe11a365..ad2d8f59fc7bc 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -324,6 +324,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
+ int err = 0;
+ int opt_id;
+
++ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
++ if (opt_id != BITS_PER_LONG) {
++ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
++ opt_id);
++ return -EINVAL;
++ }
++
+ for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+ bool on = !!(bm->optval & BIT(opt_id));
+
+--
+2.51.0
+
--- /dev/null
+From 9f2107f2c04d4b6b69f7720e9c60539389128235 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Sep 2025 11:08:21 +0300
+Subject: net: dev_ioctl: take ops lock in hwtstamp lower paths
+
+From: Carolina Jubran <cjubran@nvidia.com>
+
+[ Upstream commit 686cab5a18e443e1d5f2abb17bed45837836425f ]
+
+ndo hwtstamp callbacks are expected to run under the per-device ops
+lock. Make the lower get/set paths consistent with the rest of ndo
+invocations.
+
+Kernel log:
+WARNING: CPU: 13 PID: 51364 at ./include/net/netdev_lock.h:70 __netdev_update_features+0x4bd/0xe60
+...
+RIP: 0010:__netdev_update_features+0x4bd/0xe60
+...
+Call Trace:
+<TASK>
+netdev_update_features+0x1f/0x60
+mlx5_hwtstamp_set+0x181/0x290 [mlx5_core]
+mlx5e_hwtstamp_set+0x19/0x30 [mlx5_core]
+dev_set_hwtstamp_phylib+0x9f/0x220
+dev_set_hwtstamp_phylib+0x9f/0x220
+dev_set_hwtstamp+0x13d/0x240
+dev_ioctl+0x12f/0x4b0
+sock_ioctl+0x171/0x370
+__x64_sys_ioctl+0x3f7/0x900
+? __sys_setsockopt+0x69/0xb0
+do_syscall_64+0x6f/0x2e0
+entry_SYSCALL_64_after_hwframe+0x4b/0x53
+...
+</TASK>
+....
+---[ end trace 0000000000000000 ]---
+
+Note that the mlx5_hwtstamp_set and mlx5e_hwtstamp_set functions shown
+in the trace come from an in progress patch converting the legacy ioctl
+to ndo_hwtstamp_get/set and are not present in mainline.
+
+Fixes: ffb7ed19ac0a ("net: hold netdev instance lock during ioctl operations")
+Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Link: https://patch.msgid.link/20250907080821.2353388-1-cjubran@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev_ioctl.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 616479e714663..9447065d01afb 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -464,8 +464,15 @@ int generic_hwtstamp_get_lower(struct net_device *dev,
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+- if (ops->ndo_hwtstamp_get)
+- return dev_get_hwtstamp_phylib(dev, kernel_cfg);
++ if (ops->ndo_hwtstamp_get) {
++ int err;
++
++ netdev_lock_ops(dev);
++ err = dev_get_hwtstamp_phylib(dev, kernel_cfg);
++ netdev_unlock_ops(dev);
++
++ return err;
++ }
+
+ /* Legacy path: unconverted lower driver */
+ return generic_hwtstamp_ioctl_lower(dev, SIOCGHWTSTAMP, kernel_cfg);
+@@ -481,8 +488,15 @@ int generic_hwtstamp_set_lower(struct net_device *dev,
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+- if (ops->ndo_hwtstamp_set)
+- return dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
++ if (ops->ndo_hwtstamp_set) {
++ int err;
++
++ netdev_lock_ops(dev);
++ err = dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
++ netdev_unlock_ops(dev);
++
++ return err;
++ }
+
+ /* Legacy path: unconverted lower driver */
+ return generic_hwtstamp_ioctl_lower(dev, SIOCSHWTSTAMP, kernel_cfg);
+--
+2.51.0
+
--- /dev/null
+From 5124047b6ea9267fef41699253a449d378a8633d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 14:45:07 +0200
+Subject: net: dsa: b53: fix ageing time for BCM53101
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 674b34c4c770551e916ae707829c7faea4782d3a ]
+
+For some reason Broadcom decided that BCM53101 uses 0.5s increments for
+the ageing time register, but kept the field width the same [1]. Due to
+this, the actual ageing time was always half of what was configured.
+
+Fix this by adapting the limits and value calculation for BCM53101.
+
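+As a quick arithmetic check (standalone C, not the driver code): the
+same msecs value maps to a register count twice as large with a 500 ms
+tick, which is also why the min/max limits halve:
+
+  #include <stdio.h>
+
+  /* round-to-nearest division, like the kernel's DIV_ROUND_CLOSEST() */
+  static unsigned int div_round_closest(unsigned int x, unsigned int d)
+  {
+          return (x + d / 2) / d;
+  }
+
+  int main(void)
+  {
+          unsigned int msecs = 300000;    /* 5 minutes */
+
+          printf("1000 ms tick: %u\n", div_round_closest(msecs, 1000));
+          printf(" 500 ms tick: %u\n", div_round_closest(msecs, 500));
+          return 0;                       /* prints 300, then 600 */
+  }
+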
+So far it looks like this is the only chip with the increased tick
+speed:
+
+$ grep -l -r "Specifies the aging time in 0.5 seconds" cdk/PKG/chip | sort
+cdk/PKG/chip/bcm53101/bcm53101_a0_defs.h
+
+$ grep -l -r "Specifies the aging time in seconds" cdk/PKG/chip | sort
+cdk/PKG/chip/bcm53010/bcm53010_a0_defs.h
+cdk/PKG/chip/bcm53020/bcm53020_a0_defs.h
+cdk/PKG/chip/bcm53084/bcm53084_a0_defs.h
+cdk/PKG/chip/bcm53115/bcm53115_a0_defs.h
+cdk/PKG/chip/bcm53118/bcm53118_a0_defs.h
+cdk/PKG/chip/bcm53125/bcm53125_a0_defs.h
+cdk/PKG/chip/bcm53128/bcm53128_a0_defs.h
+cdk/PKG/chip/bcm53134/bcm53134_a0_defs.h
+cdk/PKG/chip/bcm53242/bcm53242_a0_defs.h
+cdk/PKG/chip/bcm53262/bcm53262_a0_defs.h
+cdk/PKG/chip/bcm53280/bcm53280_a0_defs.h
+cdk/PKG/chip/bcm53280/bcm53280_b0_defs.h
+cdk/PKG/chip/bcm53600/bcm53600_a0_defs.h
+cdk/PKG/chip/bcm89500/bcm89500_a0_defs.h
+
+[1] https://github.com/Broadcom/OpenMDK/blob/a5d3fc9b12af3eeb68f2ca0ce7ec4056cd14d6c2/cdk/PKG/chip/bcm53101/bcm53101_a0_defs.h#L28966
+
+Fixes: e39d14a760c0 ("net: dsa: b53: implement setting ageing time")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250905124507.59186-1-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d15d912690c40..073d20241a4c9 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1229,9 +1229,15 @@ static int b53_setup(struct dsa_switch *ds)
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
+- /* Ageing time is set in seconds */
+- ds->ageing_time_min = 1 * 1000;
+- ds->ageing_time_max = AGE_TIME_MAX * 1000;
++ if (dev->chip_id == BCM53101_DEVICE_ID) {
++ /* BCM53101 uses 0.5 second increments */
++ ds->ageing_time_min = 1 * 500;
++ ds->ageing_time_max = AGE_TIME_MAX * 500;
++ } else {
++ /* Everything else uses 1 second increments */
++ ds->ageing_time_min = 1 * 1000;
++ ds->ageing_time_max = AGE_TIME_MAX * 1000;
++ }
+
+ ret = b53_reset_switch(dev);
+ if (ret) {
+@@ -2448,7 +2454,10 @@ int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+- atc = DIV_ROUND_CLOSEST(msecs, 1000);
++ if (dev->chip_id == BCM53101_DEVICE_ID)
++ atc = DIV_ROUND_CLOSEST(msecs, 500);
++ else
++ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+--
+2.51.0
+
--- /dev/null
+From 8f5a60084d54b53be8ff1d3b3b58a3f310010d4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 651b73163b6ee..5f15f42070c53 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2358,7 +2358,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From cf027150004a2945b528377d79eaf829f75f3aa1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 15:52:38 +0300
+Subject: net: phy: transfer phy_config_inband() locking responsibility to
+ phylink
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit e2a10daba84968f6b5777d150985fd7d6abc9c84 ]
+
+Problem description
+===================
+
+Lockdep reports a possible circular locking dependency (AB/BA) between
+&pl->state_mutex and &phy->lock, as follows.
+
+phylink_resolve() // acquires &pl->state_mutex
+-> phylink_major_config()
+ -> phy_config_inband() // acquires &pl->phydev->lock
+
+whereas all the other call sites where &pl->state_mutex and
+&pl->phydev->lock have the locking scheme reversed. Everywhere else,
+&pl->phydev->lock is acquired at the top level, and &pl->state_mutex at
+the lower level. A clear example is phylink_bringup_phy().
+
+The outlier is the newly introduced phy_config_inband() and the existing
+lock order is the correct one. To understand why it cannot be the other
+way around, it is sufficient to consider phylink_phy_change(), phylink's
+callback from the PHY device's phy->phy_link_change() virtual method,
+invoked by the PHY state machine.
+
+phy_link_up() and phy_link_down(), the (indirect) callers of
+phylink_phy_change(), are called with &phydev->lock acquired.
+Then phylink_phy_change() acquires its own &pl->state_mutex, to
+serialize changes made to its pl->phy_state and pl->link_config.
+So all other instances of &pl->state_mutex and &phydev->lock must be
+consistent with this order.
+
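+Stated outside the driver, the invariant is the usual one for avoiding
+AB/BA deadlocks: every thread takes the two locks in the same global
+order. A minimal pthread sketch with made-up names (not phylink code):
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t phydev_lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+  /* both paths honor the same order: phydev_lock, then state_mutex */
+  static void *path(void *name)
+  {
+          pthread_mutex_lock(&phydev_lock);
+          pthread_mutex_lock(&state_mutex);
+          printf("%s: locks taken in canonical order\n", (char *)name);
+          pthread_mutex_unlock(&state_mutex);
+          pthread_mutex_unlock(&phydev_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t a, b;
+
+          pthread_create(&a, NULL, path, "resolver");
+          pthread_create(&b, NULL, path, "phy state machine");
+          pthread_join(a, NULL);
+          pthread_join(b, NULL);
+          return 0;
+  }
+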
+Problem impact
+==============
+
+I think the kernel runs a serious deadlock risk if an existing
+phylink_resolve() thread, which results in a phy_config_inband() call,
+is concurrent with a phy_link_up() or phy_link_down() call, which will
+deadlock on &pl->state_mutex in phylink_phy_change(). Practically
+speaking, the impact may be limited by the slow speed of the medium
+auto-negotiation protocol, which makes it unlikely for the current state
+to still be unresolved when a new one is detected, but I think the
+problem is there. Nonetheless, the problem was discovered using lockdep.
+
+Proposed solution
+=================
+
+Practically speaking, the phy_config_inband() requirement of having
+phydev->lock acquired must transfer to the caller (phylink is the only
+caller). There, it must bubble up until immediately before
+&pl->state_mutex is acquired, for the cases where that takes place.
+
+Solution details, considerations, notes
+=======================================
+
+This is the phy_config_inband() call graph:
+
+ sfp_upstream_ops :: connect_phy()
+ |
+ v
+ phylink_sfp_connect_phy()
+ |
+ v
+ phylink_sfp_config_phy()
+ |
+ | sfp_upstream_ops :: module_insert()
+ | |
+ | v
+ | phylink_sfp_module_insert()
+ | |
+ | | sfp_upstream_ops :: module_start()
+ | | |
+ | | v
+ | | phylink_sfp_module_start()
+ | | |
+ | v v
+ | phylink_sfp_config_optical()
+ phylink_start() | |
+ | phylink_resume() v v
+ | | phylink_sfp_set_config()
+ | | |
+ v v v
+ phylink_mac_initial_config()
+ | phylink_resolve()
+ | | phylink_ethtool_ksettings_set()
+ v v v
+ phylink_major_config()
+ |
+ v
+ phy_config_inband()
+
+phylink_major_config() caller #1, phylink_mac_initial_config(), does not
+acquire &pl->state_mutex nor do its callers. It must acquire
+&pl->phydev->lock prior to calling phylink_major_config().
+
+phylink_major_config() caller #2, phylink_resolve() acquires
+&pl->state_mutex, thus also needs to acquire &pl->phydev->lock.
+
+phylink_major_config() caller #3, phylink_ethtool_ksettings_set(), is
+completely uninteresting, because it only calls phylink_major_config()
+if pl->phydev is NULL (otherwise it calls phy_ethtool_ksettings_set()).
+We need to change nothing there.
+
+Other solutions
+===============
+
+The lock inversion between &pl->state_mutex and &pl->phydev->lock has
+occurred at least once before, as seen in commit c718af2d00a3 ("net:
+phylink: fix ethtool -A with attached PHYs"). The solution there was to
+simply not call phy_set_asym_pause() under the &pl->state_mutex. That
+cannot be extended to our case though, where the phy_config_inband()
+call is much deeper inside the &pl->state_mutex section.
+
+Fixes: 5fd0f1a02e75 ("net: phylink: add negotiation of in-band capabilities")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://patch.msgid.link/20250904125238.193990-2-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phy.c | 12 ++++--------
+ drivers/net/phy/phylink.c | 9 +++++++++
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 13df28445f020..c02da57a4da5e 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -1065,23 +1065,19 @@ EXPORT_SYMBOL_GPL(phy_inband_caps);
+ */
+ int phy_config_inband(struct phy_device *phydev, unsigned int modes)
+ {
+- int err;
++ lockdep_assert_held(&phydev->lock);
+
+ if (!!(modes & LINK_INBAND_DISABLE) +
+ !!(modes & LINK_INBAND_ENABLE) +
+ !!(modes & LINK_INBAND_BYPASS) != 1)
+ return -EINVAL;
+
+- mutex_lock(&phydev->lock);
+ if (!phydev->drv)
+- err = -EIO;
++ return -EIO;
+ else if (!phydev->drv->config_inband)
+- err = -EOPNOTSUPP;
+- else
+- err = phydev->drv->config_inband(phydev, modes);
+- mutex_unlock(&phydev->lock);
++ return -EOPNOTSUPP;
+
+- return err;
++ return phydev->drv->config_inband(phydev, modes);
+ }
+ EXPORT_SYMBOL(phy_config_inband);
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 08cbb31e6dbc1..229a503d601ee 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1411,6 +1411,7 @@ static void phylink_get_fixed_state(struct phylink *pl,
+ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
+ {
+ struct phylink_link_state link_state;
++ struct phy_device *phy = pl->phydev;
+
+ switch (pl->req_link_an_mode) {
+ case MLO_AN_PHY:
+@@ -1434,7 +1435,11 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
+ link_state.link = false;
+
+ phylink_apply_manual_flow(pl, &link_state);
++ if (phy)
++ mutex_lock(&phy->lock);
+ phylink_major_config(pl, force_restart, &link_state);
++ if (phy)
++ mutex_unlock(&phy->lock);
+ }
+
+ static const char *phylink_pause_to_str(int pause)
+@@ -1575,6 +1580,8 @@ static void phylink_resolve(struct work_struct *w)
+
+ mutex_lock(&pl->phydev_mutex);
+ phy = pl->phydev;
++ if (phy)
++ mutex_lock(&phy->lock);
+ mutex_lock(&pl->state_mutex);
+ cur_link_state = phylink_link_is_up(pl);
+
+@@ -1676,6 +1683,8 @@ static void phylink_resolve(struct work_struct *w)
+ queue_work(system_power_efficient_wq, &pl->resolve);
+ }
+ mutex_unlock(&pl->state_mutex);
++ if (phy)
++ mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1a94f0ac2acb6c8c77aa69df86d52e859cd967b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 15:52:37 +0300
+Subject: net: phylink: add lock for serializing concurrent pl->phydev writes
+ with resolver
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 0ba5b2f2c381dbec9ed9e4ab3ae5d3e667de0dc3 ]
+
+Currently phylink_resolve() protects itself against concurrent
+phylink_bringup_phy() or phylink_disconnect_phy() calls which modify
+pl->phydev by relying on pl->state_mutex.
+
+The problem is that in phylink_resolve(), pl->state_mutex is in a lock
+inversion state with pl->phydev->lock. So pl->phydev->lock needs to be
+acquired prior to pl->state_mutex. But that requires dereferencing
+pl->phydev in the first place, and without pl->state_mutex, that is
+racy.
+
+Hence the extra lock. Currently it is redundant, but it will serve a
+functional purpose once mutex_lock(&phy->lock) is moved outside of the
+mutex_lock(&pl->state_mutex) section.
+
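+Once that move happens (the phy_config_inband() locking patch in this
+series), the resolver takes the locks in this order; a simplified
+sketch:
+
+    mutex_lock(&pl->phydev_mutex);
+    phy = pl->phydev;           /* stable while phydev_mutex is held */
+    if (phy)
+        mutex_lock(&phy->lock); /* added by the follow-up change */
+    mutex_lock(&pl->state_mutex);
+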
+An alternative considered would have been to let phylink_resolve()
+acquire the rtnl_mutex, which is also held when phylink_bringup_phy()
+and phylink_disconnect_phy() are called. But since phylink_disconnect_phy()
+runs under rtnl_lock(), it would deadlock with phylink_resolve() when
+calling flush_work(&pl->resolve). It would also have unnecessarily
+blocked many other call paths across the entire kernel, so the
+smaller-scoped lock was preferred.
+
+Link: https://lore.kernel.org/netdev/aLb6puGVzR29GpPx@shell.armlinux.org.uk/
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://patch.msgid.link/20250904125238.193990-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: e2a10daba849 ("net: phy: transfer phy_config_inband() locking responsibility to phylink")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phylink.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 0faa3d97e06b9..08cbb31e6dbc1 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -67,6 +67,8 @@ struct phylink {
+ struct timer_list link_poll;
+
+ struct mutex state_mutex;
++ /* Serialize updates to pl->phydev with phylink_resolve() */
++ struct mutex phydev_mutex;
+ struct phylink_link_state phy_state;
+ unsigned int phy_ib_mode;
+ struct work_struct resolve;
+@@ -1568,8 +1570,11 @@ static void phylink_resolve(struct work_struct *w)
+ struct phylink_link_state link_state;
+ bool mac_config = false;
+ bool retrigger = false;
++ struct phy_device *phy;
+ bool cur_link_state;
+
++ mutex_lock(&pl->phydev_mutex);
++ phy = pl->phydev;
+ mutex_lock(&pl->state_mutex);
+ cur_link_state = phylink_link_is_up(pl);
+
+@@ -1603,11 +1608,11 @@ static void phylink_resolve(struct work_struct *w)
+ /* If we have a phy, the "up" state is the union of both the
+ * PHY and the MAC
+ */
+- if (pl->phydev)
++ if (phy)
+ link_state.link &= pl->phy_state.link;
+
+ /* Only update if the PHY link is up */
+- if (pl->phydev && pl->phy_state.link) {
++ if (phy && pl->phy_state.link) {
+ /* If the interface has changed, force a link down
+ * event if the link isn't already down, and re-resolve.
+ */
+@@ -1671,6 +1676,7 @@ static void phylink_resolve(struct work_struct *w)
+ queue_work(system_power_efficient_wq, &pl->resolve);
+ }
+ mutex_unlock(&pl->state_mutex);
++ mutex_unlock(&pl->phydev_mutex);
+ }
+
+ static void phylink_run_resolve(struct phylink *pl)
+@@ -1806,6 +1812,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ if (!pl)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_init(&pl->phydev_mutex);
+ mutex_init(&pl->state_mutex);
+ INIT_WORK(&pl->resolve, phylink_resolve);
+
+@@ -2066,6 +2073,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
+ kfree(irq_str);
+
++ mutex_lock(&pl->phydev_mutex);
+ mutex_lock(&phy->lock);
+ mutex_lock(&pl->state_mutex);
+ pl->phydev = phy;
+@@ -2111,6 +2119,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+
+ mutex_unlock(&pl->state_mutex);
+ mutex_unlock(&phy->lock);
++ mutex_unlock(&pl->phydev_mutex);
+
+ phylink_dbg(pl,
+ "phy: %s setting supported %*pb advertising %*pb\n",
+@@ -2289,6 +2298,7 @@ void phylink_disconnect_phy(struct phylink *pl)
+
+ ASSERT_RTNL();
+
++ mutex_lock(&pl->phydev_mutex);
+ phy = pl->phydev;
+ if (phy) {
+ mutex_lock(&phy->lock);
+@@ -2298,8 +2308,11 @@ void phylink_disconnect_phy(struct phylink *pl)
+ pl->mac_tx_clk_stop = false;
+ mutex_unlock(&pl->state_mutex);
+ mutex_unlock(&phy->lock);
+- flush_work(&pl->resolve);
++ }
++ mutex_unlock(&pl->phydev_mutex);
+
++ if (phy) {
++ flush_work(&pl->resolve);
+ phy_disconnect(phy);
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 370123abaf00c6d13d2d352c1302c59c739c0de1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:21 +0200
+Subject: netfilter: nf_tables: make nft_set_do_lookup available
+ unconditionally
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 11fe5a82e53ac3581a80c88e0e35fb8a80e15f48 ]
+
+This function was added for retpoline mitigation and is replaced by a
+static inline helper if mitigations are not enabled.
+
+Enable this helper function unconditionally so the next patch can add a
+lookup restart mechanism to fix possible false negatives while
+transactions are in progress.
+
+Adding lookup restarts in nft_lookup_eval() doesn't work, as nft_objref
+would then need the same copy-pasted loop.
+
+This patch is separate to ease review of the actual bug fix.
+
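+The resulting shape (see the hunks below) keeps the retpoline-avoiding
+dispatch in a static helper and exports a thin wrapper that the next
+patch can extend with a retry loop:
+
+    const struct nft_set_ext *
+    nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+                      const u32 *key)
+    {
+        return __nft_set_do_lookup(net, set, key);
+    }
+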
+Suggested-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_core.h | 10 ++--------
+ net/netfilter/nft_lookup.c | 17 ++++++++++++-----
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 6a52fb97b8443..04699eac5b524 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -109,17 +109,11 @@ nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
+ const struct nft_set_ext *
+ nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
++#endif
++
+ const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+-#else
+-static inline const struct nft_set_ext *
+-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
+-{
+- return set->ops->lookup(net, set, key);
+-}
+-#endif
+
+ /* called from nft_pipapo_avx2.c */
+ const struct nft_set_ext *
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 40c602ffbcba7..2c6909bf1b407 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -24,11 +24,11 @@ struct nft_lookup {
+ struct nft_set_binding binding;
+ };
+
+-#ifdef CONFIG_MITIGATION_RETPOLINE
+-const struct nft_set_ext *
+-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
++static const struct nft_set_ext *
++__nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
++#ifdef CONFIG_MITIGATION_RETPOLINE
+ if (set->ops == &nft_set_hash_fast_type.ops)
+ return nft_hash_lookup_fast(net, set, key);
+ if (set->ops == &nft_set_hash_type.ops)
+@@ -51,10 +51,17 @@ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ return nft_rbtree_lookup(net, set, key);
+
+ WARN_ON_ONCE(1);
++#endif
+ return set->ops->lookup(net, set, key);
+ }
++
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
++{
++ return __nft_set_do_lookup(net, set, key);
++}
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+-#endif
+
+ void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+--
+2.51.0
+
--- /dev/null
+From 0d47e36f24478a6dbeb5b2585008d46e6b6f8895 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:20 +0200
+Subject: netfilter: nf_tables: place base_seq in struct net
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 64102d9bbc3d41dac5188b8fba75b1344c438970 ]
+
+This will soon be read from the packet path at around the same time as
+the gencursor.
+
+Both gencursor and base_seq get incremented almost at the same time, so
+it makes sense to place them in the same structure.
+
+This doesn't increase struct net size on 64bit due to padding.
+
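+Readers then go through small accessors (added below) instead of
+dereferencing struct nftables_pernet:
+
+    static unsigned int nft_base_seq(const struct net *net)
+    {
+        return READ_ONCE(net->nft.base_seq);
+    }
+
+    static __be16 nft_base_seq_be16(const struct net *net)
+    {
+        return htons(nft_base_seq(net) & 0xffff);
+    }
+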
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 1 -
+ include/net/netns/nftables.h | 1 +
+ net/netfilter/nf_tables_api.c | 65 ++++++++++++++++---------------
+ 3 files changed, 34 insertions(+), 33 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index cf65703e221fa..16daeac2ac555 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1916,7 +1916,6 @@ struct nftables_pernet {
+ struct mutex commit_mutex;
+ u64 table_handle;
+ u64 tstamp;
+- unsigned int base_seq;
+ unsigned int gc_seq;
+ u8 validate_state;
+ struct work_struct destroy_work;
+diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
+index cc8060c017d5f..99dd166c5d07c 100644
+--- a/include/net/netns/nftables.h
++++ b/include/net/netns/nftables.h
+@@ -3,6 +3,7 @@
+ #define _NETNS_NFTABLES_H_
+
+ struct netns_nftables {
++ unsigned int base_seq;
+ u8 gencursor;
+ };
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3a443765d7e90..5ea7a015504bb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1131,11 +1131,14 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
+ return ERR_PTR(-ENOENT);
+ }
+
+-static __be16 nft_base_seq(const struct net *net)
++static unsigned int nft_base_seq(const struct net *net)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
++ return READ_ONCE(net->nft.base_seq);
++}
+
+- return htons(nft_net->base_seq & 0xffff);
++static __be16 nft_base_seq_be16(const struct net *net)
++{
++ return htons(nft_base_seq(net) & 0xffff);
+ }
+
+ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+@@ -1155,7 +1158,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1248,7 +1251,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -2030,7 +2033,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -2133,7 +2136,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -3671,7 +3674,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+ u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+
+ nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
+- nft_base_seq(net));
++ nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -3839,7 +3842,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -4050,7 +4053,7 @@ static int nf_tables_getrule_reset(struct sk_buff *skb,
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_RULE_TABLE]),
+ (char *)nla_data(nla[NFTA_RULE_TABLE]),
+- nft_net->base_seq);
++ nft_base_seq(net));
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
+ kfree(buf);
+@@ -4887,7 +4890,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, ctx->family, NFNETLINK_V0,
+- nft_base_seq(ctx->net));
++ nft_base_seq_be16(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -5032,7 +5035,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (ctx->family != NFPROTO_UNSPEC &&
+@@ -6209,7 +6212,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
+@@ -6238,7 +6241,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ seq = cb->nlh->nlmsg_seq;
+
+ nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
+- table->family, NFNETLINK_V0, nft_base_seq(net));
++ table->family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -6331,7 +6334,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
+
+ event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+- NFNETLINK_V0, nft_base_seq(ctx->net));
++ NFNETLINK_V0, nft_base_seq_be16(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -6630,7 +6633,7 @@ static int nf_tables_getsetelem_reset(struct sk_buff *skb,
+ }
+ nelems++;
+ }
+- audit_log_nft_set_reset(dump_ctx.ctx.table, nft_net->base_seq, nelems);
++ audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(info->net), nelems);
+
+ out_unlock:
+ rcu_read_unlock();
+@@ -8381,7 +8384,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -8446,7 +8449,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -8480,7 +8483,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ idx++;
+ }
+ if (ctx->reset && entries)
+- audit_log_obj_reset(table, nft_net->base_seq, entries);
++ audit_log_obj_reset(table, nft_base_seq(net), entries);
+ if (rc < 0)
+ break;
+ }
+@@ -8649,7 +8652,7 @@ static int nf_tables_getobj_reset(struct sk_buff *skb,
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_OBJ_TABLE]),
+ (char *)nla_data(nla[NFTA_OBJ_TABLE]),
+- nft_net->base_seq);
++ nft_base_seq(net));
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+@@ -8754,9 +8757,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq, int event,
+ u16 flags, int family, int report, gfp_t gfp)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
+ char *buf = kasprintf(gfp, "%s:%u",
+- table->name, nft_net->base_seq);
++ table->name, nft_base_seq(net));
+
+ audit_log_nfcfg(buf,
+ family,
+@@ -9441,7 +9443,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+- flags, family, NFNETLINK_V0, nft_base_seq(net));
++ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -9510,7 +9512,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
+
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+- cb->seq = READ_ONCE(nft_net->base_seq);
++ cb->seq = nft_base_seq(net);
+
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -9695,17 +9697,16 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq)
+ {
+- struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nlmsghdr *nlh;
+ char buf[TASK_COMM_LEN];
+ int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
+
+ nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
+- NFNETLINK_V0, nft_base_seq(net));
++ NFNETLINK_V0, nft_base_seq_be16(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+- if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
++ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_base_seq(net))) ||
+ nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
+ nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
+ goto nla_put_failure;
+@@ -10966,11 +10967,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ * Bump generation counter, invalidate any dump in progress.
+ * Cannot fail after this point.
+ */
+- base_seq = READ_ONCE(nft_net->base_seq);
++ base_seq = nft_base_seq(net);
+ while (++base_seq == 0)
+ ;
+
+- WRITE_ONCE(nft_net->base_seq, base_seq);
++ WRITE_ONCE(net->nft.base_seq, base_seq);
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+@@ -11179,7 +11180,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+
+ nft_commit_notify(net, NETLINK_CB(skb).portid);
+ nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+- nf_tables_commit_audit_log(&adl, nft_net->base_seq);
++ nf_tables_commit_audit_log(&adl, nft_base_seq(net));
+
+ nft_gc_seq_end(nft_net, gc_seq);
+ nft_net->validate_state = NFT_VALIDATE_SKIP;
+@@ -11504,7 +11505,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
+ mutex_lock(&nft_net->commit_mutex);
+ nft_net->tstamp = get_jiffies_64();
+
+- genid_ok = genid == 0 || nft_net->base_seq == genid;
++ genid_ok = genid == 0 || nft_base_seq(net) == genid;
+ if (!genid_ok)
+ mutex_unlock(&nft_net->commit_mutex);
+
+@@ -12141,7 +12142,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ INIT_LIST_HEAD(&nft_net->module_list);
+ INIT_LIST_HEAD(&nft_net->notify_list);
+ mutex_init(&nft_net->commit_mutex);
+- nft_net->base_seq = 1;
++ net->nft.base_seq = 1;
+ nft_net->gc_seq = 0;
+ nft_net->validate_state = NFT_VALIDATE_SKIP;
+ INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
+--
+2.51.0
+
--- /dev/null
+From 8d9f42606443f20cde80abf5efad167b1f36dfb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Jun 2025 15:37:03 +0200
+Subject: netfilter: nf_tables: Reintroduce shortened deletion notifications
+
+From: Phil Sutter <phil@nwl.cc>
+
+[ Upstream commit a1050dd071682d2c9d8d6d5c96119f8f401b62f0 ]
+
+Restore commit 28339b21a365 ("netfilter: nf_tables: do not send complete
+notification of deletions") and fix it:
+
+- Avoid upfront modification of 'event' variable so the conditionals
+ become effective.
+- Always include NFTA_OBJ_TYPE attribute in object notifications, user
+ space requires it for proper deserialisation.
+- Catch DESTROY events, too.
+
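+For delete/destroy events the fill helpers now complete the message
+right after the identifying attributes; the recurring pattern in the
+hunks below is:
+
+    if (event == NFT_MSG_DELTABLE ||
+        event == NFT_MSG_DESTROYTABLE) {
+        nlmsg_end(skb, nlh);
+        return 0;
+    }
+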
+Signed-off-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: b2f742c846ca ("netfilter: nf_tables: restart set lookup on base_seq change")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 67 ++++++++++++++++++++++++++---------
+ 1 file changed, 50 insertions(+), 17 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0e86434ca13b0..3a443765d7e90 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1153,9 +1153,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -1165,6 +1165,12 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ NFTA_TABLE_PAD))
+ goto nla_put_failure;
+
++ if (event == NFT_MSG_DELTABLE ||
++ event == NFT_MSG_DESTROYTABLE) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ htonl(table->flags & NFT_TABLE_F_MASK)))
+ goto nla_put_failure;
+@@ -2022,9 +2028,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -2034,6 +2040,13 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ NFTA_CHAIN_PAD))
+ goto nla_put_failure;
+
++ if (!hook_list &&
++ (event == NFT_MSG_DELCHAIN ||
++ event == NFT_MSG_DESTROYCHAIN)) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nft_is_base_chain(chain)) {
+ const struct nft_base_chain *basechain = nft_base_chain(chain);
+ struct nft_stats __percpu *stats;
+@@ -4871,9 +4884,10 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ u32 seq = ctx->seq;
+ int i;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+- NFNETLINK_V0, nft_base_seq(ctx->net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, ctx->family, NFNETLINK_V0,
++ nft_base_seq(ctx->net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -4885,6 +4899,12 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ NFTA_SET_PAD))
+ goto nla_put_failure;
+
++ if (event == NFT_MSG_DELSET ||
++ event == NFT_MSG_DESTROYSET) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (set->flags != 0)
+ if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+ goto nla_put_failure;
+@@ -8359,20 +8379,26 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+ {
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+ if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
+ nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
++ nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+ nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
+ NFTA_OBJ_PAD))
+ goto nla_put_failure;
+
+- if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+- nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
++ if (event == NFT_MSG_DELOBJ ||
++ event == NFT_MSG_DESTROYOBJ) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
++ if (nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
+ nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+ goto nla_put_failure;
+
+@@ -9413,9 +9439,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ struct nft_hook *hook;
+ struct nlmsghdr *nlh;
+
+- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+- NFNETLINK_V0, nft_base_seq(net));
++ nlh = nfnl_msg_put(skb, portid, seq,
++ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++ flags, family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
+ goto nla_put_failure;
+
+@@ -9425,6 +9451,13 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ NFTA_FLOWTABLE_PAD))
+ goto nla_put_failure;
+
++ if (!hook_list &&
++ (event == NFT_MSG_DELFLOWTABLE ||
++ event == NFT_MSG_DESTROYFLOWTABLE)) {
++ nlmsg_end(skb, nlh);
++ return 0;
++ }
++
+ if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
+ goto nla_put_failure;
+--
+2.51.0
+
--- /dev/null
+From 375a3cd8bcec2a6e87054993b694424cc1a4e000 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:22 +0200
+Subject: netfilter: nf_tables: restart set lookup on base_seq change
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit b2f742c846cab9afc5953a5d8f17b54922dcc723 ]
+
+The hash, hash_fast, rhash and bitwise sets may indicate no result even
+though a matching element exists, during a short time window while
+another cpu is finalizing the transaction.
+
+This happens when the hash lookup/bitwise lookup function has picked up
+the old genbit, right before it was toggled by nf_tables_commit(), but
+the committing cpu then managed to unlink the matching old element from
+the hash table:
+
+cpu0 cpu1
+ has added new elements to clone
+ has marked elements as being
+ inactive in new generation
+ perform lookup in the set
+ enters commit phase:
+ A) observes old genbit
+ increments base_seq
+I) increments the genbit
+II) removes old element from the set
+ B) finds matching element
+ C) returns no match: found
+ element is not valid in old
+ generation
+
+ Next lookup observes new genbit and
+ finds matching e2.
+
+Consider a packet P matching elements e1 and e2.
+
+cpu0 processes the following transaction:
+1. remove e1
+2. add e2, which has the same key as e1.
+
+P matches both e1 and e2. Therefore, cpu1 should always find a match
+for P. Due to the above race, this is not the case:
+
+cpu1 observed the old genbit. e2 will not be considered once it is found.
+The element e1 is not found anymore if cpu0 managed to unlink it from the
+hlist before cpu1 found it during list traversal.
+
+The situation only occurs for a brief time period; lookups happening
+after I) observe the new genbit and return e2.
+
+This problem exists in all set types except nft_set_pipapo, so fix it once
+in nft_lookup rather than in each set implementation individually.
+
+Sample the base sequence counter, which gets incremented right before the
+genbit is changed.
+
+Then, if no match is found, retry the lookup if the base sequence was
+altered in between.
+
+If the base sequence hasn't changed, one of the following holds:
+ - No update took place: a no-match result is expected.
+   This is the common case; or
+ - nf_tables_commit() hasn't progressed to the genbit update yet.
+   Old elements were still visible and a no-match result is expected; or
+ - nf_tables_commit() updated the genbit:
+   we picked up the new base_seq, so the lookup function also picked
+   up the new genbit, and a no-match result is expected.
+
+If the old genbit was observed, then nft_lookup also picked up the old
+base_seq: nft_lookup_should_retry() returns true and the lookup is
+retried in the new generation.
+
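+In code form, this is the loop added to nft_set_do_lookup() below,
+shown here as a simplified sketch:
+
+    do {
+        base_seq = nft_base_seq(net); /* smp_load_acquire() */
+        ext = __nft_set_do_lookup(net, set, key);
+        if (ext)
+            break;
+        /* no match: retry if a commit changed base_seq meanwhile */
+    } while (nft_lookup_should_retry(net, base_seq));
+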
+This problem was added when the unconditional synchronize_rcu() call
+that followed the current/next generation bit toggle was removed.
+
+Thanks to Pablo Neira Ayuso for reviewing an earlier version of this
+patchset, for suggesting re-use of existing base_seq and placement of
+the restart loop in nft_set_do_lookup().
+
+Fixes: 0cbc06b3faba ("netfilter: nf_tables: remove synchronize_rcu in commit phase")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 ++-
+ net/netfilter/nft_lookup.c | 31 ++++++++++++++++++++++++++++++-
+ 2 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5ea7a015504bb..cde63e5f18d8f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10971,7 +10971,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ while (++base_seq == 0)
+ ;
+
+- WRITE_ONCE(net->nft.base_seq, base_seq);
++ /* pairs with smp_load_acquire in nft_lookup_eval */
++ smp_store_release(&net->nft.base_seq, base_seq);
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 2c6909bf1b407..58c5b14889c47 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -55,11 +55,40 @@ __nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ return set->ops->lookup(net, set, key);
+ }
+
++static unsigned int nft_base_seq(const struct net *net)
++{
++ /* pairs with smp_store_release() in nf_tables_commit() */
++ return smp_load_acquire(&net->nft.base_seq);
++}
++
++static bool nft_lookup_should_retry(const struct net *net, unsigned int seq)
++{
++ return unlikely(seq != nft_base_seq(net));
++}
++
+ const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+- return __nft_set_do_lookup(net, set, key);
++ const struct nft_set_ext *ext;
++ unsigned int base_seq;
++
++ do {
++ base_seq = nft_base_seq(net);
++
++ ext = __nft_set_do_lookup(net, set, key);
++ if (ext)
++ break;
++ /* No match? There is a small chance that lookup was
++ * performed in the old generation, but nf_tables_commit()
++ * already unlinked a (matching) element.
++ *
++ * We need to repeat the lookup to make sure that we didn't
++ * miss a matching element in the new generation.
++ */
++ } while (nft_lookup_should_retry(net, base_seq));
++
++ return ext;
+ }
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+
+--
+2.51.0
+
--- /dev/null
+From 10f249feb3c2435ec8f042d8dd49ab44bd147856 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:13 +0200
+Subject: netfilter: nft_set: remove one argument from lookup and update
+ functions
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 17a20e09f086f2c574ac87f3cf6e14c4377f65f6 ]
+
+Return the extension pointer instead of passing it as a function
+argument to be filled in by the callee.
+
+As-is, whenever false is returned, the extension pointer is not used.
+
+For all set types, when true is returned, the extension pointer was set
+to the matching element.
+
+The only exception is nft_set_bitmap, which doesn't support extensions;
+it returns a pointer to a static const empty element extension container.
+
+return false -> return NULL
+return true -> return the element's extension pointer.
+
+This saves one function argument.
+
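+On the caller side the conversion is mechanical; a representative
+before/after sketch:
+
+    /* before */
+    if (nft_set_do_lookup(net, set, key, &ext))
+        /* use ext */;
+
+    /* after */
+    ext = nft_set_do_lookup(net, set, key);
+    if (ext)
+        /* use ext */;
+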
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 10 ++---
+ include/net/netfilter/nf_tables_core.h | 47 ++++++++++++----------
+ net/netfilter/nft_dynset.c | 5 ++-
+ net/netfilter/nft_lookup.c | 27 ++++++-------
+ net/netfilter/nft_objref.c | 5 +--
+ net/netfilter/nft_set_bitmap.c | 11 ++++--
+ net/netfilter/nft_set_hash.c | 54 ++++++++++++--------------
+ net/netfilter/nft_set_pipapo.c | 19 +++++----
+ net/netfilter/nft_set_pipapo_avx2.c | 25 ++++++------
+ net/netfilter/nft_set_rbtree.c | 40 +++++++++----------
+ 10 files changed, 126 insertions(+), 117 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 5e49619ae49c6..cf65703e221fa 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -459,19 +459,17 @@ struct nft_set_ext;
+ * control plane functions.
+ */
+ struct nft_set_ops {
+- bool (*lookup)(const struct net *net,
++ const struct nft_set_ext * (*lookup)(const struct net *net,
+ const struct nft_set *set,
+- const u32 *key,
+- const struct nft_set_ext **ext);
+- bool (*update)(struct nft_set *set,
++ const u32 *key);
++ const struct nft_set_ext * (*update)(struct nft_set *set,
+ const u32 *key,
+ struct nft_elem_priv *
+ (*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_set_ext **ext);
++ struct nft_regs *regs);
+ bool (*delete)(const struct nft_set *set,
+ const u32 *key);
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 03b6165756fc5..6a52fb97b8443 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -94,34 +94,41 @@ extern const struct nft_set_type nft_set_pipapo_type;
+ extern const struct nft_set_type nft_set_pipapo_avx2_type;
+
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup_fast(const struct net *net,
+- const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+ #else
+-static inline bool
++static inline const struct nft_set_ext *
+ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++ const u32 *key)
+ {
+- return set->ops->lookup(net, set, key, ext);
++ return set->ops->lookup(net, set, key);
+ }
+ #endif
+
+ /* called from nft_pipapo_avx2.c */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+ /* called from nft_set_pipapo.c */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key);
+
+ void nft_counter_init_seqcount(void);
+
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 88922e0e8e837..e24493d9e7761 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -91,8 +91,9 @@ void nft_dynset_eval(const struct nft_expr *expr,
+ return;
+ }
+
+- if (set->ops->update(set, ®s->data[priv->sreg_key], nft_dynset_new,
+- expr, regs, &ext)) {
++ ext = set->ops->update(set, ®s->data[priv->sreg_key], nft_dynset_new,
++ expr, regs);
++ if (ext) {
+ if (priv->op == NFT_DYNSET_OP_UPDATE &&
+ nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) != 0) {
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 63ef832b8aa71..40c602ffbcba7 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -25,32 +25,33 @@ struct nft_lookup {
+ };
+
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ if (set->ops == &nft_set_hash_fast_type.ops)
+- return nft_hash_lookup_fast(net, set, key, ext);
++ return nft_hash_lookup_fast(net, set, key);
+ if (set->ops == &nft_set_hash_type.ops)
+- return nft_hash_lookup(net, set, key, ext);
++ return nft_hash_lookup(net, set, key);
+
+ if (set->ops == &nft_set_rhash_type.ops)
+- return nft_rhash_lookup(net, set, key, ext);
++ return nft_rhash_lookup(net, set, key);
+
+ if (set->ops == &nft_set_bitmap_type.ops)
+- return nft_bitmap_lookup(net, set, key, ext);
++ return nft_bitmap_lookup(net, set, key);
+
+ if (set->ops == &nft_set_pipapo_type.ops)
+- return nft_pipapo_lookup(net, set, key, ext);
++ return nft_pipapo_lookup(net, set, key);
+ #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+ if (set->ops == &nft_set_pipapo_avx2_type.ops)
+- return nft_pipapo_avx2_lookup(net, set, key, ext);
++ return nft_pipapo_avx2_lookup(net, set, key);
+ #endif
+
+ if (set->ops == &nft_set_rbtree_type.ops)
+- return nft_rbtree_lookup(net, set, key, ext);
++ return nft_rbtree_lookup(net, set, key);
+
+ WARN_ON_ONCE(1);
+- return set->ops->lookup(net, set, key, ext);
++ return set->ops->lookup(net, set, key);
+ }
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+ #endif
+@@ -61,12 +62,12 @@ void nft_lookup_eval(const struct nft_expr *expr,
+ {
+ const struct nft_lookup *priv = nft_expr_priv(expr);
+ const struct nft_set *set = priv->set;
+- const struct nft_set_ext *ext = NULL;
+ const struct net *net = nft_net(pkt);
++ const struct nft_set_ext *ext;
+ bool found;
+
+- found = nft_set_do_lookup(net, set, ®s->data[priv->sreg], &ext) ^
+- priv->invert;
++ ext = nft_set_do_lookup(net, set, ®s->data[priv->sreg]);
++ found = !!ext ^ priv->invert;
+ if (!found) {
+ ext = nft_set_catchall_lookup(net, set);
+ if (!ext) {
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 09da7a3f9f967..8ee66a86c3bc7 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -111,10 +111,9 @@ void nft_objref_map_eval(const struct nft_expr *expr,
+ struct net *net = nft_net(pkt);
+ const struct nft_set_ext *ext;
+ struct nft_object *obj;
+- bool found;
+
+- found = nft_set_do_lookup(net, set, ®s->data[priv->sreg], &ext);
+- if (!found) {
++ ext = nft_set_do_lookup(net, set, ®s->data[priv->sreg]);
++ if (!ext) {
+ ext = nft_set_catchall_lookup(net, set);
+ if (!ext) {
+ regs->verdict.code = NFT_BREAK;
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 8a435cc0e98c4..8d3f040a904a2 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -75,16 +75,21 @@ nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ const struct nft_bitmap *priv = nft_set_priv(set);
++ static const struct nft_set_ext found;
+ u8 genmask = nft_genmask_cur(net);
+ u32 idx, off;
+
+ nft_bitmap_location(set, key, &idx, &off);
+
+- return nft_bitmap_active(priv->bitmap, idx, off, genmask);
++ if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
++ return &found;
++
++ return NULL;
+ }
+
+ static struct nft_bitmap_elem *
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index abb0c8ec63719..9903c737c9f0a 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -81,8 +81,9 @@ static const struct rhashtable_params nft_rhash_params = {
+ };
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_rhash *priv = nft_set_priv(set);
+ const struct nft_rhash_elem *he;
+@@ -95,9 +96,9 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+ if (he != NULL)
+- *ext = &he->ext;
++ return &he->ext;
+
+- return !!he;
++ return NULL;
+ }
+
+ static struct nft_elem_priv *
+@@ -120,14 +121,11 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
+ return ERR_PTR(-ENOENT);
+ }
+
+-static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+- struct nft_elem_priv *
+- (*new)(struct nft_set *,
+- const struct nft_expr *,
+- struct nft_regs *regs),
+- const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_set_ext **ext)
++static const struct nft_set_ext *
++nft_rhash_update(struct nft_set *set, const u32 *key,
++ struct nft_elem_priv *(*new)(struct nft_set *, const struct nft_expr *,
++ struct nft_regs *regs),
++ const struct nft_expr *expr, struct nft_regs *regs)
+ {
+ struct nft_rhash *priv = nft_set_priv(set);
+ struct nft_rhash_elem *he, *prev;
+@@ -161,14 +159,13 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+ }
+
+ out:
+- *ext = &he->ext;
+- return true;
++ return &he->ext;
+
+ err2:
+ nft_set_elem_destroy(set, &he->priv, true);
+ atomic_dec(&set->nelems);
+ err1:
+- return false;
++ return NULL;
+ }
+
+ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
+@@ -507,8 +504,9 @@ struct nft_hash_elem {
+ };
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_hash *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_cur(net);
+@@ -519,12 +517,10 @@ bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ hash = reciprocal_scale(hash, priv->buckets);
+ hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
+- nft_set_elem_active(&he->ext, genmask)) {
+- *ext = &he->ext;
+- return true;
+- }
++ nft_set_elem_active(&he->ext, genmask))
++ return &he->ext;
+ }
+- return false;
++ return NULL;
+ }
+
+ static struct nft_elem_priv *
+@@ -547,9 +543,9 @@ nft_hash_get(const struct net *net, const struct nft_set *set,
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup_fast(const struct net *net,
+- const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_hash *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_cur(net);
+@@ -562,12 +558,10 @@ bool nft_hash_lookup_fast(const struct net *net,
+ hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
+ if (k1 == k2 &&
+- nft_set_elem_active(&he->ext, genmask)) {
+- *ext = &he->ext;
+- return true;
+- }
++ nft_set_elem_active(&he->ext, genmask))
++ return &he->ext;
+ }
+- return false;
++ return NULL;
+ }
+
+ static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9ac48e6b4332c..a844b33fa6002 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -407,8 +407,9 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ *
+ * Return: true on match, false otherwise.
+ */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+@@ -465,13 +466,15 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ scratch->map_index = map_index;
+ local_bh_enable();
+
+- return false;
++ return NULL;
+ }
+
+ if (last) {
+- *ext = &f->mt[b].e->ext;
+- if (unlikely(nft_set_elem_expired(*ext) ||
+- !nft_set_elem_active(*ext, genmask)))
++ const struct nft_set_ext *ext;
++
++ ext = &f->mt[b].e->ext;
++ if (unlikely(nft_set_elem_expired(ext) ||
++ !nft_set_elem_active(ext, genmask)))
+ goto next_match;
+
+ /* Last field: we're just returning the key without
+@@ -482,7 +485,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ scratch->map_index = map_index;
+ local_bh_enable();
+
+- return true;
++ return ext;
+ }
+
+ /* Swap bitmap indices: res_map is the initial bitmap for the
+@@ -497,7 +500,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+
+ out:
+ local_bh_enable();
+- return false;
++ return NULL;
+ }
+
+ /**
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index be7c16c79f711..6c441e2dc8af3 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1146,8 +1146,9 @@ static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, uns
+ *
+ * Return: true on match, false otherwise.
+ */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+@@ -1155,17 +1156,18 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
++ const struct nft_set_ext *ext;
+ unsigned long *res, *fill;
+ bool map_index;
+- int i, ret = 0;
++ int i;
+
+ local_bh_disable();
+
+ if (unlikely(!irq_fpu_usable())) {
+- bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
++ ext = nft_pipapo_lookup(net, set, key);
+
+ local_bh_enable();
+- return fallback_res;
++ return ext;
+ }
+
+ m = rcu_dereference(priv->match);
+@@ -1182,7 +1184,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ if (unlikely(!scratch)) {
+ kernel_fpu_end();
+ local_bh_enable();
+- return false;
++ return NULL;
+ }
+
+ map_index = scratch->map_index;
+@@ -1197,6 +1199,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ next_match:
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1, first = !i;
++ int ret = 0;
+
+ #define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n) \
+ (ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f, \
+@@ -1244,10 +1247,10 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ goto out;
+
+ if (last) {
+- *ext = &f->mt[ret].e->ext;
+- if (unlikely(nft_set_elem_expired(*ext) ||
+- !nft_set_elem_active(*ext, genmask))) {
+- ret = 0;
++ ext = &f->mt[ret].e->ext;
++ if (unlikely(nft_set_elem_expired(ext) ||
++ !nft_set_elem_active(ext, genmask))) {
++ ext = NULL;
+ goto next_match;
+ }
+
+@@ -1264,5 +1267,5 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ kernel_fpu_end();
+ local_bh_enable();
+
+- return ret >= 0;
++ return ext;
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2e8ef16ff191d..938a257c069e2 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -52,9 +52,9 @@ static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
+ return nft_set_elem_expired(&rbe->ext);
+ }
+
+-static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext,
+- unsigned int seq)
++static const struct nft_set_ext *
++__nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key, unsigned int seq)
+ {
+ struct nft_rbtree *priv = nft_set_priv(set);
+ const struct nft_rbtree_elem *rbe, *interval = NULL;
+@@ -65,7 +65,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ parent = rcu_dereference_raw(priv->root.rb_node);
+ while (parent != NULL) {
+ if (read_seqcount_retry(&priv->count, seq))
+- return false;
++ return NULL;
+
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+@@ -87,50 +87,48 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ }
+
+ if (nft_rbtree_elem_expired(rbe))
+- return false;
++ return NULL;
+
+ if (nft_rbtree_interval_end(rbe)) {
+ if (nft_set_is_anonymous(set))
+- return false;
++ return NULL;
+ parent = rcu_dereference_raw(parent->rb_left);
+ interval = NULL;
+ continue;
+ }
+
+- *ext = &rbe->ext;
+- return true;
++ return &rbe->ext;
+ }
+ }
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
+ !nft_rbtree_elem_expired(interval) &&
+- nft_rbtree_interval_start(interval)) {
+- *ext = &interval->ext;
+- return true;
+- }
++ nft_rbtree_interval_start(interval))
++ return &interval->ext;
+
+- return false;
++ return NULL;
+ }
+
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+ struct nft_rbtree *priv = nft_set_priv(set);
+ unsigned int seq = read_seqcount_begin(&priv->count);
+- bool ret;
++ const struct nft_set_ext *ext;
+
+- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
+- if (ret || !read_seqcount_retry(&priv->count, seq))
+- return ret;
++ ext = __nft_rbtree_lookup(net, set, key, seq);
++ if (ext || !read_seqcount_retry(&priv->count, seq))
++ return ext;
+
+ read_lock_bh(&priv->lock);
+ seq = read_seqcount_begin(&priv->count);
+- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
++ ext = __nft_rbtree_lookup(net, set, key, seq);
+ read_unlock_bh(&priv->lock);
+
+- return ret;
++ return ext;
+ }
+
+ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
+--
+2.51.0
+
--- /dev/null
+From f7a719ba2ee849ba20ff941b730d2b41d4d9d0f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Sep 2025 14:45:21 +0200
+Subject: netfilter: nft_set_bitmap: fix lockdep splat due to missing
+ annotation
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 5e13f2c491a4100d208e77e92fe577fe3dbad6c2 ]
+
+Running new 'set_flush_add_atomic_bitmap' test case for nftables.git
+with CONFIG_PROVE_RCU_LIST=y yields:
+
+net/netfilter/nft_set_bitmap.c:231 RCU-list traversed in non-reader section!!
+rcu_scheduler_active = 2, debug_locks = 1
+1 lock held by nft/4008:
+ #0: ffff888147f79cd8 (&nft_net->commit_mutex){+.+.}-{4:4}, at: nf_tables_valid_genid+0x2f/0xd0
+
+ lockdep_rcu_suspicious+0x116/0x160
+ nft_bitmap_walk+0x22d/0x240
+ nf_tables_delsetelem+0x1010/0x1a00
+ ..
+
+This is a false positive: the list cannot be altered while the
+transaction mutex is held, so pass the relevant lockdep condition to the
+iterator.
+
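+list_for_each_entry_rcu() accepts an optional lockdep expression for
+exactly this situation; the fix below uses it as follows:
+
+    list_for_each_entry_rcu(be, &priv->list, head,
+                lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
+        ...
+    }
+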
+The Fixes tag is intentionally "wrong"; there is no point in picking
+this up if the earlier false-positive fixups were not applied.
+
+Fixes: 28b7a6b84c0a ("netfilter: nf_tables: avoid false-positive lockdep splats in set walker")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_bitmap.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 12390d2e994fc..8a435cc0e98c4 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -221,7 +221,8 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
+ const struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *be;
+
+- list_for_each_entry_rcu(be, &priv->list, head) {
++ list_for_each_entry_rcu(be, &priv->list, head,
++ lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
+ if (iter->count < iter->skip)
+ goto cont;
+
+--
+2.51.0
+
--- /dev/null
+From 1ff691a76be6769b562064d42a7bdbe3647fb0f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:18 +0200
+Subject: netfilter: nft_set_pipapo: don't check genbit from packetpath lookups
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit c4eaca2e1052adfd67bed0a36a9d4b8e515666e4 ]
+
+The pipapo set type is special in that it has two copies of its
+datastructure: one live copy containing only valid elements and one
+on-demand clone used during transaction where adds/deletes happen.
+
+This clone is not visible to the datapath.
+
+This is unlike all other set types in nftables, which all link new
+elements into their live hlist/tree.
+
+For those sets, the lookup functions must skip the new elements while the
+transaction is ongoing to ensure consistency.
+
+As the clone is shallow, removal does have an effect on the packet path:
+once the transaction enters the commit phase the 'gencursor' bit that
+determines which elements are active and which elements should be ignored
+(because they are no longer valid) is flipped.
+
+This causes the datapath lookup to ignore these elements if they are found
+during lookup.
+
+This opens up a small race window where pipapo has an inconsistent view of
+the dataset from when the transaction-cpu flipped the genbit until the
+transaction-cpu calls nft_pipapo_commit() to swap live/clone pointers:
+
+cpu0                                     cpu1
+                                         has added new elements to clone
+                                         has marked elements as being
+                                         inactive in new generation
+                                         perform lookup in the set
+enters commit phase:
+
+I) increments the genbit
+                                         A) observes new genbit
+removes elements from the clone so
+they won't be found anymore
+                                         B) lookup in datastructure
+                                         can't see new elements yet,
+                                         but old elements are ignored
+                                         -> Only matches elements that
+                                            were not changed in the
+                                            transaction
+II) calls nft_pipapo_commit(), clone
+    and live pointers are swapped.
+                                         C) New nft_lookup happening now
+                                            will find matching elements.
+
+Consider a packet P matching range r1-r2:
+
+cpu0 processes the following transaction:
+1. remove r1-r2
+2. add r1-r3
+
+P is contained in both ranges. Therefore, cpu1 should always find a match
+for P. Due to the above race, this is not the case:
+
+cpu1 does find r1-r2, but then ignores it due to the genbit indicating
+the range has been removed.
+
+At the same time, r1-r3 is not visible yet, because it can only be found
+in the clone.
+
+The situation persists for all lookups until after cpu0 hits II).
+
+The fix is easy: Don't check the genbit from pipapo lookup functions.
+This is possible because unlike the other set types, the new elements are
+not reachable from the live copy of the dataset.
+
+The clone/live pointer swap is enough to avoid matching on old elements
+while at the same time all new elements are exposed in one go.
+
+After this change, step B above returns a match in r1-r2.
+This is fine: r1-r2 only becomes truly invalid the moment it gets freed.
+This happens after a synchronize_rcu() call, and the rcu read lock is
+held via netfilter hook traversal (nf_hook_slow()).
+
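+A simplified model of the check that the lookup path no longer applies
+(illustrative only; the real helpers live in
+include/net/netfilter/nf_tables.h):
+
+    /* An element's genmask has a bit set for each generation in which
+     * it must be treated as deleted.
+     */
+    static bool elem_active(u8 elem_genmask, u8 lookup_genmask)
+    {
+        return !(elem_genmask & lookup_genmask);
+    }
+
+With lookup_genmask = nft_genmask_cur() the 'now old' entries vanish as
+soon as the genbit flips; with NFT_GENMASK_ANY nothing is filtered and
+pipapo relies purely on the clone/live pointer swap described above.
+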
+Cc: Stefano Brivio <sbrivio@redhat.com>
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 20 ++++++++++++++++++--
+ net/netfilter/nft_set_pipapo_avx2.c | 4 +---
+ 2 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 1a19649c28511..fa6741b3205a6 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -511,6 +511,23 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ *
+ * This function is called from the data path. It will search for
+ * an element matching the given key in the current active copy.
++ * Unlike other set types, this uses NFT_GENMASK_ANY instead of
++ * nft_genmask_cur().
++ *
++ * This is because new (future) elements are not reachable from
++ * priv->match, they get added to priv->clone instead.
++ * When the commit phase flips the generation bitmask, the
++ * 'now old' entries are skipped but without the 'now current'
++ * elements becoming visible. Using nft_genmask_cur() thus creates
++ * inconsistent state: matching old entries get skipped but the
++ * newly matching entries are unreachable.
++ *
++ * NFT_GENMASK_ANY will still find the 'now old' entries, which ensures
++ * a consistent priv->match view.
++ *
++ * nft_pipapo_commit swaps ->clone and ->match shortly after the
++ * genbit flip. As ->clone doesn't contain the old entries in the first
++ * place, lookup will only find the now-current ones.
+ *
+ * Return: nftables API extension pointer or NULL if no match.
+ */
+@@ -519,12 +536,11 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+- u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_elem *e;
+
+ m = rcu_dereference(priv->match);
+- e = pipapo_get(m, (const u8 *)key, genmask, get_jiffies_64());
++ e = pipapo_get(m, (const u8 *)key, NFT_GENMASK_ANY, get_jiffies_64());
+
+ return e ? &e->ext : NULL;
+ }
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 2155c7f345c21..39e356c9687a9 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1153,7 +1153,6 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_set_ext *ext = NULL;
+ struct nft_pipapo_scratch *scratch;
+- u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+@@ -1249,8 +1248,7 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ if (last) {
+ const struct nft_set_ext *e = &f->mt[ret].e->ext;
+
+- if (unlikely(nft_set_elem_expired(e) ||
+- !nft_set_elem_active(e, genmask)))
++ if (unlikely(nft_set_elem_expired(e)))
+ goto next_match;
+
+ ext = e;
+--
+2.51.0
+
--- /dev/null
+From 1eb6b772ed5aacad8a47ac266f9dcd7c122c372a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 12:10:41 +0200
+Subject: netfilter: nft_set_pipapo: don't return bogus extension pointer
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit c8a7c2c608180f3b4e51dc958b3861242dcdd76d ]
+
+Dan Carpenter says:
+Commit 17a20e09f086 ("netfilter: nft_set: remove one argument from
+lookup and update functions") [..] leads to the following Smatch
+static checker warning:
+
+ net/netfilter/nft_set_pipapo_avx2.c:1269 nft_pipapo_avx2_lookup()
+ error: uninitialized symbol 'ext'.
+
+Fix this by initializing ext to NULL and setting it only once we've
+found a match.
+
+Fixes: 17a20e09f086 ("netfilter: nft_set: remove one argument from lookup and update functions")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/netfilter-devel/aJBzc3V5wk-yPOnH@stanley.mountain/
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo_avx2.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 6c441e2dc8af3..2155c7f345c21 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1151,12 +1151,12 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
++ const struct nft_set_ext *ext = NULL;
+ struct nft_pipapo_scratch *scratch;
+ u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+- const struct nft_set_ext *ext;
+ unsigned long *res, *fill;
+ bool map_index;
+ int i;
+@@ -1247,13 +1247,13 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ goto out;
+
+ if (last) {
+- ext = &f->mt[ret].e->ext;
+- if (unlikely(nft_set_elem_expired(ext) ||
+- !nft_set_elem_active(ext, genmask))) {
+- ext = NULL;
++ const struct nft_set_ext *e = &f->mt[ret].e->ext;
++
++ if (unlikely(nft_set_elem_expired(e) ||
++ !nft_set_elem_active(e, genmask)))
+ goto next_match;
+- }
+
++ ext = e;
+ goto out;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1115aa0aee17218e1fe8b3cf7781025c8bd2311b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:15 +0200
+Subject: netfilter: nft_set_pipapo: merge pipapo_get/lookup
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit d8d871a35ca9ee4881d34995444ed1cb826d01db ]
+
+The matching algorithm has been implemented thrice:
+1. data path lookup, generic version
+2. data path lookup, avx2 version
+3. control plane lookup
+
+Merge 1 and 3 by refactoring pipapo_get as a common helper, then make
+nft_pipapo_lookup and nft_pipapo_get both call the common helper.
+
+Aside from the code savings this has the benefit that we no longer allocate
+temporary scratch maps for each control plane get and insertion operation.
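+
+The resulting call graph, roughly (a sketch; parameters abridged):
+
+    nft_pipapo_lookup()  - datapath,      active copy, nft_genmask_cur()
+    nft_pipapo_get()     - control plane, active copy, nft_genmask_cur()
+    nft_pipapo_insert()  - control plane, clone,       nft_genmask_next()
+        \__ all three call pipapo_get(m, data, genmask, tstamp)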
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 188 ++++++++++-----------------------
+ 1 file changed, 58 insertions(+), 130 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index a844b33fa6002..1a19649c28511 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -397,35 +397,36 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ }
+
+ /**
+- * nft_pipapo_lookup() - Lookup function
+- * @net: Network namespace
+- * @set: nftables API set representation
+- * @key: nftables API element representation containing key data
+- * @ext: nftables API extension pointer, filled with matching reference
++ * pipapo_get() - Get matching element reference given key data
++ * @m: storage containing the set elements
++ * @data: Key data to be matched against existing elements
++ * @genmask: If set, check that element is active in given genmask
++ * @tstamp: timestamp to check for expired elements
+ *
+ * For more details, see DOC: Theory of Operation.
+ *
+- * Return: true on match, false otherwise.
++ * This is the main lookup function. It matches key data against either
++ * the working match set or the uncommitted copy, depending on what the
++ * caller passed to us.
++ * nft_pipapo_get (lookup from userspace/control plane) and nft_pipapo_lookup
++ * (datapath lookup) pass the active copy.
++ * The insertion path will pass the uncommitted working copy.
++ *
++ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
+ */
+-const struct nft_set_ext *
+-nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+- const u32 *key)
++static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
++ const u8 *data, u8 genmask,
++ u64 tstamp)
+ {
+- struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
+ unsigned long *res_map, *fill_map;
+- u8 genmask = nft_genmask_cur(net);
+- const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+- const u8 *rp = (const u8 *)key;
+ bool map_index;
+ int i;
+
+ local_bh_disable();
+
+- m = rcu_dereference(priv->match);
+-
+- if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
++ if (unlikely(!raw_cpu_ptr(m->scratch)))
+ goto out;
+
+ scratch = *raw_cpu_ptr(m->scratch);
+@@ -445,12 +446,12 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ * packet bytes value, then AND bucket value
+ */
+ if (likely(f->bb == 8))
+- pipapo_and_field_buckets_8bit(f, res_map, rp);
++ pipapo_and_field_buckets_8bit(f, res_map, data);
+ else
+- pipapo_and_field_buckets_4bit(f, res_map, rp);
++ pipapo_and_field_buckets_4bit(f, res_map, data);
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+
+- rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
++ data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+
+ /* Now populate the bitmap for the next field, unless this is
+ * the last field, in which case return the matched 'ext'
+@@ -470,11 +471,11 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ if (last) {
+- const struct nft_set_ext *ext;
++ struct nft_pipapo_elem *e;
+
+- ext = &f->mt[b].e->ext;
+- if (unlikely(nft_set_elem_expired(ext) ||
+- !nft_set_elem_active(ext, genmask)))
++ e = f->mt[b].e;
++ if (unlikely(__nft_set_elem_expired(&e->ext, tstamp) ||
++ !nft_set_elem_active(&e->ext, genmask)))
+ goto next_match;
+
+ /* Last field: we're just returning the key without
+@@ -484,8 +485,7 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ */
+ scratch->map_index = map_index;
+ local_bh_enable();
+-
+- return ext;
++ return e;
+ }
+
+ /* Swap bitmap indices: res_map is the initial bitmap for the
+@@ -495,7 +495,7 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ map_index = !map_index;
+ swap(res_map, fill_map);
+
+- rp += NFT_PIPAPO_GROUPS_PADDING(f);
++ data += NFT_PIPAPO_GROUPS_PADDING(f);
+ }
+
+ out:
+@@ -504,99 +504,29 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ /**
+- * pipapo_get() - Get matching element reference given key data
+- * @m: storage containing active/existing elements
+- * @data: Key data to be matched against existing elements
+- * @genmask: If set, check that element is active in given genmask
+- * @tstamp: timestamp to check for expired elements
+- * @gfp: the type of memory to allocate (see kmalloc).
++ * nft_pipapo_lookup() - Dataplane frontend for main lookup function
++ * @net: Network namespace
++ * @set: nftables API set representation
++ * @key: pointer to nft registers containing key data
+ *
+- * This is essentially the same as the lookup function, except that it matches
+- * key data against the uncommitted copy and doesn't use preallocated maps for
+- * bitmap results.
++ * This function is called from the data path. It will search for
++ * an element matching the given key in the current active copy.
+ *
+- * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
++ * Return: nftables API extension pointer or NULL if no match.
+ */
+-static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+- const u8 *data, u8 genmask,
+- u64 tstamp, gfp_t gfp)
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++ const u32 *key)
+ {
+- struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+- unsigned long *res_map, *fill_map = NULL;
+- const struct nft_pipapo_field *f;
+- int i;
+-
+- if (m->bsize_max == 0)
+- return ret;
+-
+- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
+- if (!res_map) {
+- ret = ERR_PTR(-ENOMEM);
+- goto out;
+- }
+-
+- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
+- if (!fill_map) {
+- ret = ERR_PTR(-ENOMEM);
+- goto out;
+- }
+-
+- pipapo_resmap_init(m, res_map);
+-
+- nft_pipapo_for_each_field(f, i, m) {
+- bool last = i == m->field_count - 1;
+- int b;
+-
+- /* For each bit group: select lookup table bucket depending on
+- * packet bytes value, then AND bucket value
+- */
+- if (f->bb == 8)
+- pipapo_and_field_buckets_8bit(f, res_map, data);
+- else if (f->bb == 4)
+- pipapo_and_field_buckets_4bit(f, res_map, data);
+- else
+- BUG();
+-
+- data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+-
+- /* Now populate the bitmap for the next field, unless this is
+- * the last field, in which case return the matched 'ext'
+- * pointer if any.
+- *
+- * Now res_map contains the matching bitmap, and fill_map is the
+- * bitmap for the next field.
+- */
+-next_match:
+- b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+- last);
+- if (b < 0)
+- goto out;
+-
+- if (last) {
+- if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
+- goto next_match;
+- if ((genmask &&
+- !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+- goto next_match;
+-
+- ret = f->mt[b].e;
+- goto out;
+- }
+-
+- data += NFT_PIPAPO_GROUPS_PADDING(f);
++ struct nft_pipapo *priv = nft_set_priv(set);
++ u8 genmask = nft_genmask_cur(net);
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_elem *e;
+
+- /* Swap bitmap indices: fill_map will be the initial bitmap for
+- * the next field (i.e. the new res_map), and res_map is
+- * guaranteed to be all-zeroes at this point, ready to be filled
+- * according to the next mapping table.
+- */
+- swap(res_map, fill_map);
+- }
++ m = rcu_dereference(priv->match);
++ e = pipapo_get(m, (const u8 *)key, genmask, get_jiffies_64());
+
+-out:
+- kfree(fill_map);
+- kfree(res_map);
+- return ret;
++ return e ? &e->ext : NULL;
+ }
+
+ /**
+@@ -605,6 +535,11 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ * @set: nftables API set representation
+ * @elem: nftables API element representation containing key data
+ * @flags: Unused
++ *
++ * This function is called from the control plane path under
++ * RCU read lock.
++ *
++ * Return: set element private pointer or ERR_PTR(-ENOENT).
+ */
+ static struct nft_elem_priv *
+ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+@@ -615,10 +550,9 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo_elem *e;
+
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+- nft_genmask_cur(net), get_jiffies_64(),
+- GFP_ATOMIC);
+- if (IS_ERR(e))
+- return ERR_CAST(e);
++ nft_genmask_cur(net), get_jiffies_64());
++ if (!e)
++ return ERR_PTR(-ENOENT);
+
+ return &e->priv;
+ }
+@@ -1343,8 +1277,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ else
+ end = start;
+
+- dup = pipapo_get(m, start, genmask, tstamp, GFP_KERNEL);
+- if (!IS_ERR(dup)) {
++ dup = pipapo_get(m, start, genmask, tstamp);
++ if (dup) {
+ /* Check if we already have the same exact entry */
+ const struct nft_data *dup_key, *dup_end;
+
+@@ -1363,15 +1297,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ return -ENOTEMPTY;
+ }
+
+- if (PTR_ERR(dup) == -ENOENT) {
+- /* Look for partially overlapping entries */
+- dup = pipapo_get(m, end, nft_genmask_next(net), tstamp,
+- GFP_KERNEL);
+- }
+-
+- if (PTR_ERR(dup) != -ENOENT) {
+- if (IS_ERR(dup))
+- return PTR_ERR(dup);
++ /* Look for partially overlapping entries */
++ dup = pipapo_get(m, end, nft_genmask_next(net), tstamp);
++ if (dup) {
+ *elem_priv = &dup->priv;
+ return -ENOTEMPTY;
+ }
+@@ -1913,8 +1841,8 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ return NULL;
+
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+- nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
+- if (IS_ERR(e))
++ nft_genmask_next(net), nft_net_tstamp(net));
++ if (!e)
+ return NULL;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+--
+2.51.0
+
--- /dev/null
+From 033ee234b5f4526bba84a988f1d4688bf796e4c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 19:05:12 +0200
+Subject: netfilter: nft_set_pipapo: remove unused arguments
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 7792c1e03054440c60d4bce0c06a31c134601997 ]
+
+They are not used anymore, so remove them.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9e4e25f2458f9..9ac48e6b4332c 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -502,8 +502,6 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+
+ /**
+ * pipapo_get() - Get matching element reference given key data
+- * @net: Network namespace
+- * @set: nftables API set representation
+ * @m: storage containing active/existing elements
+ * @data: Key data to be matched against existing elements
+ * @genmask: If set, check that element is active in given genmask
+@@ -516,9 +514,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ *
+ * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
+ */
+-static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+- const struct nft_set *set,
+- const struct nft_pipapo_match *m,
++static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
+ const u8 *data, u8 genmask,
+ u64 tstamp, gfp_t gfp)
+ {
+@@ -615,7 +611,7 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ struct nft_pipapo_match *m = rcu_dereference(priv->match);
+ struct nft_pipapo_elem *e;
+
+- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
++ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
+ if (IS_ERR(e))
+@@ -1344,7 +1340,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ else
+ end = start;
+
+- dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
++ dup = pipapo_get(m, start, genmask, tstamp, GFP_KERNEL);
+ if (!IS_ERR(dup)) {
+ /* Check if we already have the same exact entry */
+ const struct nft_data *dup_key, *dup_end;
+@@ -1366,7 +1362,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+
+ if (PTR_ERR(dup) == -ENOENT) {
+ /* Look for partially overlapping entries */
+- dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
++ dup = pipapo_get(m, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
+ }
+
+@@ -1913,7 +1909,7 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ if (!m)
+ return NULL;
+
+- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
++ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
+ if (IS_ERR(e))
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 6b84cf2571e5a63327b445070c3de7219ed490d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 10:02:19 +0200
+Subject: netfilter: nft_set_rbtree: continue traversal if element is inactive
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit a60f7bf4a1524d8896b76ba89623080aebf44272 ]
+
+When the rbtree lookup function finds a match in the rbtree, it sets the
+range start interval to a potentially inactive element.
+
+Then, after tree lookup, if the matching element is inactive, it returns
+NULL and suppresses a matching result.
+
+This is wrong and leads to false negative matches when a transaction has
+already entered the commit phase.
+
+cpu0                                     cpu1
+                                         has added new elements to clone
+                                         has marked elements as being
+                                         inactive in new generation
+                                         perform lookup in the set
+enters commit phase:
+I) increments the genbit
+                                         A) observes new genbit
+                                         B) finds matching range
+                                         C) returns no match: found
+                                            range invalid in new generation
+II) removes old elements from the tree
+                                         C) New nft_lookup happening now
+                                            will find matching element,
+                                            because it is no longer
+                                            obscured by old, inactive one.
+
+Consider a packet P matching range r1-r2:
+
+cpu0 processes the following transaction:
+1. remove r1-r2
+2. add r1-r3
+
+P is contained in both ranges. Therefore, cpu1 should always find a match
+for P. Due to the above race, this is not the case:
+
+cpu1 does find r1-r2, but then ignores it due to the genbit indicating
+the range has been removed. It does NOT test for further matches.
+
+The situation persists for all lookups until after cpu0 hits II), after
+which the r1-r3 range start node is tested for the first time.
+
+Move the "interval start is valid" check ahead so that tree traversal
+continues if the starting interval is not valid in this generation.
+
+Thanks to Stefan Hanreich for providing an initial reproducer for this
+bug.
+
+Reported-by: Stefan Hanreich <s.hanreich@proxmox.com>
+Fixes: c1eda3c6394f ("netfilter: nft_rbtree: ignore inactive matching element with no descendants")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_rbtree.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 938a257c069e2..b1f04168ec937 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -77,7 +77,9 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ nft_rbtree_interval_end(rbe) &&
+ nft_rbtree_interval_start(interval))
+ continue;
+- interval = rbe;
++ if (nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_rbtree_elem_expired(rbe))
++ interval = rbe;
+ } else if (d > 0)
+ parent = rcu_dereference_raw(parent->rb_right);
+ else {
+@@ -102,8 +104,6 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ }
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+- nft_set_elem_active(&interval->ext, genmask) &&
+- !nft_rbtree_elem_expired(interval) &&
+ nft_rbtree_interval_start(interval))
+ return &interval->ext;
+
+--
+2.51.0
+
--- /dev/null
+From 5506cd5a1f6b5f887d954ba08f6f83a39facdcfc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Sep 2025 12:21:46 +0200
+Subject: PCI: mvebu: Fix use of for_each_of_range() iterator
+
+From: Klaus Kudielka <klaus.kudielka@gmail.com>
+
+[ Upstream commit b816265396daf1beb915e0ffbfd7f3906c2bf4a4 ]
+
+5da3d94a23c6 ("PCI: mvebu: Use for_each_of_range() iterator for parsing
+"ranges"") simplified code by using the for_each_of_range() iterator, but
+it broke PCI enumeration on Turris Omnia (and probably other mvebu
+targets).
+
+Issue #1:
+
+To determine range.flags, of_pci_range_parser_one() uses bus->get_flags(),
+which resolves to of_bus_pci_get_flags(), which already returns an
+IORESOURCE bit field, and NOT the original flags from the "ranges"
+resource.
+
+Then mvebu_get_tgt_attr() attempts the very same conversion again. Remove
+the misinterpretation of range.flags in mvebu_get_tgt_attr(), to restore
+the intended behavior.
+
+Issue #2:
+
+The driver needs target and attributes, which are encoded in the raw
+address values of the "/soc/pcie/ranges" resource. According to
+of_pci_range_parser_one(), the raw values are stored in range.bus_addr and
+range.parent_bus_addr, respectively. range.cpu_addr is a translated version
+of range.parent_bus_addr, and not relevant here.
+
+Use the correct range structure member to extract target and attributes.
+This restores the intended behavior.
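+
+The extraction itself is plain bit shifting on the raw (untranslated)
+address: Marvell encodes the target in bits 63:56 and the attribute in
+bits 55:48 of each "ranges" entry. Sketch with an illustrative value,
+not taken from a real device tree:
+
+    u64 raw  = 0x04e8000000000000ULL;    /* example entry only */
+    u8  tgt  = (raw >> 56) & 0xFF;       /* 0x04 */
+    u8  attr = (raw >> 48) & 0xFF;       /* 0xe8 */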
+
+Fixes: 5da3d94a23c6 ("PCI: mvebu: Use for_each_of_range() iterator for parsing "ranges"")
+Reported-by: Jan Palus <jpalus@fastmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220479
+Signed-off-by: Klaus Kudielka <klaus.kudielka@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Tony Dinh <mibodhi@gmail.com>
+Tested-by: Jan Palus <jpalus@fastmail.com>
+Link: https://patch.msgid.link/20250907102303.29735-1-klaus.kudielka@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pci-mvebu.c | 21 ++++-----------------
+ 1 file changed, 4 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index a4a2bac4f4b27..2f8d0223c1a6d 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -1168,12 +1168,6 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ return devm_ioremap_resource(&pdev->dev, &port->regs);
+ }
+
+-#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
+-#define DT_TYPE_IO 0x1
+-#define DT_TYPE_MEM32 0x2
+-#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+-#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
+-
+ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ unsigned long type,
+ unsigned int *tgt,
+@@ -1189,19 +1183,12 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ return -EINVAL;
+
+ for_each_of_range(&parser, &range) {
+- unsigned long rtype;
+ u32 slot = upper_32_bits(range.bus_addr);
+
+- if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO)
+- rtype = IORESOURCE_IO;
+- else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32)
+- rtype = IORESOURCE_MEM;
+- else
+- continue;
+-
+- if (slot == PCI_SLOT(devfn) && type == rtype) {
+- *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr);
+- *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr);
++ if (slot == PCI_SLOT(devfn) &&
++ type == (range.flags & IORESOURCE_TYPE_BITS)) {
++ *tgt = (range.parent_bus_addr >> 56) & 0xFF;
++ *attr = (range.parent_bus_addr >> 48) & 0xFF;
+ return 0;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From ed4d4255fe601d0053c064d97368338028ae41c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 17:39:56 +0800
+Subject: phy: qualcomm: phy-qcom-eusb2-repeater: fix override properties
+
+From: Pengyu Luo <mitltlatltl@gmail.com>
+
+[ Upstream commit 942e47ab228c7dd27c2ae043c17e7aab2028082c ]
+
+property "qcom,tune-usb2-preem" is for EUSB2_TUNE_USB2_PREEM
+property "qcom,tune-usb2-amplitude" is for EUSB2_TUNE_IUSB2
+
+The downstream correspondence is as follows:
+EUSB2_TUNE_USB2_PREEM: Tx pre-emphasis tuning
+EUSB2_TUNE_IUSB2: HS transmit amplitude
+EUSB2_TUNE_SQUELCH_U: Squelch detection threshold
+EUSB2_TUNE_HSDISC: HS disconnect threshold
+EUSB2_TUNE_EUSB_SLEW: slew rate
+
+Fixes: 31bc94de7602 ("phy: qualcomm: phy-qcom-eusb2-repeater: Don't zero-out registers")
+Signed-off-by: Pengyu Luo <mitltlatltl@gmail.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Reviewed-by: Luca Weiss <luca.weiss@fairphone.com>
+Link: https://lore.kernel.org/r/20250812093957.32235-1-mitltlatltl@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index d7493c2294ef2..3709fba42ebd8 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -127,13 +127,13 @@ static int eusb2_repeater_init(struct phy *phy)
+ rptr->cfg->init_tbl[i].value);
+
+ /* Override registers from devicetree values */
+- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
++ if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
+
+- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
++ if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+
+ /* Wait for status OK */
+--
+2.51.0
+
--- /dev/null
+From 66484530253d2736f4922cf3988107c2acfedf41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:09:13 +0200
+Subject: regulator: sy7636a: fix lifecycle of power good gpio
+
+From: Andreas Kemnade <akemnade@kernel.org>
+
+[ Upstream commit c05d0b32eebadc8be6e53196e99c64cf2bed1d99 ]
+
+Attach the power good gpio to the regulator device's devres instead of
+the parent device's, to fix problems if probe is run multiple times
+(rmmod/insmod or probe deferral).
+
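+devm-managed resources are released only when the device they were
+requested for is unbound. A GPIO requested against the long-lived parent
+MFD device therefore stays claimed across a rmmod/insmod or deferral
+cycle of this driver, and the next probe fails to acquire it. Minimal
+sketch of the corrected pattern (see the diff below for the real call):
+
+    gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+    if (IS_ERR(gdp))
+        return PTR_ERR(gdp);
+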
+Fixes: 8c485bedfb785 ("regulator: sy7636a: Initial commit")
+Signed-off-by: Andreas Kemnade <akemnade@kernel.org>
+Reviewed-by: Alistair Francis <alistair@alistair23.me>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Message-ID: <20250906-sy7636-rsrc-v1-2-e2886a9763a7@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/sy7636a-regulator.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index d1e7ba1fb3e1a..27e3d939b7bb9 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ }
+
+ config.dev = &pdev->dev;
+- config.dev->of_node = pdev->dev.parent->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+--
+2.51.0
+
--- /dev/null
+From d6082f94373a76019bb7170786e0734517853941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:56:06 +0200
+Subject: selftests: can: enable CONFIG_CAN_VCAN as a module
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit d013ebc3499fd87cb9dee1dafd0c58aeb05c27c1 ]
+
+A proper kernel configuration for running kselftest can be obtained with:
+
+ $ yes | make kselftest-merge
+
+Build of the 'vcan' driver is currently missing, while the other required
+knobs are already there because of net/link_netns.py [1]. Add a config
+file in selftests/net/can to store the minimum set of kconfig symbols
+needed for CAN selftests.
+
+[1] https://patch.msgid.link/20250219125039.18024-14-shaw.leon@gmail.com
+
+Fixes: 77442ffa83e8 ("selftests: can: Import tst-filter from can-tests")
+Reviewed-by: Vincent Mailhol <mailhol@kernel.org>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Link: https://patch.msgid.link/fa4c0ea262ec529f25e5f5aa9269d84764c67321.1757516009.git.dcaratti@redhat.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/can/config | 3 +++
+ 1 file changed, 3 insertions(+)
+ create mode 100644 tools/testing/selftests/net/can/config
+
+diff --git a/tools/testing/selftests/net/can/config b/tools/testing/selftests/net/can/config
+new file mode 100644
+index 0000000000000..188f797966709
+--- /dev/null
++++ b/tools/testing/selftests/net/can/config
+@@ -0,0 +1,3 @@
++CONFIG_CAN=m
++CONFIG_CAN_DEV=m
++CONFIG_CAN_VCAN=m
+--
+2.51.0
+
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
input-xpad-add-support-for-flydigi-apex-5.patch
revert-net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+mtd-rawnand-nuvoton-fix-an-error-handling-path-in-ma.patch
+drm-panthor-validate-group-queue-count.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+net-phylink-add-lock-for-serializing-concurrent-pl-p.patch
+net-phy-transfer-phy_config_inband-locking-responsib.patch
+wifi-ath12k-fix-missing-station-power-save-configura.patch
+wifi-ath12k-add-link-support-for-multi-link-in-arsta.patch
+wifi-ath12k-add-support-to-enqueue-management-frame-.patch
+wifi-ath12k-fix-wmi-tlv-header-misalignment.patch
+pci-mvebu-fix-use-of-for_each_of_range-iterator.patch
+genetlink-fix-genl_bind-invoking-bind-after-eperm.patch
+net-dsa-b53-fix-ageing-time-for-bcm53101.patch
+net-bridge-bounce-invalid-boolopts.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+docs-networking-can-change-bcm_msg_head-frames-membe.patch
+igb-fix-null-pointer-dereference-in-ethtool-loopback.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+drm-xe-configfs-don-t-touch-survivability_mode-on-fi.patch
+drm-amd-display-use-udelay-rather-than-fsleep.patch
+net-dev_ioctl-take-ops-lock-in-hwtstamp-lower-paths.patch
+macsec-sync-features-on-rtm_newlink.patch
+selftests-can-enable-config_can_vcan-as-a-module.patch
+can-j1939-implement-netdev_unregister-notification-h.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+can-xilinx_can-xcan_write_frame-fix-use-after-free-o.patch
+netfilter-nft_set_bitmap-fix-lockdep-splat-due-to-mi.patch
+netfilter-nft_set_pipapo-remove-unused-arguments.patch
+netfilter-nft_set-remove-one-argument-from-lookup-an.patch
+netfilter-nft_set_pipapo-merge-pipapo_get-lookup.patch
+netfilter-nft_set_pipapo-don-t-return-bogus-extensio.patch
+netfilter-nft_set_pipapo-don-t-check-genbit-from-pac.patch
+netfilter-nft_set_rbtree-continue-traversal-if-eleme.patch
+netfilter-nf_tables-reintroduce-shortened-deletion-n.patch
+netfilter-nf_tables-place-base_seq-in-struct-net.patch
+netfilter-nf_tables-make-nft_set_do_lookup-available.patch
+netfilter-nf_tables-restart-set-lookup-on-base_seq-c.patch
+hsr-use-rtnl-lock-when-iterating-over-ports.patch
+hsr-use-hsr_for_each_port_rtnl-in-hsr_port_get_hsr.patch
+hsr-hold-rcu-and-dev-lock-for-hsr_get_port_ndev.patch
+phy-qualcomm-phy-qcom-eusb2-repeater-fix-override-pr.patch
+dmaengine-idxd-remove-improper-idxd_free.patch
+dmaengine-idxd-fix-refcount-underflow-on-module-unlo.patch
+dmaengine-idxd-fix-double-free-in-idxd_setup_wqs.patch
+erofs-get-rid-of-get-put-_page-for-ztailpacking-data.patch
+erofs-remove-need_kmap-in-erofs_read_metabuf.patch
+erofs-unify-meta-buffers-in-z_erofs_fill_inode.patch
+erofs-fix-invalid-algorithm-for-encoded-extents.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
+regulator-sy7636a-fix-lifecycle-of-power-good-gpio.patch
+erofs-fix-runtime-warning-on-truncate_folio_batch_ex.patch
--- /dev/null
+From 115ff14bb2490c699b3f5c7928d085b5bf37743e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and if Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such a case the ICMP packet might
+go through the segmentation logic (and be dropped) later if it reaches a
+path where the GSO status is checked and segmentation is required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example on re-transmission the skb won't be a
+ GSO one as it would be segmented (len > MSS) and the ICMP packet
+ should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
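+
+For reference, skb_gso_reset() simply clears the shared-info GSO fields,
+after which skb_is_gso() (a gso_size != 0 test) returns false for the
+reused skb. Sketch of the helper as defined in include/linux/skbuff.h
+at the time of writing:
+
+    static inline void skb_gso_reset(struct sk_buff *skb)
+    {
+        skb_shinfo(skb)->gso_size = 0;
+        skb_shinfo(skb)->gso_segs = 0;
+        skb_shinfo(skb)->gso_type = 0;
+    }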
+
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index f65d2f7273813..8392d304a72eb 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -204,6 +204,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -298,6 +301,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
--- /dev/null
+From d3bc0e922d826b9048667fd43b5f10dfd648ee3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Jul 2025 16:29:24 +0530
+Subject: wifi: ath12k: add link support for multi-link in arsta
+
+From: Sarika Sharma <quic_sarishar@quicinc.com>
+
+[ Upstream commit 3b8aa249d0fce93590888a6ed3d22b458091ecb9 ]
+
+Currently, statistics in arsta are updated at deflink for both non-ML
+and multi-link (ML) stations. Link statistics are not updated for
+multi-link operation (MLO).
+
+Hence, add support to correctly obtain the link ID if the peer is ML,
+fetch the arsta from the appropriate link ID, and update the
+statistics in the corresponding arsta.
+
+Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.4.1-00199-QCAHKSWPL_SILICONZ-1
+
+Signed-off-by: Sarika Sharma <quic_sarishar@quicinc.com>
+Reviewed-by: Vasanthakumar Thiagarajan <vasanthakumar.thiagarajan@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250701105927.803342-3-quic_sarishar@quicinc.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Stable-dep-of: 82e2be57d544 ("wifi: ath12k: fix WMI TLV header misalignment")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/dp_mon.c | 22 ++++++++++++++------
+ drivers/net/wireless/ath/ath12k/dp_rx.c | 11 +++++-----
+ drivers/net/wireless/ath/ath12k/peer.h | 26 ++++++++++++++++++++++++
+ 3 files changed, 48 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
+index 91f4e3aff74c3..6a0915a0c7aae 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
++++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
+@@ -3610,7 +3610,6 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 uid)
+ {
+- struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_rx_peer_stats *rx_stats = NULL;
+ struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
+@@ -3628,8 +3627,13 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
+ return;
+ }
+
+- ahsta = ath12k_sta_to_ahsta(peer->sta);
+- arsta = &ahsta->deflink;
++ arsta = ath12k_peer_get_link_sta(ar->ab, peer);
++ if (!arsta) {
++ ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
++ peer->addr, peer->peer_id);
++ return;
++ }
++
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+ rx_stats = arsta->rx_stats;
+@@ -3742,7 +3746,6 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_mon_ring *buf_ring;
+- struct ath12k_sta *ahsta = NULL;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_peer *peer;
+ struct sk_buff_head skb_list;
+@@ -3868,8 +3871,15 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+- ahsta = ath12k_sta_to_ahsta(peer->sta);
+- arsta = &ahsta->deflink;
++ arsta = ath12k_peer_get_link_sta(ar->ab, peer);
++ if (!arsta) {
++ ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
++ peer->addr, peer->peer_id);
++ spin_unlock_bh(&ab->base_lock);
++ rcu_read_unlock();
++ dev_kfree_skb_any(skb);
++ continue;
++ }
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index bd95dc88f9b21..e9137ffeb5ab4 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1418,8 +1418,6 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
+ {
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+- struct ieee80211_sta *sta;
+- struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+@@ -1500,9 +1498,12 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
+ return;
+ }
+
+- sta = peer->sta;
+- ahsta = ath12k_sta_to_ahsta(sta);
+- arsta = &ahsta->deflink;
++ arsta = ath12k_peer_get_link_sta(ab, peer);
++ if (!arsta) {
++ spin_unlock_bh(&ab->base_lock);
++ rcu_read_unlock();
++ return;
++ }
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index f3a5e054d2b55..92c4988df2f16 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -91,5 +91,31 @@ struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash
+ int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+ int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+ int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
++static inline
++struct ath12k_link_sta *ath12k_peer_get_link_sta(struct ath12k_base *ab,
++ struct ath12k_peer *peer)
++{
++ struct ath12k_sta *ahsta;
++ struct ath12k_link_sta *arsta;
++
++ if (!peer->sta)
++ return NULL;
++
++ ahsta = ath12k_sta_to_ahsta(peer->sta);
++ if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
++ if (!(ahsta->links_map & BIT(peer->link_id))) {
++ ath12k_warn(ab, "peer %pM id %d link_id %d can't be found in STA link_map 0x%x\n",
++ peer->addr, peer->peer_id, peer->link_id,
++ ahsta->links_map);
++ return NULL;
++ }
++ arsta = rcu_dereference(ahsta->link[peer->link_id]);
++ if (!arsta)
++ return NULL;
++ } else {
++ arsta = &ahsta->deflink;
++ }
++ return arsta;
++}
+
+ #endif /* _PEER_H_ */
+--
+2.51.0
+
--- /dev/null
+From bed9cd96b93ca9b267f1e18d207f6cac0b05009a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Jul 2025 14:47:04 +0530
+Subject: wifi: ath12k: Add support to enqueue management frame at MLD level
+
+From: Sriram R <quic_srirrama@quicinc.com>
+
+[ Upstream commit 9d2abd4162fca8a1eb46f664268dffad35c8ad20 ]
+
+A multi-link client can use any link for transmissions. It can decide to
+put one link in power save mode for longer periods while listening on the
+other links as per MLD listen interval. Unicast management frames sent to
+that link station might get dropped if that link station is in power save
+mode or inactive. In such cases, the firmware can decide which link
+to use.
+
+Allow the firmware to decide which link a management frame should be
+sent on by filling the hardware link with the maximum value of u32, so
+that the firmware is not tied to a specific link and the management
+frames become link agnostic. For QCN devices, all action
+frames are marked as link agnostic. For WCN devices, if the device is
+configured as an AP, then all frames other than probe response frames,
+authentication frames, association response frames, re-association response
+frames and ADDBA response frames are marked as link agnostic and if the
+device is configured as a station, then all frames other than probe request
+frames, authentication frames, de-authentication frames and ADDBA response
+frames are marked as link agnostic.
+
+Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.4.1-00199-QCAHKSWPL_SILICONZ-1
+Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3
+
+Signed-off-by: Sriram R <quic_srirrama@quicinc.com>
+Co-developed-by: Roopni Devanathan <quic_rdevanat@quicinc.com>
+Signed-off-by: Roopni Devanathan <quic_rdevanat@quicinc.com>
+Reviewed-by: Vasanthakumar Thiagarajan <vasanthakumar.thiagarajan@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250711091704.3704379-1-quic_rdevanat@quicinc.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Stable-dep-of: 82e2be57d544 ("wifi: ath12k: fix WMI TLV header misalignment")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/core.h | 1 +
+ drivers/net/wireless/ath/ath12k/hw.c | 55 +++++++++++++++++++++++++
+ drivers/net/wireless/ath/ath12k/hw.h | 2 +
+ drivers/net/wireless/ath/ath12k/mac.c | 5 ++-
+ drivers/net/wireless/ath/ath12k/peer.c | 2 +-
+ drivers/net/wireless/ath/ath12k/peer.h | 2 +
+ drivers/net/wireless/ath/ath12k/wmi.c | 56 ++++++++++++++++++++++++--
+ drivers/net/wireless/ath/ath12k/wmi.h | 16 +++++++-
+ 8 files changed, 131 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
+index 4bd286296da79..cebdf62ce3db9 100644
+--- a/drivers/net/wireless/ath/ath12k/core.h
++++ b/drivers/net/wireless/ath/ath12k/core.h
+@@ -116,6 +116,7 @@ static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
+ enum ath12k_skb_flags {
+ ATH12K_SKB_HW_80211_ENCAP = BIT(0),
+ ATH12K_SKB_CIPHER_SET = BIT(1),
++ ATH12K_SKB_MLO_STA = BIT(2),
+ };
+
+ struct ath12k_skb_cb {
+diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
+index ec77ad498b33a..6791ae1d64e50 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.c
++++ b/drivers/net/wireless/ath/ath12k/hw.c
+@@ -14,6 +14,7 @@
+ #include "hw.h"
+ #include "mhi.h"
+ #include "dp_rx.h"
++#include "peer.h"
+
+ static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
+ 0x90, 0xd6, 0x02, 0x42,
+@@ -49,6 +50,12 @@ static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
+ return false;
+ }
+
++static bool ath12k_is_frame_link_agnostic_qcn9274(struct ath12k_link_vif *arvif,
++ struct ieee80211_mgmt *mgmt)
++{
++ return ieee80211_is_action(mgmt->frame_control);
++}
++
+ static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+ {
+@@ -74,6 +81,52 @@ static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
+ return false;
+ }
+
++static bool ath12k_is_addba_resp_action_code(struct ieee80211_mgmt *mgmt)
++{
++ if (!ieee80211_is_action(mgmt->frame_control))
++ return false;
++
++ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
++ return false;
++
++ if (mgmt->u.action.u.addba_resp.action_code != WLAN_ACTION_ADDBA_RESP)
++ return false;
++
++ return true;
++}
++
++static bool ath12k_is_frame_link_agnostic_wcn7850(struct ath12k_link_vif *arvif,
++ struct ieee80211_mgmt *mgmt)
++{
++ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
++ struct ath12k_hw *ah = ath12k_ar_to_ah(arvif->ar);
++ struct ath12k_base *ab = arvif->ar->ab;
++ __le16 fc = mgmt->frame_control;
++
++ spin_lock_bh(&ab->base_lock);
++ if (!ath12k_peer_find_by_addr(ab, mgmt->da) &&
++ !ath12k_peer_ml_find(ah, mgmt->da)) {
++ spin_unlock_bh(&ab->base_lock);
++ return false;
++ }
++ spin_unlock_bh(&ab->base_lock);
++
++ if (vif->type == NL80211_IFTYPE_STATION)
++ return arvif->is_up &&
++ (vif->valid_links == vif->active_links) &&
++ !ieee80211_is_probe_req(fc) &&
++ !ieee80211_is_auth(fc) &&
++ !ieee80211_is_deauth(fc) &&
++ !ath12k_is_addba_resp_action_code(mgmt);
++
++ if (vif->type == NL80211_IFTYPE_AP)
++ return !(ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
++ ieee80211_is_assoc_resp(fc) || ieee80211_is_reassoc_resp(fc) ||
++ ath12k_is_addba_resp_action_code(mgmt));
++
++ return false;
++}
++
+ static const struct ath12k_hw_ops qcn9274_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
+@@ -81,6 +134,7 @@ static const struct ath12k_hw_ops qcn9274_ops = {
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
+ .get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
++ .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_qcn9274,
+ };
+
+ static const struct ath12k_hw_ops wcn7850_ops = {
+@@ -90,6 +144,7 @@ static const struct ath12k_hw_ops wcn7850_ops = {
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
+ .get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
++ .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_wcn7850,
+ };
+
+ #define ATH12K_TX_RING_MASK_0 0x1
+diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
+index 0a75bc5abfa24..9c69dd5a22afa 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.h
++++ b/drivers/net/wireless/ath/ath12k/hw.h
+@@ -246,6 +246,8 @@ struct ath12k_hw_ops {
+ int (*rxdma_ring_sel_config)(struct ath12k_base *ab);
+ u8 (*get_ring_selector)(struct sk_buff *skb);
+ bool (*dp_srng_is_tx_comp_ring)(int ring_num);
++ bool (*is_frame_link_agnostic)(struct ath12k_link_vif *arvif,
++ struct ieee80211_mgmt *mgmt);
+ };
+
+ static inline
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 87944e4467050..708dc3dd4347a 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -7685,7 +7685,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
+
+ skb_cb->paddr = paddr;
+
+- ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
++ ret = ath12k_wmi_mgmt_send(arvif, buf_id, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+@@ -7997,6 +7997,9 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+
+ skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
++ if (sta && sta->mlo)
++ skb_cb->flags |= ATH12K_SKB_MLO_STA;
++
+ ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
+index ec7236bbccc0f..eb7aeff014903 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.c
++++ b/drivers/net/wireless/ath/ath12k/peer.c
+@@ -8,7 +8,7 @@
+ #include "peer.h"
+ #include "debug.h"
+
+-static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
++struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+ {
+ struct ath12k_ml_peer *ml_peer;
+
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index 92c4988df2f16..44afc0b7dd53e 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -91,6 +91,8 @@ struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash
+ int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+ int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+ int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
++struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah,
++ const u8 *addr);
+ static inline
+ struct ath12k_link_sta *ath12k_peer_get_link_sta(struct ath12k_base *ab,
+ struct ath12k_peer *peer)
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index eac5d48cade66..d333f40408feb 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -782,20 +782,46 @@ struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
+ return skb;
+ }
+
+-int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
++int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
+ struct sk_buff *frame)
+ {
++ struct ath12k *ar = arvif->ar;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
+- struct wmi_tlv *frame_tlv;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
++ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
++ int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
++ struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
++ struct ath12k_base *ab = ar->ab;
++ struct wmi_tlv *frame_tlv, *tlv;
++ struct ath12k_skb_cb *skb_cb;
++ u32 buf_len, buf_len_aligned;
++ u32 vdev_id = arvif->vdev_id;
++ bool link_agnostic = false;
+ struct sk_buff *skb;
+- u32 buf_len;
+ int ret, len;
++ void *ptr;
+
+ buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
+
+- len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
++ buf_len_aligned = roundup(buf_len, sizeof(u32));
++
++ len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;
++
++ if (ieee80211_vif_is_mld(vif)) {
++ skb_cb = ATH12K_SKB_CB(frame);
++ if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
++ ab->hw_params->hw_ops->is_frame_link_agnostic &&
++ ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
++ len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
++ ath12k_generic_dbg(ATH12K_DBG_MGMT,
++ "Sending Mgmt Frame fc 0x%0x as link agnostic",
++ mgmt->frame_control);
++ link_agnostic = true;
++ }
++ }
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+@@ -818,6 +844,28 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
++ if (!link_agnostic)
++ goto send;
++
++ ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;
++
++ tlv = ptr;
++
++ /* Tx params not used currently */
++ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
++ ptr += cmd_len;
++
++ tlv = ptr;
++ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
++ ptr += TLV_HDR_SIZE;
++
++ ml_params = ptr;
++ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
++ sizeof(*ml_params));
++
++ ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);
++
++send:
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
+index 8627154f1680f..6dbcedcf08175 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.h
++++ b/drivers/net/wireless/ath/ath12k/wmi.h
+@@ -3963,6 +3963,7 @@ struct wmi_scan_chan_list_cmd {
+ } __packed;
+
+ #define WMI_MGMT_SEND_DOWNLD_LEN 64
++#define WMI_MGMT_LINK_AGNOSTIC_ID 0xFFFFFFFF
+
+ #define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+ #define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+@@ -3988,7 +3989,18 @@ struct wmi_mgmt_send_cmd {
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+- /* Followed by struct wmi_mgmt_send_params */
++ /* Followed by struct ath12k_wmi_mlo_mgmt_send_params */
++} __packed;
++
++struct ath12k_wmi_mlo_mgmt_send_params {
++ __le32 tlv_header;
++ __le32 hw_link_id;
++} __packed;
++
++struct ath12k_wmi_mgmt_send_tx_params {
++ __le32 tlv_header;
++ __le32 tx_param_dword0;
++ __le32 tx_param_dword1;
+ } __packed;
+
+ struct wmi_sta_powersave_mode_cmd {
+@@ -6183,7 +6195,7 @@ void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
+ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+ struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
+-int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
++int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
+ struct sk_buff *frame);
+ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+--
+2.51.0
+
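+The new is_frame_link_agnostic op follows the driver's usual pattern of
+optional, chip-specific hw_ops callbacks that are NULL-checked before each
+call. A minimal standalone sketch of that idiom (the xyz_* names are
+hypothetical; only the NULL-check-then-call shape mirrors the driver):
+
+  #include <stdbool.h>
+
+  struct xyz_frame { unsigned int frame_control; };
+
+  struct xyz_ops {
+          /* Optional: chips without the feature leave this NULL */
+          bool (*is_frame_link_agnostic)(const struct xyz_frame *frame);
+  };
+
+  struct xyz_hw { const struct xyz_ops *ops; };
+
+  static bool xyz_frame_is_link_agnostic(const struct xyz_hw *hw,
+                                         const struct xyz_frame *frame)
+  {
+          /* Same shape as the ab->hw_params->hw_ops->is_frame_link_agnostic
+           * check in ath12k_wmi_mgmt_send() above.
+           */
+          return hw->ops->is_frame_link_agnostic &&
+                 hw->ops->is_frame_link_agnostic(frame);
+  }
+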
--- /dev/null
+From 5f18c9528a2228b7f2b52f33cb8f75a4d835e2d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 09:50:25 +0800
+Subject: wifi: ath12k: Fix missing station power save configuration
+
+From: Miaoqing Pan <miaoqing.pan@oss.qualcomm.com>
+
+[ Upstream commit 4b66d18918f8e4d85e51974a9e3ce9abad5c7c3d ]
+
+Commit afbab6e4e88d ("wifi: ath12k: modify ath12k_mac_op_bss_info_changed()
+for MLO") replaced the bss_info_changed() callback with vif_cfg_changed()
+and link_info_changed() to support Multi-Link Operation (MLO). As a result,
+the station power save configuration is no longer correctly applied in
+ath12k_mac_bss_info_changed().
+
+Move the handling of 'BSS_CHANGED_PS' into ath12k_mac_op_vif_cfg_changed()
+to align with the updated callback structure introduced for MLO, ensuring
+proper power-save behavior for station interfaces.
+
+Tested-on: WCN7850 hw2.0 PCI WLAN.IOE_HMT.1.1-00011-QCAHMTSWPL_V1.0_V2.0_SILICONZ-1
+
+Fixes: afbab6e4e88d ("wifi: ath12k: modify ath12k_mac_op_bss_info_changed() for MLO")
+Signed-off-by: Miaoqing Pan <miaoqing.pan@oss.qualcomm.com>
+Reviewed-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250908015025.1301398-1-miaoqing.pan@oss.qualcomm.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/mac.c | 122 ++++++++++++++------------
+ 1 file changed, 67 insertions(+), 55 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index a885dd168a372..87944e4467050 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -3650,12 +3650,68 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
+ return ret;
+ }
+
++static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
++{
++ struct ath12k *ar = arvif->ar;
++ struct ieee80211_vif *vif = arvif->ahvif->vif;
++ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
++ enum wmi_sta_powersave_param param;
++ struct ieee80211_bss_conf *info;
++ enum wmi_sta_ps_mode psmode;
++ int ret;
++ int timeout;
++ bool enable_ps;
++
++ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
++
++ if (vif->type != NL80211_IFTYPE_STATION)
++ return;
++
++ enable_ps = arvif->ahvif->ps;
++ if (enable_ps) {
++ psmode = WMI_STA_PS_MODE_ENABLED;
++ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
++
++ timeout = conf->dynamic_ps_timeout;
++ if (timeout == 0) {
++ info = ath12k_mac_get_link_bss_conf(arvif);
++ if (!info) {
++ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
++ vif->addr, arvif->link_id);
++ return;
++ }
++
++ /* firmware doesn't like 0 */
++ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
++ }
++
++ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
++ timeout);
++ if (ret) {
++ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
++ arvif->vdev_id, ret);
++ return;
++ }
++ } else {
++ psmode = WMI_STA_PS_MODE_DISABLED;
++ }
++
++ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
++ arvif->vdev_id, psmode ? "enable" : "disable");
++
++ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
++ if (ret)
++ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
++ psmode, arvif->vdev_id, ret);
++}
++
+ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed)
+ {
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ unsigned long links = ahvif->links_map;
++ struct ieee80211_vif_cfg *vif_cfg;
+ struct ieee80211_bss_conf *info;
+ struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta;
+@@ -3719,61 +3775,24 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+ }
+ }
+ }
+-}
+-
+-static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
+-{
+- struct ath12k *ar = arvif->ar;
+- struct ieee80211_vif *vif = arvif->ahvif->vif;
+- struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+- enum wmi_sta_powersave_param param;
+- struct ieee80211_bss_conf *info;
+- enum wmi_sta_ps_mode psmode;
+- int ret;
+- int timeout;
+- bool enable_ps;
+
+- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
++ if (changed & BSS_CHANGED_PS) {
++ links = ahvif->links_map;
++ vif_cfg = &vif->cfg;
+
+- if (vif->type != NL80211_IFTYPE_STATION)
+- return;
++ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
++ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
++ if (!arvif || !arvif->ar)
++ continue;
+
+- enable_ps = arvif->ahvif->ps;
+- if (enable_ps) {
+- psmode = WMI_STA_PS_MODE_ENABLED;
+- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
++ ar = arvif->ar;
+
+- timeout = conf->dynamic_ps_timeout;
+- if (timeout == 0) {
+- info = ath12k_mac_get_link_bss_conf(arvif);
+- if (!info) {
+- ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+- vif->addr, arvif->link_id);
+- return;
++ if (ar->ab->hw_params->supports_sta_ps) {
++ ahvif->ps = vif_cfg->ps;
++ ath12k_mac_vif_setup_ps(arvif);
+ }
+-
+- /* firmware doesn't like 0 */
+- timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
+ }
+-
+- ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+- timeout);
+- if (ret) {
+- ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+- arvif->vdev_id, ret);
+- return;
+- }
+- } else {
+- psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+-
+- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+- arvif->vdev_id, psmode ? "enable" : "disable");
+-
+- ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+- if (ret)
+- ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+- psmode, arvif->vdev_id, ret);
+ }
+
+ static bool ath12k_mac_supports_station_tpc(struct ath12k *ar,
+@@ -3795,7 +3814,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ {
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+- struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+@@ -4069,12 +4087,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ }
+
+ ath12k_mac_fils_discovery(arvif, info);
+-
+- if (changed & BSS_CHANGED_PS &&
+- ar->ab->hw_params->supports_sta_ps) {
+- ahvif->ps = vif_cfg->ps;
+- ath12k_mac_vif_setup_ps(arvif);
+- }
+ }
+
+ static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
+--
+2.51.0
+
--- /dev/null
+From 0adc070682d19b450959ad2d081daa3ca1d9924c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 09:51:39 +0800
+Subject: wifi: ath12k: fix WMI TLV header misalignment
+
+From: Miaoqing Pan <miaoqing.pan@oss.qualcomm.com>
+
+[ Upstream commit 82e2be57d544ff9ad4696c85600827b39be8ce9e ]
+
+When buf_len is not 4-byte aligned in ath12k_wmi_mgmt_send(), the
+firmware asserts and triggers a recovery. The following error
+messages are observed:
+
+ath12k_pci 0004:01:00.0: failed to submit WMI_MGMT_TX_SEND_CMDID cmd
+ath12k_pci 0004:01:00.0: failed to send mgmt frame: -108
+ath12k_pci 0004:01:00.0: failed to tx mgmt frame, vdev_id 0 :-108
+ath12k_pci 0004:01:00.0: waiting recovery start...
+
+This issue was observed when running 'iw wlanx set power_save off/on'
+in MLO station mode, which triggers the sending of an SMPS action frame
+with a length of 27 bytes to the AP. To resolve the misalignment, use
+buf_len_aligned instead of buf_len when constructing the WMI TLV header.
+
+Tested-on: WCN7850 hw2.0 PCI WLAN.IOE_HMT.1.1-00011-QCAHMTSWPL_V1.0_V2.0_SILICONZ-1
+
+Fixes: d889913205cf ("wifi: ath12k: driver for Qualcomm Wi-Fi 7 devices")
+Signed-off-by: Miaoqing Pan <miaoqing.pan@oss.qualcomm.com>
+Reviewed-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250908015139.1301437-1-miaoqing.pan@oss.qualcomm.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath12k/wmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index d333f40408feb..d740326079e1d 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -840,7 +840,7 @@ int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+- frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
++ frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+--
+2.51.0
+
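+The key invariant behind this one-liner: the length recorded in a WMI TLV
+header must match the 4-byte-aligned space the payload actually occupies,
+or the firmware's TLV walker lands mid-padding. A standalone sketch of the
+arithmetic (generic helper names, not the driver's):
+
+  #include <stdint.h>
+  #include <string.h>
+
+  #define ROUNDUP4(x) (((x) + 3u) & ~3u)  /* same as roundup(x, sizeof(u32)) */
+
+  /* Copy buf_len payload bytes but declare the aligned length, so the
+   * TLV stream stays 4-byte aligned even for e.g. a 27-byte SMPS frame.
+   */
+  static uint32_t tlv_fill(uint8_t *dst, const uint8_t *src, uint32_t buf_len)
+  {
+          uint32_t buf_len_aligned = ROUNDUP4(buf_len);
+
+          memset(dst, 0, buf_len_aligned);  /* zero the padding bytes */
+          memcpy(dst, src, buf_len);
+          return buf_len_aligned;           /* value for the TLV header */
+  }
+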
--- /dev/null
+From 5c4c9026b01f4d57cf045361636c6b87dd93d2ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:27:40 +0900
+Subject: can: j1939: j1939_local_ecu_get(): undo increment when
+ j1939_local_ecu_get() fails
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 06e02da29f6f1a45fc07bd60c7eaf172dc21e334 ]
+
+j1939_sk_bind() and j1939_sk_release() call j1939_local_ecu_put() only
+when J1939_SOCK_BOUND was already set, but the error handling path of
+j1939_sk_bind() will not set J1939_SOCK_BOUND when j1939_local_ecu_get()
+fails. Therefore, j1939_local_ecu_get() needs to undo the
+priv->ents[sa].nusers++ increment when it returns an error.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/e7f80046-4ff7-4ce2-8ad8-7c3c678a42c9@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/bus.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 4866879016021..e0b966c2517cf 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ if (!ecu)
+ ecu = j1939_ecu_create_locked(priv, name);
+ err = PTR_ERR_OR_ZERO(ecu);
+- if (err)
++ if (err) {
++ if (j1939_address_is_unicast(sa))
++ priv->ents[sa].nusers--;
+ goto done;
++ }
+
+ ecu->nusers++;
+ /* TODO: do we care if ecu->addr != sa? */
+--
+2.51.0
+
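+The shape of the fix is the usual "roll back earlier side effects when a
+later step fails" pattern. A self-contained sketch with hypothetical names
+(malloc() standing in for j1939_ecu_create_locked()):
+
+  #include <errno.h>
+  #include <stdbool.h>
+  #include <stdlib.h>
+
+  struct state {
+          int users[256];
+          void *obj;
+  };
+
+  /* get(): either every side effect sticks, or none does */
+  static int resource_get(struct state *s, int idx, bool counted)
+  {
+          if (counted)
+                  s->users[idx]++;          /* side effect taken early */
+
+          s->obj = malloc(64);
+          if (!s->obj) {
+                  if (counted)
+                          s->users[idx]--;  /* undo before returning the error */
+                  return -ENOMEM;
+          }
+          return 0;
+  }
+
+Without the rollback, the matching put() path (which only runs for fully
+bound sockets) can never bring the counter back to zero.
+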
--- /dev/null
+From 091a00cd014c43c451b317938963452f108d1c34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Aug 2025 19:30:09 +0900
+Subject: can: j1939: j1939_sk_bind(): call j1939_priv_put() immediately when
+ j1939_local_ecu_get() failed
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit f214744c8a27c3c1da6b538c232da22cd027530e ]
+
+Commit 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct
+callback") expects that a call to j1939_priv_put() can be unconditionally
+delayed until j1939_sk_sock_destruct() is called. But a refcount leak will
+happen when j1939_sk_bind() is called again after j1939_local_ecu_get()
+from the previous j1939_sk_bind() call returned an error. We need to call
+j1939_priv_put() before j1939_sk_bind() returns an error.
+
+Fixes: 25fe97cb7620 ("can: j1939: move j1939_priv_put() into sk_destruct callback")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/4f49a1bc-a528-42ad-86c0-187268ab6535@I-love.SAKURA.ne.jp
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/j1939/socket.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index cf9a12d8da6f9..7bf4d4fb96735 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
++ jsk->priv = NULL;
++ synchronize_rcu();
++ j1939_priv_put(priv);
+ goto out_release_sock;
+ }
+
+--
+2.51.0
+
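+A toy model of the leak and the fix, with hypothetical names (a plain
+counter standing in for the kref inside j1939_priv):
+
+  #include <assert.h>
+
+  struct priv { int refcount; };
+
+  static void priv_get(struct priv *p) { p->refcount++; }
+  static void priv_put(struct priv *p) { assert(p->refcount > 0); p->refcount--; }
+
+  /* bind: on failure nothing may stay referenced, because the socket
+   * destructor only drops the reference of a successfully bound socket.
+   */
+  static int bind_once(struct priv *p, int should_fail)
+  {
+          priv_get(p);            /* reference meant for the destructor */
+          if (should_fail) {
+                  priv_put(p);    /* the fix: release it on the spot */
+                  return -1;
+          }
+          return 0;               /* destructor calls priv_put() later */
+  }
+
+Calling bind_once() twice with should_fail set, without the inner
+priv_put(), leaves refcount at 2 with no owner - the reported leak.
+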
--- /dev/null
+From c5a5b75a29b2c181c9e77949ea93db303bde01ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 12:50:02 +0300
+Subject: can: xilinx_can: xcan_write_frame(): fix use-after-free of
+ transmitted SKB
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+[ Upstream commit ef79f00be72bd81d2e1e6f060d83cf7e425deee4 ]
+
+can_put_echo_skb() takes ownership of the SKB and it may be freed
+during or after the call.
+
+However, xilinx_can's xcan_write_frame() keeps using the SKB after the call.
+
+Fix that by only calling can_put_echo_skb() after the code is done
+touching the SKB.
+
+The tx_lock is held for the entire xcan_write_frame() execution and
+also on the can_get_echo_skb() side so the order of operations does not
+matter.
+
+An earlier fix commit 3d3c817c3a40 ("can: xilinx_can: Fix usage of skb
+memory") did not move the can_put_echo_skb() call far enough.
+
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Fixes: 1598efe57b3e ("can: xilinx_can: refactor code in preparation for CAN FD support")
+Link: https://patch.msgid.link/20250822095002.168389-1-anssi.hannula@bitwise.fi
+[mkl: add "commit" in front of sha1 in patch description]
+[mkl: fix indention]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/xilinx_can.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index abe58f1030433..57d1209134f11 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -628,14 +628,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
+
+- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+- else
+- can_put_echo_skb(skb, ndev, 0, 0);
+-
+- priv->tx_head++;
+-
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+@@ -668,6 +660,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ data[1]);
+ }
+ }
++
++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++ else
++ can_put_echo_skb(skb, ndev, 0, 0);
++
++ priv->tx_head++;
+ }
+
+ /**
+--
+2.51.0
+
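+The rule the patch restores: once buffer ownership is handed off, the
+caller must not touch the buffer again. A minimal illustration with
+generic names (not the can_put_echo_skb() API itself):
+
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct buf { char data[8]; };
+
+  /* Takes ownership: the buffer may be freed during or after this call. */
+  static void queue_for_echo(struct buf *b) { free(b); }
+
+  static void write_frame(char *regs, struct buf *b)
+  {
+          /* Finish every read of b->data first ... */
+          memcpy(regs, b->data, sizeof(b->data));
+
+          /* ... and only then hand the buffer off. Doing this before the
+           * memcpy() is exactly the use-after-free being fixed.
+           */
+          queue_for_echo(b);
+  }
+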
--- /dev/null
+From f942bc22903822dc51381a83d5c3162f79e7869f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Apr 2025 10:00:04 -0700
+Subject: Disable SLUB_TINY for build testing
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 6f110a5e4f9977c31ce76fefbfef6fd4eab6bfb7 ]
+
+... and don't error out so hard on missing module descriptions.
+
+Before commit 6c6c1fc09de3 ("modpost: require a MODULE_DESCRIPTION()")
+we used to warn about missing module descriptions, but only when
+building with extra warnings (i.e. 'W=1').
+
+After that commit the warning became an unconditional hard error.
+
+And it turns out not all modules have been converted despite the claims
+to the contrary. As reported by Damian Tometzki, the slub KUnit test
+didn't have a module description, and apparently nobody ever really
+noticed.
+
+The reason nobody noticed seems to be that the slub KUnit tests get
+disabled by SLUB_TINY, which also ends up disabling a lot of other code,
+both in tests and in slub itself. And so anybody doing full build tests
+didn't actually see this failure.
+
+So let's disable SLUB_TINY for build-only tests, since it clearly ends
+up limiting build coverage. Also turn the missing module descriptions
+error back into a warning, but let's keep it around for non-'W=1'
+builds.
+
+Reported-by: Damian Tometzki <damian@riscv-rocks.de>
+Link: https://lore.kernel.org/all/01070196099fd059-e8463438-7b1b-4ec8-816d-173874be9966-000000@eu-central-1.amazonses.com/
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Fixes: 6c6c1fc09de3 ("modpost: require a MODULE_DESCRIPTION()")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/Kconfig b/mm/Kconfig
+index c11cd01169e8d..046c32686fc4d 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -280,7 +280,7 @@ config SLAB
+
+ config SLUB_TINY
+ bool "Configure SLUB for minimal memory footprint"
+- depends on SLUB && EXPERT
++ depends on SLUB && EXPERT && !COMPILE_TEST
+ select SLAB_MERGE_DEFAULT
+ help
+ Configures the SLUB allocator in a way to achieve minimal memory
+--
+2.51.0
+
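+For reference, the modpost check in question fires when a module lacks a
+MODULE_DESCRIPTION() tag. A minimal module that satisfies it (standard
+boilerplate, not code from this patch):
+
+  #include <linux/module.h>
+
+  static int __init demo_init(void) { return 0; }
+  static void __exit demo_exit(void) { }
+
+  module_init(demo_init);
+  module_exit(demo_exit);
+
+  MODULE_LICENSE("GPL");
+  MODULE_DESCRIPTION("Example of the tag modpost now warns about");
+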
--- /dev/null
+From b11b5a58db0788a18d405f10c5a407646c05f0a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 13:43:39 +0300
+Subject: dmaengine: idxd: Fix double free in idxd_setup_wqs()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 39aaa337449e71a41d4813be0226a722827ba606 ]
+
+The cleanup in idxd_setup_wqs() has had a couple of bugs because the error
+handling is a bit subtle. It's simpler to just rewrite it in a cleaner
+way. The issues here are:
+
+1) If "idxd->max_wqs" is <= 0 then we call put_device(conf_dev) when
+ "conf_dev" hasn't been initialized.
+2) If kzalloc_node() fails then again "conf_dev" is invalid. It's
+ either uninitialized or it points to the "conf_dev" from the
+ previous iteration so it leads to a double free.
+
+It's better to free partial loop iterations within the loop and then
+the unwinding at the end can handle whole loop iterations. I also
+renamed the labels to describe what the goto does and not where the goto
+was located.
+
+Fixes: 3fd2f4bc010c ("dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs")
+Reported-by: Colin Ian King <colin.i.king@gmail.com>
+Closes: https://lore.kernel.org/all/20250811095836.1642093-1-colin.i.king@gmail.com/
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/aJnJW3iYTDDCj9sk@stanley.mountain
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 33 +++++++++++++++++----------------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 163a276d2670e..4b999c5802f4b 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -179,27 +179,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ rc = -ENOMEM;
+- goto err_bitmap;
++ goto err_free_wqs;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
+ wq->id = i;
+ wq->idxd = idxd;
+- device_initialize(wq_confdev(wq));
++ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+- if (rc < 0)
+- goto err;
++ if (rc < 0) {
++ put_device(conf_dev);
++ kfree(wq);
++ goto err_unwind;
++ }
+
+ mutex_init(&wq->wq_lock);
+ init_waitqueue_head(&wq->err_queue);
+@@ -210,15 +213,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err;
++ goto err_unwind;
+ }
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
++ kfree(wq->wqcfg);
++ put_device(conf_dev);
++ kfree(wq);
+ rc = -ENOMEM;
+- goto err_opcap_bmap;
++ goto err_unwind;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
+@@ -229,13 +237,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+
+ return 0;
+
+-err_opcap_bmap:
+- kfree(wq->wqcfg);
+-
+-err:
+- put_device(conf_dev);
+- kfree(wq);
+-
++err_unwind:
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+@@ -244,11 +246,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+-
+ }
+ bitmap_free(idxd->wq_enable_map);
+
+-err_bitmap:
++err_free_wqs:
+ kfree(idxd->wqs);
+
+ return rc;
+--
+2.51.0
+
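+The restructuring follows a standard allocation-loop idiom: whatever fails
+mid-iteration is freed inside the loop, so the unwind label only ever sees
+fully constructed elements. A generic sketch (two allocations per item,
+like the wq plus its wqcfg above):
+
+  #include <errno.h>
+  #include <stdlib.h>
+
+  struct item { void *cfg; };
+
+  static int setup_items(struct item **items, int n)
+  {
+          int i;
+
+          for (i = 0; i < n; i++) {
+                  struct item *it = malloc(sizeof(*it));
+                  if (!it)
+                          goto err_unwind;     /* nothing partial to free */
+
+                  it->cfg = malloc(16);
+                  if (!it->cfg) {
+                          free(it);            /* partial item: free here */
+                          goto err_unwind;
+                  }
+                  items[i] = it;
+          }
+          return 0;
+
+  err_unwind:
+          while (--i >= 0) {                   /* whole iterations only */
+                  free(items[i]->cfg);
+                  free(items[i]);
+          }
+          return -ENOMEM;
+  }
+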
--- /dev/null
+From fe6938ccfb1519160d96a236ac230c79c13d1c0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:13 +0800
+Subject: dmaengine: idxd: Fix refcount underflow on module unload
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit b7cb9a034305d52222433fad10c3de10204f29e7 ]
+
+A recent refactor introduced a misplaced put_device() call, resulting in a
+reference count underflow during module unload.
+
+There is no need to add additional put_device() calls for idxd groups,
+engines, or workqueues, although the commit claims: "Note, this also
+fixes the missing put_device() for idxd groups, engines, and wqs."
+
+No such omission actually existed; the required cleanup is
+already handled by the call chain:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+Extend idxd_cleanup() to handle the remaining necessary cleanup and
+remove idxd_cleanup_internals(), which duplicates deallocation logic
+for idxd, engines, groups, and workqueues. Memory management is also
+properly handled through the Linux device model.
+
+Fixes: a409e919ca32 ("dmaengine: idxd: Refactor remove call with idxd_cleanup() helper")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-3-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 3a78973882211..163a276d2670e 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -904,7 +904,10 @@ static void idxd_remove(struct pci_dev *pdev)
+ device_unregister(idxd_confdev(idxd));
+ idxd_shutdown(pdev);
+ idxd_device_remove_debugfs(idxd);
+- idxd_cleanup(idxd);
++ perfmon_pmu_remove(idxd);
++ idxd_cleanup_interrupts(idxd);
++ if (device_pasid_enabled(idxd))
++ idxd_disable_system_pasid(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+ pci_disable_device(pdev);
+--
+2.51.0
+
--- /dev/null
+From 180d8a49223d067c969f035a20e1121b26cfe631 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 23:03:12 +0800
+Subject: dmaengine: idxd: Remove improper idxd_free
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit f41c538881eec4dcf5961a242097d447f848cda6 ]
+
+The call to idxd_free() introduces a duplicate put_device() leading to a
+reference count underflow:
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 15 PID: 4428 at lib/refcount.c:28 refcount_warn_saturate+0xbe/0x110
+...
+Call Trace:
+ <TASK>
+ idxd_remove+0xe4/0x120 [idxd]
+ pci_device_remove+0x3f/0xb0
+ device_release_driver_internal+0x197/0x200
+ driver_detach+0x48/0x90
+ bus_remove_driver+0x74/0xf0
+ pci_unregister_driver+0x2e/0xb0
+ idxd_exit_module+0x34/0x7a0 [idxd]
+ __do_sys_delete_module.constprop.0+0x183/0x280
+ do_syscall_64+0x54/0xd70
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The idxd_unregister_devices() which is invoked at the very beginning of
+idxd_remove(), already takes care of the necessary put_device() through the
+following call path:
+idxd_unregister_devices() -> device_unregister() -> put_device()
+
+In addition, when CONFIG_DEBUG_KOBJECT_RELEASE is enabled, put_device() may
+trigger asynchronous cleanup via schedule_delayed_work(). If idxd_free() is
+called immediately after, it can result in a use-after-free.
+
+Remove the improper idxd_free() to avoid both the refcount underflow and
+potential memory corruption during module unload.
+
+Fixes: d5449ff1b04d ("dmaengine: idxd: Add missing idxd cleanup to fix memory leak in remove call")
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+Link: https://lore.kernel.org/r/20250729150313.1934101-2-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 92e86ae9db29d..3a78973882211 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -907,7 +907,6 @@ static void idxd_remove(struct pci_dev *pdev)
+ idxd_cleanup(idxd);
+ pci_iounmap(pdev, idxd->reg_base);
+ put_device(idxd_confdev(idxd));
+- idxd_free(idxd);
+ pci_disable_device(pdev);
+ }
+
+--
+2.51.0
+
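+Both idxd fixes in this series enforce the same reference-counting
+contract: every get is balanced by exactly one put, and
+device_unregister() already performs one. A toy model of the underflow
+(hypothetical names, plain counter instead of a kobject refcount):
+
+  #include <assert.h>
+
+  struct dev { int refs; };
+
+  static void put_dev(struct dev *d)
+  {
+          assert(d->refs > 0);   /* refcount_warn_saturate() in the kernel */
+          d->refs--;
+  }
+
+  static void remove_path(struct dev *d)
+  {
+          put_dev(d);            /* device_unregister() -> put_device() */
+          /* put_dev(d); */      /* the removed duplicate would trip here */
+  }
+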
--- /dev/null
+From 46f87b944ac2c4af4f5eb11c407b2d96bad09485 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 11:49:53 +0200
+Subject: dmaengine: ti: edma: Fix memory allocation size for
+ queue_priority_map
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+[ Upstream commit e63419dbf2ceb083c1651852209c7f048089ac0f ]
+
+Fix a critical memory allocation bug in edma_setup_from_hw() where
+queue_priority_map was allocated with insufficient memory. The code
+declared queue_priority_map as s8 (*)[2] (pointer to array of 2 s8),
+but allocated memory using sizeof(s8) instead of the correct size.
+
+This caused out-of-bounds memory writes when accessing:
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+
+The bug manifested as kernel crashes with "Oops - undefined instruction"
+on ARM platforms (BeagleBoard-X15) during EDMA driver probe, as the
+memory corruption triggered kernel hardening features on Clang.
+
+Change the allocation to use sizeof(*queue_priority_map) which
+automatically gets the correct size for the 2D array structure.
+
+Fixes: 2b6b3b742019 ("ARM/dmaengine: edma: Merge the two drivers under drivers/dma/")
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://lore.kernel.org/r/20250830094953.3038012-1-anders.roxell@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/edma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index c0fa541324675..f7ddf588b7f9b 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2063,8 +2063,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
+ */
+- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+- GFP_KERNEL);
++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++ sizeof(*queue_priority_map), GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+--
+2.51.0
+
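+The idiom the fix adopts deserves a note: sizing an allocation by the
+dereferenced pointer type cannot drift out of sync with the declaration.
+A standalone comparison (plain calloc() instead of devm_kcalloc()):
+
+  #include <stdlib.h>
+
+  void demo(int num_tc)
+  {
+          signed char (*map)[2];   /* s8 (*)[2]: rows of two bytes */
+
+          /* Buggy shape: one byte per row instead of two */
+          map = calloc(num_tc + 1, sizeof(signed char));
+          free(map);
+
+          /* Fixed shape: sizeof(*map) == 2, matching the row type */
+          map = calloc(num_tc + 1, sizeof(*map));
+          free(map);
+  }
+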
--- /dev/null
+From 43e05a200f051bc14b1bf578df88937ed058586f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 20:17:09 -0700
+Subject: docs: networking: can: change bcm_msg_head frames member to support
+ flexible array
+
+From: Alex Tran <alex.t.tran@gmail.com>
+
+[ Upstream commit 641427d5bf90af0625081bf27555418b101274cd ]
+
+The documentation of the 'bcm_msg_head' struct does not match how
+it is defined in 'bcm.h'. Change the frames member to a flexible array,
+matching the definition in the header file.
+
+See commit 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with
+flexible-array members")
+
+Signed-off-by: Alex Tran <alex.t.tran@gmail.com>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250904031709.1426895-1-alex.t.tran@gmail.com
+Fixes: 94dfc73e7cf4 ("treewide: uapi: Replace zero-length arrays with flexible-array members")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217783
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/can.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index d7e1ada905b2d..3bdd155838105 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -740,7 +740,7 @@ The broadcast manager sends responses to user space in the same form:
+ struct timeval ival1, ival2; /* count and subsequent interval */
+ canid_t can_id; /* unique can_id for task */
+ __u32 nframes; /* number of can_frames following */
+- struct can_frame frames[0];
++ struct can_frame frames[];
+ };
+
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+--
+2.51.0
+
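+As background, a flexible array member is sized at allocation time rather
+than in the type, which is why the documentation now shows frames[]
+instead of frames[0]. A simplified userspace analogue of the layout (field
+types reduced; not the real uapi struct):
+
+  #include <stdlib.h>
+
+  struct frame { unsigned char data[16]; };
+
+  struct msg_head {
+          unsigned int nframes;    /* number of frames following */
+          struct frame frames[];   /* flexible array member */
+  };
+
+  static struct msg_head *msg_alloc(unsigned int nframes)
+  {
+          /* kernel code would use struct_size(head, frames, nframes) */
+          struct msg_head *head =
+                  malloc(sizeof(*head) + nframes * sizeof(struct frame));
+          if (head)
+                  head->nframes = nframes;
+          return head;
+  }
+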
--- /dev/null
+From 846d96d7807f77f0c18ab7f06c964788858ac2fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 15:04:41 +0800
+Subject: hrtimer: Remove unused function
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit 82ccdf062a64f3c4ac575c16179ce68edbbbe8e4 ]
+
+The function is defined, but not called anywhere:
+
+ kernel/time/hrtimer.c:1880:20: warning: unused function '__hrtimer_peek_ahead_timers'.
+
+Remove it.
+
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240322070441.29646-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8611
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 20 +-------------------
+ 1 file changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 6d9da768604d6..e9833a30a86a4 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1938,25 +1938,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ tick_program_event(expires_next, 1);
+ pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
+ }
+-
+-/* called with interrupts disabled */
+-static inline void __hrtimer_peek_ahead_timers(void)
+-{
+- struct tick_device *td;
+-
+- if (!hrtimer_hres_active())
+- return;
+-
+- td = this_cpu_ptr(&tick_cpu_device);
+- if (td && td->evtdev)
+- hrtimer_interrupt(td->evtdev);
+-}
+-
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif /* !CONFIG_HIGH_RES_TIMERS */
++#endif /* !CONFIG_HIGH_RES_TIMERS */
+
+ /*
+ * Called from run_local_timers in hardirq context every jiffy
+--
+2.51.0
+
--- /dev/null
+From fb263b28a3b2464d443f71ee611a4ac26caf0840 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2024 10:30:00 +0800
+Subject: hrtimer: Rename __hrtimer_hres_active() to hrtimer_hres_active()
+
+From: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+
+[ Upstream commit b7c8e1f8a7b4352c1d0b4310686385e3cf6c104a ]
+
+The function hrtimer_hres_active() is defined in the hrtimer.c file, but
+not called elsewhere, so rename __hrtimer_hres_active() to
+hrtimer_hres_active() and remove the old hrtimer_hres_active() function.
+
+kernel/time/hrtimer.c:653:19: warning: unused function 'hrtimer_hres_active'.
+
+Fixes: 82ccdf062a64 ("hrtimer: Remove unused function")
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/r/20240418023000.130324-1-jiapeng.chong@linux.alibaba.com
+Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8778
+Stable-dep-of: e895f8e29119 ("hrtimers: Unconditionally update target CPU base after offline timer migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 21 ++++++++-------------
+ 1 file changed, 8 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index e9833a30a86a4..0c3ad1755cc26 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -671,17 +671,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ /*
+ * Is the high resolution mode active ?
+ */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+ {
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ cpu_base->hres_active : 0;
+ }
+
+-static inline int hrtimer_hres_active(void)
+-{
+- return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *next_timer,
+ ktime_t expires_next)
+@@ -705,7 +700,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+ * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+- if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
++ if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ tick_program_event(expires_next, 1);
+@@ -814,12 +809,12 @@ static void retrigger_next_event(void *arg)
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
+ */
+- if (!__hrtimer_hres_active(base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(base) && !tick_nohz_active)
+ return;
+
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+- if (__hrtimer_hres_active(base))
++ if (hrtimer_hres_active(base))
+ hrtimer_force_reprogram(base, 0);
+ else
+ hrtimer_update_next_event(base);
+@@ -976,7 +971,7 @@ void clock_was_set(unsigned int bases)
+ cpumask_var_t mask;
+ int cpu;
+
+- if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
++ if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+ goto out_timerfd;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1554,7 +1549,7 @@ u64 hrtimer_get_next_event(void)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (!__hrtimer_hres_active(cpu_base))
++ if (!hrtimer_hres_active(cpu_base))
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+@@ -1577,7 +1572,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+- if (__hrtimer_hres_active(cpu_base)) {
++ if (hrtimer_hres_active(cpu_base)) {
+ unsigned int active;
+
+ if (!cpu_base->softirq_activated) {
+@@ -1949,7 +1944,7 @@ void hrtimer_run_queues(void)
+ unsigned long flags;
+ ktime_t now;
+
+- if (__hrtimer_hres_active(cpu_base))
++ if (hrtimer_hres_active(cpu_base))
+ return;
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 89ccf49f5ebb893240a49e3e868d4d50a08baf6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 16:10:25 +0800
+Subject: hrtimers: Unconditionally update target CPU base after offline timer
+ migration
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit e895f8e29119c8c966ea794af9e9100b10becb88 ]
+
+When testing softirq based hrtimers on an ARM32 board, with high resolution
+mode and NOHZ inactive, softirq based hrtimers fail to expire after being
+moved away from an offline CPU:
+
+CPU0                                    CPU1
+                                        hrtimer_start(..., HRTIMER_MODE_SOFT);
+cpu_down(CPU1)                          ...
+                                        hrtimers_cpu_dying()
+                                          // Migrate timers to CPU0
+                                          smp_call_function_single(CPU0, retrigger_next_event);
+retrigger_next_event()
+  if (!highres && !nohz)
+    return;
+
+As retrigger_next_event() is a NOOP when both high resolution timers and
+NOHZ are inactive, CPU0's hrtimer_cpu_base::softirq_expires_next is not
+updated and the migrated softirq timers never expire unless there is a
+softirq based hrtimer queued on CPU0 later.
+
+Fix this by removing the hrtimer_hres_active() and tick_nohz_active() check
+in retrigger_next_event(), which enforces a full update of the CPU base.
+As this is not a fast path the extra cost does not matter.
+
+[ tglx: Massaged change log ]
+
+Fixes: 5c0930ccaad5 ("hrtimers: Push pending hrtimers away from outgoing CPU earlier")
+Co-developed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20250805081025.54235-1-wangxiongfeng2@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 0c3ad1755cc26..ccea52adcba67 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -808,10 +808,10 @@ static void retrigger_next_event(void *arg)
+ * of the next expiring timer is enough. The return from the SMP
+ * function call will take care of the reprogramming in case the
+ * CPU was in a NOHZ idle sleep.
++ *
++ * In periodic low resolution mode, the next softirq expiration
++ * must also be updated.
+ */
+- if (!hrtimer_hres_active(base) && !tick_nohz_active)
+- return;
+-
+ raw_spin_lock(&base->lock);
+ hrtimer_update_base(base);
+ if (hrtimer_hres_active(base))
+@@ -2289,11 +2289,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ &new_base->clock_base[i]);
+ }
+
+- /*
+- * The migration might have changed the first expiring softirq
+- * timer on this CPU. Update it.
+- */
+- __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+ /* Tell the other CPU to retrigger the next event */
+ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+--
+2.51.0
+
--- /dev/null
+From effe6a58ea109fca1b519cfb49cb30082795d94f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:32 +0000
+Subject: hsr: use hsr_for_each_port_rtnl in hsr_port_get_hsr
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 393c841fe4333cdd856d0ca37b066d72746cfaa6 ]
+
+hsr_port_get_hsr() iterates over ports using hsr_for_each_port(),
+but many of its callers do not hold the required RCU lock.
+
+Switch to hsr_for_each_port_rtnl(), since most callers already hold
+the rtnl lock. After review, all callers are covered by either the rtnl
+lock or the RCU lock, except hsr_dev_xmit(). Fix this by adding an
+RCU read lock there.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 3 +++
+ net/hsr/hsr_main.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index a6fc3d7b02224..5514b5bedc929 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -228,6 +228,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
++ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+@@ -240,6 +241,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_core_stats_tx_dropped_inc(dev);
+ dev_kfree_skb_any(skb);
+ }
++ rcu_read_unlock();
++
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index c325ddad539a7..76a1958609e29 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+--
+2.51.0
+
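+The hsr_dev_xmit() half of the fix is the canonical RCU reader pattern:
+the lookup and every use of its result sit inside one read-side critical
+section. Schematically (real RCU primitives, body simplified; use() is a
+stand-in for the transmit path):
+
+  rcu_read_lock();
+  master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);  /* RCU-protected lookup */
+  if (master)
+          use(master->dev);    /* must stay inside the read section */
+  rcu_read_unlock();           /* after this, master may be freed */
+
+Dropping the lock between the lookup and the use would reintroduce the
+race this patch closes.
+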
--- /dev/null
+From 28eb6ea010bea12675cff472e1a44ceb275ea1a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 09:15:31 +0000
+Subject: hsr: use rtnl lock when iterating over ports
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 8884c693991333ae065830554b9b0c96590b1bb2 ]
+
+hsr_for_each_port is called in many places without holding the RCU read
+lock, which may trigger warnings on debug kernels. Most of the callers
+actually hold the rtnl lock. So add a new helper hsr_for_each_port_rtnl
+to allow callers in suitable contexts to iterate ports safely without
+explicit RCU locking.
+
+This patch only fixes the callers that hold the rtnl lock. The remaining
+callers will be fixed in later patches.
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 18 +++++++++---------
+ net/hsr/hsr_main.c | 2 +-
+ net/hsr/hsr_main.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 69f6c704352de..a6fc3d7b02224 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+
+ ASSERT_RTNL();
+
+- hsr_for_each_port(master->hsr, port) {
++ hsr_for_each_port_rtnl(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+@@ -109,7 +109,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+@@ -144,7 +144,7 @@ static int hsr_dev_open(struct net_device *dev)
+ hsr = netdev_priv(dev);
+ designation = '\0';
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -174,7 +174,7 @@ static int hsr_dev_close(struct net_device *dev)
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -207,7 +207,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+@@ -425,7 +425,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -447,7 +447,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+@@ -475,7 +475,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+@@ -521,7 +521,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+
+ hsr = netdev_priv(dev);
+
+- hsr_for_each_port(hsr, port) {
++ hsr_for_each_port_rtnl(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 257b50124cee5..c325ddad539a7 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ struct hsr_port *port;
+
+- hsr_for_each_port(hsr, port)
++ hsr_for_each_port_rtnl(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 18e01791ad799..2fcabe39e61f4 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -221,6 +221,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
++#define hsr_for_each_port_rtnl(hsr, port) \
++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+ /* Caller must ensure skb is a valid HSR frame */
+--
+2.51.0
+
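+The helper relies on the optional fourth argument of
+list_for_each_entry_rcu(), a lockdep expression declaring what protects
+the traversal when it is not rcu_read_lock(). The two variants from
+hsr_main.h, annotated:
+
+  /* caller must be in an RCU read-side critical section */
+  #define hsr_for_each_port(hsr, port) \
+          list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
+  /* caller holds rtnl; lockdep_rtnl_is_held() silences the
+   * "suspicious RCU usage" splat plain iteration would raise
+   */
+  #define hsr_for_each_port_rtnl(hsr, port) \
+          list_for_each_entry_rcu((port), &(hsr)->ports, port_list, \
+                                  lockdep_rtnl_is_held())
+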
--- /dev/null
+From 54805c638b0998deeff862d735fb77f7baed1f8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:39:03 +0200
+Subject: i40e: fix IRQ freeing in i40e_vsi_request_irq_msix error path
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 915470e1b44e71d1dd07ee067276f003c3521ee3 ]
+
+If request_irq() in i40e_vsi_request_irq_msix() fails in an iteration
+later than the first, the error path wants to free the IRQs requested
+so far. However, it uses the wrong dev_id argument for free_irq(), so
+it does not free the IRQs correctly and instead triggers the warning:
+
+ Trying to free already-free IRQ 173
+ WARNING: CPU: 25 PID: 1091 at kernel/irq/manage.c:1829 __free_irq+0x192/0x2c0
+ Modules linked in: i40e(+) [...]
+ CPU: 25 UID: 0 PID: 1091 Comm: NetworkManager Not tainted 6.17.0-rc1+ #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:__free_irq+0x192/0x2c0
+ [...]
+ Call Trace:
+ <TASK>
+ free_irq+0x32/0x70
+ i40e_vsi_request_irq_msix.cold+0x63/0x8b [i40e]
+ i40e_vsi_request_irq+0x79/0x80 [i40e]
+ i40e_vsi_open+0x21f/0x2f0 [i40e]
+ i40e_open+0x63/0x130 [i40e]
+ __dev_open+0xfc/0x210
+ __dev_change_flags+0x1fc/0x240
+ netif_change_flags+0x27/0x70
+ do_setlink.isra.0+0x341/0xc70
+ rtnl_newlink+0x468/0x860
+ rtnetlink_rcv_msg+0x375/0x450
+ netlink_rcv_skb+0x5c/0x110
+ netlink_unicast+0x288/0x3c0
+ netlink_sendmsg+0x20d/0x430
+ ____sys_sendmsg+0x3a2/0x3d0
+ ___sys_sendmsg+0x99/0xe0
+ __sys_sendmsg+0x8a/0xf0
+ do_syscall_64+0x82/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Use the same dev_id for free_irq() as for request_irq().
+
+I tested this by inserting code that fails intentionally.
+
+Fixes: 493fb30011b3 ("i40e: Move q_vectors from pointer to array to array of pointers")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b749aa3e783ff..72869336e3a9a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4210,7 +4210,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+- free_irq(irq_num, &vsi->q_vectors[vector]);
++ free_irq(irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+ }
+--
+2.51.0
+
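+free_irq() identifies which handler to remove on a shared line by the
+dev_id cookie, so it must be the exact pointer that was passed to
+request_irq(). The bug class, schematically (kernel APIs, surrounding
+code simplified):
+
+  err = request_irq(irq, handler, 0, name, q_vectors[i]); /* cookie: q_vectors[i] */
+  ...
+  free_irq(irq, &q_vectors[i]);  /* WRONG: address of the array slot */
+  free_irq(irq, q_vectors[i]);   /* correct: the same cookie value */
+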
--- /dev/null
+From 628093a552aa8deb3bd2ea0ccd506d4eb4c7e5a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 15:26:31 +0900
+Subject: igb: fix link test skipping when interface is admin down
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit d709f178abca22a4d3642513df29afe4323a594b ]
+
+The igb driver incorrectly skips the link test when the network
+interface is admin down (if_running == false), causing the test to
+always report PASS regardless of the actual physical link state.
+
+This behavior is inconsistent with other drivers (e.g. i40e, ice, ixgbe,
+etc.) which correctly test the physical link state regardless of admin
+state.
+Remove the if_running check to ensure link test always reflects the
+physical link state.
+
+Fixes: 8d420a1b3ea6 ("igb: correct link test not being run when link is down")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 92b2be06a6e93..f11cba65e5d85 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+- /* PHY is powered down when interface is down */
+- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++ if (igb_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+- else
+- data[TEST_LINK] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[TEST_REG] = 0;
+--
+2.51.0
+
--- /dev/null
+From d5a9ddadcee3dc102079effe7eaa7a2542b8b462 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 13:12:33 +0200
+Subject: net: bridge: Bounce invalid boolopts
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit 8625f5748fea960d2af4f3c3e9891ee8f6f80906 ]
+
+The bridge driver currently tolerates options that it does not recognize.
+Instead, it should bounce them.
+
+Fixes: a428afe82f98 ("net: bridge: add support for user-controlled bool options")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/e6fdca3b5a8d54183fbda075daffef38bdd7ddce.1757070067.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index a6e94ceb7c9a0..a45db67197226 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -312,6 +312,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
+ int err = 0;
+ int opt_id;
+
++ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
++ if (opt_id != BITS_PER_LONG) {
++ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
++ opt_id);
++ return -EINVAL;
++ }
++
+ for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+ bool on = !!(bm->optval & BIT(opt_id));
+
+--
+2.51.0
+
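+The validation scans for any bit set at or beyond the highest option this
+kernel knows about. A standalone equivalent of the same check without the
+kernel bitmap helpers (OPT_MAX standing in for BR_BOOLOPT_MAX):
+
+  #include <errno.h>
+
+  #define OPT_MAX 3
+
+  static int validate_optmask(unsigned long mask)
+  {
+          /* A bit >= OPT_MAX means userspace sent an option this kernel
+           * does not understand: reject it instead of ignoring it.
+           */
+          if (mask & ~((1UL << OPT_MAX) - 1))
+                  return -EINVAL;
+          return 0;
+  }
+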
--- /dev/null
+From 11fd579c49f798859a74cb0fdd2d5cce7dd95320 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 11:13:34 +0200
+Subject: net: fec: Fix possible NPD in fec_enet_phy_reset_after_clk_enable()
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 03e79de4608bdd48ad6eec272e196124cefaf798 ]
+
+The function of_phy_find_device may return NULL, so we need to take
+care before dereferencing phy_dev.
+
+Fixes: 64a632da538a ("net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250904091334.53965-1-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 2a8b5429df595..8352d9b6469f2 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2300,7 +2300,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+- put_device(&phy_dev->mdio.dev);
++ if (phy_dev)
++ put_device(&phy_dev->mdio.dev);
+ }
+ }
+
+--
+2.51.0
+
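The guard is needed only around put_device(): phy_reset_after_clk_enable()
itself returns early on a NULL phydev, but &phy_dev->mdio.dev offsets a
NULL pointer and put_device() would then dereference it. The resulting
get/use/put pattern, sketched:

	phy_dev = of_phy_find_device(fep->phy_node);	/* may be NULL */
	phy_reset_after_clk_enable(phy_dev);		/* NULL-tolerant callee */
	if (phy_dev)					/* guard the ref drop */
		put_device(&phy_dev->mdio.dev);
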
--- /dev/null
+From f31678fd9b2dd816a37db33ecc9cac4d5ecdf299 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 11:07:53 +0530
+Subject: net: hsr: Add support for MC filtering at the slave device
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 36b20fcdd9663ced36d3aef96f0eff8eb79de4b8 ]
+
+When the MC (multicast) list is updated by the networking layer due to
+a user command, as well as when the allmulti flag is set, the change
+needs to be passed to the enslaved Ethernet devices. This patch allows
+this to happen by implementing the ndo_change_rx_flags() and
+ndo_set_rx_mode() API calls, which in turn pass the changes on to the
+slave devices using existing API calls.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: Ravi Gunasekaran <r-gunasekaran@ti.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 67 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 66 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 906c38b9d66ff..cff37637c96d3 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -170,7 +170,24 @@ static int hsr_dev_open(struct net_device *dev)
+
+ static int hsr_dev_close(struct net_device *dev)
+ {
+- /* Nothing to do here. */
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_uc_unsync(port->dev, dev);
++ dev_mc_unsync(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -401,12 +418,60 @@ void hsr_del_ports(struct hsr_priv *hsr)
+ hsr_del_port(port);
+ }
+
++static void hsr_set_rx_mode(struct net_device *dev)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ dev_mc_sync_multiple(port->dev, dev);
++ dev_uc_sync_multiple(port->dev, dev);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++static void hsr_change_rx_flags(struct net_device *dev, int change)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER)
++ continue;
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(port->dev,
++ dev->flags &
++ IFF_ALLMULTI ? 1 : -1);
++ break;
++ default:
++ break;
++ }
++ }
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
++ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
++ .ndo_set_rx_mode = hsr_set_rx_mode,
+ };
+
+ static struct device_type hsr_type = {
+--
+2.51.0
+
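Two conventions in the hunks above are worth noting. dev_set_allmulti()
maintains a per-device counter rather than a plain flag, which is why
the .ndo_change_rx_flags handler translates the IFF_ALLMULTI edge into
+1 or -1. Address sync is likewise a paired contract:
dev_{mc,uc}_sync_multiple() in .ndo_set_rx_mode must be matched by
dev_{mc,uc}_unsync() in .ndo_stop. A condensed sketch of the pairing,
with the hsr_for_each_port() loop and port-type filtering elided and
slave_dev passed in explicitly for illustration:

	static void master_set_rx_mode(struct net_device *master,
				       struct net_device *slave_dev)
	{
		/* mirror the master's address lists onto the slave */
		dev_mc_sync_multiple(slave_dev, master);
		dev_uc_sync_multiple(slave_dev, master);
	}

	static int master_stop(struct net_device *master,
			       struct net_device *slave_dev)
	{
		/* undo the mirroring when the master is closed */
		dev_uc_unsync(slave_dev, master);
		dev_mc_unsync(slave_dev, master);
		return 0;
	}
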
--- /dev/null
+From cf97dfc16b8f34a0c127edf1686c0891f84df35f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2024 14:47:08 +0530
+Subject: net: hsr: Add VLAN CTAG filter support
+
+From: Murali Karicheri <m-karicheri2@ti.com>
+
+[ Upstream commit 1a8a63a5305e95519de6f941922dfcd8179f82e5 ]
+
+This patch adds support for VLAN CTAG based filtering at the slave
+devices. A slave Ethernet device may be capable of filtering Ethernet
+packets based on VLAN ID. This requires that when a VLAN interface is
+created over an HSR/PRP interface, the VID information is passed to
+the associated slave Ethernet devices so that they update their
+hardware filters to filter frames based on VID. This patch adds the
+required functions to propagate the VID information to the slave
+devices.
+
+Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20241106091710.3308519-3-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8884c6939913 ("hsr: use rtnl lock when iterating over ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c | 80 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 79 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index cff37637c96d3..69f6c704352de 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -464,6 +464,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ }
+ }
+
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ bool is_slave_a_added = false;
++ bool is_slave_b_added = false;
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++ int ret = 0;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ if (port->type == HSR_PT_MASTER ||
++ port->type == HSR_PT_INTERLINK)
++ continue;
++
++ ret = vlan_vid_add(port->dev, proto, vid);
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ if (ret) {
++ /* clean up Slave-B */
++ netdev_err(dev, "add vid failed for Slave-A\n");
++ if (is_slave_b_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_a_added = true;
++ break;
++
++ case HSR_PT_SLAVE_B:
++ if (ret) {
++ /* clean up Slave-A */
++ netdev_err(dev, "add vid failed for Slave-B\n");
++ if (is_slave_a_added)
++ vlan_vid_del(port->dev, proto, vid);
++ return ret;
++ }
++
++ is_slave_b_added = true;
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++ __be16 proto, u16 vid)
++{
++ struct hsr_port *port;
++ struct hsr_priv *hsr;
++
++ hsr = netdev_priv(dev);
++
++ hsr_for_each_port(hsr, port) {
++ switch (port->type) {
++ case HSR_PT_SLAVE_A:
++ case HSR_PT_SLAVE_B:
++ vlan_vid_del(port->dev, proto, vid);
++ break;
++ default:
++ break;
++ }
++ }
++
++ return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+@@ -472,6 +543,8 @@ static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_rx_flags = hsr_change_rx_flags,
+ .ndo_fix_features = hsr_fix_features,
+ .ndo_set_rx_mode = hsr_set_rx_mode,
++ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+
+ static struct device_type hsr_type = {
+@@ -512,7 +585,8 @@ void hsr_dev_setup(struct net_device *dev)
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+- NETIF_F_HW_VLAN_CTAG_TX;
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ dev->features = dev->hw_features;
+
+@@ -598,6 +672,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ (slave[1]->features & NETIF_F_HW_HSR_FWD))
+ hsr->fwd_offloaded = true;
+
++ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+--
+2.51.0
+
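With NETIF_F_HW_VLAN_CTAG_FILTER advertised, the 8021q core invokes the
new callbacks whenever a ctag VLAN is added to or removed from the HSR
master; the handlers simply fan the (proto, vid) pair out to both
slaves. A sketch of the resulting call path (vid 100 is an arbitrary
example; slave_a/slave_b stand in for the two port->dev pointers):

	/* "ip link add link hsr0 name hsr0.100 type vlan id 100" lands in */
	hsr_ndo_vlan_rx_add_vid(hsr_dev, htons(ETH_P_8021Q), 100);
		/* which forwards the VID to each slave's filter: */
		vlan_vid_add(slave_a, htons(ETH_P_8021Q), 100);
		vlan_vid_add(slave_b, htons(ETH_P_8021Q), 100);
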
--- /dev/null
+From f968b365fd2c107f879dc11570918ff9f97e4bd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:09:13 +0200
+Subject: regulator: sy7636a: fix lifecycle of power good gpio
+
+From: Andreas Kemnade <akemnade@kernel.org>
+
+[ Upstream commit c05d0b32eebadc8be6e53196e99c64cf2bed1d99 ]
+
+Attach the power good GPIO to the regulator device's devres instead of
+the parent device's to fix problems if probe is run multiple times
+(rmmod/insmod or probe deferral).
+
+Fixes: 8c485bedfb785 ("regulator: sy7636a: Initial commit")
+Signed-off-by: Andreas Kemnade <akemnade@kernel.org>
+Reviewed-by: Alistair Francis <alistair@alistair23.me>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Message-ID: <20250906-sy7636-rsrc-v1-2-e2886a9763a7@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/sy7636a-regulator.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index d1e7ba1fb3e1a..27e3d939b7bb9 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ }
+
+ config.dev = &pdev->dev;
+- config.dev->of_node = pdev->dev.parent->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+--
+2.51.0
+
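The fix relies on the devres lifetime rule: resources requested with
devm_* are released when the device they were registered against is
unbound, not when the requesting probe exits. Registering the GPIO
against the parent meant a second probe of the regulator child (after
rmmod/insmod or probe deferral) found the pin still claimed. A sketch
of the difference:

	/* tied to the child: released on each unbind, safe to re-probe */
	gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);

	/* tied to the parent: held until the parent itself goes away, so
	 * a re-probe's devm_gpiod_get() fails since the GPIO is still busy
	 */
	gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
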
--- /dev/null
+From 78fb8ebed2c4a5e751cd0d7a87d217cfabe76f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 06:32:50 -0800
+Subject: RISC-V: Remove unnecessary include from compat.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Palmer Dabbelt <palmer@rivosinc.com>
+
+[ Upstream commit 8d4f1e05ff821a5d59116ab8c3a30fcae81d8597 ]
+
+Without this I get a bunch of build errors like
+
+ In file included from ./include/linux/sched/task_stack.h:12,
+ from ./arch/riscv/include/asm/compat.h:12,
+ from ./arch/riscv/include/asm/pgtable.h:115,
+ from ./include/linux/pgtable.h:6,
+ from ./include/linux/mm.h:30,
+ from arch/riscv/kernel/asm-offsets.c:8:
+ ./include/linux/kasan.h:50:37: error: ‘MAX_PTRS_PER_PTE’ undeclared here (not in a function); did you mean ‘PTRS_PER_PTE’?
+ 50 | extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PTE
+ ./include/linux/kasan.h:51:8: error: unknown type name ‘pmd_t’; did you mean ‘pgd_t’?
+ 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:51:37: error: ‘MAX_PTRS_PER_PMD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+ ./include/linux/kasan.h:52:8: error: unknown type name ‘pud_t’; did you mean ‘pgd_t’?
+ 52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:52:37: error: ‘MAX_PTRS_PER_PUD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+ ./include/linux/kasan.h:53:8: error: unknown type name ‘p4d_t’; did you mean ‘pgd_t’?
+ 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+ | ^~~~~
+ | pgd_t
+ ./include/linux/kasan.h:53:37: error: ‘MAX_PTRS_PER_P4D’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’?
+ 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+ | ^~~~~~~~~~~~~~~~
+ | PTRS_PER_PGD
+
+Link: https://lore.kernel.org/r/20241126143250.29708-1-palmer@rivosinc.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/compat.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
+index 2ac955b51148f..6b79287baecc0 100644
+--- a/arch/riscv/include/asm/compat.h
++++ b/arch/riscv/include/asm/compat.h
+@@ -9,7 +9,6 @@
+ */
+ #include <linux/types.h>
+ #include <linux/sched.h>
+-#include <linux/sched/task_stack.h>
+ #include <asm-generic/compat.h>
+
+ static inline int is_compat_task(void)
+--
+2.51.0
+
input-iqs7222-avoid-enabling-unused-interrupts.patch
input-i8042-add-tuxedo-infinitybook-pro-gen10-amd-to-i8042-quirk-table.patch
revert-net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+disable-slub_tiny-for-build-testing.patch
+net-fec-fix-possible-npd-in-fec_enet_phy_reset_after.patch
+net-bridge-bounce-invalid-boolopts.patch
+tunnels-reset-the-gso-metadata-before-reusing-the-sk.patch
+docs-networking-can-change-bcm_msg_head-frames-membe.patch
+igb-fix-link-test-skipping-when-interface-is-admin-d.patch
+i40e-fix-irq-freeing-in-i40e_vsi_request_irq_msix-er.patch
+can-j1939-j1939_sk_bind-call-j1939_priv_put-immediat.patch
+can-j1939-j1939_local_ecu_get-undo-increment-when-j1.patch
+can-xilinx_can-xcan_write_frame-fix-use-after-free-o.patch
+net-hsr-add-support-for-mc-filtering-at-the-slave-de.patch
+net-hsr-add-vlan-ctag-filter-support.patch
+hsr-use-rtnl-lock-when-iterating-over-ports.patch
+hsr-use-hsr_for_each_port_rtnl-in-hsr_port_get_hsr.patch
+dmaengine-idxd-remove-improper-idxd_free.patch
+dmaengine-idxd-fix-refcount-underflow-on-module-unlo.patch
+dmaengine-idxd-fix-double-free-in-idxd_setup_wqs.patch
+dmaengine-ti-edma-fix-memory-allocation-size-for-que.patch
+regulator-sy7636a-fix-lifecycle-of-power-good-gpio.patch
+hrtimer-remove-unused-function.patch
+hrtimer-rename-__hrtimer_hres_active-to-hrtimer_hres.patch
+hrtimers-unconditionally-update-target-cpu-base-afte.patch
+risc-v-remove-unnecessary-include-from-compat.h.patch
--- /dev/null
+From 26e68d4ce7ca72544843ddcf3e4ddbd80aeacd99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 14:53:50 +0200
+Subject: tunnels: reset the GSO metadata before reusing the skb
+
+From: Antoine Tenart <atenart@kernel.org>
+
+[ Upstream commit e3c674db356c4303804b2415e7c2b11776cdd8c3 ]
+
+If a GSO skb is sent through a Geneve tunnel and Geneve options are
+added, the split GSO skb might not fit in the MTU anymore and an ICMP
+frag needed packet can be generated. In such a case the ICMP packet
+might go through the segmentation logic (and be dropped) later if it
+reaches a path where the GSO status is checked and segmentation is
+required.
+
+This is especially true when an OvS bridge is used with a Geneve tunnel
+attached to it. The following set of actions could lead to the ICMP
+packet being wrongfully segmented:
+
+1. An skb is constructed by the TCP layer (e.g. gso_type SKB_GSO_TCPV4,
+ segs >= 2).
+
+2. The skb hits the OvS bridge where Geneve options are added by an OvS
+ action before being sent through the tunnel.
+
+3. When the skb is xmited in the tunnel, the split skb does not fit
+ anymore in the MTU and iptunnel_pmtud_build_icmp is called to
+ generate an ICMP fragmentation needed packet. This is done by reusing
+ the original (GSO!) skb. The GSO metadata is not cleared.
+
+4. The ICMP packet being sent back hits the OvS bridge again and because
+ skb_is_gso returns true, it goes through queue_gso_packets...
+
+5. ...where __skb_gso_segment is called. The skb is then dropped.
+
+6. Note that in the above example, on re-transmission the skb won't be
+   a GSO one as it would be segmented (len > MSS), and the ICMP packet
+   should go through.
+
+Fix this by resetting the GSO information before reusing an skb in
+iptunnel_pmtud_build_icmp and iptunnel_pmtud_build_icmpv6.
+
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Reported-by: Adrian Moreno <amorenoz@redhat.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://patch.msgid.link/20250904125351.159740-1-atenart@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_tunnel_core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index deb08cab44640..75e3d7501752d 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (skb_is_gso(skb))
++ skb_gso_reset(skb);
++
+ skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ pskb_pull(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+--
+2.51.0
+
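skb_gso_reset() clears the shared-info GSO fields, so after the reuse
the ICMP skb no longer answers true to skb_is_gso() (which tests
gso_size). For reference, the helper amounts to the following (as
defined in include/linux/skbuff.h):

	static inline void skb_gso_reset(struct sk_buff *skb)
	{
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_segs = 0;
		skb_shinfo(skb)->gso_type = 0;
	}
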