+++ /dev/null
-From 5415d916549bb61e3ad0ca9cab93982e1aaa578f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 10 Jan 2025 23:13:39 -0800
-Subject: eth: iavf: extend the netdev_lock usage
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit afc664987ab318c227ebc0f639f5afc921aaf674 ]
-
-iavf uses the netdev->lock already to protect shapers.
-In an upcoming series we'll try to protect NAPI instances
-with netdev->lock.
-
-We need to modify the protection a bit. All NAPI related
-calls in the driver need to be consistently under the lock.
-This will allow us to easily switch to a "we already hold
-the lock" NAPI API later.
-
-register_netdevice(), OTOH, must not be called under
-the netdev_lock() as we do not intend to have an
-"already locked" version of this call.
-
-Link: https://patch.msgid.link/20250111071339.3709071-1-kuba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/ethernet/intel/iavf/iavf_main.c | 53 +++++++++++++++++----
- 1 file changed, 45 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
-index 2b8700abe56bb..7c427003184d5 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_main.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
-@@ -1983,6 +1983,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool runni
- static void iavf_finish_config(struct work_struct *work)
- {
- struct iavf_adapter *adapter;
-+ bool netdev_released = false;
- int pairs, err;
-
- adapter = container_of(work, struct iavf_adapter, finish_config);
-@@ -2003,7 +2004,16 @@ static void iavf_finish_config(struct work_struct *work)
-
- switch (adapter->state) {
- case __IAVF_DOWN:
-+ /* Set the real number of queues when reset occurs while
-+ * state == __IAVF_DOWN
-+ */
-+ pairs = adapter->num_active_queues;
-+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
-+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
-+
- if (adapter->netdev->reg_state != NETREG_REGISTERED) {
-+ mutex_unlock(&adapter->netdev->lock);
-+ netdev_released = true;
- err = register_netdevice(adapter->netdev);
- if (err) {
- dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
-@@ -2018,11 +2028,7 @@ static void iavf_finish_config(struct work_struct *work)
- goto out;
- }
- }
--
-- /* Set the real number of queues when reset occurs while
-- * state == __IAVF_DOWN
-- */
-- fallthrough;
-+ break;
- case __IAVF_RUNNING:
- pairs = adapter->num_active_queues;
- netif_set_real_num_rx_queues(adapter->netdev, pairs);
-@@ -2035,7 +2041,8 @@ static void iavf_finish_config(struct work_struct *work)
-
- out:
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&adapter->netdev->lock);
-+ if (!netdev_released)
-+ mutex_unlock(&adapter->netdev->lock);
- rtnl_unlock();
- }
-
-@@ -2728,12 +2735,16 @@ static void iavf_watchdog_task(struct work_struct *work)
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
-+ struct net_device *netdev = adapter->netdev;
- struct iavf_hw *hw = &adapter->hw;
- u32 reg_val;
-
-+ mutex_lock(&netdev->lock);
- if (!mutex_trylock(&adapter->crit_lock)) {
-- if (adapter->state == __IAVF_REMOVE)
-+ if (adapter->state == __IAVF_REMOVE) {
-+ mutex_unlock(&netdev->lock);
- return;
-+ }
-
- goto restart_watchdog;
- }
-@@ -2745,30 +2756,35 @@ static void iavf_watchdog_task(struct work_struct *work)
- case __IAVF_STARTUP:
- iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
- case __IAVF_INIT_VERSION_CHECK:
- iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
- case __IAVF_INIT_GET_RESOURCES:
- iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
- case __IAVF_INIT_EXTENDED_CAPS:
- iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
- case __IAVF_INIT_CONFIG_ADAPTER:
- iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
-@@ -2780,6 +2796,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- * as it can loop forever
- */
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- return;
- }
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
-@@ -2788,6 +2805,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
-@@ -2795,6 +2813,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- /* Try again from failed step*/
- iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
- case __IAVF_COMM_FAILED:
-@@ -2807,6 +2826,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- iavf_change_state(adapter, __IAVF_INIT_FAILED);
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- return;
- }
- reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
-@@ -2826,12 +2846,14 @@ static void iavf_watchdog_task(struct work_struct *work)
- adapter->aq_required = 0;
- adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
- case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
-@@ -2862,6 +2884,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- case __IAVF_REMOVE:
- default:
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- return;
- }
-
-@@ -2873,12 +2896,14 @@ static void iavf_watchdog_task(struct work_struct *work)
- dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
- }
-
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- restart_watchdog:
- if (adapter->state >= __IAVF_DOWN)
- queue_work(adapter->wq, &adapter->adminq_task);
-@@ -4355,14 +4380,17 @@ static int iavf_open(struct net_device *netdev)
- return -EIO;
- }
-
-+ mutex_lock(&netdev->lock);
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
-- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
-+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
-+ mutex_unlock(&netdev->lock);
- return -EBUSY;
-+ }
-
- usleep_range(500, 1000);
- }
-@@ -4411,6 +4439,7 @@ static int iavf_open(struct net_device *netdev)
- iavf_irq_enable(adapter, true);
-
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
-
- return 0;
-
-@@ -4423,6 +4452,7 @@ static int iavf_open(struct net_device *netdev)
- iavf_free_all_tx_resources(adapter);
- err_unlock:
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
-
- return err;
- }
-@@ -4444,10 +4474,12 @@ static int iavf_close(struct net_device *netdev)
- u64 aq_to_restore;
- int status;
-
-+ mutex_lock(&netdev->lock);
- mutex_lock(&adapter->crit_lock);
-
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
- return 0;
- }
-
-@@ -4481,6 +4513,7 @@ static int iavf_close(struct net_device *netdev)
- iavf_free_traffic_irqs(adapter);
-
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
-
- /* We explicitly don't free resources here because the hardware is
- * still active and can DMA into memory. Resources are cleared in
-@@ -5357,6 +5390,7 @@ static int iavf_suspend(struct device *dev_d)
-
- netif_device_detach(netdev);
-
-+ mutex_lock(&netdev->lock);
- mutex_lock(&adapter->crit_lock);
-
- if (netif_running(netdev)) {
-@@ -5368,6 +5402,7 @@ static int iavf_suspend(struct device *dev_d)
- iavf_reset_interrupt_capability(adapter);
-
- mutex_unlock(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
-
- return 0;
- }
-@@ -5466,6 +5501,7 @@ static void iavf_remove(struct pci_dev *pdev)
- if (netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(netdev);
-
-+ mutex_lock(&netdev->lock);
- mutex_lock(&adapter->crit_lock);
- dev_info(&adapter->pdev->dev, "Removing device\n");
- iavf_change_state(adapter, __IAVF_REMOVE);
-@@ -5502,6 +5538,7 @@ static void iavf_remove(struct pci_dev *pdev)
- mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
-+ mutex_unlock(&netdev->lock);
-
- iounmap(hw->hw_addr);
- pci_release_regions(pdev);
---
-2.39.5
-
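The hunks above already show the resulting flow in iavf_finish_config(); as a compact restatement, the sketch below assumes a driver-private crit_lock and a hypothetical example_finish_config() wrapper. It is illustrative only, not the driver's actual code.

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch of the lock ordering used above:
 * rtnl_lock -> netdev->lock -> driver crit_lock, with netdev->lock
 * dropped before register_netdevice() because no "already locked"
 * variant of that call is planned. Error handling is mostly omitted.
 */
static void example_finish_config(struct net_device *netdev,
				  struct mutex *crit_lock)
{
	bool netdev_released = false;

	rtnl_lock();
	mutex_lock(&netdev->lock);
	mutex_lock(crit_lock);

	if (netdev->reg_state != NETREG_REGISTERED) {
		/* register_netdevice() must not run under netdev->lock */
		mutex_unlock(&netdev->lock);
		netdev_released = true;
		if (register_netdevice(netdev))
			netdev_err(netdev, "register_netdevice failed\n");
	}

	mutex_unlock(crit_lock);
	if (!netdev_released)
		mutex_unlock(&netdev->lock);
	rtnl_unlock();
}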
+++ /dev/null
-From faab6320159cee097c6242bf85f466bb2629cf65 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 14 Jan 2025 19:53:12 -0800
-Subject: net: add netdev->up protected by netdev_lock()
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit 5112457f3d8e41f987908266068af88ef9f3ab78 ]
-
-Some uAPI (netdev netlink) hide net_device's sub-objects while
-the interface is down to ensure uniform behavior across drivers.
-To remove the rtnl_lock dependency from those uAPIs we need a way
-to safely tell if the device is down or up.
-
-Add an indication of whether device is open or closed, protected
-by netdev->lock. The semantics are the same as IFF_UP, but taking
-netdev_lock around every write to ->flags would be a lot of code
-churn.
-
-We don't want to blanket the entire open / close path by netdev_lock,
-because it will prevent us from applying it to specific structures -
-core helpers won't be able to take that lock from any function
-called by the drivers on open/close paths.
-
-So the state of the flag is "pessimistic", as in it may report false
-negatives, but never false positives.
-
-Reviewed-by: Joe Damato <jdamato@fastly.com>
-Reviewed-by: Eric Dumazet <edumazet@google.com>
-Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
-Link: https://patch.msgid.link/20250115035319.559603-5-kuba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/netdevice.h | 14 +++++++++++++-
- net/core/dev.c | 4 ++--
- net/core/dev.h | 12 ++++++++++++
- 3 files changed, 27 insertions(+), 3 deletions(-)
-
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index e0a8093c9be80..eb4d61eee7e97 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2441,12 +2441,24 @@ struct net_device {
- unsigned long gro_flush_timeout;
- u32 napi_defer_hard_irqs;
-
-+ /**
-+ * @up: copy of @state's IFF_UP, but safe to read with just @lock.
-+ * May report false negatives while the device is being opened
-+ * or closed (@lock does not protect .ndo_open, or .ndo_close).
-+ */
-+ bool up;
-+
- /**
- * @lock: netdev-scope lock, protects a small selection of fields.
- * Should always be taken using netdev_lock() / netdev_unlock() helpers.
- * Drivers are free to use it for other protection.
- *
-- * Protects: @reg_state, @net_shaper_hierarchy.
-+ * Protects:
-+ * @net_shaper_hierarchy, @reg_state
-+ *
-+ * Partially protects (writers must hold both @lock and rtnl_lock):
-+ * @up
-+ *
- * Ordering: take after rtnl_lock.
- */
- struct mutex lock;
-diff --git a/net/core/dev.c b/net/core/dev.c
-index d1e8613151a4a..60f48d63559a1 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -1543,7 +1543,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
- if (ret)
- clear_bit(__LINK_STATE_START, &dev->state);
- else {
-- dev->flags |= IFF_UP;
-+ netif_set_up(dev, true);
- dev_set_rx_mode(dev);
- dev_activate(dev);
- add_device_randomness(dev->dev_addr, dev->addr_len);
-@@ -1622,7 +1622,7 @@ static void __dev_close_many(struct list_head *head)
- if (ops->ndo_stop)
- ops->ndo_stop(dev);
-
-- dev->flags &= ~IFF_UP;
-+ netif_set_up(dev, false);
- netpoll_poll_enable(dev);
- }
- }
-diff --git a/net/core/dev.h b/net/core/dev.h
-index deb5eae5749fa..e17c640c05fb9 100644
---- a/net/core/dev.h
-+++ b/net/core/dev.h
-@@ -111,6 +111,18 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
- void unregister_netdevice_many_notify(struct list_head *head,
- u32 portid, const struct nlmsghdr *nlh);
-
-+static inline void netif_set_up(struct net_device *dev, bool value)
-+{
-+ if (value)
-+ dev->flags |= IFF_UP;
-+ else
-+ dev->flags &= ~IFF_UP;
-+
-+ netdev_lock(dev);
-+ dev->up = value;
-+ netdev_unlock(dev);
-+}
-+
- static inline void netif_set_gso_max_size(struct net_device *dev,
- unsigned int size)
- {
---
-2.39.5
-
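As a usage sketch of the semantics described above (a pessimistic mirror of IFF_UP that is safe to read under the instance lock alone), a hypothetical reader could look like this; the function name is illustrative.

#include <linux/netdevice.h>

/* Decide whether to expose per-device sub-objects without rtnl_lock.
 * A false negative is possible while the device is in the middle of
 * open/close; a false positive is not.
 */
static bool example_netdev_visible(struct net_device *dev)
{
	bool up;

	netdev_lock(dev);
	up = dev->up;
	netdev_unlock(dev);

	return up;
}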
+++ /dev/null
-From a4f67934c19efbd4ab93b7736294d67544cffd53 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 14 Jan 2025 19:53:09 -0800
-Subject: net: add netdev_lock() / netdev_unlock() helpers
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit ebda2f0bbde540ff7da168d2837f8cfb14581e2e ]
-
-Add helpers for locking the netdev instance, use it in drivers
-and the shaper code. This will make grepping for the lock usage
-much easier, as we extend the lock to cover more fields.
-
-Reviewed-by: Joe Damato <jdamato@fastly.com>
-Reviewed-by: Eric Dumazet <edumazet@google.com>
-Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
-Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
-Link: https://patch.msgid.link/20250115035319.559603-2-kuba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/ethernet/intel/iavf/iavf_main.c | 74 ++++++++++-----------
- drivers/net/netdevsim/ethtool.c | 4 +-
- include/linux/netdevice.h | 23 ++++++-
- net/shaper/shaper.c | 6 +-
- 4 files changed, 63 insertions(+), 44 deletions(-)
-
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
-index 7c427003184d5..72314b0a1b25b 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_main.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
-@@ -1992,7 +1992,7 @@ static void iavf_finish_config(struct work_struct *work)
- * The dev->lock is needed to update the queue number
- */
- rtnl_lock();
-- mutex_lock(&adapter->netdev->lock);
-+ netdev_lock(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
-
- if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
-@@ -2012,7 +2012,7 @@ static void iavf_finish_config(struct work_struct *work)
- netif_set_real_num_tx_queues(adapter->netdev, pairs);
-
- if (adapter->netdev->reg_state != NETREG_REGISTERED) {
-- mutex_unlock(&adapter->netdev->lock);
-+ netdev_unlock(adapter->netdev);
- netdev_released = true;
- err = register_netdevice(adapter->netdev);
- if (err) {
-@@ -2042,7 +2042,7 @@ static void iavf_finish_config(struct work_struct *work)
- out:
- mutex_unlock(&adapter->crit_lock);
- if (!netdev_released)
-- mutex_unlock(&adapter->netdev->lock);
-+ netdev_unlock(adapter->netdev);
- rtnl_unlock();
- }
-
-@@ -2739,10 +2739,10 @@ static void iavf_watchdog_task(struct work_struct *work)
- struct iavf_hw *hw = &adapter->hw;
- u32 reg_val;
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE) {
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return;
- }
-
-@@ -2756,35 +2756,35 @@ static void iavf_watchdog_task(struct work_struct *work)
- case __IAVF_STARTUP:
- iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
- case __IAVF_INIT_VERSION_CHECK:
- iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
- case __IAVF_INIT_GET_RESOURCES:
- iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
- case __IAVF_INIT_EXTENDED_CAPS:
- iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
- case __IAVF_INIT_CONFIG_ADAPTER:
- iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
-@@ -2796,7 +2796,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- * as it can loop forever
- */
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return;
- }
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
-@@ -2805,7 +2805,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
-@@ -2813,7 +2813,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- /* Try again from failed step*/
- iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
- case __IAVF_COMM_FAILED:
-@@ -2826,7 +2826,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- iavf_change_state(adapter, __IAVF_INIT_FAILED);
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return;
- }
- reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
-@@ -2846,14 +2846,14 @@ static void iavf_watchdog_task(struct work_struct *work)
- adapter->aq_required = 0;
- adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
- case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
-@@ -2884,7 +2884,7 @@ static void iavf_watchdog_task(struct work_struct *work)
- case __IAVF_REMOVE:
- default:
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return;
- }
-
-@@ -2896,14 +2896,14 @@ static void iavf_watchdog_task(struct work_struct *work)
- dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
- }
-
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- restart_watchdog:
- if (adapter->state >= __IAVF_DOWN)
- queue_work(adapter->wq, &adapter->adminq_task);
-@@ -3030,12 +3030,12 @@ static void iavf_reset_task(struct work_struct *work)
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return;
- }
-
-@@ -3083,7 +3083,7 @@ static void iavf_reset_task(struct work_struct *work)
- reg_val);
- iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return; /* Do not attempt to reinit. It's dead, Jim. */
- }
-
-@@ -3224,7 +3224,7 @@ static void iavf_reset_task(struct work_struct *work)
-
- wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- return;
- reset_err:
-@@ -3235,7 +3235,7 @@ static void iavf_reset_task(struct work_struct *work)
- iavf_disable_vf(adapter);
-
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- }
-
-@@ -3707,10 +3707,10 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
- if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
- return 0;
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- netif_set_real_num_rx_queues(netdev, total_qps);
- netif_set_real_num_tx_queues(netdev, total_qps);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- return ret;
- }
-@@ -4380,7 +4380,7 @@ static int iavf_open(struct net_device *netdev)
- return -EIO;
- }
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
-@@ -4388,7 +4388,7 @@ static int iavf_open(struct net_device *netdev)
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return -EBUSY;
- }
-
-@@ -4439,7 +4439,7 @@ static int iavf_open(struct net_device *netdev)
- iavf_irq_enable(adapter, true);
-
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- return 0;
-
-@@ -4452,7 +4452,7 @@ static int iavf_open(struct net_device *netdev)
- iavf_free_all_tx_resources(adapter);
- err_unlock:
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- return err;
- }
-@@ -4474,12 +4474,12 @@ static int iavf_close(struct net_device *netdev)
- u64 aq_to_restore;
- int status;
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
- return 0;
- }
-
-@@ -4513,7 +4513,7 @@ static int iavf_close(struct net_device *netdev)
- iavf_free_traffic_irqs(adapter);
-
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- /* We explicitly don't free resources here because the hardware is
- * still active and can DMA into memory. Resources are cleared in
-@@ -5390,7 +5390,7 @@ static int iavf_suspend(struct device *dev_d)
-
- netif_device_detach(netdev);
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (netif_running(netdev)) {
-@@ -5402,7 +5402,7 @@ static int iavf_suspend(struct device *dev_d)
- iavf_reset_interrupt_capability(adapter);
-
- mutex_unlock(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- return 0;
- }
-@@ -5501,7 +5501,7 @@ static void iavf_remove(struct pci_dev *pdev)
- if (netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(netdev);
-
-- mutex_lock(&netdev->lock);
-+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
- dev_info(&adapter->pdev->dev, "Removing device\n");
- iavf_change_state(adapter, __IAVF_REMOVE);
-@@ -5538,7 +5538,7 @@ static void iavf_remove(struct pci_dev *pdev)
- mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
-- mutex_unlock(&netdev->lock);
-+ netdev_unlock(netdev);
-
- iounmap(hw->hw_addr);
- pci_release_regions(pdev);
-diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
-index 5fe1eaef99b5b..3f44a11aec83e 100644
---- a/drivers/net/netdevsim/ethtool.c
-+++ b/drivers/net/netdevsim/ethtool.c
-@@ -103,10 +103,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
- struct netdevsim *ns = netdev_priv(dev);
- int err;
-
-- mutex_lock(&dev->lock);
-+ netdev_lock(dev);
- err = netif_set_real_num_queues(dev, ch->combined_count,
- ch->combined_count);
-- mutex_unlock(&dev->lock);
-+ netdev_unlock(dev);
- if (err)
- return err;
-
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index bb71ad82b42ba..4b2964d0d885e 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2442,8 +2442,12 @@ struct net_device {
- u32 napi_defer_hard_irqs;
-
- /**
-- * @lock: protects @net_shaper_hierarchy, feel free to use for other
-- * netdev-scope protection. Ordering: take after rtnl_lock.
-+ * @lock: netdev-scope lock, protects a small selection of fields.
-+ * Should always be taken using netdev_lock() / netdev_unlock() helpers.
-+ * Drivers are free to use it for other protection.
-+ *
-+ * Protects: @net_shaper_hierarchy.
-+ * Ordering: take after rtnl_lock.
- */
- struct mutex lock;
-
-@@ -2673,6 +2677,21 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type,
- struct napi_struct *napi);
-
-+static inline void netdev_lock(struct net_device *dev)
-+{
-+ mutex_lock(&dev->lock);
-+}
-+
-+static inline void netdev_unlock(struct net_device *dev)
-+{
-+ mutex_unlock(&dev->lock);
-+}
-+
-+static inline void netdev_assert_locked(struct net_device *dev)
-+{
-+ lockdep_assert_held(&dev->lock);
-+}
-+
- static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
- {
- napi->irq = irq;
-diff --git a/net/shaper/shaper.c b/net/shaper/shaper.c
-index 15463062fe7b6..7101a48bce545 100644
---- a/net/shaper/shaper.c
-+++ b/net/shaper/shaper.c
-@@ -40,7 +40,7 @@ static void net_shaper_lock(struct net_shaper_binding *binding)
- {
- switch (binding->type) {
- case NET_SHAPER_BINDING_TYPE_NETDEV:
-- mutex_lock(&binding->netdev->lock);
-+ netdev_lock(binding->netdev);
- break;
- }
- }
-@@ -49,7 +49,7 @@ static void net_shaper_unlock(struct net_shaper_binding *binding)
- {
- switch (binding->type) {
- case NET_SHAPER_BINDING_TYPE_NETDEV:
-- mutex_unlock(&binding->netdev->lock);
-+ netdev_unlock(binding->netdev);
- break;
- }
- }
-@@ -1398,7 +1398,7 @@ void net_shaper_set_real_num_tx_queues(struct net_device *dev,
- /* Only drivers implementing shapers support ensure
- * the lock is acquired in advance.
- */
-- lockdep_assert_held(&dev->lock);
-+ netdev_assert_locked(dev);
-
- /* Take action only when decreasing the tx queue number. */
- for (i = txq; i < dev->real_num_tx_queues; ++i) {
---
-2.39.5
-
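A minimal driver-side sketch of the new helpers, modelled on the __iavf_setup_tc() hunk above; the wrapper name is illustrative.

#include <linux/netdevice.h>

/* Update the real queue counts under the per-netdev instance lock,
 * as the shaper code expects (see netdev_assert_locked() in
 * net_shaper_set_real_num_tx_queues() above).
 */
static void example_set_queue_counts(struct net_device *dev,
				     unsigned int qps)
{
	netdev_lock(dev);
	netif_set_real_num_rx_queues(dev, qps);
	netif_set_real_num_tx_queues(dev, qps);
	netdev_unlock(dev);
}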
+++ /dev/null
-From 9ecd605aef6d75542cf0ec6864c4602deaf809c6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 14 Jan 2025 19:53:10 -0800
-Subject: net: make netdev_lock() protect netdev->reg_state
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit 5fda3f35349b6b7f22f5f5095a3821261d515075 ]
-
-Protect writes to netdev->reg_state with netdev_lock().
-From now on holding netdev_lock() is sufficient to prevent
-the net_device from getting unregistered, so code which
-wants to hold just a single netdev around no longer needs
-to hold rtnl_lock.
-
-We do not protect the NETREG_UNREGISTERED -> NETREG_RELEASED
-transition. We'd need to move mutex_destroy(netdev->lock)
-to .release, but the real reason is that trying to stop
-the unregistration process mid-way would be unsafe / crazy.
-Taking references on such devices is not safe, either.
-So the intended semantics are to lock REGISTERED devices.
-
-Reviewed-by: Joe Damato <jdamato@fastly.com>
-Reviewed-by: Eric Dumazet <edumazet@google.com>
-Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
-Link: https://patch.msgid.link/20250115035319.559603-3-kuba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/netdevice.h | 2 +-
- net/core/dev.c | 6 ++++++
- 2 files changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 4b2964d0d885e..e0a8093c9be80 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2446,7 +2446,7 @@ struct net_device {
- * Should always be taken using netdev_lock() / netdev_unlock() helpers.
- * Drivers are free to use it for other protection.
- *
-- * Protects: @net_shaper_hierarchy.
-+ * Protects: @reg_state, @net_shaper_hierarchy.
- * Ordering: take after rtnl_lock.
- */
- struct mutex lock;
-diff --git a/net/core/dev.c b/net/core/dev.c
-index c5e5b827bb800..d1e8613151a4a 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -10687,7 +10687,9 @@ int register_netdevice(struct net_device *dev)
-
- ret = netdev_register_kobject(dev);
-
-+ netdev_lock(dev);
- WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
-+ netdev_unlock(dev);
-
- if (ret)
- goto err_uninit_notify;
-@@ -10985,7 +10987,9 @@ void netdev_run_todo(void)
- continue;
- }
-
-+ netdev_lock(dev);
- WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
-+ netdev_unlock(dev);
- linkwatch_sync_dev(dev);
- }
-
-@@ -11591,7 +11595,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
- list_for_each_entry(dev, head, unreg_list) {
- /* And unlink it from device chain. */
- unlist_netdevice(dev);
-+ netdev_lock(dev);
- WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
-+ netdev_unlock(dev);
- }
- flush_all_backlogs();
-
---
-2.39.5
-
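A sketch of the intended "lock REGISTERED devices" semantics described above; the caller name is hypothetical and error paths are omitted.

#include <linux/netdevice.h>

/* Per the commit message, holding netdev->lock is now enough to keep
 * @dev from getting unregistered, so a single device can be inspected
 * without rtnl_lock. Unregistered/released devices are not meant to be
 * locked.
 */
static bool example_inspect_if_registered(struct net_device *dev)
{
	bool registered;

	netdev_lock(dev);
	registered = dev->reg_state == NETREG_REGISTERED;
	if (registered) {
		/* safe to use @dev state protected by the lock here */
	}
	netdev_unlock(dev);

	return registered;
}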
+++ /dev/null
-From cbcf8c6d4b7fb6fab20ae92429077352181644eb Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 7 Jan 2025 08:08:39 -0800
-Subject: net: make sure we retain NAPI ordering on netdev->napi_list
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit d6c7b03497eef8b66bf0b5572881359913e39787 ]
-
-Netlink code depends on NAPI instances being sorted by ID on
-the netdev list for dump continuation. We need to be able to
-find the position on the list where we left off if dump does
-not fit in a single skb, and in the meantime NAPI instances
-can come and go.
-
-This was trivially true when we were assigning a new ID to every
-new NAPI instance. Since we added the NAPI config API, we try
-to retain the ID previously used for the same queue, but still
-add the new NAPI instance at the start of the list.
-
-This is fine if we reset the entire netdev and all NAPIs get
-removed and added back. If a driver replaces a NAPI instance
-during an operation like DEVMEM queue reset, or recreates
-a subset of NAPI instances in other ways, we may end up with
-broken ordering, and therefore Netlink dumps with either
-missing or duplicated entries.
-
-At this stage the problem is theoretical. Only two drivers
-support queue API, bnxt and gve. gve recreates NAPIs during
-queue reset, but it doesn't support NAPI config.
-bnxt supports NAPI config but doesn't recreate instances
-during reset.
-
-We need to save the ID in the config as soon as it is assigned
-because otherwise the new NAPI will not know what ID it will
-get at enable time, at the time it is being added.
-
-Reviewed-by: Willem de Bruijn <willemb@google.com>
-Reviewed-by: Eric Dumazet <edumazet@google.com>
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- net/core/dev.c | 42 ++++++++++++++++++++++++++++++++++++------
- 1 file changed, 36 insertions(+), 6 deletions(-)
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 2b09714761c62..c5e5b827bb800 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -6766,13 +6766,14 @@ static void napi_restore_config(struct napi_struct *n)
- n->gro_flush_timeout = n->config->gro_flush_timeout;
- n->irq_suspend_timeout = n->config->irq_suspend_timeout;
- /* a NAPI ID might be stored in the config, if so use it. if not, use
-- * napi_hash_add to generate one for us. It will be saved to the config
-- * in napi_disable.
-+ * napi_hash_add to generate one for us.
- */
-- if (n->config->napi_id)
-+ if (n->config->napi_id) {
- napi_hash_add_with_id(n, n->config->napi_id);
-- else
-+ } else {
- napi_hash_add(n);
-+ n->config->napi_id = n->napi_id;
-+ }
- }
-
- static void napi_save_config(struct napi_struct *n)
-@@ -6780,10 +6781,39 @@ static void napi_save_config(struct napi_struct *n)
- n->config->defer_hard_irqs = n->defer_hard_irqs;
- n->config->gro_flush_timeout = n->gro_flush_timeout;
- n->config->irq_suspend_timeout = n->irq_suspend_timeout;
-- n->config->napi_id = n->napi_id;
- napi_hash_del(n);
- }
-
-+/* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
-+ * inherit an existing ID try to insert it at the right position.
-+ */
-+static void
-+netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
-+{
-+ unsigned int new_id, pos_id;
-+ struct list_head *higher;
-+ struct napi_struct *pos;
-+
-+ new_id = UINT_MAX;
-+ if (napi->config && napi->config->napi_id)
-+ new_id = napi->config->napi_id;
-+
-+ higher = &dev->napi_list;
-+ list_for_each_entry(pos, &dev->napi_list, dev_list) {
-+ if (pos->napi_id >= MIN_NAPI_ID)
-+ pos_id = pos->napi_id;
-+ else if (pos->config)
-+ pos_id = pos->config->napi_id;
-+ else
-+ pos_id = UINT_MAX;
-+
-+ if (pos_id <= new_id)
-+ break;
-+ higher = &pos->dev_list;
-+ }
-+ list_add_rcu(&napi->dev_list, higher); /* adds after higher */
-+}
-+
- void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
- {
-@@ -6810,7 +6840,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- napi->list_owner = -1;
- set_bit(NAPI_STATE_SCHED, &napi->state);
- set_bit(NAPI_STATE_NPSVC, &napi->state);
-- list_add_rcu(&napi->dev_list, &dev->napi_list);
-+ netif_napi_dev_list_add(dev, napi);
-
- /* default settings from sysfs are applied to all NAPIs. any per-NAPI
- * configuration will be loaded in napi_enable
---
-2.39.5
-
+++ /dev/null
-From d828e5de2b0cc4f102f4218c7e466a32fe197a1c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 14 Jan 2025 19:53:13 -0800
-Subject: net: protect netdev->napi_list with netdev_lock()
-
-From: Jakub Kicinski <kuba@kernel.org>
-
-[ Upstream commit 1b23cdbd2bbc4b40e21c12ae86c2781e347ff0f8 ]
-
-Hold netdev->lock when NAPIs are getting added or removed.
-This will allow safe access to NAPI instances of a net_device
-without rtnl_lock.
-
-Create a family of helpers which assume the lock is already taken.
-Switch iavf to them, as it makes extensive use of netdev->lock,
-already.
-
-Reviewed-by: Joe Damato <jdamato@fastly.com>
-Reviewed-by: Eric Dumazet <edumazet@google.com>
-Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
-Link: https://patch.msgid.link/20250115035319.559603-6-kuba@kernel.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/ethernet/intel/iavf/iavf_main.c | 6 +--
- include/linux/netdevice.h | 54 ++++++++++++++++++---
- net/core/dev.c | 15 ++++--
- 3 files changed, 60 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
-index 72314b0a1b25b..4639f55a17be1 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_main.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
-@@ -1815,8 +1815,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
- q_vector->v_idx = q_idx;
- q_vector->reg_idx = q_idx;
- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
-- netif_napi_add(adapter->netdev, &q_vector->napi,
-- iavf_napi_poll);
-+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
-+ iavf_napi_poll);
- }
-
- return 0;
-@@ -1842,7 +1842,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
-
-- netif_napi_del(&q_vector->napi);
-+ netif_napi_del_locked(&q_vector->napi);
- }
- kfree(adapter->q_vectors);
- adapter->q_vectors = NULL;
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index eb4d61eee7e97..db4facb384684 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2454,7 +2454,7 @@ struct net_device {
- * Drivers are free to use it for other protection.
- *
- * Protects:
-- * @net_shaper_hierarchy, @reg_state
-+ * @napi_list, @net_shaper_hierarchy, @reg_state
- *
- * Partially protects (writers must hold both @lock and rtnl_lock):
- * @up
-@@ -2714,8 +2714,19 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
- */
- #define NAPI_POLL_WEIGHT 64
-
--void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
-- int (*poll)(struct napi_struct *, int), int weight);
-+void netif_napi_add_weight_locked(struct net_device *dev,
-+ struct napi_struct *napi,
-+ int (*poll)(struct napi_struct *, int),
-+ int weight);
-+
-+static inline void
-+netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
-+ int (*poll)(struct napi_struct *, int), int weight)
-+{
-+ netdev_lock(dev);
-+ netif_napi_add_weight_locked(dev, napi, poll, weight);
-+ netdev_unlock(dev);
-+}
-
- /**
- * netif_napi_add() - initialize a NAPI context
-@@ -2733,6 +2744,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
- }
-
-+static inline void
-+netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
-+ int (*poll)(struct napi_struct *, int))
-+{
-+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
-+}
-+
- static inline void
- netif_napi_add_tx_weight(struct net_device *dev,
- struct napi_struct *napi,
-@@ -2743,6 +2761,15 @@ netif_napi_add_tx_weight(struct net_device *dev,
- netif_napi_add_weight(dev, napi, poll, weight);
- }
-
-+static inline void
-+netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
-+ int (*poll)(struct napi_struct *, int), int index)
-+{
-+ napi->index = index;
-+ napi->config = &dev->napi_config[index];
-+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
-+}
-+
- /**
- * netif_napi_add_config - initialize a NAPI context with persistent config
- * @dev: network device
-@@ -2754,9 +2781,9 @@ static inline void
- netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int index)
- {
-- napi->index = index;
-- napi->config = &dev->napi_config[index];
-- netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
-+ netdev_lock(dev);
-+ netif_napi_add_config_locked(dev, napi, poll, index);
-+ netdev_unlock(dev);
- }
-
- /**
-@@ -2776,6 +2803,8 @@ static inline void netif_napi_add_tx(struct net_device *dev,
- netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
- }
-
-+void __netif_napi_del_locked(struct napi_struct *napi);
-+
- /**
- * __netif_napi_del - remove a NAPI context
- * @napi: NAPI context
-@@ -2784,7 +2813,18 @@ static inline void netif_napi_add_tx(struct net_device *dev,
- * containing @napi. Drivers might want to call this helper to combine
- * all the needed RCU grace periods into a single one.
- */
--void __netif_napi_del(struct napi_struct *napi);
-+static inline void __netif_napi_del(struct napi_struct *napi)
-+{
-+ netdev_lock(napi->dev);
-+ __netif_napi_del_locked(napi);
-+ netdev_unlock(napi->dev);
-+}
-+
-+static inline void netif_napi_del_locked(struct napi_struct *napi)
-+{
-+ __netif_napi_del_locked(napi);
-+ synchronize_net();
-+}
-
- /**
- * netif_napi_del - remove a NAPI context
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 60f48d63559a1..6dfed2746c528 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -6814,9 +6814,12 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
- list_add_rcu(&napi->dev_list, higher); /* adds after higher */
- }
-
--void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
-- int (*poll)(struct napi_struct *, int), int weight)
-+void netif_napi_add_weight_locked(struct net_device *dev,
-+ struct napi_struct *napi,
-+ int (*poll)(struct napi_struct *, int),
-+ int weight)
- {
-+ netdev_assert_locked(dev);
- if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
- return;
-
-@@ -6857,7 +6860,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- dev->threaded = false;
- netif_napi_set_irq(napi, -1);
- }
--EXPORT_SYMBOL(netif_napi_add_weight);
-+EXPORT_SYMBOL(netif_napi_add_weight_locked);
-
- void napi_disable(struct napi_struct *n)
- {
-@@ -6928,8 +6931,10 @@ static void flush_gro_hash(struct napi_struct *napi)
- }
-
- /* Must be called in process context */
--void __netif_napi_del(struct napi_struct *napi)
-+void __netif_napi_del_locked(struct napi_struct *napi)
- {
-+ netdev_assert_locked(napi->dev);
-+
- if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
- return;
-
-@@ -6949,7 +6954,7 @@ void __netif_napi_del(struct napi_struct *napi)
- napi->thread = NULL;
- }
- }
--EXPORT_SYMBOL(__netif_napi_del);
-+EXPORT_SYMBOL(__netif_napi_del_locked);
-
- static int __napi_poll(struct napi_struct *n, bool *repoll)
- {
---
-2.39.5
-
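A driver-side sketch of the "already locked" NAPI API added above; everything except the netif_napi_*_locked() and netdev_lock()/netdev_unlock() helpers is illustrative.

#include <linux/netdevice.h>

/* Recreate a NAPI instance with netdev->lock already held, using the
 * _locked variants so napi_list stays protected by the instance lock.
 */
static void example_replace_napi(struct net_device *dev,
				 struct napi_struct *napi,
				 int (*poll)(struct napi_struct *, int))
{
	netdev_lock(dev);
	netif_napi_del_locked(napi);		/* __netif_napi_del_locked() + synchronize_net() */
	netif_napi_add_locked(dev, napi, poll);	/* uses NAPI_POLL_WEIGHT */
	netdev_unlock(dev);
}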
+++ /dev/null
-From fa448d203fb13c76dc22efb5e7521731b7d0c879 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 18 Feb 2025 19:29:40 +0100
-Subject: Revert "net: skb: introduce and use a single page frag cache"
-
-From: Paolo Abeni <pabeni@redhat.com>
-
-[ Upstream commit 011b0335903832facca86cd8ed05d7d8d94c9c76 ]
-
-This reverts commit dbae2b062824 ("net: skb: introduce and use a single
-page frag cache"). The intended goal of such change was to counter a
-performance regression introduced by commit 3226b158e67c ("net: avoid
-32 x truesize under-estimation for tiny skbs").
-
-Unfortunately, the blamed commit introduces another regression for the
-virtio_net driver. Such a driver calls napi_alloc_skb() with a tiny
-size, so that the whole head frag could fit a 512-byte block.
-
-The single page frag cache uses a 1K fragment for such allocation, and
-the additional overhead, under small UDP packets flood, makes the page
-allocator a bottleneck.
-
-Thanks to commit bf9f1baa279f ("net: add dedicated kmem_cache for
-typical/small skb->head"), this revert does not re-introduce the
-original regression. Actually, in the relevant test on top of this
-revert, I measure a small but noticeable positive delta, just above
-noise level.
-
-The revert itself required some additional mangling due to the
-introduction of the SKB_HEAD_ALIGN() helper and local lock infra in the
-affected code.
-
-Suggested-by: Eric Dumazet <edumazet@google.com>
-Fixes: dbae2b062824 ("net: skb: introduce and use a single page frag cache")
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-Link: https://patch.msgid.link/e649212fde9f0fdee23909ca0d14158d32bb7425.1738877290.git.pabeni@redhat.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Stable-dep-of: 6bc7e4eb0499 ("Revert "net: skb: introduce and use a single page frag cache"")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/netdevice.h | 1 -
- net/core/dev.c | 17 +++++++
- net/core/skbuff.c | 104 ++------------------------------------
- 3 files changed, 22 insertions(+), 100 deletions(-)
-
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index db4facb384684..48437fd44e32c 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -4050,7 +4050,6 @@ void netif_receive_skb_list(struct list_head *head);
- gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
- void napi_gro_flush(struct napi_struct *napi, bool flush_old);
- struct sk_buff *napi_get_frags(struct napi_struct *napi);
--void napi_get_frags_check(struct napi_struct *napi);
- gro_result_t napi_gro_frags(struct napi_struct *napi);
-
- static inline void napi_free_frags(struct napi_struct *napi)
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 6dfed2746c528..5e3a82eba041a 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -6814,6 +6814,23 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
- list_add_rcu(&napi->dev_list, higher); /* adds after higher */
- }
-
-+/* Double check that napi_get_frags() allocates skbs with
-+ * skb->head being backed by slab, not a page fragment.
-+ * This is to make sure bug fixed in 3226b158e67c
-+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
-+ * does not accidentally come back.
-+ */
-+static void napi_get_frags_check(struct napi_struct *napi)
-+{
-+ struct sk_buff *skb;
-+
-+ local_bh_disable();
-+ skb = napi_get_frags(napi);
-+ WARN_ON_ONCE(skb && skb->head_frag);
-+ napi_free_frags(napi);
-+ local_bh_enable();
-+}
-+
- void netif_napi_add_weight_locked(struct net_device *dev,
- struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index f251a99f8d421..d2697211e00a0 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -223,67 +223,9 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
- #define NAPI_SKB_CACHE_BULK 16
- #define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
-
--#if PAGE_SIZE == SZ_4K
--
--#define NAPI_HAS_SMALL_PAGE_FRAG 1
--#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc)
--
--/* specialized page frag allocator using a single order 0 page
-- * and slicing it into 1K sized fragment. Constrained to systems
-- * with a very limited amount of 1K fragments fitting a single
-- * page - to avoid excessive truesize underestimation
-- */
--
--struct page_frag_1k {
-- void *va;
-- u16 offset;
-- bool pfmemalloc;
--};
--
--static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
--{
-- struct page *page;
-- int offset;
--
-- offset = nc->offset - SZ_1K;
-- if (likely(offset >= 0))
-- goto use_frag;
--
-- page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-- if (!page)
-- return NULL;
--
-- nc->va = page_address(page);
-- nc->pfmemalloc = page_is_pfmemalloc(page);
-- offset = PAGE_SIZE - SZ_1K;
-- page_ref_add(page, offset / SZ_1K);
--
--use_frag:
-- nc->offset = offset;
-- return nc->va + offset;
--}
--#else
--
--/* the small page is actually unused in this build; add dummy helpers
-- * to please the compiler and avoid later preprocessor's conditionals
-- */
--#define NAPI_HAS_SMALL_PAGE_FRAG 0
--#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false
--
--struct page_frag_1k {
--};
--
--static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
--{
-- return NULL;
--}
--
--#endif
--
- struct napi_alloc_cache {
- local_lock_t bh_lock;
- struct page_frag_cache page;
-- struct page_frag_1k page_small;
- unsigned int skb_count;
- void *skb_cache[NAPI_SKB_CACHE_SIZE];
- };
-@@ -293,23 +235,6 @@ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
- .bh_lock = INIT_LOCAL_LOCK(bh_lock),
- };
-
--/* Double check that napi_get_frags() allocates skbs with
-- * skb->head being backed by slab, not a page fragment.
-- * This is to make sure bug fixed in 3226b158e67c
-- * ("net: avoid 32 x truesize under-estimation for tiny skbs")
-- * does not accidentally come back.
-- */
--void napi_get_frags_check(struct napi_struct *napi)
--{
-- struct sk_buff *skb;
--
-- local_bh_disable();
-- skb = napi_get_frags(napi);
-- WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
-- napi_free_frags(napi);
-- local_bh_enable();
--}
--
- void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
- {
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-@@ -816,11 +741,8 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
-
- /* If requested length is either too small or too big,
- * we use kmalloc() for skb->head allocation.
-- * When the small frag allocator is available, prefer it over kmalloc
-- * for small fragments
- */
-- if ((!NAPI_HAS_SMALL_PAGE_FRAG &&
-- len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)) ||
-+ if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
- len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
- (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
- skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
-@@ -830,32 +752,16 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
- goto skb_success;
- }
-
-+ len = SKB_HEAD_ALIGN(len);
-+
- if (sk_memalloc_socks())
- gfp_mask |= __GFP_MEMALLOC;
-
- local_lock_nested_bh(&napi_alloc_cache.bh_lock);
- nc = this_cpu_ptr(&napi_alloc_cache);
-- if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
-- /* we are artificially inflating the allocation size, but
-- * that is not as bad as it may look like, as:
-- * - 'len' less than GRO_MAX_HEAD makes little sense
-- * - On most systems, larger 'len' values lead to fragment
-- * size above 512 bytes
-- * - kmalloc would use the kmalloc-1k slab for such values
-- * - Builds with smaller GRO_MAX_HEAD will very likely do
-- * little networking, as that implies no WiFi and no
-- * tunnels support, and 32 bits arches.
-- */
-- len = SZ_1K;
-
-- data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
-- pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
-- } else {
-- len = SKB_HEAD_ALIGN(len);
--
-- data = page_frag_alloc(&nc->page, len, gfp_mask);
-- pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
-- }
-+ data = page_frag_alloc(&nc->page, len, gfp_mask);
-+ pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
- local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
-
- if (unlikely(!data))
---
-2.39.5
-
net-axienet-set-mac_managed_pm.patch
tcp-drop-secpath-at-the-same-time-as-we-currently-dr.patch
net-allow-small-head-cache-usage-with-large-max_skb_.patch
-net-make-sure-we-retain-napi-ordering-on-netdev-napi.patch
-eth-iavf-extend-the-netdev_lock-usage.patch
-net-add-netdev_lock-netdev_unlock-helpers.patch
-net-make-netdev_lock-protect-netdev-reg_state.patch
-net-add-netdev-up-protected-by-netdev_lock.patch
-net-protect-netdev-napi_list-with-netdev_lock.patch
-revert-net-skb-introduce-and-use-a-single-page-frag-.patch
rust-finish-using-custom-ffi-integer-types.patch
rust-map-long-to-isize-and-char-to-u8.patch
rust-cleanup-unnecessary-casts.patch
ftrace-correct-preemption-accounting-for-function-tracing.patch
ftrace-fix-accounting-of-adding-subops-to-a-manager-ops.patch
ftrace-do-not-add-duplicate-entries-in-subops-manager-ops.patch
+drm-select-drm_kms_helper-from-drm_gem_shmem_helper.patch
+tracing-fix-using-ret-variable-in-tracing_set_tracer.patch
+net-pse-pd-fix-deadlock-in-current-limit-functions.patch