eth: iavf: extend the netdev_lock usage
author Jakub Kicinski <kuba@kernel.org>
Sat, 11 Jan 2025 07:13:39 +0000 (23:13 -0800)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 14 Jan 2025 03:08:30 +0000 (19:08 -0800)
iavf already uses netdev->lock to protect shapers.
In an upcoming series we'll try to protect NAPI instances
with netdev->lock.

We need to modify the protection a bit. All NAPI-related
calls in the driver need to be made consistently under the lock.
This will allow us to easily switch to a "we already hold
the lock" NAPI API later.

register_netdevice(), on the other hand, must not be called
under netdev_lock(), as we do not intend to provide an
"already locked" version of that call.

Link: https://patch.msgid.link/20250111071339.3709071-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
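
The locking convention the patch settles on can be summarized with a short
sketch. This is illustrative only: struct example_adapter and
example_finish_config() are stand-ins for the iavf symbols in the diff below,
not real driver code. The idea it mirrors is: take netdev->lock together with
crit_lock on any path that may touch NAPI state, release both on every return
path, and drop netdev->lock before register_netdevice(), which has no
"already locked" variant.

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Stand-in for struct iavf_adapter; only the fields the sketch needs. */
struct example_adapter {
	struct net_device *netdev;
	struct mutex crit_lock;
};

static void example_finish_config(struct example_adapter *adapter)
{
	bool netdev_released = false;

	rtnl_lock();
	/* NAPI-related (re)configuration happens under netdev->lock ... */
	mutex_lock(&adapter->netdev->lock);
	mutex_lock(&adapter->crit_lock);

	if (adapter->netdev->reg_state != NETREG_REGISTERED) {
		/* ... but register_netdevice() must not run under it, so
		 * drop the lock first and remember not to unlock it again.
		 */
		mutex_unlock(&adapter->netdev->lock);
		netdev_released = true;
		if (register_netdevice(adapter->netdev))
			goto out;
	}

	/* queue count updates, netif_set_real_num_*_queues(), etc. */
out:
	mutex_unlock(&adapter->crit_lock);
	if (!netdev_released)
		mutex_unlock(&adapter->netdev->lock);
	rtnl_unlock();
}

The same take netdev->lock then crit_lock, unlock-both-on-every-return
pattern is what the watchdog, open, close, suspend and remove hunks below
apply by hand.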
drivers/net/ethernet/intel/iavf/iavf_main.c

index a9e54866ae6bfa4a792368c1b1d8ed140bb159dc..7740f446c73f4ae4ce01546e80f83c5e96dc85bf 100644 (file)
@@ -1968,6 +1968,7 @@ err:
 static void iavf_finish_config(struct work_struct *work)
 {
        struct iavf_adapter *adapter;
+       bool netdev_released = false;
        int pairs, err;
 
        adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -1988,7 +1989,16 @@ static void iavf_finish_config(struct work_struct *work)
 
        switch (adapter->state) {
        case __IAVF_DOWN:
+               /* Set the real number of queues when reset occurs while
+                * state == __IAVF_DOWN
+                */
+               pairs = adapter->num_active_queues;
+               netif_set_real_num_rx_queues(adapter->netdev, pairs);
+               netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
                if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+                       mutex_unlock(&adapter->netdev->lock);
+                       netdev_released = true;
                        err = register_netdevice(adapter->netdev);
                        if (err) {
                                dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
@@ -2003,11 +2013,7 @@ static void iavf_finish_config(struct work_struct *work)
                                goto out;
                        }
                }
-
-               /* Set the real number of queues when reset occurs while
-                * state == __IAVF_DOWN
-                */
-               fallthrough;
+               break;
        case __IAVF_RUNNING:
                pairs = adapter->num_active_queues;
                netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2020,7 +2026,8 @@ static void iavf_finish_config(struct work_struct *work)
 
 out:
        mutex_unlock(&adapter->crit_lock);
-       mutex_unlock(&adapter->netdev->lock);
+       if (!netdev_released)
+               mutex_unlock(&adapter->netdev->lock);
        rtnl_unlock();
 }
 
@@ -2713,12 +2720,16 @@ static void iavf_watchdog_task(struct work_struct *work)
        struct iavf_adapter *adapter = container_of(work,
                                                    struct iavf_adapter,
                                                    watchdog_task.work);
+       struct net_device *netdev = adapter->netdev;
        struct iavf_hw *hw = &adapter->hw;
        u32 reg_val;
 
+       mutex_lock(&netdev->lock);
        if (!mutex_trylock(&adapter->crit_lock)) {
-               if (adapter->state == __IAVF_REMOVE)
+               if (adapter->state == __IAVF_REMOVE) {
+                       mutex_unlock(&netdev->lock);
                        return;
+               }
 
                goto restart_watchdog;
        }
@@ -2730,30 +2741,35 @@ static void iavf_watchdog_task(struct work_struct *work)
        case __IAVF_STARTUP:
                iavf_startup(adapter);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_VERSION_CHECK:
                iavf_init_version_check(adapter);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_GET_RESOURCES:
                iavf_init_get_resources(adapter);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_EXTENDED_CAPS:
                iavf_init_process_extended_caps(adapter);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_CONFIG_ADAPTER:
                iavf_init_config_adapter(adapter);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
@@ -2765,6 +2781,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                         * as it can loop forever
                         */
                        mutex_unlock(&adapter->crit_lock);
+                       mutex_unlock(&netdev->lock);
                        return;
                }
                if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
@@ -2773,6 +2790,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
                        iavf_shutdown_adminq(hw);
                        mutex_unlock(&adapter->crit_lock);
+                       mutex_unlock(&netdev->lock);
                        queue_delayed_work(adapter->wq,
                                           &adapter->watchdog_task, (5 * HZ));
                        return;
@@ -2780,6 +2798,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                /* Try again from failed step*/
                iavf_change_state(adapter, adapter->last_state);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
                return;
        case __IAVF_COMM_FAILED:
@@ -2792,6 +2811,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                        iavf_change_state(adapter, __IAVF_INIT_FAILED);
                        adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
                        mutex_unlock(&adapter->crit_lock);
+                       mutex_unlock(&netdev->lock);
                        return;
                }
                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
@@ -2811,12 +2831,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
                return;
        case __IAVF_RESETTING:
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   HZ * 2);
                return;
@@ -2847,6 +2869,7 @@ static void iavf_watchdog_task(struct work_struct *work)
        case __IAVF_REMOVE:
        default:
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                return;
        }
 
@@ -2858,12 +2881,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task, HZ * 2);
                return;
        }
 
        mutex_unlock(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 restart_watchdog:
        if (adapter->state >= __IAVF_DOWN)
                queue_work(adapter->wq, &adapter->adminq_task);
@@ -4340,14 +4365,17 @@ static int iavf_open(struct net_device *netdev)
                return -EIO;
        }
 
+       mutex_lock(&netdev->lock);
        while (!mutex_trylock(&adapter->crit_lock)) {
                /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
                 * is already taken and iavf_open is called from an upper
                 * device's notifier reacting on NETDEV_REGISTER event.
                 * We have to leave here to avoid dead lock.
                 */
-               if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+               if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+                       mutex_unlock(&netdev->lock);
                        return -EBUSY;
+               }
 
                usleep_range(500, 1000);
        }
@@ -4396,6 +4424,7 @@ static int iavf_open(struct net_device *netdev)
        iavf_irq_enable(adapter, true);
 
        mutex_unlock(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 
        return 0;
 
@@ -4408,6 +4437,7 @@ err_setup_tx:
        iavf_free_all_tx_resources(adapter);
 err_unlock:
        mutex_unlock(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 
        return err;
 }
@@ -4429,10 +4459,12 @@ static int iavf_close(struct net_device *netdev)
        u64 aq_to_restore;
        int status;
 
+       mutex_lock(&netdev->lock);
        mutex_lock(&adapter->crit_lock);
 
        if (adapter->state <= __IAVF_DOWN_PENDING) {
                mutex_unlock(&adapter->crit_lock);
+               mutex_unlock(&netdev->lock);
                return 0;
        }
 
@@ -4466,6 +4498,7 @@ static int iavf_close(struct net_device *netdev)
        iavf_free_traffic_irqs(adapter);
 
        mutex_unlock(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 
        /* We explicitly don't free resources here because the hardware is
         * still active and can DMA into memory. Resources are cleared in
@@ -5342,6 +5375,7 @@ static int iavf_suspend(struct device *dev_d)
 
        netif_device_detach(netdev);
 
+       mutex_lock(&netdev->lock);
        mutex_lock(&adapter->crit_lock);
 
        if (netif_running(netdev)) {
@@ -5353,6 +5387,7 @@ static int iavf_suspend(struct device *dev_d)
        iavf_reset_interrupt_capability(adapter);
 
        mutex_unlock(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 
        return 0;
 }
@@ -5451,6 +5486,7 @@ static void iavf_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
+       mutex_lock(&netdev->lock);
        mutex_lock(&adapter->crit_lock);
        dev_info(&adapter->pdev->dev, "Removing device\n");
        iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5487,6 +5523,7 @@ static void iavf_remove(struct pci_dev *pdev)
        mutex_destroy(&hw->aq.asq_mutex);
        mutex_unlock(&adapter->crit_lock);
        mutex_destroy(&adapter->crit_lock);
+       mutex_unlock(&netdev->lock);
 
        iounmap(hw->hw_addr);
        pci_release_regions(pdev);