static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
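+ /* Drop netdev->lock across register_netdevice(); the working
+  * assumption is that registration takes this lock itself, so keeping
+  * it held here would deadlock. netdev_released tells the "out:" path
+  * to skip the final unlock.
+  */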
+ mutex_unlock(&adapter->netdev->lock);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
	err);
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
out:
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!netdev_released)
+ mutex_unlock(&adapter->netdev->lock);
rtnl_unlock();
}
struct iavf_adapter *adapter = container_of(work,
struct iavf_adapter,
watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
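+ /* netdev->lock is taken before crit_lock and must be released on
+  * every exit path of the watchdog task below.
+  */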
+ mutex_lock(&netdev->lock);
if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&netdev->lock);
return;
+ }
goto restart_watchdog;
}
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
* as it can loop forever
*/
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
HZ * 2);
return;
case __IAVF_REMOVE:
default:
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return;
}
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ mutex_unlock(&netdev->lock);
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
return -EIO;
}
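+ /* Take netdev->lock ahead of crit_lock, matching the ordering used by
+  * the watchdog task.
+  */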
+ mutex_lock(&netdev->lock);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+ mutex_unlock(&netdev->lock);
return -EBUSY;
+ }
usleep_range(500, 1000);
}
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return 0;
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return err;
}
u64 aq_to_restore;
int status;
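+ /* As in iavf_open(), netdev->lock is acquired before crit_lock to keep
+  * the lock ordering consistent.
+  */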
+ mutex_lock(&netdev->lock);
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return 0;
}
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
netif_device_detach(netdev);
+ mutex_lock(&netdev->lock);
mutex_lock(&adapter->crit_lock);
if (netif_running(netdev)) {
iavf_reset_interrupt_capability(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return 0;
}
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
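+ /* netdev->lock is taken only after unregister_netdev(), which is
+  * assumed to manage the lock itself while tearing down the netdev;
+  * the remaining teardown below runs with it held.
+  */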
+ mutex_lock(&netdev->lock);
mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
mutex_destroy(&hw->aq.asq_mutex);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);