Commit
cc34acd577f1 ("docs: net: document new locking reality")
introduced netif_ vs dev_ function semantics: the former expects a locked
netdev, the latter takes care of the locking itself. We don't strictly
follow these semantics on either side, but there are more dev_xxx handlers
now that don't fit. Rename them to netif_xxx where appropriate.
Note that one dev_set_threaded call still remains in mt76 for a debugfs file.
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250717172333.1288349-7-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
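For illustration, a minimal sketch of how a caller picks between the two
variants under the convention above; the foo_*() helpers are hypothetical
and not part of this patch:

  #include <linux/netdevice.h>

  /* Hypothetical example, not part of this patch. */
  static void foo_enable_threaded_napi_unlocked(struct net_device *dev)
  {
  	/* Caller does not hold the instance lock: dev_set_threaded()
  	 * wraps netif_set_threaded() in netdev_lock()/netdev_unlock().
  	 */
  	dev_set_threaded(dev, true);
  }

  static void foo_enable_threaded_napi_locked(struct net_device *dev)
  {
  	/* Caller already holds the instance lock (e.g. an ops-locked
  	 * path): call the netif_ variant directly.
  	 */
  	netif_set_threaded(dev, true);
  }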
struct lock_class_key* qdisc_tx_busylock
bool proto_down
unsigned:1 wol_enabled
-unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded)
+unsigned:1 threaded napi_poll(napi_enable,netif_set_threaded)
unsigned_long:1 see_all_hwtstamp_requests
unsigned_long:1 change_proto_down
unsigned_long:1 netns_immutable
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx);
}
strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
sizeof(mlxsw_pci->napi_dev_rx->name));
- dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+ netif_set_threaded(mlxsw_pci->napi_dev_rx, true);
return 0;
if (info->coalesce_irqs) {
netdev_sw_irq_coalesce_default_on(ndev);
if (num_present_cpus() == 1)
- dev_set_threaded(ndev, true);
+ netif_set_threaded(ndev, true);
}
/* Network device register */
if (ret < 0)
goto err_free_handshake_queue;
- dev_set_threaded(dev, true);
+ netif_set_threaded(dev, true);
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
- dev_set_threaded(ar->napi_dev, true);
+ netif_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
/* IRQs are left enabled when we restart due to a firmware crash */
if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
return napi_complete_done(n, 0);
}
+int netif_set_threaded(struct net_device *dev, bool threaded);
int dev_set_threaded(struct net_device *dev, bool threaded);
void napi_disable(struct napi_struct *n);
if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
/* Paired with smp_mb__before_atomic() in
- * napi_enable()/dev_set_threaded().
+ * napi_enable()/netif_set_threaded().
* Use READ_ONCE() to guarantee a complete
* read on napi->thread. Only call
* wake_up_process() when it's not NULL.
return 0;
}
-int dev_set_threaded(struct net_device *dev, bool threaded)
+int netif_set_threaded(struct net_device *dev, bool threaded)
{
struct napi_struct *napi;
int err = 0;
return err;
}
-EXPORT_SYMBOL(dev_set_threaded);
+EXPORT_SYMBOL(netif_set_threaded);
/**
* netif_queue_set_napi - Associate queue with the napi
netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(netdev_state_change);
+
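+/* Locked wrapper: takes the netdev instance lock and forwards to
+ * netif_set_threaded().
+ */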
+int dev_set_threaded(struct net_device *dev, bool threaded)
+{
+ int ret;
+
+ netdev_lock(dev);
+ ret = netif_set_threaded(dev, threaded);
+ netdev_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(dev_set_threaded);
if (val != 0 && val != 1)
return -EOPNOTSUPP;
- ret = dev_set_threaded(dev, val);
+ ret = netif_set_threaded(dev, val);
return ret;
}