net_mp_open_rxq is currently not used in the tree, as all callers
invoke __net_mp_open_rxq directly, and net_mp_close_rxq is used in
only one place, with all other call sites using __net_mp_close_rxq.
Consolidate into a single API, netif_mp_{open,close}_rxq, using the
netif_ prefix to indicate that the caller is responsible for locking.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Co-developed-by: David Wei <dw@davidwei.uk>
Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Link: https://patch.msgid.link/20260402231031.447597-6-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *p);
-int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack);
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *old_p);
-void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
+void netif_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p);
/**
}
if (netdev) {
- if (ifq->if_rxq != -1)
- net_mp_close_rxq(netdev, ifq->if_rxq, &p);
+ if (ifq->if_rxq != -1) {
+ netdev_lock(netdev);
+ netif_mp_close_rxq(netdev, ifq->if_rxq, &p);
+ netdev_unlock(netdev);
+ }
netdev_put(netdev, &netdev_tracker);
}
ifq->if_rxq = -1;
mp_param.rx_page_size = 1U << ifq->niov_shift;
mp_param.mp_ops = &io_uring_pp_zc_ops;
mp_param.mp_priv = ifq;
- ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
+ ret = netif_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
if (ret)
goto netdev_put_unlock;
netdev_unlock(ifq->netdev);
rxq_idx = get_netdev_rx_queue_index(rxq);
- __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
+ netif_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
}
percpu_ref_kill(&binding->ref);
u32 xa_idx;
int err;
- err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
+ err = netif_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
if (err)
return err;
return 0;
err_close_rxq:
- __net_mp_close_rxq(dev, rxq_idx, &mp_params);
+ netif_mp_close_rxq(dev, rxq_idx, &mp_params);
return err;
}
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
-int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack)
{
return ret;
}
-int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
- struct pp_memory_provider_params *p)
-{
- int ret;
-
- netdev_lock(dev);
- ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
- netdev_unlock(dev);
- return ret;
-}
-
-void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
const struct pp_memory_provider_params *old_p)
{
struct netdev_queue_config qcfg[2];
err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
WARN_ON(err && err != -ENETDOWN);
}
-
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *old_p)
-{
- netdev_lock(dev);
- __net_mp_close_rxq(dev, ifq_idx, old_p);
- netdev_unlock(dev);
-}