__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
enum netif_lease_dir dir);
-struct netdev_rx_queue *
-netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq);
-void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
- struct net_device *dev);
-
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
struct netdev_rx_queue *rxq_src);
bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
+bool netif_is_queue_leasee(const struct net_device *dev);
void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
const struct pp_memory_provider_params *p);
struct netdev_rx_queue *rxq;
struct net *net, *peer_net;
- rxq = __netif_get_rx_queue_lease(&netdev, &q_idx,
- NETIF_PHYS_TO_VIRT);
+ rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, NETIF_PHYS_TO_VIRT);
if (!rxq || orig_netdev == netdev)
return 0;
return -ENOMEM;
}
+/* Emit memory-provider and XSK attributes for @rxq into @rsp.
+ * NOTE(review): presumably called with the lock protecting @rxq's
+ * device held -- confirm against callers.
+ */
+static int
+__netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct netdev_rx_queue *rxq)
+{
+ struct pp_memory_provider_params *params = &rxq->mp_params;
+
+ /* A provider reports its own attributes via ->nl_fill(); any
+  * nonzero return is treated as the message running out of room.
+  */
+ if (params->mp_ops &&
+ params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
+ return -EMSGSIZE;
+
+#ifdef CONFIG_XDP_SOCKETS
+ /* An attached XSK pool is reported as an empty, presence-only nest. */
+ if (rxq->pool)
+ if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
+ return -EMSGSIZE;
+#endif
+ return 0;
+}
+
+/* As __netdev_nl_queue_fill_mp(), but when @rxq is leased and @netdev
+ * is the virtual (leasee) side, report the backing physical queue
+ * instead of the virtual one.
+ */
+static int
+netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct net_device *netdev,
+ struct netdev_rx_queue *rxq)
+{
+ struct netdev_rx_queue *hw_rxq;
+ int ret;
+
+ hw_rxq = rxq->lease;
+ if (!hw_rxq || !netif_is_queue_leasee(netdev))
+ return __netdev_nl_queue_fill_mp(rsp, rxq);
+
+ /* Lock order is always virtual -> physical, see
+  * netdev_nl_queue_create_doit().
+  */
+ netdev_lock(hw_rxq->dev);
+ ret = __netdev_nl_queue_fill_mp(rsp, hw_rxq);
+ netdev_unlock(hw_rxq->dev);
+ return ret;
+}
+
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
- struct pp_memory_provider_params *params;
- struct net_device *orig_netdev = netdev;
- struct netdev_rx_queue *rxq, *rxq_lease;
+ struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
void *hdr;
goto nla_put_failure;
if (netdev_nl_queue_fill_lease(rsp, netdev, q_idx, q_type))
goto nla_put_failure;
-
- rxq_lease = netif_get_rx_queue_lease_locked(&netdev, &q_idx);
- if (rxq_lease)
- rxq = rxq_lease;
- params = &rxq->mp_params;
- if (params->mp_ops &&
- params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
- goto nla_put_failure_lease;
-#ifdef CONFIG_XDP_SOCKETS
- if (rxq->pool)
- if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
- goto nla_put_failure_lease;
-#endif
- netif_put_rx_queue_lease_locked(orig_netdev, netdev);
+ if (netdev_nl_queue_fill_mp(rsp, netdev, rxq))
+ goto nla_put_failure;
break;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(netdev, q_idx);
return 0;
-nla_put_failure_lease:
- netif_put_rx_queue_lease_locked(orig_netdev, netdev);
nla_put_failure:
genlmsg_cancel(rsp, hdr);
return -EMSGSIZE;
unsigned int idx,
enum netdev_queue_type type)
{
- struct net_device *orig_dev = dev;
+ struct netdev_rx_queue *hw_rxq;
struct device *dma_dev;
+ netdev_ops_assert_locked(dev);
+
/* Only RX side supports queue leasing today. */
if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
return __netdev_queue_get_dma_dev(dev, idx);
-
- if (!netif_get_rx_queue_lease_locked(&dev, &idx))
+ if (!netif_is_queue_leasee(dev))
return NULL;
- dma_dev = __netdev_queue_get_dma_dev(dev, idx);
- netif_put_rx_queue_lease_locked(orig_dev, dev);
+ hw_rxq = __netif_get_rx_queue(dev, idx)->lease;
+
+ netdev_lock(hw_rxq->dev);
+ idx = get_netdev_rx_queue_index(hw_rxq);
+ dma_dev = __netdev_queue_get_dma_dev(hw_rxq->dev, idx);
+ netdev_unlock(hw_rxq->dev);
+
return dma_dev;
}
return false;
}
+/* True if @dev sits on the virtual (leasee) end of a queue lease,
+ * i.e. its rx queues may be borrowed from a physical device.
+ */
+bool netif_is_queue_leasee(const struct net_device *dev)
+{
+ return netif_lease_dir_ok(dev, NETIF_VIRT_TO_PHYS);
+}
+
struct netdev_rx_queue *
__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
enum netif_lease_dir dir)
return rxq;
}
-struct netdev_rx_queue *
-netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx)
-{
- struct net_device *orig_dev = *dev;
- struct netdev_rx_queue *rxq;
-
- /* Locking order is always from the virtual to the physical device
- * see netdev_nl_queue_create_doit().
- */
- netdev_ops_assert_locked(orig_dev);
- rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS);
- if (rxq && orig_dev != *dev)
- netdev_lock(*dev);
- return rxq;
-}
-
-void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
- struct net_device *dev)
-{
- if (orig_dev != dev)
- netdev_unlock(dev);
-}
-
/* See also page_pool_is_unreadable() */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx)
{
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack)
{
- struct net_device *orig_dev = dev;
int ret;
if (!netdev_need_ops_lock(dev))
if (!netif_rxq_is_leased(dev, rxq_idx))
return __netif_mp_open_rxq(dev, rxq_idx, p, extack);
- if (!netif_get_rx_queue_lease_locked(&dev, &rxq_idx)) {
+ if (!__netif_get_rx_queue_lease(&dev, &rxq_idx, NETIF_VIRT_TO_PHYS)) {
NL_SET_ERR_MSG(extack, "rx queue leased to a virtual netdev");
return -EBUSY;
}
if (!dev->dev.parent) {
NL_SET_ERR_MSG(extack, "rx queue belongs to a virtual netdev");
- ret = -EOPNOTSUPP;
- goto out;
+ return -EOPNOTSUPP;
}
+ netdev_lock(dev);
ret = __netif_mp_open_rxq(dev, rxq_idx, p, extack);
-out:
- netif_put_rx_queue_lease_locked(orig_dev, dev);
+ netdev_unlock(dev);
return ret;
}
+/* Tear down the memory provider on @dev's rx queue @ifq_idx, following
+ * a queue lease to the backing physical device when needed.
+ */
void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
const struct pp_memory_provider_params *old_p)
{
- struct net_device *orig_dev = dev;
-
if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
return;
if (!netif_rxq_is_leased(dev, ifq_idx))
return __netif_mp_close_rxq(dev, ifq_idx, old_p);
- if (WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &ifq_idx)))
+ /* Redirect @dev/@ifq_idx to the backing physical queue; a leased
+  * queue with no resolvable lease is a bug.
+  */
+ if (WARN_ON_ONCE(!__netif_get_rx_queue_lease(&dev, &ifq_idx,
+ NETIF_VIRT_TO_PHYS)))
return;
-
+ netdev_lock(dev);
__netif_mp_close_rxq(dev, ifq_idx, old_p);
- netif_put_rx_queue_lease_locked(orig_dev, dev);
+ netdev_unlock(dev);
}
void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
+#include "../core/dev.h"
+
#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);
+/* Drop any XSK pool cached for @queue_id on both the rx and tx sides
+ * of @dev.  Bounds use the allocated (num_*) queue counts, so ids
+ * beyond real_num_* but within the allocation are cleared too.
+ */
+static void __xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+{
+ if (queue_id < dev->num_rx_queues)
+ dev->_rx[queue_id].pool = NULL;
+ if (queue_id < dev->num_tx_queues)
+ dev->_tx[queue_id].pool = NULL;
+}
+
+/* Unregister the XSK pool at @queue_id, following a queue lease to the
+ * backing physical device when needed.
+ */
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
- struct net_device *orig_dev = dev;
- unsigned int id = queue_id;
+ struct netdev_rx_queue *hw_rxq;
+
+ /* Tx-only ids lie beyond the rx queue array: probing the lease
+  * state first would index dev->_rx out of bounds, so bound-check
+  * before netif_rxq_is_leased() (mirrors xsk_reg_pool_at_qid()).
+  */
+ if (queue_id >= dev->real_num_rx_queues ||
+ !netif_rxq_is_leased(dev, queue_id))
+ return __xsk_clear_pool_at_qid(dev, queue_id);
+ /* Only the virtual (leasee) side should reach a leased queue. */
+ WARN_ON_ONCE(!netif_is_queue_leasee(dev));
- if (id < dev->real_num_rx_queues)
- WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
+ hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
- if (id < dev->num_rx_queues)
- dev->_rx[id].pool = NULL;
- if (id < dev->num_tx_queues)
- dev->_tx[id].pool = NULL;
+ /* Lock order is always virtual -> physical. */
+ netdev_lock(hw_rxq->dev);
+ queue_id = get_netdev_rx_queue_index(hw_rxq);
+ __xsk_clear_pool_at_qid(hw_rxq->dev, queue_id);
+ netdev_unlock(hw_rxq->dev);
+}
+
+/* Wire @pool into the rx/tx queue structs for @queue_id on @dev.
+ * NOTE(review): assumes the caller serializes access to the queue
+ * arrays (see the netdev_lock() taken in xsk_reg_pool_at_qid()) --
+ * confirm for the non-leased path.
+ */
+static int __xsk_reg_pool_at_qid(struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ /* Refuse to replace a pool that is already registered here. */
+ if (xsk_get_pool_from_qid(dev, queue_id))
+ return -EBUSY;
+
+ if (queue_id < dev->real_num_rx_queues)
+ dev->_rx[queue_id].pool = pool;
+ if (queue_id < dev->real_num_tx_queues)
+ dev->_tx[queue_id].pool = pool;
- netif_put_rx_queue_lease_locked(orig_dev, dev);
+ return 0;
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id)
{
- struct net_device *orig_dev = dev;
- unsigned int id = queue_id;
- int ret = 0;
+ struct netdev_rx_queue *hw_rxq;
+ int ret;
- if (id >= max(dev->real_num_rx_queues,
- dev->real_num_tx_queues))
+ if (queue_id >= max(dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
return -EINVAL;
- if (id < dev->real_num_rx_queues) {
- if (!netif_get_rx_queue_lease_locked(&dev, &id))
- return -EBUSY;
- if (xsk_get_pool_from_qid(dev, id)) {
- ret = -EBUSY;
- goto out;
- }
- }
+ /* Tx-only or non-leased queues register directly on @dev. */
+ if (queue_id >= dev->real_num_rx_queues ||
+ !netif_rxq_is_leased(dev, queue_id))
+ return __xsk_reg_pool_at_qid(dev, pool, queue_id);
+ /* A leased queue is only usable from the virtual (leasee) side. */
+ if (!netif_is_queue_leasee(dev))
+ return -EBUSY;
+
+ /* Follow the lease to the physical queue backing this virtual one. */
+ hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
+
+ /* Lock order is always virtual -> physical. */
+ netdev_lock(hw_rxq->dev);
+ queue_id = get_netdev_rx_queue_index(hw_rxq);
+ ret = __xsk_reg_pool_at_qid(hw_rxq->dev, pool, queue_id);
+ netdev_unlock(hw_rxq->dev);
- if (id < dev->real_num_rx_queues)
- dev->_rx[id].pool = pool;
- if (id < dev->real_num_tx_queues)
- dev->_tx[id].pool = pool;
-out:
- netif_put_rx_queue_lease_locked(orig_dev, dev);
return ret;
}