* @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
* for this queue. Return NULL on error.
*
+ * @ndo_queue_create: Create a new RX queue which leases an RX queue from
+ * another device. Ops on this queue are redirected to the
+ * leased queue, e.g. when opening a memory provider. Return
+ * the new queue id on success or a negative error code on
+ * failure.
+ *
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
* the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
* be called for an interface which is open.
int idx);
struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
int idx);
+ int (*ndo_queue_create)(struct net_device *dev);
};
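Tying the @ndo_queue_create contract above to a concrete shape, a hedged driver-side sketch follows. Everything prefixed with foo_ is hypothetical and the use of netif_set_real_num_rx_queues() to grow the queue count is an assumption; only the return-value contract (new queue id or negative errno) comes from the kerneldoc.

/* Hypothetical driver hook, for illustration only. */
static int foo_queue_create(struct net_device *dev)
{
	unsigned int new_idx = dev->real_num_rx_queues;
	int err;

	/* Room left in the statically sized RX queue array? */
	if (new_idx >= dev->num_rx_queues)
		return -ENOSPC;

	/* Driver-private ring/buffer allocation would go here. */

	/* netif_set_real_num_rx_queues() expects RTNL for registered
	 * devices; a real driver may need different plumbing here.
	 */
	err = netif_set_real_num_rx_queues(dev, new_idx + 1);
	if (err)
		return err;

	/* Hand the id of the newly appended RX queue back to the core. */
	return new_idx;
}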
-bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx);
+bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
+bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
/**
* DOC: Lockless queue stopping / waking helpers.
})
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
-
-#endif
+bool netdev_can_create_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_can_lease_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_queue_busy(struct net_device *dev, int idx,
+ struct netlink_ext_ack *extack);
+#endif /* _LINUX_NET_QUEUES_H */
#endif
struct napi_struct *napi;
struct pp_memory_provider_params mp_params;
+ /* Cross-link to the paired RX queue on the other device while a lease
+ * is active, NULL otherwise. lease_tracker tracks the device reference
+ * held for the lifetime of the lease.
+ */
+ struct netdev_rx_queue *lease;
+ netdevice_tracker lease_tracker;
} ____cacheline_aligned_in_smp;
/*
}
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
-
-#endif
+void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+#endif /* _LINUX_NETDEV_RX_QUEUE_H */
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+struct xsk_buff_pool *xsk_get_pool_from_qid(const struct net_device *dev,
u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
return __netdev_put_lock_ops_compat(dev, net);
}
+/* Release the reference tracked by @tracker and return @dev with the
+ * instance lock held, or NULL if the device is going away; the
+ * reference is dropped in either case.
+ */
+struct net_device *
+netdev_put_lock(struct net_device *dev, netdevice_tracker *tracker)
+{
+ netdev_tracker_free(dev, tracker);
+ return __netdev_put_lock(dev, dev_net(dev));
+}
+
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
unsigned long *index)
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net);
+struct net_device *netdev_put_lock(struct net_device *dev,
+ netdevice_tracker *tracker);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
unsigned long *index);
int netdev_nl_queue_create_doit(struct sk_buff *skb, struct genl_info *info)
{
- return -EOPNOTSUPP;
+ const int qmaxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1;
+ const int lmaxtype = ARRAY_SIZE(netdev_lease_nl_policy) - 1;
+ int err, ifindex, ifindex_lease, queue_id, queue_id_lease;
+ struct nlattr *qtb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct nlattr *ltb[ARRAY_SIZE(netdev_lease_nl_policy)];
+ struct netdev_rx_queue *rxq, *rxq_lease;
+ struct net_device *dev, *dev_lease;
+ netdevice_tracker dev_tracker;
+ struct nlattr *nest;
+ struct sk_buff *rsp;
+ void *hdr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_LEASE))
+ return -EINVAL;
+ if (nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]) !=
+ NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
+
+ nest = info->attrs[NETDEV_A_QUEUE_LEASE];
+ err = nla_parse_nested(ltb, lmaxtype, nest,
+ netdev_lease_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+ if (NL_REQ_ATTR_CHECK(info->extack, nest, ltb, NETDEV_A_LEASE_IFINDEX) ||
+ NL_REQ_ATTR_CHECK(info->extack, nest, ltb, NETDEV_A_LEASE_QUEUE))
+ return -EINVAL;
+ if (ltb[NETDEV_A_LEASE_NETNS_ID]) {
+ NL_SET_BAD_ATTR(info->extack, ltb[NETDEV_A_LEASE_NETNS_ID]);
+ return -EINVAL;
+ }
+
+ ifindex_lease = nla_get_u32(ltb[NETDEV_A_LEASE_IFINDEX]);
+
+ nest = ltb[NETDEV_A_LEASE_QUEUE];
+ err = nla_parse_nested(qtb, qmaxtype, nest,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+ if (NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_TYPE))
+ return -EINVAL;
+ if (nla_get_u32(qtb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+ if (ifindex == ifindex_lease) {
+ NL_SET_ERR_MSG(info->extack,
+ "Lease ifindex cannot be the same as queue creation ifindex");
+ return -EINVAL;
+ }
+
+ queue_id_lease = nla_get_u32(qtb[NETDEV_A_QUEUE_ID]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_genlmsg_free;
+ }
+
+ /* Locking order is always from the virtual to the physical device,
+ * matching the order in which applications open the memory provider
+ * later on.
+ */
+ dev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto err_genlmsg_free;
+ }
+ if (!netdev_can_create_queue(dev, info->extack)) {
+ err = -EINVAL;
+ goto err_unlock_dev;
+ }
+
+ dev_lease = netdev_get_by_index(genl_info_net(info), ifindex_lease,
+ &dev_tracker, GFP_KERNEL);
+ if (!dev_lease) {
+ err = -ENODEV;
+ goto err_unlock_dev;
+ }
+ if (!netdev_can_lease_queue(dev_lease, info->extack)) {
+ netdev_put(dev_lease, &dev_tracker);
+ err = -EINVAL;
+ goto err_unlock_dev;
+ }
+
+ dev_lease = netdev_put_lock(dev_lease, &dev_tracker);
+ if (!dev_lease) {
+ err = -ENODEV;
+ goto err_unlock_dev;
+ }
+ if (queue_id_lease >= dev_lease->real_num_rx_queues) {
+ err = -ERANGE;
+ NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_ID]);
+ goto err_unlock_dev_lease;
+ }
+ if (netdev_queue_busy(dev_lease, queue_id_lease, info->extack)) {
+ err = -EBUSY;
+ goto err_unlock_dev_lease;
+ }
+
+ rxq_lease = __netif_get_rx_queue(dev_lease, queue_id_lease);
+ rxq = __netif_get_rx_queue(dev, dev->real_num_rx_queues - 1);
+
+ /* All leases on a virtual device must target the same physical
+ * device; the most recently created RX queue records the current
+ * lease peer, if any.
+ */
+ if (rxq->lease && rxq->lease->dev != dev_lease) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(info->extack,
+ "Leasing multiple queues from different devices not supported");
+ goto err_unlock_dev_lease;
+ }
+
+ err = queue_id = dev->queue_mgmt_ops->ndo_queue_create(dev);
+ if (err < 0) {
+ NL_SET_ERR_MSG(info->extack,
+ "Device is unable to create a new queue");
+ goto err_unlock_dev_lease;
+ }
+
+ rxq = __netif_get_rx_queue(dev, queue_id);
+ netdev_rx_queue_lease(rxq, rxq_lease);
+
+ nla_put_u32(rsp, NETDEV_A_QUEUE_ID, queue_id);
+ genlmsg_end(rsp, hdr);
+
+ netdev_unlock(dev_lease);
+ netdev_unlock(dev);
+
+ return genlmsg_reply(rsp, info);
+
+err_unlock_dev_lease:
+ netdev_unlock(dev_lease);
+err_unlock_dev:
+ netdev_unlock(dev);
+err_genlmsg_free:
+ nlmsg_free(rsp);
+ return err;
}
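For completeness, here is a hedged userspace sketch of the request this handler parses: a top-level ifindex/type pair plus a nested lease carrying the physical device and the RX queue to lease. It assumes NETDEV_CMD_QUEUE_CREATE and the NETDEV_A_LEASE_* attributes are exported through <linux/netdev.h> elsewhere in this series and that the "netdev" generic netlink family id has already been resolved; none of that is shown in this excerpt, and the foo_ function name is made up.

#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static int foo_queue_create_request(struct mnl_socket *nl, uint16_t family_id,
				    uint32_t ifindex, uint32_t lease_ifindex,
				    uint32_t lease_queue_id)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlattr *lease, *queue;
	struct genlmsghdr *genl;
	struct nlmsghdr *nlh;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = family_id;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
	genl->cmd = NETDEV_CMD_QUEUE_CREATE;	/* assumed uapi name */
	genl->version = NETDEV_FAMILY_VERSION;

	/* Virtual device that receives the new RX queue. */
	mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_IFINDEX, ifindex);
	mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_TYPE, NETDEV_QUEUE_TYPE_RX);

	/* Nested lease: physical device plus the RX queue to lease. */
	lease = mnl_attr_nest_start(nlh, NETDEV_A_QUEUE_LEASE);
	mnl_attr_put_u32(nlh, NETDEV_A_LEASE_IFINDEX, lease_ifindex);
	queue = mnl_attr_nest_start(nlh, NETDEV_A_LEASE_QUEUE);
	mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_ID, lease_queue_id);
	mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_TYPE, NETDEV_QUEUE_TYPE_RX);
	mnl_attr_nest_end(nlh, queue);
	mnl_attr_nest_end(nlh, lease);

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return -1;

	/* The kernel reply carries NETDEV_A_QUEUE_ID of the new queue. */
	return 0;
}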
void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/xdp_sock_drv.h>
/**
* netdev_queue_get_dma_dev() - get dma device for zero-copy operations
return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
}
+bool netdev_can_create_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ if (dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "Device is not a virtual device");
+ return false;
+ }
+ if (!dev->queue_mgmt_ops ||
+ !dev->queue_mgmt_ops->ndo_queue_create) {
+ NL_SET_ERR_MSG(extack, "Device does not support queue creation");
+ return false;
+ }
+ if (dev->real_num_rx_queues < 1 ||
+ dev->real_num_tx_queues < 1) {
+ NL_SET_ERR_MSG(extack, "Device must have at least one real RX and TX queue");
+ return false;
+ }
+ return true;
+}
+
+bool netdev_can_lease_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ if (!dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "Lease device is a virtual device");
+ return false;
+ }
+ if (!netif_device_present(dev)) {
+ NL_SET_ERR_MSG(extack, "Lease device has been removed from the system");
+ return false;
+ }
+ if (!dev->queue_mgmt_ops) {
+ NL_SET_ERR_MSG(extack, "Lease device does not support queue management operations");
+ return false;
+ }
+ return true;
+}
+
+bool netdev_queue_busy(struct net_device *dev, int idx,
+ struct netlink_ext_ack *extack)
+{
+ if (netif_rxq_is_leased(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue is already leased");
+ return true;
+ }
+ if (xsk_get_pool_from_qid(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue in use by AF_XDP");
+ return true;
+ }
+ if (netif_rxq_has_mp(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue in use by memory provider");
+ return true;
+ }
+ return false;
+}
#include "page_pool_priv.h"
-/* See also page_pool_is_unreadable() */
-bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
+void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src)
{
- struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
- return !!rxq->mp_params.mp_ops;
+ netdev_assert_locked(rxq_src->dev);
+ netdev_assert_locked(rxq_dst->dev);
+
+ netdev_hold(rxq_src->dev, &rxq_src->lease_tracker, GFP_KERNEL);
+ WRITE_ONCE(rxq_src->lease, rxq_dst);
+ WRITE_ONCE(rxq_dst->lease, rxq_src);
+}
+
+void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src)
+{
+ netdev_assert_locked(rxq_dst->dev);
+ netdev_assert_locked(rxq_src->dev);
+
+ WRITE_ONCE(rxq_src->lease, NULL);
+ WRITE_ONCE(rxq_dst->lease, NULL);
+
+ netdev_put(rxq_src->dev, &rxq_src->lease_tracker);
+}
+
+bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return READ_ONCE(__netif_get_rx_queue(dev, rxq_idx)->lease);
+ return false;
+}
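The "ops are redirected to the leased queue" behaviour referenced in the @ndo_queue_create kerneldoc is not part of this excerpt; the sketch below only illustrates how a caller could resolve the cross-link set up by netdev_rx_queue_lease(). The helper name is an assumption, not something this patch adds.

/* Hypothetical illustration: follow the lease cross-link, if any. */
static struct netdev_rx_queue *
netif_resolve_leased_rxq(struct netdev_rx_queue *rxq)
{
	struct netdev_rx_queue *lease = READ_ONCE(rxq->lease);

	/* On a virtual device the paired queue belongs to the physical
	 * device that actually owns the hardware ring; fall back to the
	 * queue itself when it is not leased.
	 */
	return lease ?: rxq;
}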
+
+/* See also page_pool_is_unreadable() */
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_ops;
+ return false;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
+bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_priv;
+ return false;
+}
+
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+struct xsk_buff_pool *xsk_get_pool_from_qid(const struct net_device *dev,
u16 queue_id)
{
if (queue_id < dev->real_num_rx_queues)