get_desc, start_thrs); \
})
-struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
+struct device *netdev_queue_get_dma_dev(struct net_device *dev,
+ unsigned int idx,
+ enum netdev_queue_type type);
bool netdev_can_create_queue(const struct net_device *dev,
struct netlink_ext_ack *extack);
bool netdev_can_lease_queue(const struct net_device *dev,
for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
struct device *rxq_dma_dev;
- rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
+ rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX);
if (dma_dev && rxq_dma_dev != dma_dev) {
NL_SET_ERR_MSG_FMT(extack, "DMA device mismatch between queue %u and %u (multi-PF device?)",
rxq_idx, prev_rxq_idx);
goto err_unlock_netdev;
}
- dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+ dma_dev = netdev_queue_get_dma_dev(netdev, 0, NETDEV_QUEUE_TYPE_TX);
binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_TO_DEVICE,
dmabuf_fd, priv, info->extack);
if (IS_ERR(binding)) {
#include "dev.h"
+static struct device *
+__netdev_queue_get_dma_dev(struct net_device *dev, unsigned int idx)
+{
+ const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
+ struct device *dma_dev;
+
+ if (queue_ops && queue_ops->ndo_queue_get_dma_dev)
+ dma_dev = queue_ops->ndo_queue_get_dma_dev(dev, idx);
+ else
+ dma_dev = dev->dev.parent;
+
+ return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
+}
+
/**
* netdev_queue_get_dma_dev() - get dma device for zero-copy operations
* @dev: net_device
* @idx: queue index
+ * @type: queue type (%NETDEV_QUEUE_TYPE_RX or %NETDEV_QUEUE_TYPE_TX)
*
- * Get dma device for zero-copy operations to be used for this queue.
- * When such device is not available or valid, the function will return NULL.
+ * Get the DMA device for zero-copy operations to be used for this queue.
+ * If the queue is an RX queue leased from a physical queue, the physical
+ * queue's DMA device is returned instead. When the DMA device is not
+ * available or valid, the function will return NULL.
*
* Return: Device or NULL on error
*/
-struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
+struct device *netdev_queue_get_dma_dev(struct net_device *dev,
+ unsigned int idx,
+ enum netdev_queue_type type)
{
- const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
+ struct net_device *orig_dev = dev;
struct device *dma_dev;
- if (queue_ops && queue_ops->ndo_queue_get_dma_dev)
- dma_dev = queue_ops->ndo_queue_get_dma_dev(dev, idx);
- else
- dma_dev = dev->dev.parent;
+ /* Only RX side supports queue leasing today. */
+ if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
+ return __netdev_queue_get_dma_dev(dev, idx);
- return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
+ if (!netif_get_rx_queue_lease_locked(&dev, &idx))
+ return NULL;
+
+ dma_dev = __netdev_queue_get_dma_dev(dev, idx);
+ netif_put_rx_queue_lease_locked(orig_dev, dev);
+ return dma_dev;
}
bool netdev_can_create_queue(const struct net_device *dev,