return false;
}
+/**
+ * xsk_dev_queue_valid - check that a frame's source rxq matches the socket
+ * @xs: bound XDP socket (supplies the expected netdev and queue id)
+ * @info: rxq info describing where the incoming xdp_buff was received
+ *
+ * Fast path: accept when the frame arrived on exactly the device/queue
+ * pair the socket is bound to. Otherwise, if the bound queue index is in
+ * range and that rx queue has a lease installed, accept frames that
+ * arrived on the leased queue's device/queue pair instead.
+ *
+ * Return: true if the frame may be delivered to @xs, false otherwise.
+ */
+static bool xsk_dev_queue_valid(const struct xdp_sock *xs,
+ const struct xdp_rxq_info *info)
+{
+ struct net_device *dev = xs->dev;
+ u32 queue_index = xs->queue_id;
+ struct netdev_rx_queue *rxq;
+
+ /* Common case: frame came in on the exact dev/queue we bound to. */
+ if (info->dev == dev &&
+ info->queue_index == queue_index)
+ return true;
+
+ if (queue_index < dev->real_num_rx_queues) {
+ /* READ_ONCE: the lease pointer may be installed/cleared
+  * concurrently by the writer side.
+  * NOTE(review): assumes the lessee rxq stays valid for the
+  * duration of this datapath call (RCU/NAPI context) — confirm
+  * against the code that tears down a lease.
+  */
+ rxq = READ_ONCE(__netif_get_rx_queue(dev, queue_index)->lease);
+ if (!rxq)
+ return false;
+
+ /* Re-aim the comparison at the lessee's dev/queue pair. */
+ dev = rxq->dev;
+ queue_index = get_netdev_rx_queue_index(rxq);
+
+ return info->dev == dev &&
+ info->queue_index == queue_index;
+ }
+ return false;
+}
+
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
if (!xsk_is_bound(xs))
return -ENXIO;
-
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ if (!xsk_dev_queue_valid(xs, xdp->rxq))
return -EINVAL;
-
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
xs->rx_dropped++;
return -ENOSPC;