xsk_buffs = rq->xsk_buffs;
num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
- if (!num)
+ if (!num) {
+ if (xsk_uses_need_wakeup(pool)) {
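+ /* No buffers could be allocated; tell userspace it must wake
+ * us (e.g. via poll()) after refilling the fill ring.
+ */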
+ xsk_set_rx_need_wakeup(pool);
+ /* Return 0 instead of -ENOMEM so that NAPI is
+ * descheduled.
+ */
+ return 0;
+ }
+
return -ENOMEM;
+ } else {
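+ /* Buffers were allocated; clear the flag so userspace can
+ * skip redundant wakeup calls.
+ */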
+ xsk_clear_rx_need_wakeup(pool);
+ }
len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
return sent;
}
-static void xsk_wakeup(struct send_queue *sq)
+static void xsk_wakeup(struct napi_struct *napi, struct virtqueue *vq)
{
- if (napi_if_scheduled_mark_missed(&sq->napi))
+ if (napi_if_scheduled_mark_missed(napi))
return;
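+ /* We may be called from process context; disabling BHs ensures
+ * the scheduled NAPI softirq runs once they are re-enabled.
+ */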
local_bh_disable();
- virtqueue_napi_schedule(&sq->napi, sq->vq);
+ virtqueue_napi_schedule(napi, vq);
local_bh_enable();
}
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
struct virtnet_info *vi = netdev_priv(dev);
- struct send_queue *sq;
if (!netif_running(dev))
return -ENETDOWN;
if (qid >= vi->curr_queue_pairs)
return -EINVAL;
- sq = &vi->sq[qid];
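+ /* flag may request a TX wakeup, an RX wakeup, or both. */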
+ if (flag & XDP_WAKEUP_TX) {
+ struct send_queue *sq = &vi->sq[qid];
+
+ xsk_wakeup(&sq->napi, sq->vq);
+ }
+
+ if (flag & XDP_WAKEUP_RX) {
+ struct receive_queue *rq = &vi->rq[qid];
+
+ xsk_wakeup(&rq->napi, rq->vq);
+ }
- xsk_wakeup(sq);
return 0;
}
* wakeup the tx napi to consume the xsk tx queue, because the tx
* interrupt may not be triggered.
*/
- xsk_wakeup(sq);
+ xsk_wakeup(&sq->napi, sq->vq);
}
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
}
/*
- * Returns false if we couldn't fill entirely (OOM).
+ * Returns false if we couldn't fill entirely (OOM) and a retry is needed.
+ * In XSK mode, this is the case when no receive buffers could be
+ * allocated and xsk_uses_need_wakeup is not enabled.
*
* Normally run in the receive path, but can also be run from ndo_open
* before we're receiving packets, or from refill_work which is