static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
-				u32 dma_sync_len);
+				u32 dma_sync_len, int queue);
#define DRIVER_NAME "fec"
	page_pool_put_page(rxq->page_pool, page, sync, true);
}
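+/* Map an arbitrary non-negative index (CPU id or RX queue id) onto a
+ * valid TX queue; negative indices fall back to queue 0.
+ */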
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+	if (unlikely(index < 0))
+		return 0;
+
+	return (index % fep->num_tx_queues);
+}
+
static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
				 int budget, struct bpf_prog *prog)
{
	struct sk_buff *skb;
	u16 status, pkt_len;
	struct xdp_buff xdp;
+	int tx_qid = queue;
	struct page *page;
	u32 xdp_res = 0;
	dma_addr_t dma;
	flush_cache_all();
#endif
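+	/* XDP_TX frames go out on the TX queue paired with this RX queue
+	 * when one exists; otherwise fall back to the CPU-based mapping.
+	 */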
+	if (unlikely(tx_qid >= fep->num_tx_queues))
+		tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
+
	xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
			break;
		case XDP_TX:
			rxq->stats[RX_XDP_TX]++;
-			err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync);
+			err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
			if (unlikely(err)) {
				rxq->stats[RX_XDP_TX_ERRORS]++;
				fec_xdp_drop(rxq, &xdp, sync);
			}
		}
-static int
-fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
-{
-	if (unlikely(index < 0))
-		return 0;
-
-	return (index % fep->num_tx_queues);
-}
-
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
				   struct fec_enet_priv_tx_q *txq,
				   void *frame, u32 dma_sync_len,
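+/* @queue selects the TX ring; callers map an RX queue id or CPU id onto a
+ * valid queue via fec_enet_xdp_get_tx_queue().
+ */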
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
-				u32 dma_sync_len)
+				u32 dma_sync_len, int queue)
{
-	struct fec_enet_priv_tx_q *txq;
-	struct netdev_queue *nq;
-	int queue, ret;
-
-	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
-	txq = fep->tx_queue[queue];
-	nq = netdev_get_tx_queue(fep->netdev, queue);
+	struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+	int ret;
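+	/* Serialize against the normal xmit path on this queue. */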
	__netif_tx_lock(nq, cpu);