* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static int
-fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
+static int fec_enet_rx_queue(struct fec_enet_private *fep,
+ u16 queue, int budget)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
- struct bufdesc *bdp;
- unsigned short status;
- struct sk_buff *skb;
- ushort pkt_len;
- int pkt_received = 0;
- int index = 0;
- bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
u32 sub_len = 4 + fep->rx_shift;
int cpu = smp_processor_id();
+ int pkt_received = 0;
+ u16 status, pkt_len;
+ struct sk_buff *skb;
struct xdp_buff xdp;
struct page *page;
- __fec32 cbd_bufaddr;
+ dma_addr_t dma;
+ int index;
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
 * Hacky flush of all caches instead of using the DMA API for the
 * receive packets.
 */
flush_cache_all();
#endif
- rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = rxq->bd.cur;
xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
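+ /* Process one descriptor per pass; stop at the first descriptor the
+ * hardware still owns (EMPTY set) or once the NAPI budget is spent.
+ */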
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
	if (pkt_received >= budget)
		break;
	pkt_received++;
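+ /* Acknowledge this queue's RXF event; IEVENT bits are write-1-to-clear. */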
- writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_buf[index];
- cbd_bufaddr = bdp->cbd_bufaddr;
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
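+ /* Latch the frame's DMA address before fec_enet_update_cbd() swaps in a
+ * fresh page and rewrites cbd_bufaddr, so the sync below targets the page
+ * that actually holds this frame.
+ */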
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(cbd_bufaddr),
- pkt_len,
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
@@ ... @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, i, budget - done);
+ done += fec_enet_rx_queue(fep, i, budget - done);
return done;
}
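
The refactor does not change the polling contract: walk the ring until a
descriptor the hardware still owns (EMPTY set) is reached or the NAPI budget
is spent, and return the number of frames processed so the caller can pass
the remaining budget to the next queue. The sketch below is a minimal
user-space model of that ownership/budget pattern; every name in it
(rx_poll, bufdesc, BD_EMPTY, RING_SIZE) is illustrative and none of it is
driver code.

/* Toy model of the RX descriptor ring: the "hardware" owns a descriptor
 * while BD_EMPTY is set; software consumes at most `budget` filled
 * descriptors per poll and hands each one back by setting BD_EMPTY again.
 * All names are hypothetical. Build with: cc -std=c99 -o ring ring.c
 */
#include <stdio.h>

#define RING_SIZE 8
#define BD_EMPTY  0x8000 /* set: descriptor owned by "hardware" */

struct bufdesc {
	unsigned short sc;  /* status/control word, like cbd_sc */
	unsigned short len; /* frame length, like cbd_datlen */
};

static struct bufdesc ring[RING_SIZE];
static int cur; /* software cursor, like rxq->bd.cur */

/* Consume up to `budget` frames; return how many were processed. */
static int rx_poll(int budget)
{
	int received = 0;

	while (!(ring[cur].sc & BD_EMPTY)) {
		if (received >= budget)
			break;
		received++;

		printf("frame at slot %d: %u bytes\n", cur, ring[cur].len);

		/* Hand the descriptor back to "hardware" ownership. */
		ring[cur].sc |= BD_EMPTY;
		cur = (cur + 1) % RING_SIZE;
	}
	return received;
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i].sc = BD_EMPTY;
	/* "Hardware" fills the first three descriptors. */
	for (i = 0; i < 3; i++) {
		ring[i].sc &= ~BD_EMPTY;
		ring[i].len = 64 + i;
	}

	printf("poll 1 processed %d frames\n", rx_poll(2)); /* budget hit */
	printf("poll 2 processed %d frames\n", rx_poll(2)); /* ring drained */
	return 0;
}

Capping each pass at `budget` and reporting the work actually done is what
lets fec_enet_rx() share one NAPI budget across queues while still draining
the AVB queues first.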