return 0;
}
-static u32
-fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
-{
- unsigned int sync, len = xdp->data_end - xdp->data;
- u32 ret = FEC_ENET_XDP_PASS;
- struct page *page;
- int err;
- u32 act;
-
- act = bpf_prog_run_xdp(prog, xdp);
-
- /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
- * max len CPU touch
- */
- sync = xdp->data_end - xdp->data;
- sync = max(sync, len);
-
- switch (act) {
- case XDP_PASS:
- rxq->stats[RX_XDP_PASS]++;
- ret = FEC_ENET_XDP_PASS;
- break;
-
- case XDP_REDIRECT:
- rxq->stats[RX_XDP_REDIRECT]++;
- err = xdp_do_redirect(fep->netdev, xdp, prog);
- if (unlikely(err))
- goto xdp_err;
-
- ret = FEC_ENET_XDP_REDIR;
- break;
-
- case XDP_TX:
- rxq->stats[RX_XDP_TX]++;
- err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
- if (unlikely(err)) {
- rxq->stats[RX_XDP_TX_ERRORS]++;
- goto xdp_err;
- }
-
- ret = FEC_ENET_XDP_TX;
- break;
-
- default:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
- case XDP_ABORTED:
- fallthrough; /* handle aborts by dropping packet */
-
- case XDP_DROP:
- rxq->stats[RX_XDP_DROP]++;
-xdp_err:
- ret = FEC_ENET_XDP_CONSUMED;
- page = virt_to_head_page(xdp->data);
- page_pool_put_page(rxq->page_pool, page, sync, true);
- if (act != XDP_DROP)
- trace_xdp_exception(fep->netdev, prog, act);
- break;
- }
-
- return ret;
-}
-
static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
{
if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
static int fec_enet_rx_queue(struct fec_enet_private *fep,
u16 queue, int budget)
{
- u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
- struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
- u32 ret, xdp_result = FEC_ENET_XDP_PASS;
struct net_device *ndev = fep->netdev;
struct bufdesc *bdp = rxq->bd.cur;
u32 sub_len = 4 + fep->rx_shift;
- int cpu = smp_processor_id();
int pkt_received = 0;
u16 status, pkt_len;
struct sk_buff *skb;
- struct xdp_buff xdp;
struct page *page;
dma_addr_t dma;
int index;
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
-
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget)
DMA_FROM_DEVICE);
prefetch(page_address(page));
- if (xdp_prog) {
- xdp_buff_clear_frags_flag(&xdp);
- /* subtract 16bit shift and FCS */
- xdp_prepare_buff(&xdp, page_address(page),
- data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
- xdp_result |= ret;
- if (ret != FEC_ENET_XDP_PASS)
- goto rx_processing_done;
- }
-
if (unlikely(need_swap)) {
u8 *data;
}
rxq->bd.cur = bdp;
- if (xdp_result & FEC_ENET_XDP_REDIR)
+ return pkt_received;
+}
+
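+/* Return an RX page to the page pool; @sync is the number of bytes that
+ * must be DMA-synced for device before the page can be reused.
+ */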
+static void fec_xdp_drop(struct fec_enet_priv_rx_q *rxq,
+ struct xdp_buff *xdp, u32 sync)
+{
+ struct page *page = virt_to_head_page(xdp->data);
+
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+}
+
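+/* RX processing for a queue with an XDP program attached: run each received
+ * frame through @prog, then pass, redirect, transmit or drop it according
+ * to the verdict.
+ */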
+static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
+ int budget, struct bpf_prog *prog)
+{
+ u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int cpu = smp_processor_id();
+ int pkt_received = 0;
+ struct sk_buff *skb;
+ u16 status, pkt_len;
+ struct xdp_buff xdp;
+ struct page *page;
+ u32 xdp_res = 0;
+ dma_addr_t dma;
+ int index, err;
+ u32 act, sync;
+
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
+ flush_cache_all();
+#endif
+
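+ /* The xdp_buff is initialised once and reused for every frame in this poll. */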
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
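+ /* Acknowledge the frame-received event for this queue. */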
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (unlikely(fec_rx_error_check(ndev, status)))
+ goto rx_processing_done;
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ page = rxq->rx_buf[index];
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
+
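+ /* Attach a fresh page from the page pool to the descriptor; drop the
+ * frame if no page is available.
+ */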
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract the 16-bit shift and the FCS */
+ pkt_len -= sub_len;
+ xdp_prepare_buff(&xdp, page_address(page), data_start,
+ pkt_len, false);
+
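+ /* Run the XDP program; its verdict decides the fate of the frame. */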
+ act = bpf_prog_run_xdp(prog, &xdp);
+ /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync
+ * for_device must cover the maximum length the CPU touched.
+ */
+ sync = xdp.data_end - xdp.data;
+ sync = max(sync, pkt_len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ /* The hardware frame length includes the FCS, which was
+ * already stripped from pkt_len above; we don't want to
+ * pass it upstream as it messes up bridging applications.
+ */
+ skb = fec_build_skb(fep, rxq, bdp, page, pkt_len);
+ if (!skb)
+ trace_xdp_exception(ndev, prog, XDP_PASS);
+ else
+ napi_gro_receive(&fep->napi, skb);
+
+ break;
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(ndev, &xdp, prog);
+ if (unlikely(err)) {
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_REDIRECT);
+ } else {
+ xdp_res |= FEC_ENET_XDP_REDIR;
+ }
+ break;
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_TX);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* handle aborts by dropping packet */
+ fallthrough;
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ break;
+ }
+
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
+
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ dma_wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+
+ rxq->bd.cur = bdp;
+
+ if (xdp_res & FEC_ENET_XDP_REDIR)
xdp_do_flush();
return pkt_received;
static int fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bpf_prog *prog = READ_ONCE(fep->xdp_prog);
int i, done = 0;
/* Make sure that AVB queues are processed first. */
- for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(fep, i, budget - done);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ if (prog)
+ done += fec_enet_rx_queue_xdp(fep, i, budget - done,
+ prog);
+ else
+ done += fec_enet_rx_queue(fep, i, budget - done);
+ }
return done;
}