return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
- if (err < 0)
- goto err_free_pp;
-
- err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
- rxq->page_pool);
- if (err)
- goto err_unregister_rxq;
-
return 0;
-
-err_unregister_rxq:
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
-err_free_pp:
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
- return err;
}
static void fec_txq_trigger_xmit(struct fec_enet_private *fep,
.self_test = net_selftest,
};
+/* Register the RX queue's xdp_rxq_info with the XDP core and bind the
+ * queue's page_pool as its MEM_TYPE_PAGE_POOL memory model.
+ *
+ * Returns 0 on success or a negative errno from the XDP core. If the
+ * mem-model registration fails, the rxq info registration is unwound
+ * here so the caller only has to propagate the error.
+ */
+static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
+{
+ struct net_device *ndev = fep->netdev;
+ int err;
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
+ if (err) {
+ netdev_err(ndev, "Failed to register xdp rxq info\n");
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err) {
+ netdev_err(ndev, "Failed to register XDP mem model\n");
+ /* Roll back the rxq registration done above. */
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+ }
+
+ return 0;
+}
+
+/* Undo fec_xdp_rxq_info_reg(): detach the page_pool memory model and
+ * unregister the xdp_rxq_info. The is_reg check makes this safe to call
+ * on a queue that was never registered (or already unregistered).
+ */
+static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
+{
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ }
+}
+
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
+
+ fec_xdp_rxq_info_unreg(rxq);
+
for (i = 0; i < rxq->bd.ring_size; i++)
page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
- if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
page_pool_destroy(rxq->page_pool);
rxq->page_pool = NULL;
}
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
+
+ err = fec_xdp_rxq_info_reg(fep, rxq);
+ if (err)
+ goto err_alloc;
+
return 0;
err_alloc: