if (bdp->cbd_bufaddr)
	bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
bdp->cbd_sc = cpu_to_fec16(0);
+
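+	/* In extended descriptor mode, let this BD raise an RX interrupt. */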
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ }
+
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
}
}
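
+/* Return every page in the RX ring to the page pool, then destroy the pool. */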
+static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
+{
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ struct page *page = rxq->rx_buf[i];
+
+ if (!page)
+ continue;
+
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ rxq->rx_buf[i] = NULL;
+ }
+
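+	/* The ring holds no page references now; release the pool itself. */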
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
for (q = 0; q < fep->num_rx_queues; q++) {
	rxq = fep->rx_queue[q];
fec_xdp_rxq_info_unreg(rxq);
-
- for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
- false);
+ fec_free_rxq_buffers(rxq);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
-
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
return ret;
}
-static int
-fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
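+/* Create the queue's page pool and attach a pool page to every RX BD. */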
+static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp = rxq->bd.base;
dma_addr_t phys_addr;
- struct bufdesc *bdp;
struct page *page;
int i, err;
- rxq = fep->rx_queue[queue];
- bdp = rxq->bd.base;
-
err = fec_enet_create_page_pool(fep, rxq);
if (err < 0) {
- netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
+ __func__, rxq->bd.qid, err);
return err;
}
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- goto err_alloc;
+ if (!page) {
+ err = -ENOMEM;
+ goto free_rx_buffers;
+ }
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
-
rxq->rx_buf[i] = page;
- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
-
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
- }
-
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
- /* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
+ return 0;
+
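+/* Unwind a partial allocation: return ring pages and destroy the pool. */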
+free_rx_buffers:
+ fec_free_rxq_buffers(rxq);
+
+ return err;
+}
+
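+/* Fill the RX queue with page-pool buffers and register its XDP rxq info. */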
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ rxq = fep->rx_queue[queue];
+ err = fec_alloc_rxq_buffers_pp(fep, rxq);
+ if (err)
+ goto free_buffers;
err = fec_xdp_rxq_info_reg(fep, rxq);
if (err)
- goto err_alloc;
+ goto free_buffers;
return 0;
- err_alloc:
+free_buffers:
fec_enet_free_buffers(ndev);
- return -ENOMEM;
+
+ return err;
}
static int