bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
-static bool bnxt_separate_head_pool(void)
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
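+	/* The agg buffer may come from an unreadable memory provider;
+	 * the pool DMA-maps it at allocation time (PP_FLAG_DMA_MAP),
+	 * so only the stored DMA address is read back below.
+	 */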
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem);
+ return netmem;
+}
+
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
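+	/* Unlike __bnxt_alloc_rx_page(), the netmem helper takes no
+	 * offset: each allocation is a full pool buffer, so offset
+	 * stays 0.
+	 */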
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
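+	/* Both paths share the aggregation loop below: the skb path
+	 * attaches frags with skb_add_rx_frag_netmem(), which also
+	 * updates skb->len, data_len and truesize; the xdp path fills
+	 * the shared info directly.
+	 */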
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
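+			/* Allocation failed: undo the frag bookkeeping
+			 * done above and put the buffer back on the
+			 * ring.
+			 */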
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
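+		/* Sync through the pool helper; it is a no-op when a
+		 * memory provider owns the buffers and no CPU sync is
+		 * needed.
+		 */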
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
return total_frag_len;
}
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
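+	/* len, data_len and truesize were updated frag by frag in
+	 * __bnxt_rx_agg_netmems() via skb_add_rx_frag_netmem().
+	 */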
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
-			/* Page reuse already handled by bnxt_rx_pages(). */
+			/* Buffer reuse already handled by
+			 * bnxt_rx_agg_netmems_skb().
+			 */
cpr->sw_stats->rx.rx_oom_discards += 1;
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
if (!frag_len)
goto oom_next_rx;
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
}
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
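+	/* PP_FLAG_ALLOW_UNREADABLE_NETMEM lets a memory provider (e.g.
+	 * devmem TCP) back this pool; queue_idx identifies the RX queue
+	 * for that binding.
+	 */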
pool = page_pool_create(&pp);
if (IS_ERR(pool))
return PTR_ERR(pool);
rxr->page_pool = pool;
- if (bnxt_separate_head_pool()) {
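+	/* An unreadable data pool forces a separate, host-readable
+	 * pool for packet headers.
+	 */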
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
pp.pool_size = max(bp->rx_ring_size, 1024);
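+		/* Headers are parsed by the kernel, so the head pool
+		 * must not allow unreadable netmem.
+		 */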
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
rxr->rx_prod = prod;
}
-static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- int ring_nr)
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
-			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
-				    ring_nr, i, bp->rx_ring_size);
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d agg buffers only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
break;
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
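+	/* Recomputed from the new page pool in bnxt_alloc_rx_page_pool()
+	 * below.
+	 */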
+ clone->need_head_pool = false;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
if (bp->flags & BNXT_FLAG_TPA)
bnxt_alloc_one_tpa_info_data(bp, clone);
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
page_pool_destroy(clone->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(clone))
page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;
rxr->page_pool = clone->page_pool;
rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
page_pool_disable_direct_recycling(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_disable_direct_recycling(rxr->head_pool);
if (bp->flags & BNXT_FLAG_SHARED_RINGS)