}
}
+static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
+{
+	/* The user may have chosen an rx_page_size larger than the default.
+	 * We keep the ring sizes uniform and also want a uniform number of
+	 * bytes consumed per ring, so cap how much of each ring we fill.
+	 */
+	int fill_level = bp->rx_agg_ring_size;
+
+	if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
+		fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
+
+	return fill_level;
+}
+
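For readers who want to sanity-check the capping arithmetic in isolation, here is a small standalone sketch that mirrors the helper above; the example_* names, EXAMPLE_BASE_PAGE_SIZE, and the numbers in main() are illustrative placeholders, not the driver's actual constants.

/* Illustrative sketch only: EXAMPLE_BASE_PAGE_SIZE stands in for
 * BNXT_RX_PAGE_SIZE and the ring/page sizes below are made up.
 */
#include <stdio.h>

#define EXAMPLE_BASE_PAGE_SIZE	(32 * 1024)

static int example_fill_level(int rx_agg_ring_size, int rx_page_size)
{
	int fill_level = rx_agg_ring_size;

	/* Larger pages carry more data each, so fill proportionally fewer
	 * slots to keep the bytes consumed per ring roughly constant.
	 */
	if (rx_page_size > EXAMPLE_BASE_PAGE_SIZE)
		fill_level /= rx_page_size / EXAMPLE_BASE_PAGE_SIZE;

	return fill_level;
}

int main(void)
{
	/* 2048-entry agg ring with pages twice the base size: fill 1024 slots */
	printf("%d\n", example_fill_level(2048, 64 * 1024));
	return 0;
}

With pages twice the base size, only half the entries are filled, so the bytes posted per ring stay the same as with the default page size.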
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr,
				   int numa_node)
{
-	const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+	unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
	struct page_pool_params pp = { 0 };
	struct page_pool *pool;
-	pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
+	pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
	if (BNXT_RX_PAGE_MODE(bp))
		pp.pool_size += bp->rx_ring_size / rx_size_fac;
					  struct bnxt_rx_ring_info *rxr,
					  int ring_nr)
{
+	int fill_level, i;
	u32 prod;
-	int i;
+
+	fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
	prod = rxr->rx_agg_prod;
-	for (i = 0; i < bp->rx_agg_ring_size; i++) {
+	for (i = 0; i < fill_level; i++) {
		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);