struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
struct bnxt_ring_struct *ring;
+ struct netdev_rx_queue *rxq;
if (!bnapi)
continue;
if (!rxr)
goto skip_rx;
- rxr->rx_page_size = BNXT_RX_PAGE_SIZE;
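+ /* Pick up the per-queue page size configured via the queue API */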
+ rxq = __netif_get_rx_queue(bp->dev, i);
+ rxr->rx_page_size = rxq->qcfg.rx_page_size;
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
.get_base_stats = bnxt_get_base_stats,
};
+static void bnxt_queue_default_qcfg(struct net_device *dev,
+ struct netdev_queue_config *qcfg)
+{
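+ /* Report the compile-time default page size for this queue */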
+ qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
+}
+
+static int bnxt_validate_qcfg(struct bnxt *bp, struct netdev_queue_config *qcfg)
+{
+ /* Older chips need MSS calc, so a non-default rx_page_size is not supported */
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
+ return -EINVAL;
+
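+ /* rx_page_size must be a power of two between the default and the chip maximum */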
+ if (!is_power_of_2(qcfg->rx_page_size))
+ return -ERANGE;
+
+ if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
+ qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
+ return -ERANGE;
+
+ return 0;
+}
+
static int bnxt_queue_mem_alloc(struct net_device *dev,
struct netdev_queue_config *qcfg,
void *qmem, int idx)
if (!bp->rx_ring)
return -ENETDOWN;
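+ /* Fail early on an unsupported qcfg before cloning any ring state */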
+ rc = bnxt_validate_qcfg(bp, qcfg);
+ if (rc < 0)
+ return rc;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
clone->need_head_pool = false;
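+ /* Set the requested page size before the clone's page pool is allocated */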
+ clone->rx_page_size = qcfg->rx_page_size;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
src_ring = &src->rx_agg_ring_struct;
src_rmem = &src_ring->ring_mem;
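+ /* Carry the per-queue page size over along with the rest of the ring state */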
+ dst->rx_page_size = src->rx_page_size;
+
WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
WARN_ON(dst_rmem->page_size != src_rmem->page_size);
WARN_ON(dst_rmem->flags != src_rmem->flags);
.ndo_queue_mem_free = bnxt_queue_mem_free,
.ndo_queue_start = bnxt_queue_start,
.ndo_queue_stop = bnxt_queue_stop,
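+ /* Provide defaults and advertise rx_page_size as the only tunable param */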
+ .ndo_default_qcfg = bnxt_queue_default_qcfg,
+ .supported_params = QCFG_RX_PAGE_SIZE,
};
static void bnxt_remove_one(struct pci_dev *pdev)