};
struct netdev_queue_config {
+ u32 rx_page_size;
};
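The new rx_page_size member is how the core hands a per-queue rx buffer size down to the driver. A minimal sketch of a driver honouring it when sizing a page_pool; the helper name and the assumption that the value is a power of two no smaller than PAGE_SIZE are illustrative, not part of this patch:

/* Illustrative only: derive the page_pool order from the requested
 * rx buffer size, falling back to PAGE_SIZE when none was set.
 */
static void fake_fill_pp_params(const struct netdev_queue_config *qcfg,
				struct page_pool_params *pp)
{
	pp->order = get_order(qcfg->rx_page_size ?: PAGE_SIZE);
}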
/* See the netdev.yaml spec for definition of each statistic */
int tx_start, int tx_end,
struct netdev_queue_stats_tx *tx_sum);
+enum {
+ /* The queue checks and honours the rx_page_size qcfg parameter */
+ QCFG_RX_PAGE_SIZE = 0x1,
+};
+
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
* @ndo_default_qcfg: Populate queue config struct with defaults. Optional.
*
+ * @supported_params: Bitmask of supported parameters, see QCFG_*.
+ *
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
* the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
* be called for an interface which is open.
struct netdev_queue_config *qcfg);
struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
int idx);
+
+ unsigned int supported_params;
};
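Taken together, a driver opts in by implementing @ndo_default_qcfg and setting the QCFG_RX_PAGE_SIZE bit in @supported_params. A hedged sketch; everything with a fake_ prefix is hypothetical, only the two new fields and the QCFG_* flag come from this patch:

static void fake_default_qcfg(struct net_device *dev,
			      struct netdev_queue_config *qcfg)
{
	/* Default used when no memory provider requests a size */
	qcfg->rx_page_size = PAGE_SIZE;
}

static const struct netdev_queue_mgmt_ops fake_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct fake_queue_mem),
	.ndo_queue_mem_alloc	= fake_queue_mem_alloc,
	.ndo_queue_mem_free	= fake_queue_mem_free,
	.ndo_queue_start	= fake_queue_start,
	.ndo_queue_stop		= fake_queue_stop,
	.ndo_default_qcfg	= fake_default_qcfg,
	/* Without this bit the core rejects rx_page_size requests */
	.supported_params	= QCFG_RX_PAGE_SIZE,
};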
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
!qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(qops->supported_params && !qops->ndo_default_qcfg))
+ return -EINVAL;
+
netdev_assert_locked(dev);
memset(&qcfg, 0, sizeof(qcfg));
if (qops->ndo_default_qcfg)
qops->ndo_default_qcfg(dev, &qcfg);
+ if (rxq->mp_params.rx_page_size) {
+ if (!(qops->supported_params & QCFG_RX_PAGE_SIZE))
+ return -EOPNOTSUPP;
+ qcfg.rx_page_size = rxq->mp_params.rx_page_size;
+ }
+
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
if (!new_mem)
return -ENOMEM;
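On the other side, a memory provider that wants larger rx buffers sets mp_params.rx_page_size before restarting the queue; if the driver has not advertised QCFG_RX_PAGE_SIZE, the restart fails with -EOPNOTSUPP rather than silently ignoring the request. A rough sketch, assuming the caller already holds the instance lock; the function and the 64KiB choice are illustrative:

static int fake_mp_bind_queue(struct net_device *dev, int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);

	/* Ask for 64KiB rx buffers; rejected unless the driver set
	 * QCFG_RX_PAGE_SIZE in its queue management ops.
	 */
	rxq->mp_params.rx_page_size = SZ_64K;
	return netdev_rx_queue_restart(dev, rxq_idx);
}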