void netdev_queue_config(struct net_device *dev, int rxq_idx,
struct netdev_queue_config *qcfg)
{
- struct netdev_queue_config *stored;
+ struct pp_memory_provider_params *mpp;
memset(qcfg, 0, sizeof(*qcfg));
- stored = &__netif_get_rx_queue(dev, rxq_idx)->qcfg;
- qcfg->rx_page_size = stored->rx_page_size;
+	/* Get defaults from the driver, in case user config is not set */
+ if (dev->queue_mgmt_ops->ndo_default_qcfg)
+ dev->queue_mgmt_ops->ndo_default_qcfg(dev, qcfg);
+
+ /* Apply MP overrides */
+ mpp = &__netif_get_rx_queue(dev, rxq_idx)->mp_params;
+ if (mpp->rx_page_size)
+ qcfg->rx_page_size = mpp->rx_page_size;
}
EXPORT_SYMBOL(netdev_queue_config);
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
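
+/* Drivers that support per-queue config report their defaults via
+ * ndo_default_qcfg. A minimal sketch, assuming a hypothetical "my_drv"
+ * driver (illustrative only, not part of this change):
+ *
+ *	static void my_drv_default_qcfg(struct net_device *dev,
+ *					struct netdev_queue_config *qcfg)
+ *	{
+ *		qcfg->rx_page_size = PAGE_SIZE;
+ *	}
+ */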
-int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+static int netdev_rx_queue_reconfig(struct net_device *dev,
+ unsigned int rxq_idx,
+ struct netdev_queue_config *qcfg_old,
+ struct netdev_queue_config *qcfg_new)
{
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
- struct netdev_queue_config qcfg;
void *new_mem, *old_mem;
int err;
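
+	/* All four queue management ops are required: reconfig stops the
+	 * queue, swaps its backing memory, and starts it again.
+	 */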
	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;
- if (WARN_ON_ONCE(qops->supported_params && !qops->ndo_default_qcfg))
- return -EINVAL;
-
netdev_assert_locked(dev);
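
+	/* Scratch space for both the new and the old per-queue memory, so a
+	 * failed start can fall back to the old state.
+	 */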
- memset(&qcfg, 0, sizeof(qcfg));
- if (qops->ndo_default_qcfg)
- qops->ndo_default_qcfg(dev, &qcfg);
-
- if (rxq->mp_params.rx_page_size)
- qcfg.rx_page_size = rxq->mp_params.rx_page_size;
-
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
if (!new_mem)
return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

- err = qops->ndo_queue_mem_alloc(dev, &qcfg, new_mem, rxq_idx);
+ err = qops->ndo_queue_mem_alloc(dev, qcfg_new, new_mem, rxq_idx);
if (err)
goto err_free_old_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

- err = qops->ndo_queue_start(dev, &qcfg, new_mem, rxq_idx);
+ err = qops->ndo_queue_start(dev, qcfg_new, new_mem, rxq_idx);
if (err)
goto err_start_queue;
	} else {
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

-	rxq->qcfg = qcfg;

return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * freed the original memory yet. Here we have lost the queue anyway and
	 * retrying the restart again is the best thing to do.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
- if (qops->ndo_queue_start(dev, &rxq->qcfg, old_mem, rxq_idx)) {
+ if (qops->ndo_queue_start(dev, qcfg_old, old_mem, rxq_idx)) {
WARN(1,
"Failed to restart old queue in error path. RX queue %d may be unhealthy.",
rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
+
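+/* A restart is a reconfiguration where nothing changes: the current
+ * effective config is passed as both the old and the new state.
+ */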
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+{
+ struct netdev_queue_config qcfg;
+
+ netdev_queue_config(dev, rxq_idx, &qcfg);
+ return netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg, &qcfg);
+}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
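
+/* Callers must hold the instance lock (see netdev_assert_locked() above),
+ * e.g. (illustrative):
+ *
+ *	netdev_lock(dev);
+ *	err = netdev_rx_queue_restart(dev, rxq_idx);
+ *	netdev_unlock(dev);
+ */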
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
+ struct netdev_queue_config qcfg[2];
struct netdev_rx_queue *rxq;
int ret;
}
#endif
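+
+	/* Snapshot the effective config before and after installing the
+	 * memory provider params, so the reconfig sees both states.
+	 */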
+ netdev_queue_config(dev, rxq_idx, &qcfg[0]);
rxq->mp_params = *p;
- ret = netdev_rx_queue_restart(dev, rxq_idx);
+ netdev_queue_config(dev, rxq_idx, &qcfg[1]);
+
+ ret = netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg[0], &qcfg[1]);
	if (ret)
		memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));

	return ret;
}
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
const struct pp_memory_provider_params *old_p)
{
+ struct netdev_queue_config qcfg[2];
struct netdev_rx_queue *rxq;
int err;
	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;
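+
+	/* Mirror image of open: qcfg[0] is captured while the provider
+	 * params are still set, qcfg[1] after they are cleared.
+	 */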
+ netdev_queue_config(dev, ifq_idx, &qcfg[0]);
memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
- err = netdev_rx_queue_restart(dev, ifq_idx);
+ netdev_queue_config(dev, ifq_idx, &qcfg[1]);
+
+ err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
WARN_ON(err && err != -ENETDOWN);
}