netdev_assert_locked(rxq_dst->dev);
netdev_assert_locked(rxq_src->dev);
+ netif_rxq_cleanup_unlease(rxq_src, rxq_dst);
+
WRITE_ONCE(rxq_src->lease, NULL);
WRITE_ONCE(rxq_dst->lease, NULL);
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
-int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
- const struct pp_memory_provider_params *p,
- struct netlink_ext_ack *extack)
+static int __netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack)
{
const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
struct netdev_queue_config qcfg[2];
struct netdev_rx_queue *rxq;
int ret;
- if (!netdev_need_ops_lock(dev))
- return -EOPNOTSUPP;
-
- if (rxq_idx >= dev->real_num_rx_queues) {
- NL_SET_ERR_MSG(extack, "rx queue index out of range");
- return -ERANGE;
- }
- rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
-
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
return -EINVAL;
return ret;
}
-void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
- const struct pp_memory_provider_params *old_p)
+/* Install memory provider @p on rx queue @rxq_idx of @dev.
+ *
+ * If the queue is currently leased, the request is redirected to the
+ * queue backing the lease (netif_get_rx_queue_lease_locked() updates
+ * @dev and @rxq_idx in place) and the actual install is performed by
+ * __netif_mp_open_rxq() on the resolved device.
+ *
+ * Returns 0 on success or a negative errno; failures are described via
+ * @extack.
+ */
+int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+		      const struct pp_memory_provider_params *p,
+		      struct netlink_ext_ack *extack)
+{
+	struct net_device *orig_dev = dev;
+	int ret;
+
+	/* Memory providers are only supported on devices using the
+	 * per-device ops lock.
+	 */
+	if (!netdev_need_ops_lock(dev))
+		return -EOPNOTSUPP;
+
+	if (rxq_idx >= dev->real_num_rx_queues) {
+		NL_SET_ERR_MSG(extack, "rx queue index out of range");
+		return -ERANGE;
+	}
+	/* Clamp the index under speculation after the bounds check. */
+	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+
+	/* Fast path: queue is not leased, install directly. */
+	if (!netif_rxq_is_leased(dev, rxq_idx))
+		return __netif_mp_open_rxq(dev, rxq_idx, p, extack);
+
+	/* Leased queue: resolve to the backing queue/device and take its
+	 * lock. NOTE(review): failure presumably means the lease target
+	 * cannot be resolved/locked — confirm against the helper.
+	 */
+	if (!netif_get_rx_queue_lease_locked(&dev, &rxq_idx)) {
+		NL_SET_ERR_MSG(extack, "rx queue leased to a virtual netdev");
+		return -EBUSY;
+	}
+	/* No parent device => virtual netdev; cannot host a provider. */
+	if (!dev->dev.parent) {
+		NL_SET_ERR_MSG(extack, "rx queue belongs to a virtual netdev");
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	ret = __netif_mp_open_rxq(dev, rxq_idx, p, extack);
+out:
+	/* Drop the lease reference/lock taken above; @dev may have been
+	 * redirected, so pass both the original and resolved devices.
+	 */
+	netif_put_rx_queue_lease_locked(orig_dev, dev);
+	return ret;
+}
+
+static void __netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+ const struct pp_memory_provider_params *old_p)
{
struct netdev_queue_config qcfg[2];
struct netdev_rx_queue *rxq;
int err;
- if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
- return;
-
rxq = __netif_get_rx_queue(dev, ifq_idx);
/* Callers holding a netdev ref may get here after we already
err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
WARN_ON(err && err != -ENETDOWN);
}
+
+/* Detach memory provider @old_p from rx queue @ifq_idx of @dev.
+ *
+ * Mirrors netif_mp_open_rxq(): if the queue is leased, redirect to the
+ * backing queue/device (netif_get_rx_queue_lease_locked() updates @dev
+ * and @ifq_idx in place) before doing the actual close.
+ */
+void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+			const struct pp_memory_provider_params *old_p)
+{
+	struct net_device *orig_dev = dev;
+
+	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
+		return;
+
+	/* Fast path: queue is not leased, close it directly.
+	 * Note: do not "return" a void expression here; that is a C11
+	 * constraint violation (GCC extension) and trips checkpatch.
+	 */
+	if (!netif_rxq_is_leased(dev, ifq_idx)) {
+		__netif_mp_close_rxq(dev, ifq_idx, old_p);
+		return;
+	}
+
+	/* Leased queue: resolve to the backing queue/device and take its
+	 * lock. A failure here is unexpected (the open path succeeded),
+	 * hence the WARN.
+	 */
+	if (WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &ifq_idx)))
+		return;
+
+	__netif_mp_close_rxq(dev, ifq_idx, old_p);
+	netif_put_rx_queue_lease_locked(orig_dev, dev);
+}
+
+/* Invoke the memory provider's optional uninstall hook for @rxq.
+ * No-op when @p has no ops or no uninstall callback.
+ */
+void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
+			      const struct pp_memory_provider_params *p)
+{
+	if (p->mp_ops && p->mp_ops->uninstall)
+		p->mp_ops->uninstall(p->mp_priv, rxq);
+}
+
+/* Clean up memory provider state when a queue lease is torn down.
+ *
+ * If a memory provider was installed via the lease, it lives on the
+ * physical queue (the lease redirection guaranteed installation went
+ * to the physical queue), so it must be closed here: the physical
+ * queue can outlive the virtual queue and needs to be reconfigured to
+ * clear the memory provider.
+ *
+ * @phys_rxq: physical queue that backed the lease (provider owner)
+ * @virt_rxq: virtual queue the lease exposed
+ */
+void netif_rxq_cleanup_unlease(struct netdev_rx_queue *phys_rxq,
+			       struct netdev_rx_queue *virt_rxq)
+{
+	struct pp_memory_provider_params *p = &phys_rxq->mp_params;
+	unsigned int ifq_idx = get_netdev_rx_queue_index(phys_rxq);
+
+	/* No provider installed: nothing to tear down. */
+	if (!p->mp_ops)
+		return;
+
+	/* Uninstall from the virtual queue first, then reconfigure the
+	 * physical queue to drop the provider.
+	 */
+	__netif_mp_uninstall_rxq(virt_rxq, p);
+	__netif_mp_close_rxq(phys_rxq->dev, ifq_idx, p);
+}