The following typical script is extremely disruptive, because each graft
operation calls dev_deactivate(), which resets all the queues of the
device, so attaching TXQS per-queue qdiscs performs on the order of
TXQS^2 queue resets.
QPARAM="limit 100000 flow_limit 1000 buckets 4096"
TXQS=64

for ETH in eth1
do
	# Replace the root qdisc with mq.
	tc qd del dev $ETH root 2>/dev/null
	tc qd add dev $ETH root handle 1: mq

	# Attach one fq qdisc per TX queue.  Each of these grafts calls
	# dev_deactivate(), resetting all $TXQS queues every time.
	for i in `seq 1 $TXQS`
	do
		slot=$( printf %x $(( i )) )	# tc minor handles are hexadecimal
		tc qd add dev $ETH parent 1:$slot fq $QPARAM
	done
done
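To make the cost concrete, here is a simplified sketch (not the verbatim
kernel code) of what each graft triggered before this patch;
dev_reset_queue() and netdev_get_tx_queue() are the real helpers used by
dev_deactivate_many(), but the function below is reduced to the parts
relevant here:

/* Simplified sketch of the pre-patch behavior: grafting a qdisc on an
 * IFF_UP device calls dev_deactivate(), which resets the qdisc of
 * EVERY TX queue.  Attaching one fq qdisc to each of the TXQS queues
 * therefore performs on the order of TXQS * TXQS qdisc resets.
 */
static void dev_deactivate_sketch(struct net_device *dev)
{
	unsigned int i;

	/* ... stop TX and wait for outstanding qdisc_run() calls ... */

	for (i = 0; i < dev->num_tx_queues; i++)
		dev_reset_queue(dev, netdev_get_tx_queue(dev, i), NULL);
}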
One can add "ip link set dev $ETH down/up" around the qdisc setup to
reduce the disruption time, since the graft paths skip dev_deactivate()
while the device is down:
QPARAM="limit 100000 flow_limit 1000 buckets 4096"
TXQS=64

for ETH in eth1
do
	ip link set dev $ETH down	# queues are reset once here...
	tc qd del dev $ETH root 2>/dev/null
	tc qd add dev $ETH root handle 1: mq
	for i in `seq 1 $TXQS`
	do
		slot=$( printf %x $(( i )) )
		tc qd add dev $ETH parent 1:$slot fq $QPARAM
	done
	ip link set dev $ETH up		# ...and TX is re-enabled once here
done
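A minimal sketch of the guard that makes this work; it mirrors the
"if (dev->flags & IFF_UP)" context lines visible in the diff further
down, and the wrapper function name is made up for illustration:

/* Sketch: every qdisc graft path brackets the expensive deactivation
 * with this check, so while the device is administratively down the
 * per-graft reset is skipped entirely.
 */
static void graft_deactivate_sketch(struct net_device *dev)
{
	if (dev->flags & IFF_UP)
		dev_deactivate(dev);	/* pre-patch signature */
}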
Instead, we can add a @reset_needed flag to dev_deactivate() and
dev_deactivate_many(). This flag is set to true at device dismantle
time or from linkwatch_do_dev(), and to false for graft operations.
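As an illustration of the resulting convention (the wrapper function
below is made up; the two calls mirror the call sites changed in the
diff):

/* Illustration only: how callers pick @reset_needed after this patch. */
static void reset_needed_examples(struct net_device *dev)
{
	/* Device dismantle and linkwatch (carrier loss): purge the queues. */
	dev_deactivate(dev, true);

	/* Qdisc grafts: only quiesce TX; per-queue qdiscs keep their state. */
	dev_deactivate(dev, false);
}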
In the future, we might stop only one queue instead of the whole
device, i.e. call dev_deactivate_queue() instead of dev_deactivate().
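A rough sketch of that possible follow-up; dev_deactivate_queue() is
currently a static helper in net/sched/sch_generic.c, so the exported
form, the exact signature and the wrapper name below are speculative:

/* Hypothetical future per-queue variant: grafting onto a single TX
 * queue would quiesce only that queue instead of the whole device.
 */
static void graft_single_txq_sketch(struct net_device *dev, unsigned int i,
				    struct Qdisc *new_qdisc)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
	struct Qdisc *old_qdisc;

	dev_deactivate_queue(dev, txq, &noop_qdisc);	/* hypothetical export */
	old_qdisc = dev_graft_qdisc(txq, new_qdisc);
	/* ... reactivate the queue and release old_qdisc ... */
}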
I think the problem (quadratic behavior) was added in commit
2fb541c862c9 ("net: sch_generic: aviod concurrent reset and enqueue op
for lockless qdisc"), but it does not look serious enough to deserve
risky backports.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Reviewed-by: Victor Nogueira <victor@mojatatu.com>
Link: https://patch.msgid.link/20260307163430.470644-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

 void dev_init_scheduler(struct net_device *dev);
 void dev_shutdown(struct net_device *dev);
 void dev_activate(struct net_device *dev);
-void dev_deactivate(struct net_device *dev);
-void dev_deactivate_many(struct list_head *head);
+void dev_deactivate(struct net_device *dev, bool reset_needed);
+void dev_deactivate_many(struct list_head *head, bool reset_needed);
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                               struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);

                 smp_mb__after_atomic(); /* Commit netif_running(). */
         }

-        dev_deactivate_many(head);
+        dev_deactivate_many(head, true);

         list_for_each_entry(dev, head, close_list) {
                 const struct net_device_ops *ops = dev->netdev_ops;

                 if (netif_carrier_ok(dev))
                         dev_activate(dev);
                 else
-                        dev_deactivate(dev);
+                        dev_deactivate(dev, true);

                 netif_state_change(dev);
         }
 }

                 if (dev->flags & IFF_UP)
-                        dev_deactivate(dev);
+                        dev_deactivate(dev, false);

                 qdisc_offload_graft_root(dev, new, old, extack);

 /**
  * dev_deactivate_many - deactivate transmissions on several devices
  * @head: list of devices to deactivate
+ * @reset_needed: qdisc should be reset if true.
  *
  * This function returns only when all outstanding transmissions
  * have completed, unless all devices are in dismantle phase.
  */
-void dev_deactivate_many(struct list_head *head)
+void dev_deactivate_many(struct list_head *head, bool reset_needed)
 {
         bool sync_needed = false;
         struct net_device *dev;

         if (sync_needed)
                 synchronize_net();

-        list_for_each_entry(dev, head, close_list) {
-                netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
+        if (reset_needed) {
+                list_for_each_entry(dev, head, close_list) {
+                        netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);

-                if (dev_ingress_queue(dev))
-                        dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
+                        if (dev_ingress_queue(dev))
+                                dev_reset_queue(dev, dev_ingress_queue(dev),
+                                                NULL);
+                }
         }

         /* Wait for outstanding qdisc_run calls. */
         }
 }

-void dev_deactivate(struct net_device *dev)
+void dev_deactivate(struct net_device *dev, bool reset_needed)
 {
         LIST_HEAD(single);

         list_add(&dev->close_list, &single);
-        dev_deactivate_many(&single);
+        dev_deactivate_many(&single, reset_needed);
         list_del(&single);
 }
 EXPORT_SYMBOL(dev_deactivate);

         int ret = 0;

         if (up)
-                dev_deactivate(dev);
+                dev_deactivate(dev, false);

         for (i = 0; i < dev->num_tx_queues; i++) {
                 ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

         struct Qdisc *old_q;

         if (dev->flags & IFF_UP)
-                dev_deactivate(dev);
+                dev_deactivate(dev, false);
         old_q = dev_graft_qdisc(dev_queue, new_q);
         if (new_q)
                 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

                 struct Qdisc *qdisc;

                 if (dev->flags & IFF_UP)
-                        dev_deactivate(dev);
+                        dev_deactivate(dev, false);
                 qdisc = dev_graft_qdisc(queue_old, NULL);
                 WARN_ON(qdisc != cl_old->leaf.q);
         }

         struct net_device *dev = qdisc_dev(sch);

         if (dev->flags & IFF_UP)
-                dev_deactivate(dev);
+                dev_deactivate(dev, false);

         *old = dev_graft_qdisc(dev_queue, new);
         if (new)

                 return -EINVAL;

         if (dev->flags & IFF_UP)
-                dev_deactivate(dev);
+                dev_deactivate(dev, false);

         *old = dev_graft_qdisc(dev_queue, new);

                 return -EINVAL;

         if (dev->flags & IFF_UP)
-                dev_deactivate(dev);
+                dev_deactivate(dev, false);

         /* In offload mode, the child Qdisc is directly attached to the netdev
          * TX queue, and thus, we need to keep its refcount elevated in order