netdevsim: implement peer queue flow control
author Breno Leitao <leitao@debian.org>
Fri, 11 Jul 2025 17:06:59 +0000 (10:06 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 15 Jul 2025 00:36:50 +0000 (17:36 -0700)
Add a flow control mechanism between paired netdevsim devices to stop the
TX queue during high-traffic scenarios. When a receive queue becomes
congested (approaching the NSIM_RING_SIZE limit), the corresponding
transmit queue on the peer device is stopped using netif_subqueue_try_stop().

Once the receive queue has sufficient capacity again, the peer's
transmit queue is resumed with netif_tx_wake_queue().
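
As a rough, self-contained sketch of that stop/wake pairing (MY_RING_SIZE and
the my_* helpers are placeholders made up for illustration;
netif_subqueue_try_stop(), netif_tx_wake_queue(), netdev_get_tx_queue() and
the skb_queue helpers are the kernel primitives the patch actually uses):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/netdev_queues.h>

#define MY_RING_SIZE 256	/* stand-in for NSIM_RING_SIZE */

/* RX side: queue the skb, then stop the sender's TX queue once the ring
 * is full.  netif_subqueue_try_stop() re-wakes the queue right away if
 * at least start_thrs (half a ring here) slots are still free.
 */
static void my_rx_enqueue(struct net_device *tx_dev,
			  struct sk_buff_head *ring, struct sk_buff *skb)
{
	skb_queue_tail(ring, skb);

	if (skb_queue_len(ring) >= MY_RING_SIZE)
		netif_subqueue_try_stop(tx_dev, skb_get_queue_mapping(skb),
					MY_RING_SIZE - skb_queue_len(ring),
					MY_RING_SIZE / 2);
}

/* RX side, after the NAPI poll has drained the ring: wake the sender's
 * TX queue of the same index if it is still stopped.
 */
static void my_rx_drained(struct net_device *tx_dev, u16 idx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(tx_dev, idx);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
}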

Key changes:
  * Add nsim_stop_tx_queue() to pause the peer's TX queue when an RX queue is full
  * Add nsim_start_peer_tx_queue() to resume peer TX when RX queue drains
  * Implement queue mapping validation to ensure the TX/RX queue counts match
  * Wake all queues during device unlinking to prevent stuck queues
  * Use RCU protection when accessing peer device references
  * Wake the queues when the queue counts change
  * Remove IFF_NO_QUEUE, since the device now enqueues packets

The flow control only activates when devices have matching TX/RX queue
counts to ensure proper queue mapping.
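
As a minimal sketch of that 1:1 mapping assumption (the helper name is made
up for illustration; the patch itself compares real_num_tx_queues and
num_rx_queues on the two peers before stopping or waking anything):

#include <linux/netdevice.h>

/* TX queue i on the sending device is assumed to feed RX queue i (and its
 * NAPI instance) on the receiving peer.  That mapping is only well defined
 * when the counts line up; otherwise flow control stays off and the
 * receiver keeps its existing drop-on-overflow behaviour.
 */
static bool my_peer_queues_aligned(const struct net_device *snd,
				   const struct net_device *rcv)
{
	return snd->real_num_tx_queues == rcv->num_rx_queues;
}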

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://patch.msgid.link/20250711-netdev_flow_control-v3-1-aa1d5a155762@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/netdevsim/bus.c
drivers/net/netdevsim/ethtool.c
drivers/net/netdevsim/netdev.c

diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 830abcb98476587940b79be36705ee00f86b51d3..70e8c38ddad6b63611113d7743f89158d78cfef3 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -384,6 +384,9 @@ static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf,
        err = 0;
        RCU_INIT_POINTER(nsim->peer, NULL);
        RCU_INIT_POINTER(peer->peer, NULL);
+       synchronize_net();
+       netif_tx_wake_all_queues(dev);
+       netif_tx_wake_all_queues(peer->netdev);
 
 out_put_netns:
        put_net(ns);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 4d191a3293c74ca89551bf0e15d8ad0d8e44081b..f631d90c428ac09440ee4a3f6c9b000a37861fa7 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -101,6 +101,22 @@ nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
        ch->combined_count = ns->ethtool.channels;
 }
 
+static void
+nsim_wake_queues(struct net_device *dev)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       struct netdevsim *peer;
+
+       synchronize_net();
+       netif_tx_wake_all_queues(dev);
+
+       rcu_read_lock();
+       peer = rcu_dereference(ns->peer);
+       if (peer)
+               netif_tx_wake_all_queues(peer->netdev);
+       rcu_read_unlock();
+}
+
 static int
 nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
 {
@@ -113,6 +129,11 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
                return err;
 
        ns->ethtool.channels = ch->combined_count;
+
+       /* Only wake up queues if devices are linked */
+       if (rcu_access_pointer(ns->peer))
+               nsim_wake_queues(dev);
+
        return 0;
 }
 
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index f316e44130f722759f0ac4e5baac6a2f6956dc7d..611e7f65291cd9273880af747e17f28adefe980f 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -37,7 +37,53 @@ MODULE_IMPORT_NS("NETDEV_INTERNAL");
 
 #define NSIM_RING_SIZE         256
 
-static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+static void nsim_start_peer_tx_queue(struct net_device *dev, struct nsim_rq *rq)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       struct net_device *peer_dev;
+       struct netdevsim *peer_ns;
+       struct netdev_queue *txq;
+       u16 idx;
+
+       idx = rq->napi.index;
+       rcu_read_lock();
+       peer_ns = rcu_dereference(ns->peer);
+       if (!peer_ns)
+               goto out;
+
+       /* TX device */
+       peer_dev = peer_ns->netdev;
+       if (dev->real_num_tx_queues != peer_dev->num_rx_queues)
+               goto out;
+
+       txq = netdev_get_tx_queue(peer_dev, idx);
+       if (!netif_tx_queue_stopped(txq))
+               goto out;
+
+       netif_tx_wake_queue(txq);
+out:
+       rcu_read_unlock();
+}
+
+static void nsim_stop_tx_queue(struct net_device *tx_dev,
+                              struct net_device *rx_dev,
+                              struct nsim_rq *rq,
+                              u16 idx)
+{
+       /* If the queue counts differ, do not stop, since it is not
+        * easy to tell which TX queue is mapped here
+        */
+       if (rx_dev->real_num_tx_queues != tx_dev->num_rx_queues)
+               return;
+
+       /* rq is the queue on the receive side */
+       netif_subqueue_try_stop(tx_dev, idx,
+                               NSIM_RING_SIZE - skb_queue_len(&rq->skb_queue),
+                               NSIM_RING_SIZE / 2);
+}
+
+static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
+                       struct nsim_rq *rq, struct sk_buff *skb)
 {
        if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
                dev_kfree_skb_any(skb);
@@ -45,13 +91,22 @@ static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
        }
 
        skb_queue_tail(&rq->skb_queue, skb);
+
+       /* Stop the peer TX queue to avoid dropping packets later */
+       if (skb_queue_len(&rq->skb_queue) >= NSIM_RING_SIZE)
+               nsim_stop_tx_queue(tx_dev, rx_dev, rq,
+                                  skb_get_queue_mapping(skb));
+
        return NET_RX_SUCCESS;
 }
 
-static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
+static int nsim_forward_skb(struct net_device *tx_dev,
+                           struct net_device *rx_dev,
+                           struct sk_buff *skb,
                            struct nsim_rq *rq)
 {
-       return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+       return __dev_forward_skb(rx_dev, skb) ?:
+               nsim_napi_rx(tx_dev, rx_dev, rq, skb);
 }
 
 static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -86,7 +141,7 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_linearize(skb);
 
        skb_tx_timestamp(skb);
-       if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
+       if (unlikely(nsim_forward_skb(dev, peer_dev, skb, rq) == NET_RX_DROP))
                goto out_drop_cnt;
 
        if (!hrtimer_active(&rq->napi_timer))
@@ -351,6 +406,7 @@ static int nsim_rcv(struct nsim_rq *rq, int budget)
                        dev_dstats_rx_dropped(dev);
        }
 
+       nsim_start_peer_tx_queue(dev, rq);
        return i;
 }
 
@@ -864,10 +920,8 @@ static void nsim_setup(struct net_device *dev)
        ether_setup(dev);
        eth_hw_addr_random(dev);
 
-       dev->tx_queue_len = 0;
        dev->flags &= ~IFF_MULTICAST;
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
-                          IFF_NO_QUEUE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->features |= NETIF_F_HIGHDMA |
                         NETIF_F_SG |
                         NETIF_F_FRAGLIST |