ch->combined_count = ns->ethtool.channels;
}
+static void
+nsim_wake_queues(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
+
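+	/* Make sure in-flight RCU readers on the datapath are done with
+	 * the old queue state before waking the queues on both peers.
+	 */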
+ synchronize_net();
+ netif_tx_wake_all_queues(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(ns->peer);
+ if (peer)
+ netif_tx_wake_all_queues(peer->netdev);
+ rcu_read_unlock();
+}
+
static int
nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct netdevsim *ns = netdev_priv(dev);
	int err;

	err = netif_set_real_num_queues(dev, ch->combined_count,
					ch->combined_count);
	if (err)
		return err;

	ns->ethtool.channels = ch->combined_count;
+
+ /* Only wake up queues if devices are linked */
+ if (rcu_access_pointer(ns->peer))
+ nsim_wake_queues(dev);
+
return 0;
}
#define NSIM_RING_SIZE 256
-static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+static void nsim_start_peer_tx_queue(struct net_device *dev, struct nsim_rq *rq)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct net_device *peer_dev;
+ struct netdevsim *peer_ns;
+ struct netdev_queue *txq;
+ u16 idx;
+
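+	/* With matching queue counts, the RX queue index maps 1:1 to the
+	 * peer's TX queue index (checked below).
+	 */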
+ idx = rq->napi.index;
+ rcu_read_lock();
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out;
+
+	/* The peer is the device that transmitted into this RX queue */
+ peer_dev = peer_ns->netdev;
+ if (dev->real_num_tx_queues != peer_dev->num_rx_queues)
+ goto out;
+
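+	/* Only wake the queue if it is actually stopped */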
+ txq = netdev_get_tx_queue(peer_dev, idx);
+ if (!netif_tx_queue_stopped(txq))
+ goto out;
+
+ netif_tx_wake_queue(txq);
+out:
+ rcu_read_unlock();
+}
+
+static void nsim_stop_tx_queue(struct net_device *tx_dev,
+ struct net_device *rx_dev,
+ struct nsim_rq *rq,
+ u16 idx)
+{
+	/* If the queue counts differ, do not stop: it is not easy to
+	 * tell which TX queue is mapped to this RX queue.
+	 */
+ if (rx_dev->real_num_tx_queues != tx_dev->num_rx_queues)
+ return;
+
+	/* rq is the queue on the receive side: try to stop the TX queue,
+	 * unless the ring has already drained back above half capacity.
+	 */
+	netif_subqueue_try_stop(tx_dev, idx,
+				NSIM_RING_SIZE - skb_queue_len(&rq->skb_queue),
+				NSIM_RING_SIZE / 2);
+}
+
+static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
+ struct nsim_rq *rq, struct sk_buff *skb)
{
	if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	skb_queue_tail(&rq->skb_queue, skb);
+
+	/* Stop the peer TX queue to avoid having to drop packets later */
+ if (skb_queue_len(&rq->skb_queue) >= NSIM_RING_SIZE)
+ nsim_stop_tx_queue(tx_dev, rx_dev, rq,
+ skb_get_queue_mapping(skb));
+
return NET_RX_SUCCESS;
}
-static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
+static int nsim_forward_skb(struct net_device *tx_dev,
+ struct net_device *rx_dev,
+ struct sk_buff *skb,
struct nsim_rq *rq)
{
- return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+ return __dev_forward_skb(rx_dev, skb) ?:
+ nsim_napi_rx(tx_dev, rx_dev, rq, skb);
}
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_linearize(skb);
skb_tx_timestamp(skb);
- if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(dev, peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
	if (!hrtimer_active(&rq->napi_timer))
		hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
static int nsim_rcv(struct nsim_rq *rq, int budget)
		else
			dev_dstats_rx_dropped(dev);
	}

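+	/* RX processing freed ring slots; give the stopped peer TX queue
+	 * a chance to restart.
+	 */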
+ nsim_start_peer_tx_queue(dev, rq);
return i;
}
ether_setup(dev);
eth_hw_addr_random(dev);
- dev->tx_queue_len = 0;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
- IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
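+	/* Keep the default qdisc: with TX queue flow control the stack
+	 * needs somewhere to back up packets while a queue is stopped.
+	 */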
dev->features |= NETIF_F_HIGHDMA |
NETIF_F_SG |
NETIF_F_FRAGLIST |