return 0;
}
+static void airoha_qdma_wake_netdev_txqs(struct airoha_queue *q)
+{
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->qdma == qdma)
+ netif_tx_wake_all_queues(port->dev);
+ }
+ q->txq_stopped = false;
+}
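
The helper above, and the hunks below, depend on a new per-hw-queue txq_stopped flag whose declaration is not part of this excerpt. A minimal sketch of the assumed field, with the existing members of struct airoha_queue elided (its exact placement in the driver header is an assumption):

struct airoha_queue {
	/* ... existing ring bookkeeping: ndesc, queued, free_thr, lock, ... */
	bool txq_stopped;	/* a feeding net_device TX queue was stopped in xmit */
};
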
+
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
txq = netdev_get_tx_queue(skb->dev, queue);
netdev_tx_completed_queue(txq, 1, skb->len);
- if (netif_tx_queue_stopped(txq) &&
- q->ndesc - q->queued >= q->free_thr)
- netif_tx_wake_queue(txq);
-
dev_kfree_skb_any(skb);
}
+
+ if (q->txq_stopped && q->ndesc - q->queued >= q->free_thr) {
+ /* Multiple net_device TX queues can share the same hw
+ * QDMA TX queue, so the packets still in flight in hw
+ * are not guaranteed to belong to the net_device TX
+ * queue that was stopped in the xmit path. To avoid a
+ * potential net_device TX queue stall, wake all the
+ * net_device TX queues feeding this hw QDMA TX queue.
+ */
+ airoha_qdma_wake_netdev_txqs(q);
+ }
+
unlock:
spin_unlock_bh(&q->lock);
}
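
For context on the comment above: the completion path only sees the txq of the skb it has just freed, so if the stopped net_device queue has no packets left in the hw ring, the old per-txq check could never wake it. Below is a hedged sketch of how two different net_device TX queue indices can land on the same hw QDMA ring in the xmit path; the modulo fold and the q_tx array access are illustrative assumptions about the mapping, not a quote of the driver:

static struct airoha_queue *
example_pick_hw_txq(struct airoha_qdma *qdma, struct sk_buff *skb)
{
	/* Two queue indices that differ by ARRAY_SIZE(qdma->q_tx) fold
	 * onto the same hw ring, so a wake decision keyed on a single
	 * completed skb cannot cover every stopped net_device TX queue.
	 */
	u16 qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);

	return &qdma->q_tx[qid];
}
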
if (q->queued + nr_frags >= q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
+ q->txq_stopped = true;
spin_unlock_bh(&q->lock);
return NETDEV_TX_BUSY;
}
TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
- if (q->ndesc - q->queued < q->free_thr)
+ if (q->ndesc - q->queued < q->free_thr) {
netif_tx_stop_queue(txq);
+ q->txq_stopped = true;
+ }
spin_unlock_bh(&q->lock);
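
A note on the design: waking every net_device TX queue that feeds the hw ring can produce spurious wakeups for queues that still cannot make progress, but as the first xmit hunk above shows, those are harmless since the xmit path re-checks the descriptor budget under q->lock and stops the queue again (returning NETDEV_TX_BUSY). Both the txq_stopped writers in the xmit path and the reader in the TX napi poll sit before a spin_unlock_bh(&q->lock), so the flag is serialized by the queue lock and appears to need no extra barriers.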