static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->qdma->eth;
- int i;
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ /* qid: index of this ring inside qdma->q_tx[], recovered via pointer
+  * arithmetic so the matching per-ring CPU/DMA index registers can be
+  * programmed below.
+  */
+ int i, qid = q - &qdma->q_tx[0];
+ /* hw ring index to program; stays 0 when no entry lands on tx_list
+  * below (i.e. the ring held no in-flight buffers).
+  */
+ u16 index = 0;
spin_lock_bh(&q->lock);
for (i = 0; i < q->ndesc; i++) {
struct airoha_queue_entry *e = &q->entry[i];
+ struct airoha_qdma_desc *desc = &q->desc[i];
if (!e->dma_addr)
continue;
e->dma_addr = 0;
e->skb = NULL;
list_add_tail(&e->list, &q->tx_list);
+ /* NOTE(review): dma_addr/skb are dropped here without a visible
+  * dma_unmap/dev_kfree_skb -- presumably completion processing has
+  * already released them before cleanup runs; confirm against the
+  * tx napi poll path.
+  */
+
+ /* Reset DMA descriptor */
+ WRITE_ONCE(desc->ctrl, 0);
+ WRITE_ONCE(desc->addr, 0);
+ WRITE_ONCE(desc->data, 0);
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+
q->queued--;
}
+
+ /* Restart the ring at the slot of the first recycled entry so the
+  * software and hardware views of the ring stay in sync.
+  */
+ if (!list_empty(&q->tx_list)) {
+ struct airoha_queue_entry *e;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+ }
+ /* Set TX_DMA_IDX to TX_CPU_IDX to notify the hw the QDMA TX ring is
+ * empty.
+ */
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, index));
+
spin_unlock_bh(&q->lock);
}