q->tail = q->head;
}
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
mt76_dma_sync_idx(dev, q);
}
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-{
- __mt76_dma_queue_reset(dev, q, true);
-}
-
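The two hunks above fold the old __mt76_dma_queue_reset() helper into mt76_dma_queue_reset() itself, so every caller now states explicitly whether the hardware ring indices should be rewound. The body between the NULL check and mt76_dma_sync_idx() is elided from this hunk; a minimal sketch of how the flag is expected to gate the rewind (the Q_WRITE() accessor is an assumption based on the surrounding dma.c code, not shown in this diff):

	/* sketch, not part of the patch: a full reset rewinds the
	 * producer/consumer indices; reset_idx == false leaves the
	 * hardware state untouched and only resyncs the cached copies.
	 */
	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);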
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
return 0;
}
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
return 0;
}
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx);
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
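With the wrapper gone, any remaining two-argument caller of mt76_dma_queue_reset() now fails to compile, which forces every call site to choose a behavior explicitly. A representative migration, mirroring the call-site changes in this patch:

	/* before: the index rewind was implicit */
	mt76_dma_queue_reset(dev, q);

	/* after: the caller decides whether the hardware indices are rewound */
	mt76_dma_queue_reset(dev, q, true);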
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- dev->queue_ops->reset_q(dev, q);
+ dev->queue_ops->reset_q(dev, q, true);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_wed_dma_setup(dev, q, true);
}
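Note the ordering in the helper above: the ring is reset first, with its indices rewound, and only then re-registered with WED, so WED attaches to a ring whose state is already clean. A hypothetical driver reset path using it (the loop and queue names are illustrative, not taken from this patch):

	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);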
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
};
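The driver call sites below reach reset_q through the mt76_queue_reset() wrapper macro, which forwards its arguments verbatim, so it picks up the new flag with no further changes. A sketch of that wrapper, assuming it follows the same variadic pattern as the other queue-op macros in mt76.h:

#define mt76_queue_reset(dev, ...) \
	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)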
enum mt76_phy_type {
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
}
mt76_tx_status_check(&dev->mt76, true);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_queue_reset(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
mt76_tx_status_check(&dev->mt76, true);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
}
mt76_tx_status_check(&dev->mt76, true);
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
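The two reset_idx = false sites above are the motivation for the new flag: the RRO data and MSDU-page rings are handed over to the WED block, so the host must not rewind the hardware cpu/dma indices it is about to share. MT_QFLAG_WED is cleared first so the reset goes through the ring's own registers rather than the WED mapping, and afterwards the ring is marked as almost fully occupied. The same lines restated with annotations (the comments are one reading of the intent, not taken from the source):

	q->flags &= ~MT_QFLAG_WED;		/* reset via q->regs, not the WED mapping */
	mt76_dma_queue_reset(dev, q, false);	/* keep the hw indices intact */
	mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
	q->head = q->ndesc - 1;			/* all but one descriptor owned by hw */
	q->queued = q->head;			/* refill path sees the ring as full */

The MT76_WED_RRO_Q_IND case, by contrast, is filled by the host through mt76_dma_rx_fill(), so it keeps the full reset with reset_idx = true.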