wake_up(&dev->tx_wait);
}
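+/* Consume a completed RXDMAD_C (WED RRO) descriptor: resolve the rx token
+ * back to the buffer originally handed to the hardware, report its length
+ * and last-segment flag, and recycle frames flagged as repeated or stale.
+ */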
+static void *
+mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int *len, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_rro_rxdmad_c *dmad = e->buf;
+ u32 data1 = le32_to_cpu(dmad->data1);
+ u32 data2 = le32_to_cpu(dmad->data2);
+ struct mt76_txwi_cache *t;
+ u16 rx_token_id;
+ u8 ind_reason;
+ void *buf;
+
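+ /* data2 carries an rx token id instead of a buffer pointer */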
+ rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
+ t = mt76_rx_token_release(dev, rx_token_id);
+ if (!t)
+ return ERR_PTR(-EAGAIN);
+
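+ /* Sync the payload for CPU access using the parameters of the
+ * data queue the buffer was originally allocated for.
+ */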
+ q = &dev->q_rx[t->qid];
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ if (len)
+ *len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
+ if (more)
+ *more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
+
+ buf = t->ptr;
+ ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
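+ /* Duplicate or stale frames are dropped: recycle the buffer and
+ * report -EAGAIN so the caller skips this entry.
+ */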
+ if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
+ ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
+ mt76_put_page_pool_buf(buf, false);
+ buf = ERR_PTR(-EAGAIN);
+ }
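+ /* Buffer ownership has been transferred (or the buffer recycled),
+ * so clear the token entry and put it back on the rxwi cache.
+ */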
+ t->ptr = NULL;
+ t->dma_addr = 0;
+
+ mt76_put_rxwi(dev, t);
+
+ return buf;
+}
+
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more, bool *drop)
+ int *len, u32 *info, bool *more, bool *drop, bool flush)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
u32 ctrl, desc_info, buf1;
void *buf = e->buf;
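+ /* RXDMAD_C completions reference the payload via an rx token rather
+ * than via e->buf; resolve it before the RRO early exit below.
+ */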
+ if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
+ buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
+
if (mt76_queue_is_wed_rro(q))
goto done;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
}
static int
if (!data)
break;
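+ /* ERR_PTR(-EAGAIN): the descriptor was consumed but the frame was
+ * dropped (missing token, duplicate or stale); account for it and
+ * keep polling.
+ */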
+ if (PTR_ERR(data) == -EAGAIN) {
+ done++;
+ continue;
+ }
+
if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
dev->drv->rx_rro_ind_process(dev, data);
- if (mt76_queue_is_wed_rro(q)) {
+ if (mt76_queue_is_wed_rro(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
done++;
continue;
}
#define MT_WED_RRO_Q_DATA(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C __MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
struct mt76_dev;
struct mt76_phy;
MT76_WED_RRO_Q_DATA,
MT76_WED_RRO_Q_MSDU_PG,
MT76_WED_RRO_Q_IND,
+ MT76_WED_RRO_Q_RXDMAD_C,
};
struct mt76_bus_ops {
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
return mt76_queue_is_wed_rro(q) &&