mt76_queue_is_wed_rro(q))
continue;
+ if (mt76_npu_device_active(dev) &&
+ mt76_queue_is_wed_rro(q))
+ continue;
+
if (!mt76_queue_is_wed_rro_rxdmad_c(q) &&
!mt76_queue_is_wed_rro_ind(q))
mt76_put_page_pool_buf(buf, false);
mt76_queue_is_wed_rro(q))
return;
+ if (mt76_npu_device_active(dev) &&
+ mt76_queue_is_wed_rro(q))
+ return;
+
+ if (mt76_queue_is_npu_txfree(q))
+ return;
+
mt76_dma_sync_idx(dev, q);
if (mt76_queue_is_npu(q))
mt76_npu_fill_rx_queue(dev, q);
FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_NPU_Q_TX(_n) __MT_NPU_Q(MT76_WED_Q_TX, _n)
#define MT_NPU_Q_RX(_n) __MT_NPU_Q(MT76_WED_Q_RX, _n)
+/* NPU TX-free notify queue flag: encodes MT76_WED_Q_TXFREE in the WED
+ * type/ring bitfields but deliberately leaves MT_QFLAG_WED clear, so an
+ * NPU TX-free queue can be told apart from a genuine WED TXFREE queue
+ * (see mt76_queue_is_npu_txfree()).
+ */
+#define MT_NPU_Q_TXFREE(_n) (FIELD_PREP(MT_QFLAG_WED_TYPE, MT76_WED_Q_TXFREE) | \
+ FIELD_PREP(MT_QFLAG_WED_RING, _n))
struct mt76_dev;
struct mt76_phy;
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}
+/* Return true if @q is an NPU TX-free notify queue.
+ *
+ * NPU queue flags reuse the WED type/ring bitfields without setting
+ * MT_QFLAG_WED (see MT_NPU_Q_TXFREE()), so a real WED queue is rejected
+ * first and only then is the type field matched against
+ * MT76_WED_Q_TXFREE. This mirrors the sibling NPU queue helpers above.
+ */
+static inline bool mt76_queue_is_npu_txfree(struct mt76_queue *q)
+{
+ if (q->flags & MT_QFLAG_WED)
+ return false;
+
+ return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
+
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
(is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
+ } else if (is_mt7992(&dev->mt76) &&
+ mt76_npu_device_active(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_NPU_Q_TXFREE(0);
}
if (mt7996_has_wa(dev)) {
/* tx free notify event from WA for band0 */
dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ } else if (mt76_npu_device_active(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_NPU_Q_TXFREE(0);
}
ret = mt76_queue_alloc(dev,
mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
continue;
+ if (mt76_npu_device_active(&dev->mt76) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
+ if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i]))
+ continue;
+
napi_disable(&dev->mt76.napi[i]);
}
napi_disable(&dev->mt76.tx_napi);
mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
continue;
+ if (mt76_npu_device_active(&dev->mt76) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
+ if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i]))
+ continue;
+
napi_enable(&dev->mt76.napi[i]);
local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);