 		if (ret)
 			return ret;
 
-		/* We need to set cpu idx pointer before resetting the EMI
-		 * queues.
-		 */
-		mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
-			&dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
-		mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], true);
+		if (!mtk_wed_device_active(&mdev->mmio.wed)) {
+			/* We need to set cpu idx pointer before resetting the
+			 * EMI queues.
+			 */
+			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+				&dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+			mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+					 true);
+		}
 		goto start_hw_rro;
 	}
[...]

 					  MT7996_RRO_MSDU_PG_SIZE_PER_CR);
 	}
 
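+	/* The EMI index rings are only consumed by the host when WED is not
+	 * handling RRO, so skip the allocation when WED is active.
+	 */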
-	if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+	if (!mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+	    dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
 		ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
-					  sizeof(dev->wed_rro.emi_rings_cpu.ptr),
+					  sizeof(*dev->wed_rro.emi_rings_cpu.ptr),
 					  &dev->wed_rro.emi_rings_cpu.phy_addr,
 					  GFP_KERNEL);
 		if (!ptr)
 			return -ENOMEM;
 
 		dev->wed_rro.emi_rings_cpu.ptr = ptr;
 
 		ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
-					  sizeof(dev->wed_rro.emi_rings_dma.ptr),
+					  sizeof(*dev->wed_rro.emi_rings_dma.ptr),
 					  &dev->wed_rro.emi_rings_dma.phy_addr,
 					  GFP_KERNEL);
 		if (!ptr)
 			return -ENOMEM;
 
 		dev->wed_rro.emi_rings_dma.ptr = ptr;
 	}

[...]
 				   dev->wed_rro.msdu_pg[i].phy_addr);
 	}
 
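+	/* The EMI index rings were obtained via dmam_alloc_coherent(); release
+	 * them explicitly here, since device-managed memory is otherwise only
+	 * returned when the driver detaches.
+	 */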
+	if (dev->wed_rro.emi_rings_cpu.ptr)
+		dmam_free_coherent(dev->mt76.dma_dev,
+				   sizeof(*dev->wed_rro.emi_rings_cpu.ptr),
+				   dev->wed_rro.emi_rings_cpu.ptr,
+				   dev->wed_rro.emi_rings_cpu.phy_addr);
+
+	if (dev->wed_rro.emi_rings_dma.ptr)
+		dmam_free_coherent(dev->mt76.dma_dev,
+				   sizeof(*dev->wed_rro.emi_rings_dma.ptr),
+				   dev->wed_rro.emi_rings_dma.ptr,
+				   dev->wed_rro.emi_rings_dma.phy_addr);
+
 	if (!dev->wed_rro.session.ptr)
 		return;