git.ipfire.org Git - thirdparty/linux.git/commitdiff
wifi: mt76: Get rid of dma_sync_single_for_device() for MMIO devices
author: Lorenzo Bianconi <lorenzo@kernel.org>
Wed, 25 Jun 2025 17:40:30 +0000 (19:40 +0200)
committer: Felix Fietkau <nbd@nbd.name>
Mon, 7 Jul 2025 16:05:09 +0000 (18:05 +0200)
Since the page_pool for MT76 MMIO devices are created with
PP_FLAG_DMA_SYNC_DEV flag, we do not need to sync_for_device each page
received from the pool since it is already done by the page_pool
codebase.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20250625-mt76-sync-for-device-v1-1-e687e3278e1a@kernel.org
Signed-off-by: Felix Fietkau <nbd@nbd.name>
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/wed.c

index 35b4ec91979e6a55b80feec0eca68afa28d2c5b9..87f531297f8513b0caee2d034184ddd8fa3b6a21 100644 (file)
@@ -643,10 +643,8 @@ mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf = {};
-               enum dma_data_direction dir;
-               dma_addr_t addr;
-               int offset;
                void *buf = NULL;
+               int offset;
 
                if (mt76_queue_is_wed_rro_ind(q))
                        goto done;
@@ -655,11 +653,8 @@ mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
                if (!buf)
                        break;
 
-               addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-               dir = page_pool_get_dma_dir(q->page_pool);
-               dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
-
-               qbuf.addr = addr + q->buf_offset;
+               qbuf.addr = page_pool_get_dma_addr(virt_to_head_page(buf)) +
+                           offset + q->buf_offset;
 done:
                qbuf.len = len - q->buf_offset;
                qbuf.skip_unmap = false;
index f89e4537555c59ac05a64067a43d4a806917bb1d..63f69e152b1cbb5bc0170a44976be1c7c52f21dc 100644 (file)
@@ -34,11 +34,10 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
        struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
        struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
-       int i, len = SKB_WITH_OVERHEAD(q->buf_size);
        struct mt76_txwi_cache *t = NULL;
+       int i;
 
        for (i = 0; i < size; i++) {
-               enum dma_data_direction dir;
                dma_addr_t addr;
                u32 offset;
                int token;
@@ -53,9 +52,6 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
                        goto unmap;
 
                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-               dir = page_pool_get_dma_dir(q->page_pool);
-               dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
-
                desc->buf0 = cpu_to_le32(addr);
                token = mt76_rx_token_consume(dev, buf, t, addr);
                if (token < 0) {