git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: airoha: Fix page recycling in airoha_qdma_rx_process()
authorLorenzo Bianconi <lorenzo@kernel.org>
Thu, 15 May 2025 06:33:06 +0000 (08:33 +0200)
committerJakub Kicinski <kuba@kernel.org>
Fri, 16 May 2025 23:35:45 +0000 (16:35 -0700)
Do not recycle the page twice in airoha_qdma_rx_process routine in case
of error. Just run dev_kfree_skb() if the skb has been allocated and marked
for recycling. Run page_pool_put_full_page() directly if the skb has not
been allocated yet.
Moreover, rely on DMA address from queue entry element instead of reading
it from the DMA descriptor for DMA syncing in airoha_qdma_rx_process().

Fixes: e12182ddb6e71 ("net: airoha: Enable Rx Scatter-Gather")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20250515-airoha-fix-rx-process-error-condition-v2-1-657e92c894b9@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/airoha/airoha_eth.c

index d748dc6de92367365db9f9548f9af52a7fdac187..1e9ab65218ff144d99b47f5d4ad5ff4f9c227418 100644 (file)
@@ -614,7 +614,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
                struct airoha_queue_entry *e = &q->entry[q->tail];
                struct airoha_qdma_desc *desc = &q->desc[q->tail];
                u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
-               dma_addr_t dma_addr = le32_to_cpu(desc->addr);
                struct page *page = virt_to_head_page(e->buf);
                u32 desc_ctrl = le32_to_cpu(desc->ctrl);
                struct airoha_gdm_port *port;
@@ -623,22 +622,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
                if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
                        break;
 
-               if (!dma_addr)
-                       break;
-
-               len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-               if (!len)
-                       break;
-
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
 
-               dma_sync_single_for_cpu(eth->dev, dma_addr,
+               dma_sync_single_for_cpu(eth->dev, e->dma_addr,
                                        SKB_WITH_OVERHEAD(q->buf_size), dir);
 
+               len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
                data_len = q->skb ? q->buf_size
                                  : SKB_WITH_OVERHEAD(q->buf_size);
-               if (data_len < len)
+               if (!len || data_len < len)
                        goto free_frag;
 
                p = airoha_qdma_get_gdm_port(eth, desc);
@@ -701,9 +694,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
                q->skb = NULL;
                continue;
 free_frag:
-               page_pool_put_full_page(q->page_pool, page, true);
-               dev_kfree_skb(q->skb);
-               q->skb = NULL;
+               if (q->skb) {
+                       dev_kfree_skb(q->skb);
+                       q->skb = NULL;
+               } else {
+                       page_pool_put_full_page(q->page_pool, page, true);
+               }
        }
        airoha_qdma_fill_rx_queue(q);