net: airoha: Add dma_rmb() and READ_ONCE() in airoha_qdma_rx_process()
author     Lorenzo Bianconi <lorenzo@kernel.org>
           Tue, 7 Apr 2026 06:48:04 +0000 (08:48 +0200)
committer  Jakub Kicinski <kuba@kernel.org>
           Fri, 10 Apr 2026 03:35:15 +0000 (20:35 -0700)
Add the missing dma_rmb() in the airoha_qdma_rx_process() routine to make
sure the DMA read operations are completed once the NIC reports that
processing of the current descriptor is done. Moreover, add the missing
READ_ONCE() annotations in airoha_qdma_rx_process() for the DMA descriptor
control fields in order to avoid any compiler reordering.

Fixes: 23020f0493270 ("net: airoha: Introduce ethernet support for EN7581 SoC")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20260407-airoha_qdma_rx_process-fix-reordering-v3-1-91c36e9da31f@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
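
For context, the ordering pattern the patch applies can be sketched as
follows: READ_ONCE() keeps the compiler from re-reading or reordering the
loads of the DMA-owned descriptor words, and dma_rmb() orders the DONE-bit
check before the remaining descriptor fields (and the packet buffer) are
read. The sketch below is a minimal, self-contained illustration only; the
struct layout, the SKETCH_* masks and the sketch_rx_consume() helper are
hypothetical stand-ins, not the driver's actual definitions.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

/* Illustrative placeholders only; the driver's real masks live in
 * drivers/net/ethernet/airoha/ and may use different values. */
#define SKETCH_DESC_DONE_MASK   BIT(31)
#define SKETCH_DESC_LEN_MASK    GENMASK(15, 0)
#define SKETCH_RXMSG_HASH_MASK  GENMASK(14, 0)

struct sketch_rx_desc {
        __le32 ctrl;    /* DONE bit + packet length, written by the NIC */
        __le32 msg0;
        __le32 msg1;    /* flow/hash metadata, written by the NIC */
};

/* Consume one RX descriptor; returns the packet length, or -EAGAIN if
 * the NIC still owns the descriptor. */
static int sketch_rx_consume(struct sketch_rx_desc *desc, u32 *hash)
{
        u32 ctrl, msg1;

        /* Load the control word exactly once; READ_ONCE() stops the
         * compiler from re-reading or reordering this DMA-owned load. */
        ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
        if (!(ctrl & SKETCH_DESC_DONE_MASK))
                return -EAGAIN;

        /* Order the DONE check before any further reads of the
         * descriptor (or of the buffer it points to), so the CPU never
         * observes stale metadata after seeing DONE. */
        dma_rmb();

        /* Only now is it safe to read the remaining descriptor words. */
        msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
        *hash = FIELD_GET(SKETCH_RXMSG_HASH_MASK, msg1);

        return FIELD_GET(SKETCH_DESC_LEN_MASK, ctrl);
}

The hunks below apply the same sequence in airoha_qdma_rx_process(): the
READ_ONCE() load and DONE check, then dma_rmb(), and only then the reads of
the descriptor length and msg0/msg1 fields.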
drivers/net/ethernet/airoha/airoha_eth.c

index 91cb63a32d9904e0700bcce45b53624677d75c6c..9285a68f435feaabb380bc4bdf1529bf1ddf3138 100644
@@ -584,7 +584,7 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
                                    struct airoha_qdma_desc *desc)
 {
-       u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+       u32 port, sport, msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
 
        sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
        switch (sport) {
@@ -612,21 +612,24 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
        while (done < budget) {
                struct airoha_queue_entry *e = &q->entry[q->tail];
                struct airoha_qdma_desc *desc = &q->desc[q->tail];
-               u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
-               struct page *page = virt_to_head_page(e->buf);
-               u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+               u32 hash, reason, msg1, desc_ctrl;
                struct airoha_gdm_port *port;
                int data_len, len, p;
+               struct page *page;
 
+               desc_ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
                if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
                        break;
 
+               dma_rmb();
+
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
 
                dma_sync_single_for_cpu(eth->dev, e->dma_addr,
                                        SKB_WITH_OVERHEAD(q->buf_size), dir);
 
+               page = virt_to_head_page(e->buf);
                len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
                data_len = q->skb ? q->buf_size
                                  : SKB_WITH_OVERHEAD(q->buf_size);
@@ -670,8 +673,8 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
                         * DMA descriptor. Report DSA tag to the DSA stack
                         * via skb dst info.
                         */
-                       u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
-                                             le32_to_cpu(desc->msg0));
+                       u32 msg0 = le32_to_cpu(READ_ONCE(desc->msg0));
+                       u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, msg0);
 
                        if (sptag < ARRAY_SIZE(port->dsa_meta) &&
                            port->dsa_meta[sptag])
@@ -679,6 +682,7 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
                                                  &port->dsa_meta[sptag]->dst);
                }
 
+               msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
                hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
                if (hash != AIROHA_RXD4_FOE_ENTRY)
                        skb_set_hash(q->skb, jhash_1word(hash, 0),