]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.4
authorSasha Levin <sashal@kernel.org>
Mon, 19 Aug 2024 14:19:33 +0000 (10:19 -0400)
committerSasha Levin <sashal@kernel.org>
Mon, 19 Aug 2024 14:19:33 +0000 (10:19 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
18 files changed:
queue-5.4/alsa-hda-realtek-fix-noise-from-speakers-on-lenovo-i.patch [new file with mode: 0644]
queue-5.4/atm-idt77252-prevent-use-after-free-in-dequeue_rx.patch [new file with mode: 0644]
queue-5.4/net-axienet-autodetect-64-bit-dma-capability.patch [new file with mode: 0644]
queue-5.4/net-axienet-check-for-dma-mapping-errors.patch [new file with mode: 0644]
queue-5.4/net-axienet-drop-mdio-interrupt-registers-from-ethto.patch [new file with mode: 0644]
queue-5.4/net-axienet-factor-out-tx-descriptor-chain-cleanup.patch [new file with mode: 0644]
queue-5.4/net-axienet-fix-dma-descriptor-cleanup-path.patch [new file with mode: 0644]
queue-5.4/net-axienet-fix-register-defines-comment-description.patch [new file with mode: 0644]
queue-5.4/net-axienet-improve-dma-error-handling.patch [new file with mode: 0644]
queue-5.4/net-axienet-upgrade-descriptors-to-hold-64-bit-addre.patch [new file with mode: 0644]
queue-5.4/net-axienet-wrap-dma-pointer-writes-to-prepare-for-6.patch [new file with mode: 0644]
queue-5.4/net-dsa-vsc73xx-pass-value-in-phy_write-operation.patch [new file with mode: 0644]
queue-5.4/net-hns3-fix-a-deadlock-problem-when-config-tc-durin.patch [new file with mode: 0644]
queue-5.4/net-mlx5e-correctly-report-errors-for-ethtool-rx-flo.patch [new file with mode: 0644]
queue-5.4/netfilter-allow-ipv6-fragments-to-arrive-on-differen.patch [new file with mode: 0644]
queue-5.4/netfilter-nf_defrag_ipv6-use-net_generic-infra.patch [new file with mode: 0644]
queue-5.4/s390-uv-panic-for-set-and-remove-shared-access-uvc-e.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/alsa-hda-realtek-fix-noise-from-speakers-on-lenovo-i.patch b/queue-5.4/alsa-hda-realtek-fix-noise-from-speakers-on-lenovo-i.patch
new file mode 100644 (file)
index 0000000..d5044d2
--- /dev/null
@@ -0,0 +1,39 @@
+From 633be5b8298a6bd58262edbe6c905cbdeda84d19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Aug 2024 18:39:06 +0330
+Subject: ALSA: hda/realtek: Fix noise from speakers on Lenovo IdeaPad 3 15IAU7
+
+From: Parsa Poorshikhian <parsa.poorsh@gmail.com>
+
+[ Upstream commit ef9718b3d54e822de294351251f3a574f8a082ce ]
+
+Fix noise from speakers connected to AUX port when no sound is playing.
+The problem occurs because the `alc_shutup_pins` function includes
+a 0x10ec0257 vendor ID, which causes noise on Lenovo IdeaPad 3 15IAU7 with
+Realtek ALC257 codec when no sound is playing.
+Removing this vendor ID from the function fixes the bug.
+
+Fixes: 70794b9563fe ("ALSA: hda/realtek: Add more codec ID to no shutup pins list")
+Signed-off-by: Parsa Poorshikhian <parsa.poorsh@gmail.com>
+Link: https://patch.msgid.link/20240810150939.330693-1-parsa.poorsh@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 77034c31fa120..dddf3f55cb13b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -520,7 +520,6 @@ static void alc_shutup_pins(struct hda_codec *codec)
+       switch (codec->core.vendor_id) {
+       case 0x10ec0236:
+       case 0x10ec0256:
+-      case 0x10ec0257:
+       case 0x19e58326:
+       case 0x10ec0283:
+       case 0x10ec0285:
+-- 
+2.43.0
+
diff --git a/queue-5.4/atm-idt77252-prevent-use-after-free-in-dequeue_rx.patch b/queue-5.4/atm-idt77252-prevent-use-after-free-in-dequeue_rx.patch
new file mode 100644 (file)
index 0000000..50a3315
--- /dev/null
@@ -0,0 +1,56 @@
+From 6c4b47bc22712840a7f030a4c309a7a91fa3403a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Aug 2024 15:28:19 +0300
+Subject: atm: idt77252: prevent use after free in dequeue_rx()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit a9a18e8f770c9b0703dab93580d0b02e199a4c79 ]
+
+We can't dereference "skb" after calling vcc->push() because the skb
+is released.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/atm/idt77252.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 605e992d25df5..06e2fea1ffa92 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1117,8 +1117,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+       rpp->len += skb->len;
+       if (stat & SAR_RSQE_EPDU) {
++              unsigned int len, truesize;
+               unsigned char *l1l2;
+-              unsigned int len;
+               l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
+@@ -1188,14 +1188,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+               ATM_SKB(skb)->vcc = vcc;
+               __net_timestamp(skb);
++              truesize = skb->truesize;
+               vcc->push(vcc, skb);
+               atomic_inc(&vcc->stats->rx);
+-              if (skb->truesize > SAR_FB_SIZE_3)
++              if (truesize > SAR_FB_SIZE_3)
+                       add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+-              else if (skb->truesize > SAR_FB_SIZE_2)
++              else if (truesize > SAR_FB_SIZE_2)
+                       add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
+-              else if (skb->truesize > SAR_FB_SIZE_1)
++              else if (truesize > SAR_FB_SIZE_1)
+                       add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
+               else
+                       add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-autodetect-64-bit-dma-capability.patch b/queue-5.4/net-axienet-autodetect-64-bit-dma-capability.patch
new file mode 100644 (file)
index 0000000..162585a
--- /dev/null
@@ -0,0 +1,92 @@
+From d9a1d217fbdfba5ac225fa4bf7ef2a2c59597e9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:46 +0000
+Subject: net: axienet: Autodetect 64-bit DMA capability
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit f735c40ed93ccaeb52d026def47ac1a423df7133 ]
+
+When newer revisions of the Axienet IP are configured for a 64-bit bus,
+we *need* to write to the MSB part of an address register,
+otherwise the IP won't recognise this as a DMA start condition.
+This is even true when the actual DMA address comes from the lower 4 GB.
+
+To autodetect this configuration, at probe time we write all 1's to such
+an MSB register, and see if any bits stick. If this is configured for a
+32-bit bus, those MSB registers are RES0, so reading back 0 indicates
+that no MSB writes are necessary.
+On the other hand, reading anything other than 0 indicates the need to
+write the MSB registers, so we set the respective flag.
+
+The actual DMA mask stays at 32-bit for now. To help bisecting, a
+separate patch will enable allocations from higher addresses.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet.h  |  1 +
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++++++++++
+ 2 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 84c4c3655516a..fbaf3c987d9c1 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -161,6 +161,7 @@
+ #define XAE_FCC_OFFSET                0x0000040C /* Flow Control Configuration */
+ #define XAE_EMMC_OFFSET               0x00000410 /* EMAC mode configuration */
+ #define XAE_PHYC_OFFSET               0x00000414 /* RGMII/SGMII configuration */
++#define XAE_ID_OFFSET         0x000004F8 /* Identification register */
+ #define XAE_MDIO_MC_OFFSET    0x00000500 /* MII Management Config */
+ #define XAE_MDIO_MCR_OFFSET   0x00000504 /* MII Management Control */
+ #define XAE_MDIO_MWD_OFFSET   0x00000508 /* MII Management Write Data */
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 5440f39c5760d..1156719210cdb 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -152,6 +152,9 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+                                dma_addr_t addr)
+ {
+       axienet_dma_out32(lp, reg, lower_32_bits(addr));
++
++      if (lp->features & XAE_FEATURE_DMA_64BIT)
++              axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
+ }
+ static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+@@ -1954,6 +1957,29 @@ static int axienet_probe(struct platform_device *pdev)
+               goto free_netdev;
+       }
++      /* Autodetect the need for 64-bit DMA pointers.
++       * When the IP is configured for a bus width bigger than 32 bits,
++       * writing the MSB registers is mandatory, even if they are all 0.
++       * We can detect this case by writing all 1's to one such register
++       * and see if that sticks: when the IP is configured for 32 bits
++       * only, those registers are RES0.
++       * Those MSB registers were introduced in IP v7.1, which we check first.
++       */
++      if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
++              void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
++
++              iowrite32(0x0, desc);
++              if (ioread32(desc) == 0) {      /* sanity check */
++                      iowrite32(0xffffffff, desc);
++                      if (ioread32(desc) > 0) {
++                              lp->features |= XAE_FEATURE_DMA_64BIT;
++                              dev_info(&pdev->dev,
++                                       "autodetected 64-bit DMA range\n");
++                      }
++                      iowrite32(0x0, desc);
++              }
++      }
++
+       /* Check for Ethernet core IRQ (optional) */
+       if (lp->eth_irq <= 0)
+               dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-check-for-dma-mapping-errors.patch b/queue-5.4/net-axienet-check-for-dma-mapping-errors.patch
new file mode 100644 (file)
index 0000000..4eac925
--- /dev/null
@@ -0,0 +1,97 @@
+From b1947230bd8b5614fae9e918431125f0d4168321 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:40 +0000
+Subject: net: axienet: Check for DMA mapping errors
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit 71791dc8bdea55eeb2a0caefe98a0b7450c6e0af ]
+
+Especially with the default 32-bit DMA mask, DMA buffers are a limited
+resource, so their allocation can fail.
+So as the DMA API documentation requires, add error checking code after
+dma_map_single() calls to catch the case where we run out of "low" memory.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 31 ++++++++++++++++++-
+ 1 file changed, 30 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 4467719095432..88bb3b0663ae4 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -249,6 +249,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+                                                    skb->data,
+                                                    lp->max_frm_size,
+                                                    DMA_FROM_DEVICE);
++              if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
++                      netdev_err(ndev, "DMA mapping error\n");
++                      goto out;
++              }
++
+               lp->rx_bd_v[i].cntrl = lp->max_frm_size;
+       }
+@@ -680,6 +685,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+       dma_addr_t tail_p;
+       struct axienet_local *lp = netdev_priv(ndev);
+       struct axidma_bd *cur_p;
++      u32 orig_tail_ptr = lp->tx_bd_tail;
+       num_frag = skb_shinfo(skb)->nr_frags;
+       cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+@@ -715,9 +721,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+               cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
+       }
+-      cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+       cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+                                    skb_headlen(skb), DMA_TO_DEVICE);
++      if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++              if (net_ratelimit())
++                      netdev_err(ndev, "TX DMA mapping error\n");
++              ndev->stats.tx_dropped++;
++              return NETDEV_TX_OK;
++      }
++      cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+       for (ii = 0; ii < num_frag; ii++) {
+               if (++lp->tx_bd_tail >= lp->tx_bd_num)
+@@ -728,6 +740,16 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+                                            skb_frag_address(frag),
+                                            skb_frag_size(frag),
+                                            DMA_TO_DEVICE);
++              if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++                      if (net_ratelimit())
++                              netdev_err(ndev, "TX DMA mapping error\n");
++                      ndev->stats.tx_dropped++;
++                      axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
++                                            NULL);
++                      lp->tx_bd_tail = orig_tail_ptr;
++
++                      return NETDEV_TX_OK;
++              }
+               cur_p->cntrl = skb_frag_size(frag);
+       }
+@@ -808,6 +830,13 @@ static void axienet_recv(struct net_device *ndev)
+               cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+                                            lp->max_frm_size,
+                                            DMA_FROM_DEVICE);
++              if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++                      if (net_ratelimit())
++                              netdev_err(ndev, "RX DMA mapping error\n");
++                      dev_kfree_skb(new_skb);
++                      return;
++              }
++
+               cur_p->cntrl = lp->max_frm_size;
+               cur_p->status = 0;
+               cur_p->skb = new_skb;
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-drop-mdio-interrupt-registers-from-ethto.patch b/queue-5.4/net-axienet-drop-mdio-interrupt-registers-from-ethto.patch
new file mode 100644 (file)
index 0000000..e5f17a0
--- /dev/null
@@ -0,0 +1,57 @@
+From b703bbfa6cc175ab9f91db59b152f96cff45a089 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:42 +0000
+Subject: net: axienet: Drop MDIO interrupt registers from ethtools dump
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit c30cb8f0bec69d56e1fbc7fb65bd735c729a69e4 ]
+
+Newer revisions of the IP don't have these registers. Since we don't
+really use them, just drop them from the ethtools dump.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet.h      | 7 -------
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4 ----
+ 2 files changed, 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 04e51af32178c..fb7450ca5c532 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -165,13 +165,6 @@
+ #define XAE_MDIO_MCR_OFFSET   0x00000504 /* MII Management Control */
+ #define XAE_MDIO_MWD_OFFSET   0x00000508 /* MII Management Write Data */
+ #define XAE_MDIO_MRD_OFFSET   0x0000050C /* MII Management Read Data */
+-#define XAE_MDIO_MIS_OFFSET   0x00000600 /* MII Management Interrupt Status */
+-/* MII Mgmt Interrupt Pending register offset */
+-#define XAE_MDIO_MIP_OFFSET   0x00000620
+-/* MII Management Interrupt Enable register offset */
+-#define XAE_MDIO_MIE_OFFSET   0x00000640
+-/* MII Management Interrupt Clear register offset. */
+-#define XAE_MDIO_MIC_OFFSET   0x00000660
+ #define XAE_UAW0_OFFSET               0x00000700 /* Unicast address word 0 */
+ #define XAE_UAW1_OFFSET               0x00000704 /* Unicast address word 1 */
+ #define XAE_FMI_OFFSET                0x00000708 /* Filter Mask Index */
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 88bb3b0663ae4..76f719c28355c 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1259,10 +1259,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
+       data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
+       data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
+       data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
+-      data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
+-      data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
+-      data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
+-      data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
+       data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
+       data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
+       data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-factor-out-tx-descriptor-chain-cleanup.patch b/queue-5.4/net-axienet-factor-out-tx-descriptor-chain-cleanup.patch
new file mode 100644 (file)
index 0000000..6ecbe0d
--- /dev/null
@@ -0,0 +1,138 @@
+From 406c26f30ec40727210937da60dc90ae412bac44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:39 +0000
+Subject: net: axienet: Factor out TX descriptor chain cleanup
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit ab365c3393664f32116aa22fe322cb04a93fab31 ]
+
+Factor out the code that cleans up a number of connected TX descriptors,
+as we will need it to properly roll back a failed _xmit() call.
+There are subtle differences between cleaning up a successfully sent
+chain (unknown number of involved descriptors, total data size needed)
+and a chain that was about to set up (number of descriptors known), so
+cater for those variations with some extra parameters.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 79 +++++++++++++------
+ 1 file changed, 57 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 22222d79e4902..4467719095432 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -546,32 +546,46 @@ static int axienet_device_reset(struct net_device *ndev)
+ }
+ /**
+- * axienet_start_xmit_done - Invoked once a transmit is completed by the
+- * Axi DMA Tx channel.
++ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
+  * @ndev:     Pointer to the net_device structure
++ * @first_bd: Index of first descriptor to clean up
++ * @nr_bds:   Number of descriptors to clean up, can be -1 if unknown.
++ * @sizep:    Pointer to a u32 filled with the total sum of all bytes
++ *            in all cleaned-up descriptors. Ignored if NULL.
+  *
+- * This function is invoked from the Axi DMA Tx isr to notify the completion
+- * of transmit operation. It clears fields in the corresponding Tx BDs and
+- * unmaps the corresponding buffer so that CPU can regain ownership of the
+- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+- * required.
++ * Would either be called after a successful transmit operation, or after
++ * there was an error when setting up the chain.
++ * Returns the number of descriptors handled.
+  */
+-static void axienet_start_xmit_done(struct net_device *ndev)
++static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
++                               int nr_bds, u32 *sizep)
+ {
+-      u32 size = 0;
+-      u32 packets = 0;
+       struct axienet_local *lp = netdev_priv(ndev);
+       struct axidma_bd *cur_p;
+-      unsigned int status = 0;
++      int max_bds = nr_bds;
++      unsigned int status;
++      int i;
++
++      if (max_bds == -1)
++              max_bds = lp->tx_bd_num;
++
++      for (i = 0; i < max_bds; i++) {
++              cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
++              status = cur_p->status;
++
++              /* If no number is given, clean up *all* descriptors that have
++               * been completed by the MAC.
++               */
++              if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
++                      break;
+-      cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+-      status = cur_p->status;
+-      while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
+               dma_unmap_single(ndev->dev.parent, cur_p->phys,
+                               (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+                               DMA_TO_DEVICE);
+-              if (cur_p->skb)
++
++              if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+                       dev_consume_skb_irq(cur_p->skb);
++
+               cur_p->cntrl = 0;
+               cur_p->app0 = 0;
+               cur_p->app1 = 0;
+@@ -580,15 +594,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
+               cur_p->status = 0;
+               cur_p->skb = NULL;
+-              size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+-              packets++;
+-
+-              if (++lp->tx_bd_ci >= lp->tx_bd_num)
+-                      lp->tx_bd_ci = 0;
+-              cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+-              status = cur_p->status;
++              if (sizep)
++                      *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+       }
++      return i;
++}
++
++/**
++ * axienet_start_xmit_done - Invoked once a transmit is completed by the
++ * Axi DMA Tx channel.
++ * @ndev:     Pointer to the net_device structure
++ *
++ * This function is invoked from the Axi DMA Tx isr to notify the completion
++ * of transmit operation. It clears fields in the corresponding Tx BDs and
++ * unmaps the corresponding buffer so that CPU can regain ownership of the
++ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
++ * required.
++ */
++static void axienet_start_xmit_done(struct net_device *ndev)
++{
++      struct axienet_local *lp = netdev_priv(ndev);
++      u32 packets = 0;
++      u32 size = 0;
++
++      packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
++
++      lp->tx_bd_ci += packets;
++      if (lp->tx_bd_ci >= lp->tx_bd_num)
++              lp->tx_bd_ci -= lp->tx_bd_num;
++
+       ndev->stats.tx_packets += packets;
+       ndev->stats.tx_bytes += size;
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-fix-dma-descriptor-cleanup-path.patch b/queue-5.4/net-axienet-fix-dma-descriptor-cleanup-path.patch
new file mode 100644 (file)
index 0000000..0fdf38c
--- /dev/null
@@ -0,0 +1,100 @@
+From 5c40675bc8aef56d49bdafd5f66e09af49d337a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:37 +0000
+Subject: net: axienet: Fix DMA descriptor cleanup path
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit f26667a373f34ace925c90a1e881b1774d640dc8 ]
+
+When axienet_dma_bd_init() bails out during the initialisation process,
+it might do so with parts of the structure already allocated and
+initialised, while other parts have not been touched yet. Before
+returning in this case, we call axienet_dma_bd_release(), which does not
+take care of this corner case.
+This is most obvious by the first loop happily dereferencing
+lp->rx_bd_v, which we actually check to be non NULL *afterwards*.
+
+Make sure we only unmap or free already allocated structures, by:
+- directly returning with -ENOMEM if nothing has been allocated at all
+- checking for lp->rx_bd_v to be non-NULL *before* using it
+- only unmapping allocated DMA RX regions
+
+This avoids NULL pointer dereferences when initialisation fails.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 43 ++++++++++++-------
+ 1 file changed, 28 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index bbc1cf288d25f..27901bb7cd5b5 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -161,24 +161,37 @@ static void axienet_dma_bd_release(struct net_device *ndev)
+       int i;
+       struct axienet_local *lp = netdev_priv(ndev);
++      /* If we end up here, tx_bd_v must have been DMA allocated. */
++      dma_free_coherent(ndev->dev.parent,
++                        sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
++                        lp->tx_bd_v,
++                        lp->tx_bd_p);
++
++      if (!lp->rx_bd_v)
++              return;
++
+       for (i = 0; i < lp->rx_bd_num; i++) {
+-              dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+-                               lp->max_frm_size, DMA_FROM_DEVICE);
++              /* A NULL skb means this descriptor has not been initialised
++               * at all.
++               */
++              if (!lp->rx_bd_v[i].skb)
++                      break;
++
+               dev_kfree_skb(lp->rx_bd_v[i].skb);
+-      }
+-      if (lp->rx_bd_v) {
+-              dma_free_coherent(ndev->dev.parent,
+-                                sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+-                                lp->rx_bd_v,
+-                                lp->rx_bd_p);
+-      }
+-      if (lp->tx_bd_v) {
+-              dma_free_coherent(ndev->dev.parent,
+-                                sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+-                                lp->tx_bd_v,
+-                                lp->tx_bd_p);
++              /* For each descriptor, we programmed cntrl with the (non-zero)
++               * descriptor size, after it had been successfully allocated.
++               * So a non-zero value in there means we need to unmap it.
++               */
++              if (lp->rx_bd_v[i].cntrl)
++                      dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
++                                       lp->max_frm_size, DMA_FROM_DEVICE);
+       }
++
++      dma_free_coherent(ndev->dev.parent,
++                        sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
++                        lp->rx_bd_v,
++                        lp->rx_bd_p);
+ }
+ /**
+@@ -208,7 +221,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+                                        sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+                                        &lp->tx_bd_p, GFP_KERNEL);
+       if (!lp->tx_bd_v)
+-              goto out;
++              return -ENOMEM;
+       lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+                                        sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-fix-register-defines-comment-description.patch b/queue-5.4/net-axienet-fix-register-defines-comment-description.patch
new file mode 100644 (file)
index 0000000..b9dc6e6
--- /dev/null
@@ -0,0 +1,62 @@
+From 128b7246d4c306c409758ff00f32ab3582e3d08c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Aug 2024 11:56:09 +0530
+Subject: net: axienet: Fix register defines comment description
+
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+
+[ Upstream commit 9ff2f816e2aa65ca9a1cdf0954842f8173c0f48d ]
+
+In axiethernet header fix register defines comment description to be
+inline with IP documentation. It updates MAC configuration register,
+MDIO configuration register and frame filter control description.
+
+Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver")
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet.h | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index fbaf3c987d9c1..bf1a19a00adc6 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -159,16 +159,16 @@
+ #define XAE_RCW1_OFFSET               0x00000404 /* Rx Configuration Word 1 */
+ #define XAE_TC_OFFSET         0x00000408 /* Tx Configuration */
+ #define XAE_FCC_OFFSET                0x0000040C /* Flow Control Configuration */
+-#define XAE_EMMC_OFFSET               0x00000410 /* EMAC mode configuration */
+-#define XAE_PHYC_OFFSET               0x00000414 /* RGMII/SGMII configuration */
++#define XAE_EMMC_OFFSET               0x00000410 /* MAC speed configuration */
++#define XAE_PHYC_OFFSET               0x00000414 /* RX Max Frame Configuration */
+ #define XAE_ID_OFFSET         0x000004F8 /* Identification register */
+-#define XAE_MDIO_MC_OFFSET    0x00000500 /* MII Management Config */
+-#define XAE_MDIO_MCR_OFFSET   0x00000504 /* MII Management Control */
+-#define XAE_MDIO_MWD_OFFSET   0x00000508 /* MII Management Write Data */
+-#define XAE_MDIO_MRD_OFFSET   0x0000050C /* MII Management Read Data */
++#define XAE_MDIO_MC_OFFSET    0x00000500 /* MDIO Setup */
++#define XAE_MDIO_MCR_OFFSET   0x00000504 /* MDIO Control */
++#define XAE_MDIO_MWD_OFFSET   0x00000508 /* MDIO Write Data */
++#define XAE_MDIO_MRD_OFFSET   0x0000050C /* MDIO Read Data */
+ #define XAE_UAW0_OFFSET               0x00000700 /* Unicast address word 0 */
+ #define XAE_UAW1_OFFSET               0x00000704 /* Unicast address word 1 */
+-#define XAE_FMI_OFFSET                0x00000708 /* Filter Mask Index */
++#define XAE_FMI_OFFSET                0x00000708 /* Frame Filter Control */
+ #define XAE_AF0_OFFSET                0x00000710 /* Address Filter 0 */
+ #define XAE_AF1_OFFSET                0x00000714 /* Address Filter 1 */
+@@ -307,7 +307,7 @@
+  */
+ #define XAE_UAW1_UNICASTADDR_MASK     0x0000FFFF
+-/* Bit masks for Axi Ethernet FMI register */
++/* Bit masks for Axi Ethernet FMC register */
+ #define XAE_FMI_PM_MASK                       0x80000000 /* Promis. mode enable */
+ #define XAE_FMI_IND_MASK              0x00000003 /* Index Mask */
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-improve-dma-error-handling.patch b/queue-5.4/net-axienet-improve-dma-error-handling.patch
new file mode 100644 (file)
index 0000000..ff51c3d
--- /dev/null
@@ -0,0 +1,50 @@
+From 6d9c4bcc73d77d8bbaef9024fb3ada12a759e596 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:38 +0000
+Subject: net: axienet: Improve DMA error handling
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit e7fea0b9d09e2f7d32776f5198192dfc2572a5b9 ]
+
+Since 0 is a valid DMA address, we cannot use the physical address to
+check whether a TX descriptor is valid and is holding a DMA mapping.
+
+Use the "cntrl" member of the descriptor to make this decision, as it
+contains at least the length of the buffer, so 0 points to an
+uninitialised buffer.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 27901bb7cd5b5..22222d79e4902 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -572,7 +572,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
+                               DMA_TO_DEVICE);
+               if (cur_p->skb)
+                       dev_consume_skb_irq(cur_p->skb);
+-              /*cur_p->phys = 0;*/
++              cur_p->cntrl = 0;
+               cur_p->app0 = 0;
+               cur_p->app1 = 0;
+               cur_p->app2 = 0;
+@@ -1562,7 +1562,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
+       for (i = 0; i < lp->tx_bd_num; i++) {
+               cur_p = &lp->tx_bd_v[i];
+-              if (cur_p->phys)
++              if (cur_p->cntrl)
+                       dma_unmap_single(ndev->dev.parent, cur_p->phys,
+                                        (cur_p->cntrl &
+                                         XAXIDMA_BD_CTRL_LENGTH_MASK),
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-upgrade-descriptors-to-hold-64-bit-addre.patch b/queue-5.4/net-axienet-upgrade-descriptors-to-hold-64-bit-addre.patch
new file mode 100644 (file)
index 0000000..1fa60e6
--- /dev/null
@@ -0,0 +1,323 @@
+From 5d4fc9d1440e31db4bd595aeee44eb901f4d4003 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:45 +0000
+Subject: net: axienet: Upgrade descriptors to hold 64-bit addresses
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit 4e958f33ee8f404787711416fe0f78cce2b2f4e2 ]
+
+Newer revisions of the AXI DMA IP (>= v7.1) support 64-bit addresses,
+both for the descriptors itself, as well as for the buffers they are
+pointing to.
+This is realised by adding "MSB" words for the next and phys pointer
+right behind the existing address word, now named "LSB". These MSB words
+live in formerly reserved areas of the descriptor.
+
+If the hardware supports it, write both words when setting an address.
+The buffer address is handled by two wrapper functions, the two
+occasions where we set the next pointers are open coded.
+
+For now this is guarded by a flag which we don't set yet.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet.h  |   9 +-
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 113 ++++++++++++------
+ 2 files changed, 83 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index fb7450ca5c532..84c4c3655516a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -328,6 +328,7 @@
+ #define XAE_FEATURE_PARTIAL_TX_CSUM   (1 << 1)
+ #define XAE_FEATURE_FULL_RX_CSUM      (1 << 2)
+ #define XAE_FEATURE_FULL_TX_CSUM      (1 << 3)
++#define XAE_FEATURE_DMA_64BIT         (1 << 4)
+ #define XAE_NO_CSUM_OFFLOAD           0
+@@ -340,9 +341,9 @@
+ /**
+  * struct axidma_bd - Axi Dma buffer descriptor layout
+  * @next:         MM2S/S2MM Next Descriptor Pointer
+- * @reserved1:    Reserved and not used
++ * @next_msb:     MM2S/S2MM Next Descriptor Pointer (high 32 bits)
+  * @phys:         MM2S/S2MM Buffer Address
+- * @reserved2:    Reserved and not used
++ * @phys_msb:     MM2S/S2MM Buffer Address (high 32 bits)
+  * @reserved3:    Reserved and not used
+  * @reserved4:    Reserved and not used
+  * @cntrl:        MM2S/S2MM Control value
+@@ -355,9 +356,9 @@
+  */
+ struct axidma_bd {
+       u32 next;       /* Physical address of next buffer descriptor */
+-      u32 reserved1;
++      u32 next_msb;   /* high 32 bits for IP >= v7.1, reserved on older IP */
+       u32 phys;
+-      u32 reserved2;
++      u32 phys_msb;   /* for IP >= v7.1, reserved for older IP */
+       u32 reserved3;
+       u32 reserved4;
+       u32 cntrl;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index bd03a6d66e122..5440f39c5760d 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -154,6 +154,25 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+       axienet_dma_out32(lp, reg, lower_32_bits(addr));
+ }
++static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
++                             struct axidma_bd *desc)
++{
++      desc->phys = lower_32_bits(addr);
++      if (lp->features & XAE_FEATURE_DMA_64BIT)
++              desc->phys_msb = upper_32_bits(addr);
++}
++
++static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
++                                   struct axidma_bd *desc)
++{
++      dma_addr_t ret = desc->phys;
++
++      if (lp->features & XAE_FEATURE_DMA_64BIT)
++              ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
++
++      return ret;
++}
++
+ /**
+  * axienet_dma_bd_release - Release buffer descriptor rings
+  * @ndev:     Pointer to the net_device structure
+@@ -177,6 +196,8 @@ static void axienet_dma_bd_release(struct net_device *ndev)
+               return;
+       for (i = 0; i < lp->rx_bd_num; i++) {
++              dma_addr_t phys;
++
+               /* A NULL skb means this descriptor has not been initialised
+                * at all.
+                */
+@@ -189,9 +210,11 @@ static void axienet_dma_bd_release(struct net_device *ndev)
+                * descriptor size, after it had been successfully allocated.
+                * So a non-zero value in there means we need to unmap it.
+                */
+-              if (lp->rx_bd_v[i].cntrl)
+-                      dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
++              if (lp->rx_bd_v[i].cntrl) {
++                      phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
++                      dma_unmap_single(ndev->dev.parent, phys,
+                                        lp->max_frm_size, DMA_FROM_DEVICE);
++              }
+       }
+       dma_free_coherent(ndev->dev.parent,
+@@ -236,29 +259,36 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+               goto out;
+       for (i = 0; i < lp->tx_bd_num; i++) {
+-              lp->tx_bd_v[i].next = lp->tx_bd_p +
+-                                    sizeof(*lp->tx_bd_v) *
+-                                    ((i + 1) % lp->tx_bd_num);
++              dma_addr_t addr = lp->tx_bd_p +
++                                sizeof(*lp->tx_bd_v) *
++                                ((i + 1) % lp->tx_bd_num);
++
++              lp->tx_bd_v[i].next = lower_32_bits(addr);
++              if (lp->features & XAE_FEATURE_DMA_64BIT)
++                      lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
+       }
+       for (i = 0; i < lp->rx_bd_num; i++) {
+-              lp->rx_bd_v[i].next = lp->rx_bd_p +
+-                                    sizeof(*lp->rx_bd_v) *
+-                                    ((i + 1) % lp->rx_bd_num);
++              dma_addr_t addr;
++
++              addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
++                      ((i + 1) % lp->rx_bd_num);
++              lp->rx_bd_v[i].next = lower_32_bits(addr);
++              if (lp->features & XAE_FEATURE_DMA_64BIT)
++                      lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
+               skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+               if (!skb)
+                       goto out;
+               lp->rx_bd_v[i].skb = skb;
+-              lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+-                                                   skb->data,
+-                                                   lp->max_frm_size,
+-                                                   DMA_FROM_DEVICE);
+-              if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
++              addr = dma_map_single(ndev->dev.parent, skb->data,
++                                    lp->max_frm_size, DMA_FROM_DEVICE);
++              if (dma_mapping_error(ndev->dev.parent, addr)) {
+                       netdev_err(ndev, "DMA mapping error\n");
+                       goto out;
+               }
++              desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+               lp->rx_bd_v[i].cntrl = lp->max_frm_size;
+       }
+@@ -575,6 +605,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+       struct axidma_bd *cur_p;
+       int max_bds = nr_bds;
+       unsigned int status;
++      dma_addr_t phys;
+       int i;
+       if (max_bds == -1)
+@@ -590,9 +621,10 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+               if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+                       break;
+-              dma_unmap_single(ndev->dev.parent, cur_p->phys,
+-                              (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+-                              DMA_TO_DEVICE);
++              phys = desc_get_phys_addr(lp, cur_p);
++              dma_unmap_single(ndev->dev.parent, phys,
++                               (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
++                               DMA_TO_DEVICE);
+               if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+                       dev_consume_skb_irq(cur_p->skb);
+@@ -688,7 +720,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+       u32 csum_start_off;
+       u32 csum_index_off;
+       skb_frag_t *frag;
+-      dma_addr_t tail_p;
++      dma_addr_t tail_p, phys;
+       struct axienet_local *lp = netdev_priv(ndev);
+       struct axidma_bd *cur_p;
+       u32 orig_tail_ptr = lp->tx_bd_tail;
+@@ -727,14 +759,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+               cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
+       }
+-      cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+-                                   skb_headlen(skb), DMA_TO_DEVICE);
+-      if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++      phys = dma_map_single(ndev->dev.parent, skb->data,
++                            skb_headlen(skb), DMA_TO_DEVICE);
++      if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+               if (net_ratelimit())
+                       netdev_err(ndev, "TX DMA mapping error\n");
+               ndev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
++      desc_set_phys_addr(lp, phys, cur_p);
+       cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+       for (ii = 0; ii < num_frag; ii++) {
+@@ -742,11 +775,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+                       lp->tx_bd_tail = 0;
+               cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+               frag = &skb_shinfo(skb)->frags[ii];
+-              cur_p->phys = dma_map_single(ndev->dev.parent,
+-                                           skb_frag_address(frag),
+-                                           skb_frag_size(frag),
+-                                           DMA_TO_DEVICE);
+-              if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++              phys = dma_map_single(ndev->dev.parent,
++                                    skb_frag_address(frag),
++                                    skb_frag_size(frag),
++                                    DMA_TO_DEVICE);
++              if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "TX DMA mapping error\n");
+                       ndev->stats.tx_dropped++;
+@@ -756,6 +789,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+                       return NETDEV_TX_OK;
+               }
++              desc_set_phys_addr(lp, phys, cur_p);
+               cur_p->cntrl = skb_frag_size(frag);
+       }
+@@ -794,10 +828,12 @@ static void axienet_recv(struct net_device *ndev)
+       cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+       while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
++              dma_addr_t phys;
++
+               tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+-              dma_unmap_single(ndev->dev.parent, cur_p->phys,
+-                               lp->max_frm_size,
++              phys = desc_get_phys_addr(lp, cur_p);
++              dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+                                DMA_FROM_DEVICE);
+               skb = cur_p->skb;
+@@ -833,15 +869,16 @@ static void axienet_recv(struct net_device *ndev)
+               if (!new_skb)
+                       return;
+-              cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+-                                           lp->max_frm_size,
+-                                           DMA_FROM_DEVICE);
+-              if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
++              phys = dma_map_single(ndev->dev.parent, new_skb->data,
++                                    lp->max_frm_size,
++                                    DMA_FROM_DEVICE);
++              if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "RX DMA mapping error\n");
+                       dev_kfree_skb(new_skb);
+                       return;
+               }
++              desc_set_phys_addr(lp, phys, cur_p);
+               cur_p->cntrl = lp->max_frm_size;
+               cur_p->status = 0;
+@@ -886,7 +923,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+               return IRQ_NONE;
+       if (status & XAXIDMA_IRQ_ERROR_MASK) {
+               dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+-              dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
++              dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
++                      (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+                       (lp->tx_bd_v[lp->tx_bd_ci]).phys);
+               cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+@@ -935,7 +973,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+               return IRQ_NONE;
+       if (status & XAXIDMA_IRQ_ERROR_MASK) {
+               dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+-              dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
++              dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
++                      (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+                       (lp->rx_bd_v[lp->rx_bd_ci]).phys);
+               cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+@@ -1628,14 +1667,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
+       for (i = 0; i < lp->tx_bd_num; i++) {
+               cur_p = &lp->tx_bd_v[i];
+-              if (cur_p->cntrl)
+-                      dma_unmap_single(ndev->dev.parent, cur_p->phys,
++              if (cur_p->cntrl) {
++                      dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
++
++                      dma_unmap_single(ndev->dev.parent, addr,
+                                        (cur_p->cntrl &
+                                         XAXIDMA_BD_CTRL_LENGTH_MASK),
+                                        DMA_TO_DEVICE);
++              }
+               if (cur_p->skb)
+                       dev_kfree_skb_irq(cur_p->skb);
+               cur_p->phys = 0;
++              cur_p->phys_msb = 0;
+               cur_p->cntrl = 0;
+               cur_p->status = 0;
+               cur_p->app0 = 0;
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-axienet-wrap-dma-pointer-writes-to-prepare-for-6.patch b/queue-5.4/net-axienet-wrap-dma-pointer-writes-to-prepare-for-6.patch
new file mode 100644 (file)
index 0000000..aeb59e0
--- /dev/null
@@ -0,0 +1,111 @@
+From 4eea50d248ed61fff5d377c9e2f94502570cfc64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 13:23:44 +0000
+Subject: net: axienet: Wrap DMA pointer writes to prepare for 64 bit
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit 6a00d0dd3fcfa2ef200973479fbeee62f3681130 ]
+
+Newer versions of the Xilinx DMA IP support busses with more than 32
+address bits, by introducing an MSB word for the registers holding DMA
+pointers (tail/current, RX/TX descriptor addresses).
+On IP configured for more than 32 bits, it is also *required* to write
+both words, to let the IP recognise this as a start condition for an
+MM2S request, for instance.
+
+Wrap the DMA pointer writes with a separate function, to add this
+functionality later. For now we stick to the lower 32 bits.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 9ff2f816e2aa ("net: axienet: Fix register defines comment description")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/xilinx/xilinx_axienet_main.c | 26 ++++++++++++-------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 76f719c28355c..bd03a6d66e122 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -148,6 +148,12 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
+       iowrite32(value, lp->dma_regs + reg);
+ }
++static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
++                               dma_addr_t addr)
++{
++      axienet_dma_out32(lp, reg, lower_32_bits(addr));
++}
++
+ /**
+  * axienet_dma_bd_release - Release buffer descriptor rings
+  * @ndev:     Pointer to the net_device structure
+@@ -286,18 +292,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+       /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+        * halted state. This will make the Rx side ready for reception.
+        */
+-      axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
++      axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+       cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+       axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+                         cr | XAXIDMA_CR_RUNSTOP_MASK);
+-      axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+-                        (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
++      axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
++                           (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+       /* Write to the RS (Run-stop) bit in the Tx channel control register.
+        * Tx channel is now ready to run. But only after we write to the
+        * tail pointer register that the Tx channel will start transmitting.
+        */
+-      axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
++      axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+       cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+       axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+                         cr | XAXIDMA_CR_RUNSTOP_MASK);
+@@ -758,7 +764,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+       tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+       /* Start the transfer */
+-      axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
++      axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+       if (++lp->tx_bd_tail >= lp->tx_bd_num)
+               lp->tx_bd_tail = 0;
+@@ -850,7 +856,7 @@ static void axienet_recv(struct net_device *ndev)
+       ndev->stats.rx_bytes += size;
+       if (tail_p)
+-              axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
++              axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ }
+ /**
+@@ -1683,18 +1689,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
+       /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+        * halted state. This will make the Rx side ready for reception.
+        */
+-      axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
++      axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+       cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+       axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+                         cr | XAXIDMA_CR_RUNSTOP_MASK);
+-      axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+-                        (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
++      axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
++                           (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+       /* Write to the RS (Run-stop) bit in the Tx channel control register.
+        * Tx channel is now ready to run. But only after we write to the
+        * tail pointer register that the Tx channel will start transmitting
+        */
+-      axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
++      axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+       cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+       axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+                         cr | XAXIDMA_CR_RUNSTOP_MASK);
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-dsa-vsc73xx-pass-value-in-phy_write-operation.patch b/queue-5.4/net-dsa-vsc73xx-pass-value-in-phy_write-operation.patch
new file mode 100644 (file)
index 0000000..aa7c4a7
--- /dev/null
@@ -0,0 +1,40 @@
+From c77422334dcfb2435d06310f742a8ecd2a55ddb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Aug 2024 21:38:03 +0200
+Subject: net: dsa: vsc73xx: pass value in phy_write operation
+
+From: Pawel Dembicki <paweldembicki@gmail.com>
+
+[ Upstream commit 5b9eebc2c7a5f0cc7950d918c1e8a4ad4bed5010 ]
+
+In the 'vsc73xx_phy_write' function, the register value is missing,
+and the phy write operation always sends zeros.
+
+This commit passes the value variable into the proper register.
+
+Fixes: 05bd97fc559d ("net: dsa: Add Vitesse VSC73xx DSA router driver")
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/vitesse-vsc73xx-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index c7ff98c26ee39..a1dd82d25ce3c 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -531,7 +531,7 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+               return 0;
+       }
+-      cmd = (phy << 21) | (regnum << 16);
++      cmd = (phy << 21) | (regnum << 16) | val;
+       ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+       if (ret)
+               return ret;
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-hns3-fix-a-deadlock-problem-when-config-tc-durin.patch b/queue-5.4/net-hns3-fix-a-deadlock-problem-when-config-tc-durin.patch
new file mode 100644 (file)
index 0000000..ade75c9
--- /dev/null
@@ -0,0 +1,76 @@
+From b50fa2024d06996a6322738f659000ff8a25ffc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 22:10:22 +0800
+Subject: net: hns3: fix a deadlock problem when config TC during resetting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jie Wang <wangjie125@huawei.com>
+
+[ Upstream commit be5e816d00a506719e9dbb1a9c861c5ced30a109 ]
+
+When config TC during the reset process, may cause a deadlock, the flow is
+as below:
+                             pf reset start
+                                 │
+                                 ▼
+                              ......
+setup tc                         │
+    │                            ▼
+    ▼                      DOWN: napi_disable()
+napi_disable()(skip)             │
+    │                            │
+    ▼                            ▼
+  ......                      ......
+    │                            │
+    ▼                            │
+napi_enable()                    │
+                                 ▼
+                           UINIT: netif_napi_del()
+                                 │
+                                 ▼
+                              ......
+                                 │
+                                 ▼
+                           INIT: netif_napi_add()
+                                 │
+                                 ▼
+                              ......                 global reset start
+                                 │                      │
+                                 ▼                      ▼
+                           UP: napi_enable()(skip)    ......
+                                 │                      │
+                                 ▼                      ▼
+                              ......                 napi_disable()
+
+In reset process, the driver will DOWN the port and then UINIT, in this
+case, the setup tc process will UP the port before UINIT, so cause the
+problem. Adds a DOWN process in UINIT to fix it.
+
+Fixes: bb6b94a896d4 ("net: hns3: Add reset interface implementation in client")
+Signed-off-by: Jie Wang <wangjie125@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index d09cc10b3517f..8736e254f098b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4388,6 +4388,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       int ret;
++      if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++              hns3_nic_net_stop(netdev);
++
+       if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+               netdev_warn(netdev, "already uninitialized\n");
+               return 0;
+-- 
+2.43.0
+
diff --git a/queue-5.4/net-mlx5e-correctly-report-errors-for-ethtool-rx-flo.patch b/queue-5.4/net-mlx5e-correctly-report-errors-for-ethtool-rx-flo.patch
new file mode 100644 (file)
index 0000000..c57612f
--- /dev/null
@@ -0,0 +1,46 @@
+From a94ac5e33730afc8a292b7a1de5f9c7cb884be22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 17:41:05 +0300
+Subject: net/mlx5e: Correctly report errors for ethtool rx flows
+
+From: Cosmin Ratiu <cratiu@nvidia.com>
+
+[ Upstream commit cbc796be1779c4dbc9a482c7233995e2a8b6bfb3 ]
+
+Previously, an ethtool rx flow with no attrs would not be added to the
+NIC as it has no rules to configure the hw with, but it would be
+reported as successful to the caller (return code 0). This is confusing
+for the user as ethtool then reports "Added rule $num", but no rule was
+actually added.
+
+This change corrects that by instead reporting these wrong rules as
+-EINVAL.
+
+Fixes: b29c61dac3a2 ("net/mlx5e: Ethtool steering flow validation refactoring")
+Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/20240808144107.2095424-5-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index acd946f2ddbe7..be49a2a53f29d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -676,7 +676,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+       if (num_tuples <= 0) {
+               netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+                           __func__, num_tuples);
+-              return num_tuples;
++              return num_tuples < 0 ? num_tuples : -EINVAL;
+       }
+       eth_ft = get_flow_table(priv, fs, num_tuples);
+-- 
+2.43.0
+
diff --git a/queue-5.4/netfilter-allow-ipv6-fragments-to-arrive-on-differen.patch b/queue-5.4/netfilter-allow-ipv6-fragments-to-arrive-on-differen.patch
new file mode 100644 (file)
index 0000000..3b75ab9
--- /dev/null
@@ -0,0 +1,46 @@
+From 33b64cb513359df11f7e2931f56c55f0efeebf2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Aug 2024 12:40:52 +0100
+Subject: netfilter: allow ipv6 fragments to arrive on different devices
+
+From: Tom Hughes <tom@compton.nu>
+
+[ Upstream commit 3cd740b985963f874a1a094f1969e998b9d05554 ]
+
+Commit 264640fc2c5f4 ("ipv6: distinguish frag queues by device
+for multicast and link-local packets") modified the ipv6 fragment
+reassembly logic to distinguish frag queues by device for multicast
+and link-local packets but in fact only the main reassembly code
+limits the use of the device to those address types and the netfilter
+reassembly code uses the device for all packets.
+
+This means that if fragments of a packet arrive on different interfaces
+then netfilter will fail to reassemble them and the fragments will be
+expired without going any further through the filters.
+
+Fixes: 648700f76b03 ("inet: frags: use rhashtables for reassembly units")
+Signed-off-by: Tom Hughes <tom@compton.nu>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 24ec295454940..db4592ada9491 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -155,6 +155,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+       };
+       struct inet_frag_queue *q;
++      if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
++                                          IPV6_ADDR_LINKLOCAL)))
++              key.iif = 0;
++
+       q = inet_frag_find(nf_frag->fqdir, &key);
+       if (!q)
+               return NULL;
+-- 
+2.43.0
+
diff --git a/queue-5.4/netfilter-nf_defrag_ipv6-use-net_generic-infra.patch b/queue-5.4/netfilter-nf_defrag_ipv6-use-net_generic-infra.patch
new file mode 100644 (file)
index 0000000..36635c3
--- /dev/null
@@ -0,0 +1,264 @@
+From 90a4f908c0a2bcb903bfa33cae72c235c4559c24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Apr 2021 16:11:07 +0200
+Subject: netfilter: nf_defrag_ipv6: use net_generic infra
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1 ]
+
+This allows followup patch to remove these members from struct net.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 3cd740b98596 ("netfilter: allow ipv6 fragments to arrive on different devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/ipv6/nf_defrag_ipv6.h |  6 ++
+ net/ipv6/netfilter/nf_conntrack_reasm.c     | 68 +++++++++++----------
+ net/ipv6/netfilter/nf_defrag_ipv6_hooks.c   | 15 +++--
+ 3 files changed, 52 insertions(+), 37 deletions(-)
+
+diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+index 6d31cd0411434..ece923e2035b5 100644
+--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+@@ -13,4 +13,10 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
+ struct inet_frags_ctl;
++struct nft_ct_frag6_pernet {
++      struct ctl_table_header *nf_frag_frags_hdr;
++      struct fqdir    *fqdir;
++      unsigned int users;
++};
++
+ #endif /* _NF_DEFRAG_IPV6_H */
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index fed9666a2f7da..24ec295454940 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -15,28 +15,13 @@
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+-#include <linux/socket.h>
+-#include <linux/sockios.h>
+-#include <linux/jiffies.h>
+ #include <linux/net.h>
+-#include <linux/list.h>
+ #include <linux/netdevice.h>
+-#include <linux/in6.h>
+ #include <linux/ipv6.h>
+-#include <linux/icmpv6.h>
+-#include <linux/random.h>
+ #include <linux/slab.h>
+-#include <net/sock.h>
+-#include <net/snmp.h>
+ #include <net/ipv6_frag.h>
+-#include <net/protocol.h>
+-#include <net/transp_v6.h>
+-#include <net/rawv6.h>
+-#include <net/ndisc.h>
+-#include <net/addrconf.h>
+-#include <net/inet_ecn.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+ #include <linux/sysctl.h>
+ #include <linux/netfilter.h>
+@@ -44,11 +29,18 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
++#include <net/netns/generic.h>
+ static const char nf_frags_cache_name[] = "nf-frags";
++unsigned int nf_frag_pernet_id __read_mostly;
+ static struct inet_frags nf_frags;
++static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
++{
++      return net_generic(net, nf_frag_pernet_id);
++}
++
+ #ifdef CONFIG_SYSCTL
+ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
+@@ -75,6 +67,7 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
+ static int nf_ct_frag6_sysctl_register(struct net *net)
+ {
++      struct nft_ct_frag6_pernet *nf_frag;
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+@@ -86,18 +79,20 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
+                       goto err_alloc;
+       }
+-      table[0].data   = &net->nf_frag.fqdir->timeout;
+-      table[1].data   = &net->nf_frag.fqdir->low_thresh;
+-      table[1].extra2 = &net->nf_frag.fqdir->high_thresh;
+-      table[2].data   = &net->nf_frag.fqdir->high_thresh;
+-      table[2].extra1 = &net->nf_frag.fqdir->low_thresh;
+-      table[2].extra2 = &init_net.nf_frag.fqdir->high_thresh;
++      nf_frag = nf_frag_pernet(net);
++
++      table[0].data   = &nf_frag->fqdir->timeout;
++      table[1].data   = &nf_frag->fqdir->low_thresh;
++      table[1].extra2 = &nf_frag->fqdir->high_thresh;
++      table[2].data   = &nf_frag->fqdir->high_thresh;
++      table[2].extra1 = &nf_frag->fqdir->low_thresh;
++      table[2].extra2 = &nf_frag->fqdir->high_thresh;
+       hdr = register_net_sysctl(net, "net/netfilter", table);
+       if (hdr == NULL)
+               goto err_reg;
+-      net->nf_frag_frags_hdr = hdr;
++      nf_frag->nf_frag_frags_hdr = hdr;
+       return 0;
+ err_reg:
+@@ -109,10 +104,11 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
+ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
+ {
++      struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
+       struct ctl_table *table;
+-      table = net->nf_frag_frags_hdr->ctl_table_arg;
+-      unregister_net_sysctl_table(net->nf_frag_frags_hdr);
++      table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
++      unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+ }
+@@ -149,6 +145,7 @@ static void nf_ct_frag6_expire(struct timer_list *t)
+ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+                                 const struct ipv6hdr *hdr, int iif)
+ {
++      struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
+       struct frag_v6_compare_key key = {
+               .id = id,
+               .saddr = hdr->saddr,
+@@ -158,7 +155,7 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+       };
+       struct inet_frag_queue *q;
+-      q = inet_frag_find(net->nf_frag.fqdir, &key);
++      q = inet_frag_find(nf_frag->fqdir, &key);
+       if (!q)
+               return NULL;
+@@ -485,37 +482,44 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
+ static int nf_ct_net_init(struct net *net)
+ {
++      struct nft_ct_frag6_pernet *nf_frag  = nf_frag_pernet(net);
+       int res;
+-      res = fqdir_init(&net->nf_frag.fqdir, &nf_frags, net);
++      res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
+       if (res < 0)
+               return res;
+-      net->nf_frag.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
+-      net->nf_frag.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
+-      net->nf_frag.fqdir->timeout = IPV6_FRAG_TIMEOUT;
++      nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
++      nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
++      nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;
+       res = nf_ct_frag6_sysctl_register(net);
+       if (res < 0)
+-              fqdir_exit(net->nf_frag.fqdir);
++              fqdir_exit(nf_frag->fqdir);
+       return res;
+ }
+ static void nf_ct_net_pre_exit(struct net *net)
+ {
+-      fqdir_pre_exit(net->nf_frag.fqdir);
++      struct nft_ct_frag6_pernet *nf_frag  = nf_frag_pernet(net);
++
++      fqdir_pre_exit(nf_frag->fqdir);
+ }
+ static void nf_ct_net_exit(struct net *net)
+ {
++      struct nft_ct_frag6_pernet *nf_frag  = nf_frag_pernet(net);
++
+       nf_ct_frags6_sysctl_unregister(net);
+-      fqdir_exit(net->nf_frag.fqdir);
++      fqdir_exit(nf_frag->fqdir);
+ }
+ static struct pernet_operations nf_ct_net_ops = {
+       .init           = nf_ct_net_init,
+       .pre_exit       = nf_ct_net_pre_exit,
+       .exit           = nf_ct_net_exit,
++      .id             = &nf_frag_pernet_id,
++      .size           = sizeof(struct nft_ct_frag6_pernet),
+ };
+ static const struct rhashtable_params nfct_rhash_params = {
+diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+index 6646a87fb5dc1..402dc4ca9504f 100644
+--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
++++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+@@ -25,6 +25,8 @@
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
++extern unsigned int nf_frag_pernet_id;
++
+ static DEFINE_MUTEX(defrag6_mutex);
+ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+@@ -89,10 +91,12 @@ static const struct nf_hook_ops ipv6_defrag_ops[] = {
+ static void __net_exit defrag6_net_exit(struct net *net)
+ {
+-      if (net->nf.defrag_ipv6) {
++      struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
++
++      if (nf_frag->users) {
+               nf_unregister_net_hooks(net, ipv6_defrag_ops,
+                                       ARRAY_SIZE(ipv6_defrag_ops));
+-              net->nf.defrag_ipv6 = false;
++              nf_frag->users = 0;
+       }
+ }
+@@ -130,21 +134,22 @@ static void __exit nf_defrag_fini(void)
+ int nf_defrag_ipv6_enable(struct net *net)
+ {
++      struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
+       int err = 0;
+       might_sleep();
+-      if (net->nf.defrag_ipv6)
++      if (nf_frag->users)
+               return 0;
+       mutex_lock(&defrag6_mutex);
+-      if (net->nf.defrag_ipv6)
++      if (nf_frag->users)
+               goto out_unlock;
+       err = nf_register_net_hooks(net, ipv6_defrag_ops,
+                                   ARRAY_SIZE(ipv6_defrag_ops));
+       if (err == 0)
+-              net->nf.defrag_ipv6 = true;
++              nf_frag->users = 1;
+  out_unlock:
+       mutex_unlock(&defrag6_mutex);
+-- 
+2.43.0
+
diff --git a/queue-5.4/s390-uv-panic-for-set-and-remove-shared-access-uvc-e.patch b/queue-5.4/s390-uv-panic-for-set-and-remove-shared-access-uvc-e.patch
new file mode 100644 (file)
index 0000000..94c6211
--- /dev/null
@@ -0,0 +1,60 @@
+From a40ea99078bf9ca8c54f98ad04857ef85de7653f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Aug 2024 13:25:48 +0200
+Subject: s390/uv: Panic for set and remove shared access UVC errors
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit cff59d8631e1409ffdd22d9d717e15810181b32c ]
+
+The return value uv_set_shared() and uv_remove_shared() (which are
+wrappers around the share() function) is not always checked. The system
+integrity of a protected guest depends on the Share and Unshare UVCs
+being successful. This means that any caller that fails to check the
+return value will compromise the security of the protected guest.
+
+No code path that would lead to such violation of the security
+guarantees is currently exercised, since all the areas that are shared
+never get unshared during the lifetime of the system. This might
+change and become an issue in the future.
+
+The Share and Unshare UVCs can only fail in case of hypervisor
+misbehaviour (either a bug or malicious behaviour). In such cases there
+is no reasonable way forward, and the system needs to panic.
+
+This patch replaces the return at the end of the share() function with
+a panic, to guarantee system integrity.
+
+Fixes: 5abb9351dfd9 ("s390/uv: introduce guest side ultravisor code")
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240801112548.85303-1-imbrenda@linux.ibm.com
+Message-ID: <20240801112548.85303-1-imbrenda@linux.ibm.com>
+[frankja@linux.ibm.com: Fixed up patch subject]
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/uv.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
+index ef3c00b049ab4..67f63b76dc18b 100644
+--- a/arch/s390/include/asm/uv.h
++++ b/arch/s390/include/asm/uv.h
+@@ -97,7 +97,10 @@ static inline int share(unsigned long addr, u16 cmd)
+       if (!uv_call(0, (u64)&uvcb))
+               return 0;
+-      return -EINVAL;
++      pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
++             uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
++             uvcb.header.rc, uvcb.header.rrc);
++      panic("System security cannot be guaranteed unless the system panics now.\n");
+ }
+ /*
+-- 
+2.43.0
+
index 8d6f363935734e72a12d951222e3e2c38d8e09e9..b5c7cd9d18119bda9df4498868c2065503d78fa8 100644 (file)
@@ -13,3 +13,20 @@ drm-amdgpu-actually-check-flags-for-all-context-ops.patch
 memcg_write_event_control-fix-a-user-triggerable-oops.patch
 s390-cio-rename-bitmap_size-idset_bitmap_size.patch
 btrfs-rename-bitmap_set_bits-btrfs_bitmap_set_bits.patch
+s390-uv-panic-for-set-and-remove-shared-access-uvc-e.patch
+net-mlx5e-correctly-report-errors-for-ethtool-rx-flo.patch
+atm-idt77252-prevent-use-after-free-in-dequeue_rx.patch
+net-axienet-fix-dma-descriptor-cleanup-path.patch
+net-axienet-improve-dma-error-handling.patch
+net-axienet-factor-out-tx-descriptor-chain-cleanup.patch
+net-axienet-check-for-dma-mapping-errors.patch
+net-axienet-drop-mdio-interrupt-registers-from-ethto.patch
+net-axienet-wrap-dma-pointer-writes-to-prepare-for-6.patch
+net-axienet-upgrade-descriptors-to-hold-64-bit-addre.patch
+net-axienet-autodetect-64-bit-dma-capability.patch
+net-axienet-fix-register-defines-comment-description.patch
+net-dsa-vsc73xx-pass-value-in-phy_write-operation.patch
+netfilter-nf_defrag_ipv6-use-net_generic-infra.patch
+netfilter-allow-ipv6-fragments-to-arrive-on-differen.patch
+net-hns3-fix-a-deadlock-problem-when-config-tc-durin.patch
+alsa-hda-realtek-fix-noise-from-speakers-on-lenovo-i.patch