--- /dev/null
+From 5160fe00d5395ddbf14ebef49c2e693669013b08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Jan 2021 09:48:59 +0500
+Subject: lan743x: fix endianness when accessing descriptors
+
+From: Alexey Denisov <rtgbnm@gmail.com>
+
+[ Upstream commit 462512824f902a24de794290dd622e664587da1d ]
+
+TX/RX descriptor ring fields are always little-endian, but conversion
+wasn't performed for big-endian CPUs, so the driver failed to work.
+
+This patch makes the driver work on big-endian CPUs. It was tested and
+confirmed to work on an NXP P1010 processor (PowerPC).
+
+Signed-off-by: Alexey Denisov <rtgbnm@gmail.com>
+Link: https://lore.kernel.org/r/20210128044859.280219-1-rtgbnm@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2d52e2e38b85 ("net: lan743x: Fix memleak issue when GSO enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 66 +++++++++----------
+ drivers/net/ethernet/microchip/lan743x_main.h | 20 +++---
+ 2 files changed, 43 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 5d539e4e942cc..ebcbb719e9002 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1221,7 +1221,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
+ goto done;
+
+- descriptor_type = (descriptor->data0) &
++ descriptor_type = le32_to_cpu(descriptor->data0) &
+ TX_DESC_DATA0_DTYPE_MASK_;
+ if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
+ goto clean_up_data_descriptor;
+@@ -1281,7 +1281,7 @@ static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
+
+ static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
+ {
+- while ((*tx->head_cpu_ptr) != (tx->last_head)) {
++ while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
+ lan743x_tx_release_desc(tx, tx->last_head, false);
+ tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
+ }
+@@ -1367,10 +1367,10 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
+ if (dma_mapping_error(dev, dma_ptr))
+ return -ENOMEM;
+
+- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
+- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
+- tx_descriptor->data3 = (frame_length << 16) &
+- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
++ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
++ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
++ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
++ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
+
+ buffer_info->skb = NULL;
+ buffer_info->dma_ptr = dma_ptr;
+@@ -1411,7 +1411,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+- tx_descriptor->data0 = tx->frame_data0;
++ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+
+ /* move to next descriptor */
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+@@ -1455,7 +1455,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+
+ /* wrap up previous descriptor */
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+- tx_descriptor->data0 = tx->frame_data0;
++ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+
+ /* move to next descriptor */
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+@@ -1481,10 +1481,10 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ return -ENOMEM;
+ }
+
+- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
+- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
+- tx_descriptor->data3 = (frame_length << 16) &
+- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
++ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
++ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
++ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
++ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
+
+ buffer_info->skb = NULL;
+ buffer_info->dma_ptr = dma_ptr;
+@@ -1528,7 +1528,7 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ if (ignore_sync)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+
+- tx_descriptor->data0 = tx->frame_data0;
++ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx->last_tail = tx->frame_tail;
+
+@@ -1946,11 +1946,11 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ }
+
+ buffer_info->buffer_length = length;
+- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
+- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
++ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
++ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
+ descriptor->data3 = 0;
+- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
+- (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
++ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
++ (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
+ skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
+ lan743x_rx_update_tail(rx, index);
+
+@@ -1965,12 +1965,12 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+
+- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
+- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
++ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
++ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
+ descriptor->data3 = 0;
+- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
++ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ ((buffer_info->buffer_length) &
+- RX_DESC_DATA0_BUF_LENGTH_MASK_));
++ RX_DESC_DATA0_BUF_LENGTH_MASK_)));
+ lan743x_rx_update_tail(rx, index);
+ }
+
+@@ -2004,7 +2004,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ {
+ struct skb_shared_hwtstamps *hwtstamps = NULL;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
+- int current_head_index = *rx->head_cpu_ptr;
++ int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
+ struct lan743x_rx_buffer_info *buffer_info;
+ struct lan743x_rx_descriptor *descriptor;
+ int extension_index = -1;
+@@ -2019,14 +2019,14 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+
+ if (rx->last_head != current_head_index) {
+ descriptor = &rx->ring_cpu_ptr[rx->last_head];
+- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
++ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
+ goto done;
+
+- if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
++ if (!(le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_))
+ goto done;
+
+ first_index = rx->last_head;
+- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
++ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
+ last_index = rx->last_head;
+ } else {
+ int index;
+@@ -2034,10 +2034,10 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ index = lan743x_rx_next_index(rx, first_index);
+ while (index != current_head_index) {
+ descriptor = &rx->ring_cpu_ptr[index];
+- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
++ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
+ goto done;
+
+- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
++ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
+ last_index = index;
+ break;
+ }
+@@ -2046,17 +2046,17 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ }
+ if (last_index >= 0) {
+ descriptor = &rx->ring_cpu_ptr[last_index];
+- if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
++ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
+ /* extension is expected to follow */
+ int index = lan743x_rx_next_index(rx,
+ last_index);
+ if (index != current_head_index) {
+ descriptor = &rx->ring_cpu_ptr[index];
+- if (descriptor->data0 &
++ if (le32_to_cpu(descriptor->data0) &
+ RX_DESC_DATA0_OWN_) {
+ goto done;
+ }
+- if (descriptor->data0 &
++ if (le32_to_cpu(descriptor->data0) &
+ RX_DESC_DATA0_EXT_) {
+ extension_index = index;
+ } else {
+@@ -2109,7 +2109,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ }
+ buffer_info->skb = NULL;
+ packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
+- (descriptor->data0);
++ (le32_to_cpu(descriptor->data0));
+ skb_put(skb, packet_length - 4);
+ skb->protocol = eth_type_trans(skb,
+ rx->adapter->netdev);
+@@ -2147,8 +2147,8 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ descriptor = &rx->ring_cpu_ptr[extension_index];
+ buffer_info = &rx->buffer_info[extension_index];
+
+- ts_sec = descriptor->data1;
+- ts_nsec = (descriptor->data2 &
++ ts_sec = le32_to_cpu(descriptor->data1);
++ ts_nsec = (le32_to_cpu(descriptor->data2) &
+ RX_DESC_DATA2_TS_NS_MASK_);
+ lan743x_rx_reuse_ring_element(rx, extension_index);
+ real_last_index = extension_index;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 1fbcef3910989..a7b97287d84ba 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -655,7 +655,7 @@ struct lan743x_tx {
+
+ struct lan743x_tx_buffer_info *buffer_info;
+
+- u32 *head_cpu_ptr;
++ __le32 *head_cpu_ptr;
+ dma_addr_t head_dma_ptr;
+ int last_head;
+ int last_tail;
+@@ -685,7 +685,7 @@ struct lan743x_rx {
+
+ struct lan743x_rx_buffer_info *buffer_info;
+
+- u32 *head_cpu_ptr;
++ __le32 *head_cpu_ptr;
+ dma_addr_t head_dma_ptr;
+ u32 last_head;
+ u32 last_tail;
+@@ -769,10 +769,10 @@ struct lan743x_adapter {
+ #define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000)
+
+ struct lan743x_tx_descriptor {
+- u32 data0;
+- u32 data1;
+- u32 data2;
+- u32 data3;
++ __le32 data0;
++ __le32 data1;
++ __le32 data2;
++ __le32 data3;
+ } __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
+
+ #define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+@@ -807,10 +807,10 @@ struct lan743x_tx_buffer_info {
+ #define RX_HEAD_PADDING NET_IP_ALIGN
+
+ struct lan743x_rx_descriptor {
+- u32 data0;
+- u32 data1;
+- u32 data2;
+- u32 data3;
++ __le32 data0;
++ __le32 data1;
++ __le32 data2;
++ __le32 data3;
+ } __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
+
+ #define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+--
+2.39.5
+
--- /dev/null
+From b1b16aed7ff882425763628009b982a18fb3ab35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jul 2020 16:12:21 +0100
+Subject: lan743x: remove redundant initialization of variable
+ current_head_index
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit bb809a047eb5070e2fc76aa62d111fbbe656c532 ]
+
+The variable current_head_index is being initialized with a value that
+is never read, and it is updated later with a new value. Replace
+the -1 initialization with that later assignment.
+
+Addresses-Coverity: ("Unused value")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 2d52e2e38b85 ("net: lan743x: Fix memleak issue when GSO enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 6458dbd6c631a..5d539e4e942cc 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -2004,14 +2004,13 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ {
+ struct skb_shared_hwtstamps *hwtstamps = NULL;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
++ int current_head_index = *rx->head_cpu_ptr;
+ struct lan743x_rx_buffer_info *buffer_info;
+ struct lan743x_rx_descriptor *descriptor;
+- int current_head_index = -1;
+ int extension_index = -1;
+ int first_index = -1;
+ int last_index = -1;
+
+- current_head_index = *rx->head_cpu_ptr;
+ if (current_head_index < 0 || current_head_index >= rx->ring_size)
+ goto done;
+
+--
+2.39.5
+
--- /dev/null
+From 77bbb5e1f1cfd9e648ef8d7989dae3502c689fd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 16:50:47 +0100
+Subject: net: dlink: Correct endianness handling of led_mode
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit e7e5ae71831c44d58627a991e603845a2fed2cab ]
+
+As it's name suggests, parse_eeprom() parses EEPROM data.
+
+This is done by reading data, 16 bits at a time as follows:
+
+ for (i = 0; i < 128; i++)
+ ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
+sromdata is at the same memory location as psrom.
+And the type of psrom is a pointer to struct t_SROM.
+
+As can be seen in the loop above, data is stored in sromdata, and thus psrom,
+as 16-bit little-endian values.
+
+However, the integer fields of t_SROM are host-byte-order integers,
+and in the case of led_mode this leads to a little-endian value
+being incorrectly treated as host byte order.
+
+Looking at rio_set_led_mode, this does appear to be a bug, as that code
+masks led_mode with 0x1, 0x2 and 0x8. That logic would be affected by a
+reversed byte order.
+
+This problem would only manifest on big endian hosts.
+
+Found by inspection while investigating a sparse warning
+regarding the crc field of t_SROM.
+
+I believe that warning is a false positive. And although I plan
+to send a follow-up to use little-endian types for the other integer
+fields of PSROM_t, I do not believe that will involve any bug fixes.
+
+Compile tested only.
+
+Fixes: c3f45d322cbd ("dl2k: Add support for IP1000A-based cards")
+Signed-off-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250425-dlink-led-mode-v1-1-6bae3c36e736@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/dlink/dl2k.c | 2 +-
+ drivers/net/ethernet/dlink/dl2k.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 55e720d2ea0c7..eb23157641343 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -358,7 +358,7 @@ parse_eeprom (struct net_device *dev)
+ dev->dev_addr[i] = psrom->mac_addr[i];
+
+ if (np->chip_id == CHIP_IP1000A) {
+- np->led_mode = psrom->led_mode;
++ np->led_mode = le16_to_cpu(psrom->led_mode);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index 195dc6cfd8955..0e33e2eaae960 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -335,7 +335,7 @@ typedef struct t_SROM {
+ u16 sub_system_id; /* 0x06 */
+ u16 pci_base_1; /* 0x08 (IP1000A only) */
+ u16 pci_base_2; /* 0x0a (IP1000A only) */
+- u16 led_mode; /* 0x0c (IP1000A only) */
++ __le16 led_mode; /* 0x0c (IP1000A only) */
+ u16 reserved1[9]; /* 0x0e-0x1f */
+ u8 mac_addr[6]; /* 0x20-0x25 */
+ u8 reserved2[10]; /* 0x26-0x2f */
+--
+2.39.5
+
--- /dev/null
+From 88882da86fc6357ee1a243a5ff320a9f88dd0cf2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 11:08:26 +0200
+Subject: net: fec: ERR007885 Workaround for conventional TX
+
+From: Mattias Barthel <mattias.barthel@atlascopco.com>
+
+[ Upstream commit a179aad12badc43201cbf45d1e8ed2c1383c76b9 ]
+
+Activate the TX hang workaround also in
+fec_enet_txq_submit_skb() when TSO is not enabled.
+
+Errata: ERR007885
+
+Symptoms: NETDEV WATCHDOG: eth0 (fec): transmit queue 0 timed out
+
+commit 37d6017b84f7 ("net: fec: Workaround for imx6sx enet tx hang when enable three queues")
+There is a TDAR race condition for multiQ when the software sets TDAR
+and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+
+So, the workaround is to check the TDAR status four times: if TDAR has been
+cleared by hardware, then write TDAR; otherwise don't set TDAR.
+
+Fixes: 53bb20d1faba ("net: fec: add variable reg_desc_active to speed things up")
+Signed-off-by: Mattias Barthel <mattias.barthel@atlascopco.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250429090826.3101258-1-mattiasbarthel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 5660a83356eb0..fd7c504b44f2e 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -605,7 +605,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+- writel(0, txq->bd.reg_desc_active);
++ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active))
++ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+ }
+--
+2.39.5
+
--- /dev/null
+From 86f6b60162a3cab61147bd6ffa9687c2394c8553 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:55:27 +0530
+Subject: net: lan743x: Fix memleak issue when GSO enabled
+
+From: Thangaraj Samynathan <thangaraj.s@microchip.com>
+
+[ Upstream commit 2d52e2e38b85c8b7bc00dca55c2499f46f8c8198 ]
+
+Always map the `skb` to the LS descriptor. Previously, the skb was
+mapped to the EXT descriptor when the number of fragments was zero with
+GSO enabled. Mapping the skb to the EXT descriptor prevents it from
+being freed, leading to a memory leak.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Thangaraj Samynathan <thangaraj.s@microchip.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250429052527.10031-1-thangaraj.s@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 8 ++++++--
+ drivers/net/ethernet/microchip/lan743x_main.h | 1 +
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index ebcbb719e9002..a69a34d93ad62 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1409,6 +1409,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_first;
+ }
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+@@ -1478,6 +1479,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ tx->frame_first = 0;
+ tx->frame_data0 = 0;
+ tx->frame_tail = 0;
++ tx->frame_last = 0;
+ return -ENOMEM;
+ }
+
+@@ -1518,16 +1520,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_tail;
+ }
+
+- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+- buffer_info = &tx->buffer_info[tx->frame_tail];
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
++ buffer_info = &tx->buffer_info[tx->frame_last];
+ buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
+ if (ignore_sync)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx->last_tail = tx->frame_tail;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index a7b97287d84ba..44b107caba84f 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -652,6 +652,7 @@ struct lan743x_tx {
+ u32 frame_first;
+ u32 frame_data0;
+ u32 frame_tail;
++ u32 frame_last;
+
+ struct lan743x_tx_buffer_info *buffer_info;
+
+--
+2.39.5
+
--- /dev/null
+From 56f1d215ecefb5374a05b9981ddda2679eadce69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 11:36:08 +0300
+Subject: net/mlx5: E-Switch, Initialize MAC Address for Default GID
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit 5d1a04f347e6cbf5ffe74da409a5d71fbe8c5f19 ]
+
+Initialize the source MAC address when creating the default GID entry.
+Since this entry is used only for loopback traffic, it only needs to
+be a unicast address. A zeroed-out MAC address is sufficient for this
+purpose.
+Without this fix, random bits would be assigned as the source address.
+If these bits formed a multicast address, the firmware would return an
+error, preventing the user from switching to switchdev mode:
+
+Error: mlx5_core: Failed setting eswitch to offloads.
+kernel answers: Invalid argument
+
+Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250423083611.324567-3-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 2389239acadc9..945d90844f0cb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -130,8 +130,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
+
+ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+ {
++ u8 mac[ETH_ALEN] = {};
+ union ib_gid gid;
+- u8 mac[ETH_ALEN];
+
+ mlx5_rdma_make_default_gid(dev, &gid);
+ return mlx5_core_roce_gid_set(dev, 0,
+--
+2.39.5
+
--- /dev/null
+From 883cbea784a99458185ed728d1af319aea69fe19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:05 -0300
+Subject: net_sched: drr: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f99a3fbf023e20b626be4b0f042463d598050c9a ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of drr, there won't be a UAF, but the code will add the same
+classifier to the list twice, which will cause memory corruption.
+
+In addition to checking for qlen being zero, this patch checks whether the
+class was already added to the active_list (cl_is_active) before adding
+to the list to cover for the reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-2-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_drr.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index 07a2b0b354954..1a05718063426 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -36,6 +36,11 @@ struct drr_sched {
+ struct Qdisc_class_hash clhash;
+ };
+
++static bool cl_is_active(struct drr_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct drr_sched *q = qdisc_priv(sch);
+@@ -344,7 +349,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct drr_sched *q = qdisc_priv(sch);
+ struct drr_class *cl;
+ int err = 0;
+- bool first;
+
+ cl = drr_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -354,7 +358,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+@@ -364,7 +367,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- if (first) {
++ if (!cl_is_active(cl)) {
+ list_add_tail(&cl->alist, &q->active);
+ cl->deficit = cl->quantum;
+ }
+--
+2.39.5
+
--- /dev/null
+From 8a58bba39ebbfbce1de4cb0d92b107822b4e46e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:06 -0300
+Subject: net_sched: hfsc: Fix a UAF vulnerability in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 141d34391abbb315d68556b7c67ad97885407547 ]
+
+As described in Gerrard's report [1], we have a UAF case when an hfsc class
+has a netem child qdisc. The crux of the issue is that hfsc is assuming
+that checking for cl->qdisc->q.qlen == 0 guarantees that it hasn't inserted
+the class in the vttree or eltree (which is not true for the netem
+duplicate case).
+
+This patch checks the n_active class variable to make sure that the code
+won't insert the class in the vttree or eltree twice, catering for the
+reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Reported-by: Gerrard Tai <gerrard.tai@starlabs.sg>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-3-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_hfsc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 79c63c4610d3a..5d73d02b8dce7 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1573,7 +1573,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ return err;
+ }
+
+- if (first) {
++ if (first && !cl->cl_nactive) {
+ if (cl->cl_flags & HFSC_RSC)
+ init_ed(cl, len);
+ if (cl->cl_flags & HFSC_FSC)
+--
+2.39.5
+
--- /dev/null
+From 58288e01b276ebc1327a7aa29f7b0eb94313dd13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:08 -0300
+Subject: net_sched: qfq: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f139f37dcdf34b67f5bf92bc8e0f7f6b3ac63aa4 ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of qfq, there won't be a UAF, but the code will add the same
+classifier to the list twice, which will cause memory corruption.
+
+This patch checks whether the class was already added to the agg->active
+list (cl_is_active) before doing the addition to cater for the reentrant
+case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-5-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_qfq.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 6e9e3405f26b9..c466d255f7865 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -203,6 +203,11 @@ struct qfq_sched {
+ */
+ enum update_reason {enqueue, requeue};
+
++static bool cl_is_active(struct qfq_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct qfq_sched *q = qdisc_priv(sch);
+@@ -1218,7 +1223,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct qfq_class *cl;
+ struct qfq_aggregate *agg;
+ int err = 0;
+- bool first;
+
+ cl = qfq_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -1240,7 +1244,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ pr_debug("qfq_enqueue: enqueue failed %d\n", err);
+@@ -1257,8 +1260,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ++sch->q.qlen;
+
+ agg = cl->agg;
+- /* if the queue was not empty, then done here */
+- if (!first) {
++ /* if the class is active, then done here */
++ if (cl_is_active(cl)) {
+ if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+ list_first_entry(&agg->active, struct qfq_class, alist)
+ == cl && cl->deficit < len)
+--
+2.39.5
+
--- /dev/null
+From 5161126f51101a0d41443650b12d4eb1579ebfde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:42:01 -0600
+Subject: nvme-tcp: fix premature queue removal and I/O failover
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Liang <mliang@purestorage.com>
+
+[ Upstream commit 77e40bbce93059658aee02786a32c5c98a240a8a ]
+
+This patch addresses a data corruption issue observed in nvme-tcp during
+testing.
+
+In an NVMe native multipath setup, when an I/O timeout occurs, all
+inflight I/Os are canceled almost immediately after the kernel socket is
+shut down. These canceled I/Os are reported as host path errors,
+triggering a failover that succeeds on a different path.
+
+However, at this point, the original I/O may still be outstanding in the
+host's network transmission path (e.g., the NIC’s TX queue). From the
+user-space app's perspective, the buffers associated with the I/O are
+considered completed since they're acked on the different path and may
+be reused for new I/O requests.
+
+Because nvme-tcp enables zero-copy by default in the transmission path,
+this can lead to corrupted data being sent to the original target,
+ultimately causing data corruption.
+
+We can reproduce this data corruption by injecting delay on one path and
+triggering an I/O timeout.
+
+To prevent this issue, this change ensures that all inflight
+transmissions are fully completed from the host's perspective before
+returning from queue stop. To handle concurrent I/O timeouts from multiple
+namespaces under the same controller, always wait in queue stop
+regardless of queue's state.
+
+This aligns with the behavior of queue stopping in other NVMe fabric
+transports.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Michael Liang <mliang@purestorage.com>
+Reviewed-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Reviewed-by: Randy Jennings <randyj@purestorage.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 61296032ce6de..364f83c92b182 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1423,7 +1423,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ cancel_work_sync(&queue->io_work);
+ }
+
+-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
+ {
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+@@ -1433,6 +1433,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ __nvme_tcp_stop_queue(queue);
+ }
+
++static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
++ int timeout = 100;
++
++ while (timeout > 0) {
++ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
++ !sk_wmem_alloc_get(queue->sock->sk))
++ return;
++ msleep(2);
++ timeout -= 2;
++ }
++ dev_warn(nctrl->device,
++ "qid %d: timeout draining sock wmem allocation expired\n",
++ qid);
++}
++
++static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ nvme_tcp_stop_queue_nowait(nctrl, qid);
++ nvme_tcp_wait_queue(nctrl, qid);
++}
++
++
+ static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+@@ -1539,7 +1564,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+- nvme_tcp_stop_queue(ctrl, i);
++ nvme_tcp_stop_queue_nowait(ctrl, i);
++ for (i = 1; i < ctrl->queue_count; i++)
++ nvme_tcp_wait_queue(ctrl, i);
+ }
+
+ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+--
+2.39.5
+
dm-integrity-fix-a-warning-on-invalid-table-line.patch
dm-always-update-the-array-size-in-realloc_argv-on-success.patch
tracing-fix-oob-write-in-trace_seq_to_buffer.patch
+net-mlx5-e-switch-initialize-mac-address-for-default.patch
+net_sched-drr-fix-double-list-add-in-class-with-nete.patch
+net_sched-hfsc-fix-a-uaf-vulnerability-in-class-with.patch
+net_sched-qfq-fix-double-list-add-in-class-with-nete.patch
+net-dlink-correct-endianness-handling-of-led_mode.patch
+nvme-tcp-fix-premature-queue-removal-and-i-o-failove.patch
+lan743x-remove-redundant-initialization-of-variable-.patch
+lan743x-fix-endianness-when-accessing-descriptors.patch
+net-lan743x-fix-memleak-issue-when-gso-enabled.patch
+net-fec-err007885-workaround-for-conventional-tx.patch