--- /dev/null
+From 144eb94653602861047d8367de3b2abe7b5ee349 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Apr 2025 10:59:53 +0000
+Subject: ASoC: soc-pcm: Fix hw_params() and DAPM widget sequence
+
+From: Sheetal <sheetal@nvidia.com>
+
+[ Upstream commit 9aff2e8df240e84a36f2607f98a0a9924a24e65d ]
+
+Issue:
+ When multiple audio streams share a common BE DAI, the BE DAI
+ widget can be powered up before its hardware parameters are configured.
+ This incorrect sequence leads to intermittent pcm_write errors.
+
+ For example, the below Tegra use-case throws an error:
+ aplay(2 streams) -> AMX(mux) -> ADX(demux) -> arecord(2 streams),
+ here, 'AMX TX' and 'ADX RX' are common BE DAIs.
+
+For the above use case, the following sequence is observed when the failure happens:
+ aplay(1) FE open()
+ - BE DAI callbacks added to the list
+ - BE DAI state = SND_SOC_DPCM_STATE_OPEN
+ aplay(2) FE open()
+ - BE DAI callbacks are not added to the list as the state is
+ already SND_SOC_DPCM_STATE_OPEN during aplay(1) FE open().
+ aplay(2) FE hw_params()
+ - BE DAI hw_params() callback ignored
+ aplay(2) FE prepare()
+ - Widget is powered ON without BE DAI hw_params() call
+ aplay(1) FE hw_params()
+ - BE DAI hw_params() is now called
+
+Fix:
+ Also add BE DAIs to the list if their state is SND_SOC_DPCM_STATE_OPEN
+ or SND_SOC_DPCM_STATE_HW_PARAMS.
+
+This ensures the widget is powered ON only after the BE DAI hw_params()
+callback has run.
+
+Fixes: 0c25db3f7621 ("ASoC: soc-pcm: Don't reconnect an already active BE")
+Signed-off-by: Sheetal <sheetal@nvidia.com>
+Link: https://patch.msgid.link/20250404105953.2784819-1-sheetal@nvidia.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-pcm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 3f998a09fc42e..5a0fec90ae259 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1499,10 +1499,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ /*
+ * Filter for systems with 'component_chaining' enabled.
+ * This helps to avoid unnecessary re-configuration of an
+- * already active BE on such systems.
++ * already active BE on such systems and ensures the BE DAI
++ * widget is powered ON after hw_params() BE DAI callback.
+ */
+ if (fe->card->component_chaining &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
+ continue;
+
+--
+2.39.5
+
--- /dev/null
+From 934fc7f827dd06ba4587e59b7e86a40fd9d33afc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 22:51:03 +0300
+Subject: Bluetooth: L2CAP: copy RX timestamp to new fragments
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 3908feb1bd7f319a10e18d84369a48163264cc7d ]
+
+Copy timestamp too when allocating new skb for received fragment.
+Fixes missing RX timestamps with fragmentation.
+
+Fixes: 4d7ea8ee90e4 ("Bluetooth: L2CAP: Fix handling fragmented length")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 222105e24d2d8..ee67c9f658ab8 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -8481,6 +8481,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
+ return -ENOMEM;
+ /* Init rx_len */
+ conn->rx_len = len;
++
++ skb_set_delivery_time(conn->rx_skb, skb->tstamp,
++ skb->tstamp_type);
+ }
+
+ /* Copy as much as the rx_skb can hold */
+--
+2.39.5
+
--- /dev/null
+From cb177326a5401a2112e83fc20d5220c98daa8511 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:01 -0700
+Subject: bnxt_en: Fix coredump logic to free allocated buffer
+
+From: Shruti Parab <shruti.parab@broadcom.com>
+
+[ Upstream commit ea9376cf68230e05492f22ca45d329f16e262c7b ]
+
+When handling HWRM_DBG_COREDUMP_LIST FW command in
+bnxt_hwrm_dbg_dma_data(), the allocated buffer info->dest_buf is
+not freed in the error path. In the normal path, info->dest_buf
+is assigned to coredump->data and it will eventually be freed after
+the coredump is collected.
+
+Free info->dest_buf immediately inside bnxt_hwrm_dbg_dma_data() in
+the error path.
+
+Fixes: c74751f4c392 ("bnxt_en: Return error if FW returns more data than dump length")
+Reported-by: Michael Chan <michael.chan@broadcom.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Shruti Parab <shruti.parab@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+index c067898820360..b57d2a25ae276 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+@@ -72,6 +72,11 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
++ if (cmn_req->req_type ==
++ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
++ kfree(info->dest_buf);
++ info->dest_buf = NULL;
++ }
+ break;
+ }
+ }
+--
+2.39.5
+
--- /dev/null
+From ef310795879260f9f18dd2f177ddc09c16017353 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:03 -0700
+Subject: bnxt_en: Fix ethtool -d byte order for 32-bit values
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 02e8be5a032cae0f4ca33c6053c44d83cf4acc93 ]
+
+For version 1 register dump that includes the PCIe stats, the existing
+code incorrectly assumes that all PCIe stats are 64-bit values. Fix it
+by using an array containing the starting and ending index of the 32-bit
+values. The loop in bnxt_get_regs() will use the array to do proper
+endian swap for the 32-bit values.
+
+Fixes: b5d600b027eb ("bnxt_en: Add support for 'ethtool -d'")
+Reviewed-by: Shruti Parab <shruti.parab@broadcom.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 38 ++++++++++++++++---
+ 1 file changed, 32 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 799adba0034a4..7daaed4520ace 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1392,6 +1392,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
+ return reg_len;
+ }
+
++#define BNXT_PCIE_32B_ENTRY(start, end) \
++ { offsetof(struct pcie_ctx_hw_stats, start), \
++ offsetof(struct pcie_ctx_hw_stats, end) }
++
++static const struct {
++ u16 start;
++ u16 end;
++} bnxt_pcie_32b_entries[] = {
++ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
++};
++
+ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+ {
+@@ -1423,12 +1434,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+- __le64 *src = (__le64 *)hw_pcie_stats;
+- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+- int i;
+-
+- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+- dst[i] = le64_to_cpu(src[i]);
++ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
++ u8 *src = (u8 *)hw_pcie_stats;
++ int i, j;
++
++ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
++ if (i >= bnxt_pcie_32b_entries[j].start &&
++ i <= bnxt_pcie_32b_entries[j].end) {
++ u32 *dst32 = (u32 *)(dst + i);
++
++ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
++ i += 4;
++ if (i > bnxt_pcie_32b_entries[j].end &&
++ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
++ j++;
++ } else {
++ u64 *dst64 = (u64 *)(dst + i);
++
++ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
++ i += 8;
++ }
++ }
+ }
+ hwrm_req_drop(bp, req);
+ }
+--
+2.39.5
+
--- /dev/null
+From 6a94c29bbbb5a1f243e3abe5499eaf693400ec56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:02 -0700
+Subject: bnxt_en: Fix out-of-bound memcpy() during ethtool -w
+
+From: Shruti Parab <shruti.parab@broadcom.com>
+
+[ Upstream commit 6b87bd94f34370bbf1dfa59352bed8efab5bf419 ]
+
+When retrieving the FW coredump using ethtool, it can sometimes cause
+memory corruption:
+
+BUG: KFENCE: memory corruption in __bnxt_get_coredump+0x3ef/0x670 [bnxt_en]
+Corrupted memory at 0x000000008f0f30e8 [ ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ] (in kfence-#45):
+__bnxt_get_coredump+0x3ef/0x670 [bnxt_en]
+ethtool_get_dump_data+0xdc/0x1a0
+__dev_ethtool+0xa1e/0x1af0
+dev_ethtool+0xa8/0x170
+dev_ioctl+0x1b5/0x580
+sock_do_ioctl+0xab/0xf0
+sock_ioctl+0x1ce/0x2e0
+__x64_sys_ioctl+0x87/0xc0
+do_syscall_64+0x5c/0xf0
+entry_SYSCALL_64_after_hwframe+0x78/0x80
+
+...
+
+This happens when copying the coredump segment list in
+bnxt_hwrm_dbg_dma_data() with the HWRM_DBG_COREDUMP_LIST FW command.
+The info->dest_buf buffer is allocated based on the number of coredump
+segments returned by the FW. The segment list is then DMA'ed by
+the FW and the length of the DMA is returned by FW. The driver then
+copies this DMA'ed segment list to info->dest_buf.
+
+In some cases, this DMA length may exceed the info->dest_buf length
+and cause the above BUG condition. Fix it by capping the copy
+length to not exceed the length of info->dest_buf. The extra
+DMA data contains no useful information.
+
+This code path is shared for the HWRM_DBG_COREDUMP_LIST and the
+HWRM_DBG_COREDUMP_RETRIEVE FW commands. The buffering is different
+for these 2 FW commands. To simplify the logic, we need to move
+the line to adjust the buffer length for HWRM_DBG_COREDUMP_RETRIEVE
+up, so that the new check to cap the copy length will work for both
+commands.
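+
+A minimal sketch of the capping idea (assumed names, not the driver's
+code): the FW-reported DMA length is never trusted beyond what the
+locally allocated buffer can still hold.
+
+#include <linux/minmax.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static u16 demo_copy_seg(u8 *dest_buf, u32 dest_buf_size, u32 off,
+			 const void *dma_buf, u16 len)
+{
+	/* cap to the remaining room in dest_buf */
+	u16 copylen = min_t(u16, len, dest_buf_size - off);
+
+	memcpy(dest_buf + off, dma_buf, copylen);
+	return copylen;
+}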
+
+Fixes: c74751f4c392 ("bnxt_en: Return error if FW returns more data than dump length")
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Shruti Parab <shruti.parab@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/broadcom/bnxt/bnxt_coredump.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+index b57d2a25ae276..32813cdd5aa5c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+@@ -66,10 +66,19 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ }
+ }
+
++ if (cmn_req->req_type ==
++ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
++ info->dest_buf_size += len;
++
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+- memcpy(info->dest_buf + off, dma_buf, len);
++ u16 copylen = min_t(u16, len,
++ info->dest_buf_size - off);
++
++ memcpy(info->dest_buf + off, dma_buf, copylen);
++ if (copylen < len)
++ break;
+ } else {
+ rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+@@ -81,10 +90,6 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ }
+ }
+
+- if (cmn_req->req_type ==
+- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+- info->dest_buf_size += len;
+-
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+--
+2.39.5
+
--- /dev/null
+From ea53b16e0c8e18c6f30940d825b1cb5d35c6f09c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 15:26:32 -0700
+Subject: ice: Check VF VSI Pointer Value in ice_vc_add_fdir_fltr()
+
+From: Xuanqiang Luo <luoxuanqiang@kylinos.cn>
+
+[ Upstream commit 425c5f266b2edeee0ce16fedd8466410cdcfcfe3 ]
+
+As mentioned in the commit baeb705fd6a7 ("ice: always check VF VSI
+pointer values"), we need to perform a null pointer check on the return
+value of ice_get_vf_vsi() before using it.
+
+Fixes: 6ebbe97a4881 ("ice: Add a per-VF limit on number of FDIR filters")
+Signed-off-by: Xuanqiang Luo <luoxuanqiang@kylinos.cn>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250425222636.3188441-3-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index bff3e9662a8fd..a9df95088df35 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -1811,6 +1811,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vf_vsi = ice_get_vf_vsi(vf);
++ if (!vf_vsi) {
++ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ goto err_exit;
++ }
+
+ #define ICE_VF_MAX_FDIR_FILTERS 128
+ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+--
+2.39.5
+
--- /dev/null
+From 5f42e31935a5b5c8c6f8aa5eed12f8ee8e799a57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 16:50:47 +0100
+Subject: net: dlink: Correct endianness handling of led_mode
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit e7e5ae71831c44d58627a991e603845a2fed2cab ]
+
+As its name suggests, parse_eeprom() parses EEPROM data.
+
+This is done by reading data, 16 bits at a time as follows:
+
+ for (i = 0; i < 128; i++)
+ ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
+sromdata is at the same memory location as psrom.
+And the type of psrom is a pointer to struct t_SROM.
+
+As can be seen in the loop above, data is stored in sromdata, and thus psrom,
+as 16-bit little-endian values.
+
+However, the integer fields of t_SROM are host byte order integers.
+And in the case of led_mode this leads to a little endian value
+being incorrectly treated as host byte order.
+
+Looking at rio_set_led_mode(), this does appear to be a bug, as that code
+masks led_mode with 0x1, 0x2 and 0x8, logic that would be affected by a
+reversed byte order.
+
+This problem would only manifest on big endian hosts.
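+
+A small userspace illustration of the failure mode (hypothetical value,
+not driver code): with led_mode = 0x0001 stored little-endian, a
+big-endian host that skips the conversion sees 0x0100, so a test such as
+"led_mode & 0x1" no longer matches.
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint8_t raw[2] = { 0x01, 0x00 };	/* 0x0001 in little-endian */
+
+	uint16_t no_conv = (uint16_t)((raw[0] << 8) | raw[1]);	/* BE read, no conversion: 0x0100 */
+	uint16_t le16    = (uint16_t)(raw[0] | (raw[1] << 8));	/* le16_to_cpu() equivalent: 0x0001 */
+
+	printf("no_conv & 1 = %u, le16 & 1 = %u\n", no_conv & 1, le16 & 1);
+	return 0;
+}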
+
+Found by inspection while investigating a sparse warning
+regarding the crc field of t_SROM.
+
+I believe that warning is a false positive. And although I plan
+to send a follow-up to use little-endian types for the other integer
+fields of PSROM_t, I do not believe that will involve any bug fixes.
+
+Compile tested only.
+
+Fixes: c3f45d322cbd ("dl2k: Add support for IP1000A-based cards")
+Signed-off-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250425-dlink-led-mode-v1-1-6bae3c36e736@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/dlink/dl2k.c | 2 +-
+ drivers/net/ethernet/dlink/dl2k.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 2c67a857a42ff..71cb7fe63de3c 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
+ eth_hw_addr_set(dev, psrom->mac_addr);
+
+ if (np->chip_id == CHIP_IP1000A) {
+- np->led_mode = psrom->led_mode;
++ np->led_mode = le16_to_cpu(psrom->led_mode);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index 195dc6cfd8955..0e33e2eaae960 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -335,7 +335,7 @@ typedef struct t_SROM {
+ u16 sub_system_id; /* 0x06 */
+ u16 pci_base_1; /* 0x08 (IP1000A only) */
+ u16 pci_base_2; /* 0x0a (IP1000A only) */
+- u16 led_mode; /* 0x0c (IP1000A only) */
++ __le16 led_mode; /* 0x0c (IP1000A only) */
+ u16 reserved1[9]; /* 0x0e-0x1f */
+ u8 mac_addr[6]; /* 0x20-0x25 */
+ u8 reserved2[10]; /* 0x26-0x2f */
+--
+2.39.5
+
--- /dev/null
+From d2aa1c14e4f9dd5f6b8071f756778533f208d26e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Apr 2025 17:48:55 +0300
+Subject: net: dsa: felix: fix broken taprio gate states after clock jump
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 426d487bca38b34f39c483edfc6313a036446b33 ]
+
+Simplest setup to reproduce the issue: connect 2 ports of the
+LS1028A-RDB together (eno0 with swp0) and run:
+
+$ ip link set eno0 up && ip link set swp0 up
+$ tc qdisc replace dev swp0 parent root handle 100 taprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 map 0 1 2 3 4 5 6 7 \
+ base-time 0 sched-entry S 20 300000 sched-entry S 10 200000 \
+ sched-entry S 20 300000 sched-entry S 48 200000 \
+ sched-entry S 20 300000 sched-entry S 83 200000 \
+ sched-entry S 40 300000 sched-entry S 00 200000 flags 2
+$ ptp4l -i eno0 -f /etc/linuxptp/configs/gPTP.cfg -m &
+$ ptp4l -i swp0 -f /etc/linuxptp/configs/gPTP.cfg -m
+
+One will observe that the PTP state machine on swp0 starts
+synchronizing, then it attempts to do a clock step, and after that, it
+fails to recover from the condition below.
+
+ptp4l[82.427]: selected best master clock 00049f.fffe.05f627
+ptp4l[82.428]: port 1 (swp0): MASTER to UNCALIBRATED on RS_SLAVE
+ptp4l[83.252]: port 1 (swp0): UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED
+ptp4l[83.886]: rms 4537731277 max 9075462553 freq -18518 +/- 11467 delay 818 +/- 0
+ptp4l[84.170]: timed out while polling for tx timestamp
+ptp4l[84.171]: increasing tx_timestamp_timeout or increasing kworker priority may correct this issue, but a driver bug likely causes it
+ptp4l[84.172]: port 1 (swp0): send peer delay request failed
+ptp4l[84.173]: port 1 (swp0): clearing fault immediately
+ptp4l[84.269]: port 1 (swp0): SLAVE to LISTENING on INIT_COMPLETE
+ptp4l[85.303]: timed out while polling for tx timestamp
+ptp4l[85.304]: increasing tx_timestamp_timeout or increasing kworker priority may correct this issue, but a driver bug likely causes it
+ptp4l[85.305]: port 1 (swp0): send peer delay response failed
+ptp4l[85.306]: port 1 (swp0): clearing fault immediately
+ptp4l[86.304]: timed out while polling for tx timestamp
+
+A hint is given by the non-zero statistics for dropped packets which
+were expecting hardware TX timestamps:
+
+$ ethtool --include-statistics -T swp0
+(...)
+Statistics:
+ tx_pkts: 30
+ tx_lost: 11
+ tx_err: 0
+
+We know that when PTP clock stepping takes place (from ocelot_ptp_settime64()
+or from ocelot_ptp_adjtime()), vsc9959_tas_clock_adjust() is called.
+
+Another interesting hint is that placing an early return in
+vsc9959_tas_clock_adjust(), so as to neutralize this function, fixes the
+issue and TX timestamps are no longer dropped.
+
+The debugging function written by me and included below is intended to
+read the GCL RAM, after the admin schedule became operational, through
+the two status registers available for this purpose:
+QSYS_GCL_STATUS_REG_1 and QSYS_GCL_STATUS_REG_2.
+
+static void vsc9959_print_tas_gcl(struct ocelot *ocelot)
+{
+ u32 val, list_length, interval, gate_state;
+ int i, err;
+
+ err = read_poll_timeout(ocelot_read, val,
+ !(val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING),
+ 10, 100000, false, ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Failed to wait for TAS config pending bit to clear: %pe\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_3);
+ list_length = QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(val);
+
+ dev_info(ocelot->dev, "GCL length: %u\n", list_length);
+
+ for (i = 0; i < list_length; i++) {
+ ocelot_rmw(ocelot,
+ QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(i),
+ QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M,
+ QSYS_GCL_STATUS_REG_1);
+ interval = ocelot_read(ocelot, QSYS_GCL_STATUS_REG_2);
+ val = ocelot_read(ocelot, QSYS_GCL_STATUS_REG_1);
+ gate_state = QSYS_GCL_STATUS_REG_1_GATE_STATE_X(val);
+
+ dev_info(ocelot->dev, "GCL entry %d: states 0x%x interval %u\n",
+ i, gate_state, interval);
+ }
+}
+
+Calling it from two places: after the initial QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE
+performed by vsc9959_qos_port_tas_set(), and after the one done by
+vsc9959_tas_clock_adjust(), I notice the following difference.
+
+From the tc-taprio process context, where the schedule was initially
+configured, the GCL looks like this:
+
+mscc_felix 0000:00:00.5: GCL length: 8
+mscc_felix 0000:00:00.5: GCL entry 0: states 0x20 interval 300000
+mscc_felix 0000:00:00.5: GCL entry 1: states 0x10 interval 200000
+mscc_felix 0000:00:00.5: GCL entry 2: states 0x20 interval 300000
+mscc_felix 0000:00:00.5: GCL entry 3: states 0x48 interval 200000
+mscc_felix 0000:00:00.5: GCL entry 4: states 0x20 interval 300000
+mscc_felix 0000:00:00.5: GCL entry 5: states 0x83 interval 200000
+mscc_felix 0000:00:00.5: GCL entry 6: states 0x40 interval 300000
+mscc_felix 0000:00:00.5: GCL entry 7: states 0x0 interval 200000
+
+But from the ptp4l clock stepping process context, when the
+vsc9959_tas_clock_adjust() hook is called, the GCL RAM of the
+operational schedule now looks like this:
+
+mscc_felix 0000:00:00.5: GCL length: 8
+mscc_felix 0000:00:00.5: GCL entry 0: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 1: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 2: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 3: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 4: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 5: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 6: states 0x0 interval 0
+mscc_felix 0000:00:00.5: GCL entry 7: states 0x0 interval 0
+
+I do not have a formal explanation, just experimental conclusions.
+It appears that after triggering QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE
+for a port's TAS, the GCL entry RAM is updated anyway, despite what the
+documentation claims: "Specify the time interval in
+QSYS::GCL_CFG_REG_2.TIME_INTERVAL. This triggers the actual RAM
+write with the gate state and the time interval for the entry number
+specified". We don't touch that register (through vsc9959_tas_gcl_set())
+from vsc9959_tas_clock_adjust(), yet the GCL RAM is updated anyway.
+
+It seems to be updated with effectively stale memory, which in my
+testing can hold a variety of things, including even pieces of the
+previously applied schedule, for particular schedule lengths.
+
+As such, in most circumstances it is very difficult to pinpoint this
+issue, because the newly updated schedule would "behave strangely",
+but ultimately might still pass traffic to some extent, due to some
+gate entries still being present in the stale GCL entry RAM. It is easy
+to miss.
+
+With the particular schedule given at the beginning, the GCL RAM
+"happens" to be reproducibly rewritten with all zeroes, and this is
+consistent with what we see: when the time-aware shaper has gate entries
+with all gates closed, traffic is dropped on TX, no wonder we can't
+retrieve TX timestamps.
+
+Rewriting the GCL entry RAM when reapplying the new base time fixes the
+observed issue.
+
+Fixes: 8670dc33f48b ("net: dsa: felix: update base time of time-aware shaper when adjusting PTP time")
+Reported-by: Richie Pearn <richard.pearn@nxp.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250426144859.3128352-2-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/ocelot/felix_vsc9959.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 391c4e3cb66f4..67af798686b8f 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1517,7 +1517,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+- int port;
++ int i, port;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+@@ -1549,6 +1549,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
++ for (i = 0; i < taprio->num_entries; i++)
++ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
++
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+--
+2.39.5
+
--- /dev/null
+From 7cdd1ae7a0558ae5e2cc11410fd135c05753b459 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 10:38:48 +0200
+Subject: net: ethernet: mtk-star-emac: fix spinlock recursion issues on rx/tx
+ poll
+
+From: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+
+[ Upstream commit 6fe0866014486736cc3ba1c6fd4606d3dbe55c9c ]
+
+Use spin_lock_irqsave() and spin_unlock_irqrestore() instead of
+spin_lock() and spin_unlock() in the mtk_star_emac driver to avoid the
+spinlock recursion that can happen when the DMA interrupts are re-enabled
+in the rx/tx poll functions.
+
+```
+BUG: spinlock recursion on CPU#0, swapper/0/0
+ lock: 0xffff00000db9cf20, .magic: dead4ead, .owner: swapper/0/0,
+ .owner_cpu: 0
+CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted
+ 6.15.0-rc2-next-20250417-00001-gf6a27738686c-dirty #28 PREEMPT
+Hardware name: MediaTek MT8365 Open Platform EVK (DT)
+Call trace:
+ show_stack+0x18/0x24 (C)
+ dump_stack_lvl+0x60/0x80
+ dump_stack+0x18/0x24
+ spin_dump+0x78/0x88
+ do_raw_spin_lock+0x11c/0x120
+ _raw_spin_lock+0x20/0x2c
+ mtk_star_handle_irq+0xc0/0x22c [mtk_star_emac]
+ __handle_irq_event_percpu+0x48/0x140
+ handle_irq_event+0x4c/0xb0
+ handle_fasteoi_irq+0xa0/0x1bc
+ handle_irq_desc+0x34/0x58
+ generic_handle_domain_irq+0x1c/0x28
+ gic_handle_irq+0x4c/0x120
+ do_interrupt_handler+0x50/0x84
+ el1_interrupt+0x34/0x68
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x6c/0x70
+ regmap_mmio_read32le+0xc/0x20 (P)
+ _regmap_bus_reg_read+0x6c/0xac
+ _regmap_read+0x60/0xdc
+ regmap_read+0x4c/0x80
+ mtk_star_rx_poll+0x2f4/0x39c [mtk_star_emac]
+ __napi_poll+0x38/0x188
+ net_rx_action+0x164/0x2c0
+ handle_softirqs+0x100/0x244
+ __do_softirq+0x14/0x20
+ ____do_softirq+0x10/0x20
+ call_on_irq_stack+0x24/0x64
+ do_softirq_own_stack+0x1c/0x40
+ __irq_exit_rcu+0xd4/0x10c
+ irq_exit_rcu+0x10/0x1c
+ el1_interrupt+0x38/0x68
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x6c/0x70
+ cpuidle_enter_state+0xac/0x320 (P)
+ cpuidle_enter+0x38/0x50
+ do_idle+0x1e4/0x260
+ cpu_startup_entry+0x34/0x3c
+ rest_init+0xdc/0xe0
+ console_on_rootfs+0x0/0x6c
+ __primary_switched+0x88/0x90
+```
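+
+A minimal sketch of the safe pattern (assumed names, not the driver's
+exact code): the poll callback runs in softirq context with hard IRQs
+enabled, so the device ISR can interrupt it on the same CPU and try to
+take the same lock again; the _irqsave variant closes that window.
+
+#include <linux/spinlock.h>
+
+struct demo_priv {
+	spinlock_t lock;
+};
+
+static void demo_poll_rearm(struct demo_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);	/* masks local IRQs too */
+	/* re-enable the DMA interrupts here */
+	spin_unlock_irqrestore(&priv->lock, flags);
+}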
+
+Fixes: 0a8bd81fd6aa ("net: ethernet: mtk-star-emac: separate tx/rx handling with two NAPIs")
+Signed-off-by: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Acked-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Link: https://patch.msgid.link/20250424-mtk_star_emac-fix-spinlock-recursion-issue-v2-1-f3fde2e529d8@collabora.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_star_emac.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index ad27749c0931c..fd729469b29f4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1164,6 +1164,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ struct net_device *ndev = priv->ndev;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
++ unsigned long flags;
+
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
+ ret = mtk_star_tx_complete_one(priv);
+@@ -1183,9 +1184,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ netif_wake_queue(ndev);
+
+ if (napi_complete(napi)) {
+- spin_lock(&priv->lock);
++ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, false, true);
+- spin_unlock(&priv->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return 0;
+@@ -1342,6 +1343,7 @@ static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
+ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ {
+ struct mtk_star_priv *priv;
++ unsigned long flags;
+ int work_done = 0;
+
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+@@ -1349,9 +1351,9 @@ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+- spin_lock(&priv->lock);
++ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, true, false);
+- spin_unlock(&priv->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+--
+2.39.5
+
--- /dev/null
+From cd87f99fd7b7e335ea558ee024491e48a9691df9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 10:38:49 +0200
+Subject: net: ethernet: mtk-star-emac: rearm interrupts in rx_poll only when
+ advised
+
+From: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+
+[ Upstream commit e54b4db35e201a9173da9cb7abc8377e12abaf87 ]
+
+In the mtk_star_rx_poll() function, on event processing completion, the
+mtk_star_emac driver calls napi_complete_done() but ignores its return
+code and enables the RX DMA interrupts unconditionally. That return code
+indicates whether the device should avoid rearming its interrupts, so fix
+this behaviour by taking it into account.
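+
+A sketch of the canonical rearm pattern this restores (demo_* helpers are
+stand-ins, not the driver's functions): napi_complete_done() returns
+false when polling must continue, and only a true return allows the
+driver to touch its interrupt enables.
+
+#include <linux/netdevice.h>
+
+/* Stand-ins for the real RX processing and IRQ-enable helpers. */
+static int demo_rx(struct napi_struct *napi, int budget) { return 0; }
+static void demo_enable_rx_irq(struct napi_struct *napi) { }
+
+static int demo_rx_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = demo_rx(napi, budget);
+
+	/* napi_complete_done() returns false when polling must continue,
+	 * e.g. under busy polling; only rearm the IRQ when it returns true.
+	 */
+	if (work_done < budget && napi_complete_done(napi, work_done))
+		demo_enable_rx_irq(napi);
+
+	return work_done;
+}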
+
+Fixes: 8c7bd5a454ff ("net: ethernet: mtk-star-emac: new driver")
+Signed-off-by: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+Acked-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Link: https://patch.msgid.link/20250424-mtk_star_emac-fix-spinlock-recursion-issue-v2-2-f3fde2e529d8@collabora.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_star_emac.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index fd729469b29f4..c42e9f741f959 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1349,8 +1349,7 @@ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+
+ work_done = mtk_star_rx(priv, budget);
+- if (work_done < budget) {
+- napi_complete_done(napi, work_done);
++ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+--
+2.39.5
+
--- /dev/null
+From 88fae022897b2956e36ad0cd2f490e2f73832a88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 11:08:26 +0200
+Subject: net: fec: ERR007885 Workaround for conventional TX
+
+From: Mattias Barthel <mattias.barthel@atlascopco.com>
+
+[ Upstream commit a179aad12badc43201cbf45d1e8ed2c1383c76b9 ]
+
+Activate TX hang workaround also in
+fec_enet_txq_submit_skb() when TSO is not enabled.
+
+Errata: ERR007885
+
+Symptoms: NETDEV WATCHDOG: eth0 (fec): transmit queue 0 timed out
+
+As described in commit 37d6017b84f7 ("net: fec: Workaround for imx6sx enet tx hang when enable three queues"):
+there is a TDAR race condition for multi-queue when the software sets TDAR
+and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+
+So the workaround is to check the TDAR status four times: if TDAR has been
+cleared by hardware, then write TDAR; otherwise, don't set TDAR.
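+
+A sketch of that trigger logic (assumed names; the real code checks the
+FEC_QUIRK_ERR007885 quirk and reads txq->bd.reg_desc_active): TX is only
+retriggered when one of four consecutive reads shows TDAR already cleared
+by hardware, since a TDAR write inside the 2-4 cycle race window can hang
+the uDMA state machines.
+
+#include <linux/io.h>
+
+static void demo_kick_tx(void __iomem *tdar, bool has_erratum)
+{
+	if (!has_erratum ||
+	    !readl(tdar) || !readl(tdar) || !readl(tdar) || !readl(tdar))
+		writel(0, tdar);
+}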
+
+Fixes: 53bb20d1faba ("net: fec: add variable reg_desc_active to speed things up")
+Signed-off-by: Mattias Barthel <mattias.barthel@atlascopco.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250429090826.3101258-1-mattiasbarthel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 018ce4f4be6f3..4a513dba8f53e 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -692,7 +692,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+- writel(0, txq->bd.reg_desc_active);
++ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active))
++ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+ }
+--
+2.39.5
+
--- /dev/null
+From 3ec5fd0c5accf8b273595f7abb24cb16562a7a41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:52 +0800
+Subject: net: hns3: defer calling ptp_clock_register()
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 4971394d9d624f91689d766f31ce668d169d9959 ]
+
+Currently ptp_clock_register() is called before the related
+PTP resources are ready. This may cause unexpected results if an upper
+layer calls the PTP API during that time window. Fix it by
+moving ptp_clock_register() to the end of the function.
+
+Fixes: 0bf5eb788512 ("net: hns3: add support for PTP")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250430093052.2400464-5-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 4d4cea1f50157..b7cf9fbf97183 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -452,6 +452,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
++
++ spin_lock_init(&ptp->lock);
++ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
++ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
++ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
++ hdev->ptp = ptp;
++
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+@@ -463,12 +470,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ return -ENODEV;
+ }
+
+- spin_lock_init(&ptp->lock);
+- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+- hdev->ptp = ptp;
+-
+ return 0;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From e93d55e613dc529cd994f3e1666cae9f8c4364c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:50 +0800
+Subject: net: hns3: fix an interrupt residual problem
+
+From: Yonglong Liu <liuyonglong@huawei.com>
+
+[ Upstream commit 8e6b9c6ea5a55045eed6526d8ee49e93192d1a58 ]
+
+When a VF is passed through to a VM and the VM is killed, the reported
+interrupt may not have been handled; it will remain and won't be cleared
+by the nic engine even with an FLR or TQP reset. When the VM restarts,
+the interrupt of the first vector may be dropped by the second enable_irq
+in vfio, see the issue below:
+https://gitlab.com/qemu-project/qemu/-/issues/2884#note_2423361621
+
+We notice that vfio has always behaved this way, and the interrupt
+is a residue of the nic engine, so we fix the problem by moving the
+vector enable process out of the enable_irq loop.
+
+Fixes: 08a100689d4b ("net: hns3: re-organize vector handle")
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250430093052.2400464-3-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/hisilicon/hns3/hns3_enet.c | 82 +++++++++----------
+ 1 file changed, 39 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 9d27fad9f35fe..9bcd03e1994f6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -472,20 +472,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
+ writel(mask_en, tqp_vector->mask_addr);
+ }
+
+-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+ napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
+-
+- /* enable vector */
+- hns3_mask_vector_irq(tqp_vector, 1);
+ }
+
+-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+- /* disable vector */
+- hns3_mask_vector_irq(tqp_vector, 0);
+-
+ disable_irq(tqp_vector->vector_irq);
+ napi_disable(&tqp_vector->napi);
+ cancel_work_sync(&tqp_vector->rx_group.dim.work);
+@@ -706,11 +700,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+ return 0;
+ }
+
++static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
++{
++ struct hns3_nic_priv *priv = netdev_priv(netdev);
++ struct hnae3_handle *h = priv->ae_handle;
++ u16 i;
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_irq_enable(&priv->tqp_vector[i]);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_enable(h->kinfo.tqp[i]);
++}
++
++static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
++{
++ struct hns3_nic_priv *priv = netdev_priv(netdev);
++ struct hnae3_handle *h = priv->ae_handle;
++ u16 i;
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_disable(h->kinfo.tqp[i]);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_irq_disable(&priv->tqp_vector[i]);
++}
++
+ static int hns3_nic_net_up(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+- int i, j;
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+@@ -719,23 +744,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+- /* enable the vectors */
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_enable(&priv->tqp_vector[i]);
+-
+- /* enable rcb */
+- for (j = 0; j < h->kinfo.num_tqps; j++)
+- hns3_tqp_enable(h->kinfo.tqp[j]);
++ hns3_enable_irqs_and_tqps(netdev);
+
+ /* start the ae_dev */
+ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+- while (j--)
+- hns3_tqp_disable(h->kinfo.tqp[j]);
+-
+- for (j = i - 1; j >= 0; j--)
+- hns3_vector_disable(&priv->tqp_vector[j]);
++ hns3_disable_irqs_and_tqps(netdev);
+ }
+
+ return ret;
+@@ -822,17 +837,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
+ static void hns3_nic_net_down(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+- struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops;
+- int i;
+
+- /* disable vectors */
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_disable(&priv->tqp_vector[i]);
+-
+- /* disable rcb */
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_disable(h->kinfo.tqp[i]);
++ hns3_disable_irqs_and_tqps(netdev);
+
+ /* stop ae_dev */
+ ops = priv->ae_handle->ae_algo->ops;
+@@ -5869,8 +5876,6 @@ int hns3_set_channels(struct net_device *netdev,
+ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+- struct hnae3_handle *h = priv->ae_handle;
+- int i;
+
+ if (!if_running)
+ return;
+@@ -5881,11 +5886,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_disable(&priv->tqp_vector[i]);
+-
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_disable(h->kinfo.tqp[i]);
++ hns3_disable_irqs_and_tqps(ndev);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+@@ -5901,7 +5902,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+- int i;
+
+ if (!if_running)
+ return;
+@@ -5917,11 +5917,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_enable(&priv->tqp_vector[i]);
+-
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_enable(h->kinfo.tqp[i]);
++ hns3_enable_irqs_and_tqps(ndev);
+
+ netif_tx_wake_all_queues(ndev);
+
+--
+2.39.5
+
--- /dev/null
+From 9b23d2eae0690e8c2f5828001d5252e1c3719df5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:51 +0800
+Subject: net: hns3: fixed debugfs tm_qset size
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit e317aebeefcb3b0c71f2305af3c22871ca6b3833 ]
+
+The size of the tm_qset file of debugfs is limited to 64 KB,
+which is too small in the scenario with 1280 qsets.
+The size needs to be expanded to 1 MB.
+
+Fixes: 5e69ea7ee2a6 ("net: hns3: refactor the debugfs process")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Peiyang Wang <wangpeiyang1@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250430093052.2400464-4-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index d2603cfc122c8..430b3ec800a9e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ .name = "tm_qset",
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dentry = HNS3_DBG_DENTRY_TM,
+- .buf_len = HNS3_DBG_READ_LEN,
++ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+--
+2.39.5
+
--- /dev/null
+From e293a8dcb03ca485f376137a42576cd34c0d85c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:49 +0800
+Subject: net: hns3: store rx VLAN tag offload state for VF
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit ef2383d078edcbe3055032436b16cdf206f26de2 ]
+
+The VF driver fails to store the rx VLAN tag strip state when the
+user changes the rx VLAN tag offload state, and it defaults to
+enabling rx VLAN tag stripping when the VF device is re-initialized
+after a reset. So if the user disables rx VLAN tag offload and triggers
+a reset, the HW will still strip the VLAN tag from the packet and fill
+it into the RX BD, but the VF driver will ignore it because rx VLAN tag
+offload is disabled. This may cause the rx VLAN tag to be dropped.
+
+Fixes: b2641e2ad456 ("net: hns3: Add support of hardware rx-vlan-offload to HNS3 VF driver")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250430093052.2400464-2-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../hisilicon/hns3/hns3vf/hclgevf_main.c | 25 ++++++++++++++-----
+ .../hisilicon/hns3/hns3vf/hclgevf_main.h | 1 +
+ 2 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 06493853b2b49..b11d38a6093f8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1309,9 +1309,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ rtnl_unlock();
+ }
+
+-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
+ {
+- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1320,6 +1319,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ }
+
++static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++{
++ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++ int ret;
++
++ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
++ if (ret)
++ return ret;
++
++ hdev->rxvtag_strip_en = enable;
++ return 0;
++}
++
+ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
+ {
+ #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
+@@ -2198,12 +2210,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+ tc_valid, tc_size);
+ }
+
+-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
++static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
++ bool rxvtag_strip_en)
+ {
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
++ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+@@ -2872,7 +2885,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+ if (ret)
+ return ret;
+
+- ret = hclgevf_init_vlan_config(hdev);
++ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+@@ -2985,7 +2998,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ goto err_config;
+ }
+
+- ret = hclgevf_init_vlan_config(hdev);
++ ret = hclgevf_init_vlan_config(hdev, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 976414d00e67a..1f62ac062d040 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -253,6 +253,7 @@ struct hclgevf_dev {
+ int *vector_irq;
+
+ bool gro_en;
++ bool rxvtag_strip_en;
+
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
+--
+2.39.5
+
--- /dev/null
+From b1dc7fb61e01ca6726f94df852f80fa6e61922e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Apr 2025 17:32:09 +0200
+Subject: net: ipv6: fix UDPv6 GSO segmentation with NAT
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit b936a9b8d4a585ccb6d454921c36286bfe63e01d ]
+
+If any address or port is changed, update it in all packets and recalculate
+checksum.
+
+Fixes: 9fd1ff5d2ac7 ("udp: Support UDP fraglist GRO/GSO.")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250426153210.14044-1-nbd@nbd.name
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/udp_offload.c | 61 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 60 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 2f1f038b0dc1b..d415b4fb2f1f4 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -246,6 +246,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+ return segs;
+ }
+
++static void __udpv6_gso_segment_csum(struct sk_buff *seg,
++ struct in6_addr *oldip,
++ const struct in6_addr *newip,
++ __be16 *oldport, __be16 newport)
++{
++ struct udphdr *uh = udp_hdr(seg);
++
++ if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
++ return;
++
++ if (uh->check) {
++ inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
++ newip->s6_addr32, true);
++
++ inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
++ false);
++ if (!uh->check)
++ uh->check = CSUM_MANGLED_0;
++ }
++
++ *oldip = *newip;
++ *oldport = newport;
++}
++
++static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
++{
++ const struct ipv6hdr *iph;
++ const struct udphdr *uh;
++ struct ipv6hdr *iph2;
++ struct sk_buff *seg;
++ struct udphdr *uh2;
++
++ seg = segs;
++ uh = udp_hdr(seg);
++ iph = ipv6_hdr(seg);
++ uh2 = udp_hdr(seg->next);
++ iph2 = ipv6_hdr(seg->next);
++
++ if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
++ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
++ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
++ return segs;
++
++ while ((seg = seg->next)) {
++ uh2 = udp_hdr(seg);
++ iph2 = ipv6_hdr(seg);
++
++ __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
++ &uh2->source, uh->source);
++ __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
++ &uh2->dest, uh->dest);
++ }
++
++ return segs;
++}
++
+ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features,
+ bool is_ipv6)
+@@ -258,7 +314,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+
+ udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
+
+- return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
++ if (is_ipv6)
++ return __udpv6_gso_segment_list_csum(skb);
++ else
++ return __udpv4_gso_segment_list_csum(skb);
+ }
+
+ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+--
+2.39.5
+
--- /dev/null
+From d53f5b1f66710b08af1ac3c508db52fa518f70a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:55:27 +0530
+Subject: net: lan743x: Fix memleak issue when GSO enabled
+
+From: Thangaraj Samynathan <thangaraj.s@microchip.com>
+
+[ Upstream commit 2d52e2e38b85c8b7bc00dca55c2499f46f8c8198 ]
+
+Always map the `skb` to the LS descriptor. Previously, the skb was
+mapped to the EXT descriptor when the number of fragments was zero with
+GSO enabled. Mapping the skb to the EXT descriptor prevents it from
+being freed, leading to a memory leak.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Thangaraj Samynathan <thangaraj.s@microchip.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250429052527.10031-1-thangaraj.s@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 8 ++++++--
+ drivers/net/ethernet/microchip/lan743x_main.h | 1 +
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 0b2eaed110720..2e69ba0143b15 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1943,6 +1943,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_first;
+ }
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+@@ -2012,6 +2013,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ tx->frame_first = 0;
+ tx->frame_data0 = 0;
+ tx->frame_tail = 0;
++ tx->frame_last = 0;
+ return -ENOMEM;
+ }
+
+@@ -2052,16 +2054,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_tail;
+ }
+
+- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+- buffer_info = &tx->buffer_info[tx->frame_tail];
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
++ buffer_info = &tx->buffer_info[tx->frame_last];
+ buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
+ if (ignore_sync)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx->last_tail = tx->frame_tail;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 92a5660b88202..c0d209f36188a 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -974,6 +974,7 @@ struct lan743x_tx {
+ u32 frame_first;
+ u32 frame_data0;
+ u32 frame_tail;
++ u32 frame_last;
+
+ struct lan743x_tx_buffer_info *buffer_info;
+
+--
+2.39.5
+
--- /dev/null
+From fd889f30454f5c335dd73d665607f3d87d0be81f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 11:36:11 +0300
+Subject: net/mlx5: E-switch, Fix error handling for enabling roce
+
+From: Chris Mi <cmi@nvidia.com>
+
+[ Upstream commit 90538d23278a981e344d364e923162fce752afeb ]
+
+The cited commit assumes that enabling RoCE always succeeds, but that
+is not true. Add error handling for it.
+
+Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
+Signed-off-by: Chris Mi <cmi@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250423083611.324567-6-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 ++++-
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 9 +++++----
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.h | 4 ++--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 433cdd0a2cf34..5237abbdcda11 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3320,7 +3320,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ int err;
+
+ mutex_init(&esw->offloads.termtbl_mutex);
+- mlx5_rdma_enable_roce(esw->dev);
++ err = mlx5_rdma_enable_roce(esw->dev);
++ if (err)
++ goto err_roce;
+
+ err = mlx5_esw_host_number_init(esw);
+ if (err)
+@@ -3378,6 +3380,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ esw_offloads_metadata_uninit(esw);
+ err_metadata:
+ mlx5_rdma_disable_roce(esw->dev);
++err_roce:
+ mutex_destroy(&esw->offloads.termtbl_mutex);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index ab5afa6c5e0fd..e61a4fa46d772 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -152,17 +152,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
+ mlx5_nic_vport_disable_roce(dev);
+ }
+
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ {
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, roce))
+- return;
++ return 0;
+
+ err = mlx5_nic_vport_enable_roce(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+- return;
++ return err;
+ }
+
+ err = mlx5_rdma_add_roce_addr(dev);
+@@ -177,10 +177,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ goto del_roce_addr;
+ }
+
+- return;
++ return err;
+
+ del_roce_addr:
+ mlx5_rdma_del_roce_addr(dev);
+ disable_roce:
+ mlx5_nic_vport_disable_roce(dev);
++ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+index 750cff2a71a4b..3d9e76c3d42fb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+@@ -8,12 +8,12 @@
+
+ #ifdef CONFIG_MLX5_ESWITCH
+
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
+
+ #else /* CONFIG_MLX5_ESWITCH */
+
+-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
++static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
+ static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
+
+ #endif /* CONFIG_MLX5_ESWITCH */
+--
+2.39.5
+
--- /dev/null
+From 5590381e67d6bd61730bb26dd3fefd98ebdecbef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 11:36:08 +0300
+Subject: net/mlx5: E-Switch, Initialize MAC Address for Default GID
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit 5d1a04f347e6cbf5ffe74da409a5d71fbe8c5f19 ]
+
+Initialize the source MAC address when creating the default GID entry.
+Since this entry is used only for loopback traffic, it only needs to
+be a unicast address. A zeroed-out MAC address is sufficient for this
+purpose.
+Without this fix, random bits would be assigned as the source address.
+If these bits formed a multicast address, the firmware would return an
+error, preventing the user from switching to switchdev mode:
+
+Error: mlx5_core: Failed setting eswitch to offloads.
+kernel answers: Invalid argument
+
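+A small stand-alone illustration of why a zeroed source MAC is safe while an
+uninitialized one may not be; the multicast test mirrors the I/G bit check,
+and the firmware-side rejection is only described here, not modelled:
+
+  #include <stdio.h>
+
+  #define ETH_ALEN 6
+
+  static int is_multicast(const unsigned char *addr)
+  {
+          return addr[0] & 0x01;          /* I/G bit set => group address */
+  }
+
+  int main(void)
+  {
+          unsigned char zeroed[ETH_ALEN] = { 0 };  /* what the fix installs */
+          unsigned char unlucky[ETH_ALEN] = { 0x01, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+          printf("zeroed  multicast? %d\n", is_multicast(zeroed));   /* 0 */
+          printf("unlucky multicast? %d\n", is_multicast(unlucky));  /* 1 */
+          return 0;
+  }
+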
+Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250423083611.324567-3-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 540cf05f63739..ab5afa6c5e0fd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -130,8 +130,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
+
+ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+ {
++ u8 mac[ETH_ALEN] = {};
+ union ib_gid gid;
+- u8 mac[ETH_ALEN];
+
+ mlx5_rdma_make_default_gid(dev, &gid);
+ return mlx5_core_roce_gid_set(dev, 0,
+--
+2.39.5
+
--- /dev/null
+From fcc54bea4872cad7b5bd7c64a70789aa7841d6e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 01:37:33 +0300
+Subject: net: mscc: ocelot: delete PVID VLAN when readding it as non-PVID
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 5ec6d7d737a491256cd37e33910f7ac1978db591 ]
+
+The following set of commands:
+
+ip link add br0 type bridge vlan_filtering 1 # vlan_default_pvid 1 is implicit
+ip link set swp0 master br0
+bridge vlan add dev swp0 vid 1
+
+should result in the dropping of untagged and 802.1p-tagged traffic, but
+we see that it continues to be accepted. Whereas, had we deleted VID 1
+instead, the aforementioned dropping would have worked.
+
+This is because the ANA_PORT_DROP_CFG update logic doesn't run, because
+ocelot_vlan_add() only calls ocelot_port_set_pvid() if the new VLAN has
+the BRIDGE_VLAN_INFO_PVID flag.
+
+Similar to other drivers like mt7530_port_vlan_add() which handle this
+case correctly, we need to test whether the VLAN we're changing used to
+have the BRIDGE_VLAN_INFO_PVID flag, but lost it now. That amounts to a
+PVID deletion and should be treated as such.
+
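+A condensed sketch of the "re-added without the PVID flag" check, using
+simplified stand-ins for the driver's port and VLAN structures:
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct vlan { int vid; };
+  struct port { struct vlan *pvid_vlan; };
+
+  static int set_pvid(struct port *port, struct vlan *vlan)
+  {
+          port->pvid_vlan = vlan;  /* stand-in for the ANA_PORT_DROP_CFG update */
+          return 0;
+  }
+
+  /* Called when 'vlan' is (re)added on 'port'; 'pvid' is the new flag state. */
+  static int vlan_add(struct port *port, struct vlan *vlan, int pvid)
+  {
+          if (pvid)
+                  return set_pvid(port, vlan);      /* becomes the PVID */
+          if (port->pvid_vlan == vlan)
+                  return set_pvid(port, NULL);      /* was the PVID, lost the flag */
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct vlan v1 = { .vid = 1 };
+          struct port p = { .pvid_vlan = &v1 };
+
+          vlan_add(&p, &v1, 0);   /* re-add VID 1 without the PVID flag */
+          printf("pvid is now %s\n", p.pvid_vlan ? "set" : "cleared");
+          return 0;
+  }
+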
+Regarding blame attribution: this never worked properly since the
+introduction of bridge VLAN filtering in commit 7142529f1688 ("net:
+mscc: ocelot: add VLAN filtering"). However, there was a significant
+paradigm shift which aligned the ANA_PORT_DROP_CFG register with the
+PVID concept rather than with the native VLAN concept, and that change
+wasn't targeted for 'stable'. Realistically, that is as far as this fix
+needs to be propagated to.
+
+Fixes: be0576fed6d3 ("net: mscc: ocelot: move the logic to drop 802.1p traffic to the pvid deletion")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250424223734.3096202-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index ec644a201b8e5..203cb4978544a 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -793,6 +793,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
+ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
+ {
++ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ /* Ignore VID 0 added to our RX filter by the 8021q module, since
+@@ -812,6 +813,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ ocelot_bridge_vlan_find(ocelot, vid));
+ if (err)
+ return err;
++ } else if (ocelot_port->pvid_vlan &&
++ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
++ err = ocelot_port_set_pvid(ocelot, port, NULL);
++ if (err)
++ return err;
+ }
+
+ /* Untagged egress vlan clasification */
+--
+2.39.5
+
--- /dev/null
+From 3b816a86f924dde3f49d5e154f6d72bea6171143 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Aug 2024 03:07:07 +0300
+Subject: net: mscc: ocelot: treat 802.1ad tagged traffic as 802.1Q-untagged
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 36dd1141be70b5966906919714dc504a24c65ddf ]
+
+I was revisiting the topic of 802.1ad treatment in the Ocelot switch [0]
+and realized that not only is its basic VLAN classification pipeline
+improper for offloading vlan_protocol 802.1ad bridges, but also improper
+for offloading regular 802.1Q bridges already.
+
+Namely, 802.1ad-tagged traffic should be treated as VLAN-untagged by
+bridged ports, but this switch treats it as if it was 802.1Q-tagged with
+the same VID as in the 802.1ad header. This is markedly different to
+what the Linux bridge expects; see the "other_tpid()" function in
+tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh.
+
+An idea came to me that the VCAP IS1 TCAM is more powerful than I'm
+giving it credit for, and that it actually overwrites the classified VID
+before the VLAN Table lookup takes place. In other words, it can be
+used even to save a packet from being dropped on ingress due to VLAN
+membership.
+
+Add a sophisticated TCAM rule hardcoded into the driver to force the
+switch to behave like a Linux bridge with vlan_filtering 1 vlan_protocol
+802.1Q.
+
+Regarding the lifetime of the filter: eventually the bridge will
+disappear, and vlan_filtering on the port will be restored to 0 for
+standalone mode. Then the filter will be deleted.
+
+[0]: https://lore.kernel.org/netdev/20201009122947.nvhye4hvcha3tljh@skbuf/
+
+Fixes: 7142529f1688 ("net: mscc: ocelot: add VLAN filtering")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 5ec6d7d737a4 ("net: mscc: ocelot: delete PVID VLAN when readding it as non-PVID")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 188 ++++++++++++++++++++++--
+ drivers/net/ethernet/mscc/ocelot_vcap.c | 1 +
+ include/soc/mscc/ocelot_vcap.h | 2 +
+ 3 files changed, 180 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 71dbdac38020b..ec644a201b8e5 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -416,9 +416,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
+ return VLAN_N_VID - bridge_num - 1;
+ }
+
++/**
++ * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
++ *
++ * @ocelot: Switch private data structure
++ * @port: Index of ingress port
++ *
++ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
++ * component conformance" suggest that a C-VLAN component should only recognize
++ * and filter on C-Tags, and an S-VLAN component should only recognize and
++ * process based on S-Tags.
++ *
++ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
++ * components are largely represented by a bridge with vlan_protocol 802.1Q,
++ * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
++ *
++ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
++ * design is non-conformant, because the switch assigns each frame to a VLAN
++ * based on an entirely different question, as detailed in figure "Basic VLAN
++ * Classification Flow" from its manual and reproduced below.
++ *
++ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
++ * if VLAN_AWARE_ENA[port] and frame has outer tag then:
++ * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
++ * TAG_TYPE = (Frame.InnerTPID <> 0x8100)
++ * Set PCP, DEI, VID to values from inner VLAN header
++ * else:
++ * TAG_TYPE = (Frame.OuterTPID <> 0x8100)
++ * Set PCP, DEI, VID to values from outer VLAN header
++ * if VID == 0 then:
++ * VID = VLAN_CFG.VLAN_VID
++ *
++ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
++ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
++ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no
++ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
++ *
++ * In the VLAN Table, the TAG_TYPE information is not accessible - just the
++ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
++ * C-VLAN X, and S-VLAN X.
++ *
++ * Whereas the Linux bridge behavior is to only filter on frames with a TPID
++ * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
++ *
++ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
++ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
++ * should be treated as 802.1Q-untagged, and classified to the PVID of that
++ * bridge port. Not to VID=3, and not to VID=5.
++ *
++ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in
++ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
++ * and it can modify the classified VID in the action. Thus, for each port
++ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
++ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
++ * PVID of the port. This effectively makes it appear to the outside world as
++ * if those packets were processed as VLAN-untagged.
++ *
++ * The rule needs to be updated each time the bridge PVID changes, and needs
++ * to be deleted if the bridge PVID is deleted, or if the port becomes
++ * VLAN-unaware.
++ */
++static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
++{
++ unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
++ struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
++ struct ocelot_port *ocelot_port = ocelot->ports[port];
++ const struct ocelot_bridge_vlan *pvid_vlan;
++ struct ocelot_vcap_filter *filter;
++ int err, val, pcp, dei;
++ bool vid_replace_ena;
++ u16 vid;
++
++ pvid_vlan = ocelot_port->pvid_vlan;
++ vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
++
++ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
++ false);
++ if (!vid_replace_ena) {
++ /* If the reclassification filter doesn't need to exist, delete
++ * it if it was previously installed, and exit doing nothing
++ * otherwise.
++ */
++ if (filter)
++ return ocelot_vcap_filter_del(ocelot, filter);
++
++ return 0;
++ }
++
++ /* The reclassification rule must apply. See if it already exists
++ * or if it must be created.
++ */
++
++ /* Treating as VLAN-untagged means using as classified VID equal to
++ * the bridge PVID, and PCP/DEI set to the port default QoS values.
++ */
++ vid = pvid_vlan->vid;
++ val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
++ pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
++ dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
++
++ if (filter) {
++ bool changed = false;
++
++ /* Filter exists, just update it */
++ if (filter->action.vid != vid) {
++ filter->action.vid = vid;
++ changed = true;
++ }
++ if (filter->action.pcp != pcp) {
++ filter->action.pcp = pcp;
++ changed = true;
++ }
++ if (filter->action.dei != dei) {
++ filter->action.dei = dei;
++ changed = true;
++ }
++
++ if (!changed)
++ return 0;
++
++ return ocelot_vcap_filter_replace(ocelot, filter);
++ }
++
++ /* Filter doesn't exist, create it */
++ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
++ if (!filter)
++ return -ENOMEM;
++
++ filter->key_type = OCELOT_VCAP_KEY_ANY;
++ filter->ingress_port_mask = BIT(port);
++ filter->vlan.tpid = OCELOT_VCAP_BIT_1;
++ filter->prio = 1;
++ filter->id.cookie = cookie;
++ filter->id.tc_offload = false;
++ filter->block_id = VCAP_IS1;
++ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
++ filter->lookup = 0;
++ filter->action.vid_replace_ena = true;
++ filter->action.pcp_dei_ena = true;
++ filter->action.vid = vid;
++ filter->action.pcp = pcp;
++ filter->action.dei = dei;
++
++ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
++ if (err)
++ kfree(filter);
++
++ return err;
++}
++
+ /* Default vlan to clasify for untagged frames (may be zero) */
+-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+- const struct ocelot_bridge_vlan *pvid_vlan)
++static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
++ const struct ocelot_bridge_vlan *pvid_vlan)
+ {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
+@@ -438,15 +587,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ * happens automatically), but also 802.1p traffic which gets
+ * classified to VLAN 0, but that is always in our RX filter, so it
+ * would get accepted were it not for this setting.
++ *
++ * Also, we only support the bridge 802.1Q VLAN protocol, so
++ * 802.1ad-tagged frames (carrying S-Tags) should be considered
++ * 802.1Q-untagged, and also dropped.
+ */
+ if (!pvid_vlan && ocelot_port->vlan_aware)
+ val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
++ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
++ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
++ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
++ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
++
++ return ocelot_update_vlan_reclassify_rule(ocelot, port);
+ }
+
+ static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
+@@ -594,7 +751,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port);
+
+- ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
++ err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
++ if (err)
++ return err;
++
+ ocelot_port_manage_port_tag(ocelot, port);
+
+ return 0;
+@@ -647,9 +807,12 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ return err;
+
+ /* Default ingress vlan classification */
+- if (pvid)
+- ocelot_port_set_pvid(ocelot, port,
+- ocelot_bridge_vlan_find(ocelot, vid));
++ if (pvid) {
++ err = ocelot_port_set_pvid(ocelot, port,
++ ocelot_bridge_vlan_find(ocelot, vid));
++ if (err)
++ return err;
++ }
+
+ /* Untagged egress vlan clasification */
+ ocelot_port_manage_port_tag(ocelot, port);
+@@ -675,8 +838,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
+ return err;
+
+ /* Ingress */
+- if (del_pvid)
+- ocelot_port_set_pvid(ocelot, port, NULL);
++ if (del_pvid) {
++ err = ocelot_port_set_pvid(ocelot, port, NULL);
++ if (err)
++ return err;
++ }
+
+ /* Egress */
+ ocelot_port_manage_port_tag(ocelot, port);
+@@ -2502,7 +2668,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
+ ANA_PORT_QOS_CFG,
+ port);
+
+- return 0;
++ return ocelot_update_vlan_reclassify_rule(ocelot, port);
+ }
+ EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
+
+diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
+index 73cdec5ca6a34..5734b86aed5b5 100644
+--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
++++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
+@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
++ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
+diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
+index c601a4598b0da..eb19668a06db1 100644
+--- a/include/soc/mscc/ocelot_vcap.h
++++ b/include/soc/mscc/ocelot_vcap.h
+@@ -13,6 +13,7 @@
+ */
+ #define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
+ #define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
++#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port) ((ocelot)->num_phys_ports + (port))
+ #define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
+ #define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
+ #define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
+@@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan {
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
++ enum ocelot_vcap_bit tpid;
+ };
+
+ struct ocelot_vcap_key_etype {
+--
+2.39.5
+
--- /dev/null
+From 06fad7270e66036d764fe7db9aebb6265ebcc626 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 15:30:42 +0200
+Subject: net: vertexcom: mse102x: Add range check for CMD_RTS
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit d4dda902dac194e3231a1ed0f76c6c3b6340ba8a ]
+
+Since there is no protection in the SPI protocol against electrical
+interference, the driver shouldn't blindly trust the length payload
+of CMD_RTS. So introduce a bounds check for incoming frames.
+
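+A stand-alone sketch of the added range check; the constants correspond to
+the kernel's ETH_ZLEN (60) and VLAN_ETH_FRAME_LEN (1522):
+
+  #include <stdio.h>
+
+  #define ETH_ZLEN            60
+  #define VLAN_ETH_FRAME_LEN  1522
+
+  static int rx_len_valid(unsigned int rxlen)
+  {
+          /* Reject lengths a corrupted CMD_RTS could otherwise report. */
+          return rxlen >= ETH_ZLEN && rxlen <= VLAN_ETH_FRAME_LEN;
+  }
+
+  int main(void)
+  {
+          printf("%d %d %d\n",
+                 rx_len_valid(0), rx_len_valid(64), rx_len_valid(4096));
+          return 0;
+  }
+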
+Fixes: 2f207cbf0dd4 ("net: vertexcom: Add MSE102x SPI support")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250430133043.7722-4-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/vertexcom/mse102x.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index 55f34d9d111c4..2b1aac72601d0 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -6,6 +6,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -337,8 +338,9 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ }
+
+ rxlen = cmd_resp & LEN_MASK;
+- if (!rxlen) {
+- net_dbg_ratelimited("%s: No frame length defined\n", __func__);
++ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
++ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
++ rxlen);
+ mse->stats.invalid_len++;
+ return;
+ }
+--
+2.39.5
+
--- /dev/null
+From ee7ef8b5b00bd813444aa050980719a6f714d67e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 15:30:41 +0200
+Subject: net: vertexcom: mse102x: Fix LEN_MASK
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 74987089ec678b4018dba0a609e9f4bf6ef7f4ad ]
+
+The LEN_MASK for CMD_RTS doesn't cover the whole parameter mask.
+Bit 11 is reserved, so adjust LEN_MASK accordingly.
+
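+For reference, with CMD_SHIFT assumed to be 12 this narrows LEN_MASK from
+GENMASK(11, 0) to GENMASK(10, 0), keeping the reserved bit 11 out of the
+length field:
+
+  #include <stdio.h>
+
+  /* User-space stand-in for the kernel's GENMASK(), fine for small masks. */
+  #define GENMASK(h, l)  (((1u << ((h) - (l) + 1)) - 1) << (l))
+  #define CMD_SHIFT      12
+
+  int main(void)
+  {
+          printf("old LEN_MASK = 0x%04x\n", GENMASK(CMD_SHIFT - 1, 0)); /* 0x0fff */
+          printf("new LEN_MASK = 0x%04x\n", GENMASK(CMD_SHIFT - 2, 0)); /* 0x07ff */
+          return 0;
+  }
+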
+Fixes: 2f207cbf0dd4 ("net: vertexcom: Add MSE102x SPI support")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250430133043.7722-3-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/vertexcom/mse102x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index 45f4d2cb5b31a..55f34d9d111c4 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -33,7 +33,7 @@
+ #define CMD_CTR (0x2 << CMD_SHIFT)
+
+ #define CMD_MASK GENMASK(15, CMD_SHIFT)
+-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
++#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
+
+ #define DET_CMD_LEN 4
+ #define DET_SOF_LEN 2
+--
+2.39.5
+
--- /dev/null
+From badaae404f8629dc4e6a72dd13cd169561357b4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 15:30:40 +0200
+Subject: net: vertexcom: mse102x: Fix possible stuck of SPI interrupt
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 55f362885951b2d00fd7fbb02ef0227deea572c2 ]
+
+The MSE102x doesn't provide any SPI commands for interrupt handling.
+So in case the interrupt fires before the driver requests the IRQ,
+the interrupt will never fire again. In order to fix this, always poll
+for pending packets after opening the interface.
+
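+A tiny user-space model of the race: the event fires before a handler exists,
+so the only way to see it is to poll once after the handler is in place
+(names and flow are simplified, not the driver's):
+
+  #include <stdio.h>
+
+  static int rx_pending = 1;  /* the chip asserted its line before open() ran */
+
+  static void poll_rx(void)
+  {
+          if (rx_pending) {
+                  rx_pending = 0;
+                  printf("stale frame drained, interrupt flow re-armed\n");
+          }
+  }
+
+  static int net_open(void)
+  {
+          /* request_irq() would happen here; an edge that fired earlier was
+           * lost for good, so poll once right after opening to catch it. */
+          poll_rx();
+          return 0;
+  }
+
+  int main(void) { return net_open(); }
+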
+Fixes: 2f207cbf0dd4 ("net: vertexcom: Add MSE102x SPI support")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250430133043.7722-2-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/vertexcom/mse102x.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index 8f67c39f479ee..45f4d2cb5b31a 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -509,6 +509,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
+ static int mse102x_net_open(struct net_device *ndev)
+ {
+ struct mse102x_net *mse = netdev_priv(ndev);
++ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ int ret;
+
+ ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
+@@ -524,6 +525,13 @@ static int mse102x_net_open(struct net_device *ndev)
+
+ netif_carrier_on(ndev);
+
++ /* The SPI interrupt can stuck in case of pending packet(s).
++ * So poll for possible packet(s) to re-arm the interrupt.
++ */
++ mutex_lock(&mses->lock);
++ mse102x_rx_pkt_spi(mse);
++ mutex_unlock(&mses->lock);
++
+ netif_dbg(mse, ifup, ndev, "network device up\n");
+
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From 263c828b01ead6407c366f2e72d148aaac7d05d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 15:30:43 +0200
+Subject: net: vertexcom: mse102x: Fix RX error handling
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit ee512922ddd7d64afe2b28830a88f19063217649 ]
+
+In case the CMD_RTS got corrupted by interference, the MSE102x
+doesn't allow a retransmission of the command. Instead the Ethernet
+frame must be shifted out of the SPI FIFO. Since the actual length is
+unknown, assume the maximum possible value.
+
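+Sketched control flow (a user-space pseudo-driver) for consuming a worst-case
+frame when the CMD_RTS header cannot be trusted; the helpers are stubs:
+
+  #include <stdio.h>
+
+  #define VLAN_ETH_FRAME_LEN 1522
+
+  static void read_frame(unsigned int len)    { printf("clocked out %u bytes\n", len); }
+  static void deliver_frame(unsigned int len) { printf("delivered %u bytes\n", len); }
+
+  static void rx_pkt(unsigned int rxlen, int header_valid)
+  {
+          int drop = !header_valid;
+
+          /* The chip cannot resend CMD_RTS, so the frame still has to be
+           * shifted out of the FIFO even with an untrusted length. */
+          if (drop)
+                  rxlen = VLAN_ETH_FRAME_LEN;
+
+          read_frame(rxlen);
+          if (!drop)
+                  deliver_frame(rxlen);       /* only good frames reach the stack */
+  }
+
+  int main(void) { rx_pkt(0, 0); rx_pkt(64, 1); return 0; }
+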
+Fixes: 2f207cbf0dd4 ("net: vertexcom: Add MSE102x SPI support")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250430133043.7722-5-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/vertexcom/mse102x.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index 2b1aac72601d0..060a566bc6aae 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -263,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
+ }
+
+ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+- unsigned int frame_len)
++ unsigned int frame_len, bool drop)
+ {
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+@@ -281,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
++ } else if (drop) {
++ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
++ ret = -EINVAL;
+ } else if (*sof != cpu_to_be16(DET_SOF)) {
+ netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
+ __func__, *sof);
+@@ -308,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ struct sk_buff *skb;
+ unsigned int rxalign;
+ unsigned int rxlen;
++ bool drop = false;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ u8 *rxpkt;
+@@ -330,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+- return;
++ drop = true;
++ goto drop;
+ }
+
+ net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
+@@ -342,9 +347,16 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+ rxlen);
+ mse->stats.invalid_len++;
+- return;
++ drop = true;
+ }
+
++ /* In case of a invalid CMD_RTS, the frame must be consumed anyway.
++ * So assume the maximum possible frame length.
++ */
++drop:
++ if (drop)
++ rxlen = VLAN_ETH_FRAME_LEN;
++
+ rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ if (!skb)
+@@ -355,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ * They are copied, but ignored.
+ */
+ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
+- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
++ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
+ mse->ndev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+--
+2.39.5
+
--- /dev/null
+From ea18d753676c5ed722389d5741e31ec144eb3549 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:05 -0300
+Subject: net_sched: drr: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f99a3fbf023e20b626be4b0f042463d598050c9a ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of drr, there won't be a UAF, but the code will add the same
+classifier to the list twice, which will cause memory corruption.
+
+In addition to checking for qlen being zero, this patch checks whether the
+class was already added to the active_list (cl_is_active) before adding
+to the list to cover for the reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
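+A compact user-space model of the guard: membership in the active list, not
+the child queue length, decides whether the class is linked again (the list
+helpers are re-implemented here; the same pattern recurs in the ets, hfsc and
+qfq fixes in this series):
+
+  #include <stdio.h>
+
+  struct list_head { struct list_head *next, *prev; };
+
+  static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
+  static int list_empty(const struct list_head *h) { return h->next == h; }
+  static void list_add_tail(struct list_head *n, struct list_head *h)
+  {
+          n->prev = h->prev; n->next = h;
+          h->prev->next = n; h->prev = n;
+  }
+
+  struct drr_class { struct list_head alist; int deficit, quantum; };
+
+  static int cl_is_active(struct drr_class *cl) { return !list_empty(&cl->alist); }
+
+  static void enqueue(struct list_head *active, struct drr_class *cl)
+  {
+          /* A reentrant call (netem duplicating into the same class) may have
+           * linked the class already, so test the list instead of qlen. */
+          if (!cl_is_active(cl)) {
+                  list_add_tail(&cl->alist, active);
+                  cl->deficit = cl->quantum;
+          }
+  }
+
+  int main(void)
+  {
+          struct list_head active;
+          struct drr_class cl = { .quantum = 1500 };
+
+          INIT_LIST_HEAD(&active);
+          INIT_LIST_HEAD(&cl.alist);
+          enqueue(&active, &cl);
+          enqueue(&active, &cl);  /* the reentrant-style call is now a no-op */
+          printf("linked once: %s\n", active.next == &cl.alist &&
+                                      active.prev == &cl.alist ? "yes" : "no");
+          return 0;
+  }
+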
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-2-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_drr.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index e35a4e90f4e6c..b35d6086a972f 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -36,6 +36,11 @@ struct drr_sched {
+ struct Qdisc_class_hash clhash;
+ };
+
++static bool cl_is_active(struct drr_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct drr_sched *q = qdisc_priv(sch);
+@@ -335,7 +340,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct drr_sched *q = qdisc_priv(sch);
+ struct drr_class *cl;
+ int err = 0;
+- bool first;
+
+ cl = drr_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -345,7 +349,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+@@ -355,7 +358,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- if (first) {
++ if (!cl_is_active(cl)) {
+ list_add_tail(&cl->alist, &q->active);
+ cl->deficit = cl->quantum;
+ }
+--
+2.39.5
+
--- /dev/null
+From a954d3a9f2ad8d79593f1f0d8c7824928be74fb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:07 -0300
+Subject: net_sched: ets: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 1a6d0c00fa07972384b0c308c72db091d49988b6 ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of ets, there won't be a UAF, but the code will add the same
+classifier to the list twice, which will cause memory corruption.
+
+In addition to checking for qlen being zero, this patch checks whether
+the class was already added to the active_list (cl_is_active) before
+doing the addition to cater for the reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-4-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_ets.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index 9fd70462b41d5..18269f86d21ef 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
+ [TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
+ };
+
++static bool cl_is_active(struct ets_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
+ unsigned int *quantum,
+ struct netlink_ext_ack *extack)
+@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct ets_sched *q = qdisc_priv(sch);
+ struct ets_class *cl;
+ int err = 0;
+- bool first;
+
+ cl = ets_classify(skb, sch, &err);
+ if (!cl) {
+@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- if (first && !ets_class_is_strict(q, cl)) {
++ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
+ list_add_tail(&cl->alist, &q->active);
+ cl->deficit = cl->quantum;
+ }
+--
+2.39.5
+
--- /dev/null
+From 2df5d4c3be623477655180aa893eec18e0476512 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:06 -0300
+Subject: net_sched: hfsc: Fix a UAF vulnerability in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 141d34391abbb315d68556b7c67ad97885407547 ]
+
+As described in Gerrard's report [1], we have a UAF case when an hfsc class
+has a netem child qdisc. The crux of the issue is that hfsc is assuming
+that checking for cl->qdisc->q.qlen == 0 guarantees that it hasn't inserted
+the class in the vttree or eltree (which is not true for the netem
+duplicate case).
+
+This patch checks the n_active class variable to make sure that the code
+won't insert the class in the vttree or eltree twice, catering for the
+reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Reported-by: Gerrard Tai <gerrard.tai@starlabs.sg>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-3-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_hfsc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index dbed490aafd3d..4b9f323845b91 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1564,7 +1564,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ return err;
+ }
+
+- if (first) {
++ if (first && !cl->cl_nactive) {
+ if (cl->cl_flags & HFSC_RSC)
+ init_ed(cl, len);
+ if (cl->cl_flags & HFSC_FSC)
+--
+2.39.5
+
--- /dev/null
+From d8e8d72805aa5a8e8e890b5f619c10d8d2c1d540 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:08 -0300
+Subject: net_sched: qfq: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f139f37dcdf34b67f5bf92bc8e0f7f6b3ac63aa4 ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of qfq, there won't be a UAF, but the code will add the same
+classifier to the list twice, which will cause memory corruption.
+
+This patch checks whether the class was already added to the agg->active
+list (cl_is_active) before doing the addition to cater for the reentrant
+case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-5-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_qfq.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index ed01634af82c2..e6743e17408b2 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -204,6 +204,11 @@ struct qfq_sched {
+ */
+ enum update_reason {enqueue, requeue};
+
++static bool cl_is_active(struct qfq_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct qfq_sched *q = qdisc_priv(sch);
+@@ -1216,7 +1221,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct qfq_class *cl;
+ struct qfq_aggregate *agg;
+ int err = 0;
+- bool first;
+
+ cl = qfq_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -1238,7 +1242,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ pr_debug("qfq_enqueue: enqueue failed %d\n", err);
+@@ -1254,8 +1257,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ++sch->q.qlen;
+
+ agg = cl->agg;
+- /* if the queue was not empty, then done here */
+- if (!first) {
++ /* if the class is active, then done here */
++ if (cl_is_active(cl)) {
+ if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+ list_first_entry(&agg->active, struct qfq_class, alist)
+ == cl && cl->deficit < len)
+--
+2.39.5
+
--- /dev/null
+From 8d5b4ca2155f9db7f13ec2422e3eaf6c45d33869 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:42:01 -0600
+Subject: nvme-tcp: fix premature queue removal and I/O failover
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Liang <mliang@purestorage.com>
+
+[ Upstream commit 77e40bbce93059658aee02786a32c5c98a240a8a ]
+
+This patch addresses a data corruption issue observed in nvme-tcp during
+testing.
+
+In an NVMe native multipath setup, when an I/O timeout occurs, all
+inflight I/Os are canceled almost immediately after the kernel socket is
+shut down. These canceled I/Os are reported as host path errors,
+triggering a failover that succeeds on a different path.
+
+However, at this point, the original I/O may still be outstanding in the
+host's network transmission path (e.g., the NIC’s TX queue). From the
+user-space app's perspective, the buffer associated with the I/O is
+considered completed since it is acked on the different path and may
+be reused for new I/O requests.
+
+Because nvme-tcp enables zero-copy by default in the transmission path,
+this can lead to corrupted data being sent to the original target,
+ultimately causing data corruption.
+
+We can reproduce this data corruption by injecting delay on one path and
+triggering an I/O timeout.
+
+To prevent this issue, this change ensures that all inflight
+transmissions are fully completed from the host's perspective before
+returning from queue stop. To handle concurrent I/O timeout from multiple
+namespaces under the same controller, always wait in queue stop
+regardless of queue's state.
+
+This aligns with the behavior of queue stopping in other NVMe fabric
+transports.
+
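+A user-space sketch of the bounded wait this adds (polling every 2 ms for up
+to ~100 ms); the socket's in-flight byte count is modelled by a plain counter:
+
+  #include <stdio.h>
+  #include <unistd.h>
+
+  static int inflight_tx_bytes = 4096;    /* stand-in for sk_wmem_alloc */
+
+  static void wait_queue_drained(void)
+  {
+          int timeout = 100;               /* milliseconds */
+
+          while (timeout > 0) {
+                  if (!inflight_tx_bytes)
+                          return;          /* every queued byte has left the host */
+                  usleep(2000);            /* msleep(2) analogue */
+                  timeout -= 2;
+                  inflight_tx_bytes -= 1024;  /* pretend the NIC keeps sending */
+          }
+          fprintf(stderr, "timed out draining the socket send buffer\n");
+  }
+
+  int main(void) { wait_queue_drained(); return 0; }
+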
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Michael Liang <mliang@purestorage.com>
+Reviewed-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Reviewed-by: Randy Jennings <randyj@purestorage.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 6dd19322c7f8e..4e1b91c0416b9 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1686,7 +1686,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ cancel_work_sync(&queue->io_work);
+ }
+
+-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
+ {
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+@@ -1700,6 +1700,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ mutex_unlock(&queue->queue_lock);
+ }
+
++static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
++ int timeout = 100;
++
++ while (timeout > 0) {
++ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
++ !sk_wmem_alloc_get(queue->sock->sk))
++ return;
++ msleep(2);
++ timeout -= 2;
++ }
++ dev_warn(nctrl->device,
++ "qid %d: timeout draining sock wmem allocation expired\n",
++ qid);
++}
++
++static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ nvme_tcp_stop_queue_nowait(nctrl, qid);
++ nvme_tcp_wait_queue(nctrl, qid);
++}
++
++
+ static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+@@ -1766,7 +1791,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+- nvme_tcp_stop_queue(ctrl, i);
++ nvme_tcp_stop_queue_nowait(ctrl, i);
++ for (i = 1; i < ctrl->queue_count; i++)
++ nvme_tcp_wait_queue(ctrl, i);
+ }
+
+ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+--
+2.39.5
+
--- /dev/null
+From 2a5b26ba73b0c5bed73cc85377bb669c8c531b2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 20:57:54 +0200
+Subject: pinctrl: core: Add a convenient define PINCTRL_GROUP_DESC()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 383da0c7f25428de5ad09dc2cfed7cd43c4fb6ba ]
+
+Add PINCTRL_GROUP_DESC() macro for inline use.
+
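+The macro expands to a compound literal, so a whole descriptor can be built in
+one assignment; a condensed stand-alone illustration (simplified struct, no
+radix-tree insertion):
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct group_desc { const char *name; int *pins; int num_pins; void *data; };
+
+  #define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data)   \
+  (struct group_desc) {                                         \
+          .name = _name,                                        \
+          .pins = _pins,                                        \
+          .num_pins = _num_pins,                                \
+          .data = _data,                                        \
+  }
+
+  int main(void)
+  {
+          int pins[] = { 4, 5 };
+          struct group_desc group;
+
+          /* Replaces four separate member assignments with one statement. */
+          group = PINCTRL_GROUP_DESC("uart0_grp", pins, 2, NULL);
+          printf("%s has %d pins\n", group.name, group.num_pins);
+          return 0;
+  }
+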
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20231211190321.307330-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: e64c0ff0d5d8 ("pinctrl: imx: Return NULL if no group is matched and found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/core.c | 5 +----
+ drivers/pinctrl/core.h | 9 +++++++++
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 3b6051d632181..4694fa15176e4 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -651,10 +651,7 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
+ if (!group)
+ return -ENOMEM;
+
+- group->name = name;
+- group->pins = pins;
+- group->num_pins = num_pins;
+- group->data = data;
++ *group = PINCTRL_GROUP_DESC(name, pins, num_pins, data);
+
+ radix_tree_insert(&pctldev->pin_group_tree, selector, group);
+
+diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
+index 840103c40c14a..dd6057eeb403b 100644
+--- a/drivers/pinctrl/core.h
++++ b/drivers/pinctrl/core.h
+@@ -198,6 +198,15 @@ struct group_desc {
+ void *data;
+ };
+
++/* Convenience macro to define a generic pin group descriptor */
++#define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data) \
++(struct group_desc) { \
++ .name = _name, \
++ .pins = _pins, \
++ .num_pins = _num_pins, \
++ .data = _data, \
++}
++
+ int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev);
+
+ const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+--
+2.39.5
+
--- /dev/null
+From e3cd7ed17e127b8344191cd0130446efa3272c0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 20:57:57 +0200
+Subject: pinctrl: core: Embed struct pingroup into struct group_desc
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 85174ad7c30fca29a354221e01fad82c0d00d644 ]
+
+struct group_desc is a particular version of the struct pingroup
+with associated opaque data. Start switching pin control core and
+drivers to use it explicitly.
+
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20231211190321.307330-5-andriy.shevchenko@linux.intel.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: e64c0ff0d5d8 ("pinctrl: imx: Return NULL if no group is matched and found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/core.c | 15 ++++++++++++---
+ drivers/pinctrl/core.h | 5 +++++
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 4694fa15176e4..53ec0d1f05e7e 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -550,7 +550,10 @@ const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+ if (!group)
+ return NULL;
+
+- return group->name;
++ if (group->name)
++ return group->name;
++
++ return group->grp.name;
+ }
+ EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_name);
+
+@@ -576,8 +579,14 @@ int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
+ return -EINVAL;
+ }
+
+- *pins = group->pins;
+- *num_pins = group->num_pins;
++ if (group->pins) {
++ *pins = group->pins;
++ *num_pins = group->num_pins;
++ return 0;
++ }
++
++ *pins = group->grp.pins;
++ *num_pins = group->grp.npins;
+
+ return 0;
+ }
+diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
+index dd6057eeb403b..79d545d53a769 100644
+--- a/drivers/pinctrl/core.h
++++ b/drivers/pinctrl/core.h
+@@ -184,14 +184,18 @@ struct pinctrl_maps {
+
+ #ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+
++#include <linux/pinctrl/pinctrl.h>
++
+ /**
+ * struct group_desc - generic pin group descriptor
++ * @grp: generic data of the pin group (name and pins)
+ * @name: name of the pin group
+ * @pins: array of pins that belong to the group
+ * @num_pins: number of pins in the group
+ * @data: pin controller driver specific data
+ */
+ struct group_desc {
++ struct pingroup grp;
+ const char *name;
+ int *pins;
+ int num_pins;
+@@ -201,6 +205,7 @@ struct group_desc {
+ /* Convenience macro to define a generic pin group descriptor */
+ #define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data) \
+ (struct group_desc) { \
++ .grp = PINCTRL_PINGROUP(_name, _pins, _num_pins), \
+ .name = _name, \
+ .pins = _pins, \
+ .num_pins = _num_pins, \
+--
+2.39.5
+
--- /dev/null
+From 626353c9eb8092eb85e7b8a4433885f4bf59564f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 20:58:00 +0200
+Subject: pinctrl: imx: Convert to use grp member
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 390270f25b414fd54b307cd68851b36b52f952b5 ]
+
+Convert drivers to use grp member embedded in struct group_desc,
+because other members will be removed to avoid duplication and
+desynchronisation of the generic pin group description.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20231211190321.307330-8-andriy.shevchenko@linux.intel.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: e64c0ff0d5d8 ("pinctrl: imx: Return NULL if no group is matched and found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/freescale/pinctrl-imx.c | 31 +++++++++++--------------
+ 1 file changed, 14 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index ba02f4e9ca748..c163ed1badd2e 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -40,7 +40,7 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
+
+ for (i = 0; i < pctldev->num_groups; i++) {
+ grp = pinctrl_generic_get_group(pctldev, i);
+- if (grp && !strcmp(grp->name, name))
++ if (grp && !strcmp(grp->grp.name, name))
+ break;
+ }
+
+@@ -77,9 +77,9 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
+ }
+
+ if (info->flags & IMX_USE_SCU) {
+- map_num += grp->num_pins;
++ map_num += grp->grp.npins;
+ } else {
+- for (i = 0; i < grp->num_pins; i++) {
++ for (i = 0; i < grp->grp.npins; i++) {
+ pin = &((struct imx_pin *)(grp->data))[i];
+ if (!(pin->conf.mmio.config & IMX_NO_PAD_CTL))
+ map_num++;
+@@ -107,7 +107,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
+
+ /* create config map */
+ new_map++;
+- for (i = j = 0; i < grp->num_pins; i++) {
++ for (i = j = 0; i < grp->grp.npins; i++) {
+ pin = &((struct imx_pin *)(grp->data))[i];
+
+ /*
+@@ -261,10 +261,10 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ if (!func)
+ return -EINVAL;
+
+- npins = grp->num_pins;
++ npins = grp->grp.npins;
+
+ dev_dbg(ipctl->dev, "enable function %s group %s\n",
+- func->name, grp->name);
++ func->name, grp->grp.name);
+
+ for (i = 0; i < npins; i++) {
+ /*
+@@ -477,7 +477,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ if (!grp)
+ return;
+
+- for (i = 0; i < grp->num_pins; i++) {
++ for (i = 0; i < grp->grp.npins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
+ name = pin_get_name(pctldev, pin->pin);
+@@ -590,7 +590,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
+ pin_size -= 4;
+
+ /* Initialise group */
+- grp->name = np->name;
++ grp->grp.name = np->name;
+
+ /*
+ * the binding format is fsl,pins = <PIN_FUNC_ID CONFIG ...>,
+@@ -618,19 +618,17 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
+ return -EINVAL;
+ }
+
+- grp->num_pins = size / pin_size;
+- grp->data = devm_kcalloc(ipctl->dev,
+- grp->num_pins, sizeof(struct imx_pin),
+- GFP_KERNEL);
++ grp->grp.npins = size / pin_size;
++ grp->data = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pin), GFP_KERNEL);
+ if (!grp->data)
+ return -ENOMEM;
+
+- pins = devm_kcalloc(ipctl->dev, grp->num_pins, sizeof(*pins), GFP_KERNEL);
++ pins = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+- grp->pins = pins;
++ grp->grp.pins = pins;
+
+- for (i = 0; i < grp->num_pins; i++) {
++ for (i = 0; i < grp->grp.npins; i++) {
+ pin = &((struct imx_pin *)(grp->data))[i];
+ if (info->flags & IMX_USE_SCU)
+ info->imx_pinctrl_parse_pin(ipctl, &pins[i], pin, &list);
+@@ -677,8 +675,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
+
+ i = 0;
+ for_each_child_of_node(np, child) {
+- grp = devm_kzalloc(ipctl->dev, sizeof(struct group_desc),
+- GFP_KERNEL);
++ grp = devm_kzalloc(ipctl->dev, sizeof(*grp), GFP_KERNEL);
+ if (!grp) {
+ of_node_put(child);
+ return -ENOMEM;
+--
+2.39.5
+
--- /dev/null
+From 0179fe18fc106ed8f11a4aeb36a2e5893cd5d3d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Mar 2025 11:16:00 +0800
+Subject: pinctrl: imx: Return NULL if no group is matched and found
+
+From: Hui Wang <hui.wang@canonical.com>
+
+[ Upstream commit e64c0ff0d5d85791fbcd126ee558100a06a24a97 ]
+
+Currently, if no group is matched and found, this function will return
+the last grp to the caller. This is not expected; it is supposed to
+return NULL in this case.
+
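+The general shape of the fix, reduced to a stand-alone example: return from
+inside the loop on a match, and NULL once the loop finishes without one:
+
+  #include <stdio.h>
+  #include <stddef.h>
+  #include <string.h>
+
+  struct group { const char *name; };
+
+  static const struct group *find_group_by_name(const struct group *groups,
+                                                int count, const char *name)
+  {
+          for (int i = 0; i < count; i++) {
+                  if (!strcmp(groups[i].name, name))
+                          return &groups[i];   /* match: return right here */
+          }
+          return NULL;   /* no match: never silently return the last entry */
+  }
+
+  int main(void)
+  {
+          const struct group groups[] = { { "grp0" }, { "grp1" } };
+
+          printf("found: %s\n",
+                 find_group_by_name(groups, 2, "nope") ? "yes" : "no");
+          return 0;
+  }
+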
+Fixes: e566fc11ea76 ("pinctrl: imx: use generic pinctrl helpers for managing groups")
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://lore.kernel.org/20250327031600.99723-1-hui.wang@canonical.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/freescale/pinctrl-imx.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index c163ed1badd2e..54ed21d9b02a8 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -35,16 +35,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
+ struct pinctrl_dev *pctldev,
+ const char *name)
+ {
+- const struct group_desc *grp = NULL;
++ const struct group_desc *grp;
+ int i;
+
+ for (i = 0; i < pctldev->num_groups; i++) {
+ grp = pinctrl_generic_get_group(pctldev, i);
+ if (grp && !strcmp(grp->grp.name, name))
+- break;
++ return grp;
+ }
+
+- return grp;
++ return NULL;
+ }
+
+ static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+--
+2.39.5
+
--- /dev/null
+From 6378135c251192e3893bf2dff7e3871d0561b357 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2023 18:06:28 +0200
+Subject: pinctrl: imx: Use temporary variable to hold pins
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 26ea8229e7adb508133b078790990486c1657cc7 ]
+
+The pins are allocated from the heap, but in order to pass
+them as a constant object, we need to use a non-constant pointer.
+Achieve this by using a temporary variable.
+
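+A reduced illustration of the idiom: fill the array through a writable
+temporary, then publish it through the const-qualified member (allocation and
+structure layout simplified):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct group { const unsigned int *pins; int num_pins; };
+
+  static int parse_group(struct group *grp, int num_pins)
+  {
+          unsigned int *pins;              /* writable view used while parsing */
+
+          pins = calloc(num_pins, sizeof(*pins));
+          if (!pins)
+                  return -1;
+
+          for (int i = 0; i < num_pins; i++)
+                  pins[i] = 100 + i;       /* pretend to parse one entry each */
+
+          grp->pins = pins;                /* read-only view for consumers */
+          grp->num_pins = num_pins;
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct group grp = { NULL, 0 };
+
+          if (!parse_group(&grp, 3)) {
+                  printf("pin[2] = %u\n", grp.pins[2]);
+                  free((void *)grp.pins);
+          }
+          return 0;
+  }
+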
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20231129161459.1002323-6-andriy.shevchenko@linux.intel.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: e64c0ff0d5d8 ("pinctrl: imx: Return NULL if no group is matched and found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/freescale/pinctrl-imx.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index 3a7d2de10b13c..ba02f4e9ca748 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -572,6 +572,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
+ {
+ const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pin *pin;
++ unsigned int *pins;
+ int size, pin_size;
+ const __be32 *list;
+ int i;
+@@ -621,20 +622,20 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
+ grp->data = devm_kcalloc(ipctl->dev,
+ grp->num_pins, sizeof(struct imx_pin),
+ GFP_KERNEL);
+- grp->pins = devm_kcalloc(ipctl->dev,
+- grp->num_pins, sizeof(unsigned int),
+- GFP_KERNEL);
+- if (!grp->pins || !grp->data)
++ if (!grp->data)
++ return -ENOMEM;
++
++ pins = devm_kcalloc(ipctl->dev, grp->num_pins, sizeof(*pins), GFP_KERNEL);
++ if (!pins)
+ return -ENOMEM;
++ grp->pins = pins;
+
+ for (i = 0; i < grp->num_pins; i++) {
+ pin = &((struct imx_pin *)(grp->data))[i];
+ if (info->flags & IMX_USE_SCU)
+- info->imx_pinctrl_parse_pin(ipctl, &grp->pins[i],
+- pin, &list);
++ info->imx_pinctrl_parse_pin(ipctl, &pins[i], pin, &list);
+ else
+- imx_pinctrl_parse_pin_mmio(ipctl, &grp->pins[i],
+- pin, &list, np);
++ imx_pinctrl_parse_pin_mmio(ipctl, &pins[i], pin, &list, np);
+ }
+
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From af0780f99d78d8e94ee6e86410e6097d45ec1d85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Dec 2023 20:57:55 +0200
+Subject: pinctrl: mediatek: Use C99 initializers in PINCTRL_PIN_GROUP()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit bb5eace1562fcef3c7ac9d0bd3e01af1187e46d0 ]
+
+For better flexibility, use C99 initializers in PINCTRL_PIN_GROUP().
+
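+A brief reminder of the difference, with a simplified structure: positional
+initialization silently depends on member order, while designated (C99)
+initializers keep each value tied to its member:
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct group { const char *name; const int *pins; int num_pins; void *data; };
+
+  static const int uart_pins[] = { 10, 11 };
+
+  /* Positional: breaks quietly if the members are ever reordered. */
+  static const struct group positional = { "uart", uart_pins, 2, NULL };
+
+  /* Designated: robust against member reordering and self-documenting. */
+  static const struct group designated = {
+          .name     = "uart",
+          .pins     = uart_pins,
+          .num_pins = 2,
+          .data     = NULL,
+  };
+
+  int main(void)
+  {
+          printf("%s/%d  %s/%d\n", positional.name, positional.num_pins,
+                 designated.name, designated.num_pins);
+          return 0;
+  }
+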
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20231211190321.307330-3-andriy.shevchenko@linux.intel.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: e64c0ff0d5d8 ("pinctrl: imx: Return NULL if no group is matched and found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/mediatek/pinctrl-moore.h | 12 ++++++------
+ drivers/pinctrl/mediatek/pinctrl-paris.h | 12 ++++++------
+ 2 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
+index e1b4b82b9d3db..36ee0021cf7ed 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-moore.h
++++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
+@@ -37,12 +37,12 @@
+ .funcs = NULL, \
+ }
+
+-#define PINCTRL_PIN_GROUP(name, id) \
+- { \
+- name, \
+- id##_pins, \
+- ARRAY_SIZE(id##_pins), \
+- id##_funcs, \
++#define PINCTRL_PIN_GROUP(_name_, id) \
++ { \
++ .name = _name_, \
++ .pins = id##_pins, \
++ .num_pins = ARRAY_SIZE(id##_pins), \
++ .data = id##_funcs, \
+ }
+
+ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
+diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
+index 8762ac5993292..5c4e5b74e43e2 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-paris.h
++++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
+@@ -49,12 +49,12 @@
+ __VA_ARGS__, { } }, \
+ }
+
+-#define PINCTRL_PIN_GROUP(name, id) \
+- { \
+- name, \
+- id##_pins, \
+- ARRAY_SIZE(id##_pins), \
+- id##_funcs, \
++#define PINCTRL_PIN_GROUP(_name_, id) \
++ { \
++ .name = _name_, \
++ .pins = id##_pins, \
++ .num_pins = ARRAY_SIZE(id##_pins), \
++ .data = id##_funcs, \
+ }
+
+ int mtk_paris_pinctrl_probe(struct platform_device *pdev);
+--
+2.39.5
+
xfs-restrict-when-we-try-to-align-cow-fork-delalloc-to-cowextsz-hints.patch
kvm-x86-load-dr6-with-guest-value-only-before-entering-.vcpu_run-loop.patch
dm-bufio-don-t-schedule-in-atomic-context.patch
+asoc-soc-pcm-fix-hw_params-and-dapm-widget-sequence.patch
+pinctrl-imx-use-temporary-variable-to-hold-pins.patch
+pinctrl-core-add-a-convenient-define-pinctrl_group_d.patch
+pinctrl-mediatek-use-c99-initializers-in-pinctrl_pin.patch
+pinctrl-core-embed-struct-pingroup-into-struct-group.patch
+pinctrl-imx-convert-to-use-grp-member.patch
+pinctrl-imx-return-null-if-no-group-is-matched-and-f.patch
+wifi-plfxlc-remove-erroneous-assert-in-plfxlc_mac_re.patch
+vxlan-vnifilter-fix-unlocked-deletion-of-default-fdb.patch
+net-mlx5-e-switch-initialize-mac-address-for-default.patch
+net-mlx5-e-switch-fix-error-handling-for-enabling-ro.patch
+bluetooth-l2cap-copy-rx-timestamp-to-new-fragments.patch
+net-mscc-ocelot-treat-802.1ad-tagged-traffic-as-802..patch
+net-mscc-ocelot-delete-pvid-vlan-when-readding-it-as.patch
+net-ethernet-mtk-star-emac-fix-spinlock-recursion-is.patch
+net-ethernet-mtk-star-emac-rearm-interrupts-in-rx_po.patch
+net_sched-drr-fix-double-list-add-in-class-with-nete.patch
+net_sched-hfsc-fix-a-uaf-vulnerability-in-class-with.patch
+net_sched-ets-fix-double-list-add-in-class-with-nete.patch
+net_sched-qfq-fix-double-list-add-in-class-with-nete.patch
+ice-check-vf-vsi-pointer-value-in-ice_vc_add_fdir_fl.patch
+net-dlink-correct-endianness-handling-of-led_mode.patch
+net-dsa-felix-fix-broken-taprio-gate-states-after-cl.patch
+net-ipv6-fix-udpv6-gso-segmentation-with-nat.patch
+bnxt_en-fix-coredump-logic-to-free-allocated-buffer.patch
+bnxt_en-fix-out-of-bound-memcpy-during-ethtool-w.patch
+bnxt_en-fix-ethtool-d-byte-order-for-32-bit-values.patch
+nvme-tcp-fix-premature-queue-removal-and-i-o-failove.patch
+net-lan743x-fix-memleak-issue-when-gso-enabled.patch
+net-fec-err007885-workaround-for-conventional-tx.patch
+net-hns3-store-rx-vlan-tag-offload-state-for-vf.patch
+net-hns3-fix-an-interrupt-residual-problem.patch
+net-hns3-fixed-debugfs-tm_qset-size.patch
+net-hns3-defer-calling-ptp_clock_register.patch
+net-vertexcom-mse102x-fix-possible-stuck-of-spi-inte.patch
+net-vertexcom-mse102x-fix-len_mask.patch
+net-vertexcom-mse102x-add-range-check-for-cmd_rts.patch
+net-vertexcom-mse102x-fix-rx-error-handling.patch
--- /dev/null
+From 31dbc41ca81b066f36133d058c7f426ed179b3cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 17:51:31 +0300
+Subject: vxlan: vnifilter: Fix unlocked deletion of default FDB entry
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 087a9eb9e5978e3ba362e1163691e41097e8ca20 ]
+
+When a VNI is deleted from a VXLAN device in 'vnifilter' mode, the FDB
+entry associated with the default remote (assuming one was configured)
+is deleted without holding the hash lock. This is wrong and will result
+in a warning [1] being generated by the lockdep annotation that was
+added by commit ebe642067455 ("vxlan: Create wrappers for FDB lookup").
+
+Reproducer:
+
+ # ip link add vx0 up type vxlan dstport 4789 external vnifilter local 192.0.2.1
+ # bridge vni add vni 10010 remote 198.51.100.1 dev vx0
+ # bridge vni del vni 10010 dev vx0
+
+Fix by acquiring the hash lock before the deletion and releasing it
+afterwards. Blame the original commit that introduced the issue rather
+than the one that exposed it.
+
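+In shape, the fix simply brackets the existing deletion with the
+per-bucket lock, along the lines of (condensed from the hunk below; the
+_bh variant is presumably used because the same buckets are also taken
+from packet-processing context):
+
+  u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vninode->vni);
+
+  spin_lock_bh(&vxlan->hash_lock[hash_index]);
+  __vxlan_fdb_delete(vxlan, ...);   /* unchanged deletion call, args as in the hunk */
+  spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+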
+[1]
+WARNING: CPU: 3 PID: 392 at drivers/net/vxlan/vxlan_core.c:417 vxlan_find_mac+0x17f/0x1a0
+[...]
+RIP: 0010:vxlan_find_mac+0x17f/0x1a0
+[...]
+Call Trace:
+ <TASK>
+ __vxlan_fdb_delete+0xbe/0x560
+ vxlan_vni_delete_group+0x2ba/0x940
+ vxlan_vni_del.isra.0+0x15f/0x580
+ vxlan_process_vni_filter+0x38b/0x7b0
+ vxlan_vnifilter_process+0x3bb/0x510
+ rtnetlink_rcv_msg+0x2f7/0xb70
+ netlink_rcv_skb+0x131/0x360
+ netlink_unicast+0x426/0x710
+ netlink_sendmsg+0x75a/0xc20
+ __sock_sendmsg+0xc1/0x150
+ ____sys_sendmsg+0x5aa/0x7b0
+ ___sys_sendmsg+0xfc/0x180
+ __sys_sendmsg+0x121/0x1b0
+ do_syscall_64+0xbb/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: f9c4bb0b245c ("vxlan: vni filtering support on collect metadata device")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250423145131.513029-1-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_vnifilter.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index 1ffc00e270802..c6d4fae958ca8 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ * default dst remote_ip previously added for this vni
+ */
+ if (!vxlan_addr_any(&vninode->remote_ip) ||
+- !vxlan_addr_any(&dst->remote_ip))
++ !vxlan_addr_any(&dst->remote_ip)) {
++ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
++ vninode->vni);
++
++ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ (vxlan_addr_any(&vninode->remote_ip) ?
+ dst->remote_ip : vninode->remote_ip),
+@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ vninode->vni, vninode->vni,
+ dst->remote_ifindex,
+ true);
++ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
++ }
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&vninode->remote_ip) &&
+--
+2.39.5
+
--- /dev/null
+From b4bb6acc445d90c70d8b9cdb3b7d2b352c7e9ea2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Mar 2025 21:52:25 +0300
+Subject: wifi: plfxlc: Remove erroneous assert in plfxlc_mac_release
+
+From: Murad Masimov <m.masimov@mt-integration.ru>
+
+[ Upstream commit 0fb15ae3b0a9221be01715dac0335647c79f3362 ]
+
+plfxlc_mac_release() asserts that mac->lock is held. This assertion is
+incorrect because, even if it were possible, it would not be valid
+behaviour. The function is used when probe fails or after the device is
+disconnected; in both cases mac->lock cannot be held, as the driver is
+not working with the device at that point. Every function that takes
+mac->lock releases it again shortly afterwards. There is also no need to
+hold mac->lock for plfxlc_mac_release() itself, as the mac data is not
+affected, except for mac->flags, which is modified atomically.
+
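+For contrast, lockdep_assert_held() does belong in helpers whose
+contract is that the caller already holds the lock; a minimal generic
+sketch of such a helper (the function name below is hypothetical, not
+part of the plfxlc driver):
+
+  /* Caller must hold mac->lock. */
+  static void plfxlc_mac_stats_update(struct plfxlc_mac *mac)
+  {
+          lockdep_assert_held(&mac->lock);
+          /* ...touch state that is only consistent under mac->lock... */
+  }
+
+plfxlc_mac_release() has no such caller contract, so the assertion can
+only ever fire spuriously in the probe-failure and disconnect paths.
+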
+This bug leads to the following warning:
+================================================================
+WARNING: CPU: 0 PID: 127 at drivers/net/wireless/purelifi/plfxlc/mac.c:106 plfxlc_mac_release+0x7d/0xa0
+Modules linked in:
+CPU: 0 PID: 127 Comm: kworker/0:2 Not tainted 6.1.124-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+Workqueue: usb_hub_wq hub_event
+RIP: 0010:plfxlc_mac_release+0x7d/0xa0 drivers/net/wireless/purelifi/plfxlc/mac.c:106
+Call Trace:
+ <TASK>
+ probe+0x941/0xbd0 drivers/net/wireless/purelifi/plfxlc/usb.c:694
+ usb_probe_interface+0x5c0/0xaf0 drivers/usb/core/driver.c:396
+ really_probe+0x2ab/0xcb0 drivers/base/dd.c:639
+ __driver_probe_device+0x1a2/0x3d0 drivers/base/dd.c:785
+ driver_probe_device+0x50/0x420 drivers/base/dd.c:815
+ __device_attach_driver+0x2cf/0x510 drivers/base/dd.c:943
+ bus_for_each_drv+0x183/0x200 drivers/base/bus.c:429
+ __device_attach+0x359/0x570 drivers/base/dd.c:1015
+ bus_probe_device+0xba/0x1e0 drivers/base/bus.c:489
+ device_add+0xb48/0xfd0 drivers/base/core.c:3696
+ usb_set_configuration+0x19dd/0x2020 drivers/usb/core/message.c:2165
+ usb_generic_driver_probe+0x84/0x140 drivers/usb/core/generic.c:238
+ usb_probe_device+0x130/0x260 drivers/usb/core/driver.c:293
+ really_probe+0x2ab/0xcb0 drivers/base/dd.c:639
+ __driver_probe_device+0x1a2/0x3d0 drivers/base/dd.c:785
+ driver_probe_device+0x50/0x420 drivers/base/dd.c:815
+ __device_attach_driver+0x2cf/0x510 drivers/base/dd.c:943
+ bus_for_each_drv+0x183/0x200 drivers/base/bus.c:429
+ __device_attach+0x359/0x570 drivers/base/dd.c:1015
+ bus_probe_device+0xba/0x1e0 drivers/base/bus.c:489
+ device_add+0xb48/0xfd0 drivers/base/core.c:3696
+ usb_new_device+0xbdd/0x18f0 drivers/usb/core/hub.c:2620
+ hub_port_connect drivers/usb/core/hub.c:5477 [inline]
+ hub_port_connect_change drivers/usb/core/hub.c:5617 [inline]
+ port_event drivers/usb/core/hub.c:5773 [inline]
+ hub_event+0x2efe/0x5730 drivers/usb/core/hub.c:5855
+ process_one_work+0x8a9/0x11d0 kernel/workqueue.c:2292
+ worker_thread+0xa47/0x1200 kernel/workqueue.c:2439
+ kthread+0x28d/0x320 kernel/kthread.c:376
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295
+ </TASK>
+================================================================
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 68d57a07bfe5 ("wireless: add plfxlc driver for pureLiFi X, XL, XC devices")
+Reported-by: syzbot+7d4f142f6c288de8abfe@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=7d4f142f6c288de8abfe
+Signed-off-by: Murad Masimov <m.masimov@mt-integration.ru>
+Link: https://patch.msgid.link/20250321185226.71-2-m.masimov@mt-integration.ru
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/purelifi/plfxlc/mac.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 87a4ff888ddd4..70d6f5244e5e4 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -103,7 +103,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
+ void plfxlc_mac_release(struct plfxlc_mac *mac)
+ {
+ plfxlc_chip_release(&mac->chip);
+- lockdep_assert_held(&mac->lock);
+ }
+
+ int plfxlc_op_start(struct ieee80211_hw *hw)
+--
+2.39.5
+