--- /dev/null
+From 570ecb73cbe9e7dfa77f42ba7646bcab7e661ced Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 22:51:03 +0300
+Subject: Bluetooth: L2CAP: copy RX timestamp to new fragments
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 3908feb1bd7f319a10e18d84369a48163264cc7d ]
+
+Also copy the timestamp when allocating a new skb for a received
+fragment. This fixes missing RX timestamps when fragmentation is used.
+
+Fixes: 4d7ea8ee90e4 ("Bluetooth: L2CAP: Fix handling fragmented length")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index d34e161a30b37..a1411ee8f162f 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -8436,6 +8436,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
+ return -ENOMEM;
+ /* Init rx_len */
+ conn->rx_len = len;
++
++ skb_set_delivery_time(conn->rx_skb, skb->tstamp,
++ skb->tstamp_type);
+ }
+
+ /* Copy as much as the rx_skb can hold */
+--
+2.39.5
+
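A minimal sketch of the surrounding pattern may help: a freshly
allocated reassembly skb carries no timestamp, so the RX timestamp of
the first fragment must be copied over explicitly. In this sketch,
start_reassembly() and the GFP flag are illustrative; only
bt_skb_alloc() and skb_set_delivery_time() are real kernel APIs.

```c
#include <net/bluetooth/bluetooth.h>

static struct sk_buff *start_reassembly(struct sk_buff *first_frag,
					unsigned int len)
{
	struct sk_buff *rx_skb = bt_skb_alloc(len, GFP_KERNEL);

	if (!rx_skb)
		return NULL;

	/* Without this copy, the reassembled packet is delivered
	 * without the RX timestamp recorded on its first fragment. */
	skb_set_delivery_time(rx_skb, first_frag->tstamp,
			      first_frag->tstamp_type);
	return rx_skb;
}
```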
--- /dev/null
+From 81e37bbdc7ddaf4f5a07fa77c296f53516c1f687 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:01 -0700
+Subject: bnxt_en: Fix coredump logic to free allocated buffer
+
+From: Shruti Parab <shruti.parab@broadcom.com>
+
+[ Upstream commit ea9376cf68230e05492f22ca45d329f16e262c7b ]
+
+When handling the HWRM_DBG_COREDUMP_LIST FW command in
+bnxt_hwrm_dbg_dma_data(), the allocated buffer info->dest_buf is
+not freed in the error path. In the normal path, info->dest_buf
+is assigned to coredump->data and will eventually be freed after
+the coredump is collected.
+
+Free info->dest_buf immediately inside bnxt_hwrm_dbg_dma_data() in
+the error path.
+
+Fixes: c74751f4c392 ("bnxt_en: Return error if FW returns more data than dump length")
+Reported-by: Michael Chan <michael.chan@broadcom.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Shruti Parab <shruti.parab@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+index 156f76bcea7eb..e0e7bfaf860b7 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+@@ -72,6 +72,11 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
++ if (cmn_req->req_type ==
++ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
++ kfree(info->dest_buf);
++ info->dest_buf = NULL;
++ }
+ break;
+ }
+ }
+--
+2.39.5
+
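The fix follows the usual kernel ownership convention: on success the
buffer is handed off to the caller, on failure the callee must release
everything it allocated. A sketch of that rule, with a hypothetical
struct and fetch_segments() helper standing in for the driver's types:

```c
#include <linux/slab.h>
#include <linux/errno.h>

struct dump_info {		/* hypothetical stand-in */
	void *dest_buf;
	u32 buf_len;
};

int fetch_segments(struct dump_info *info);	/* hypothetical */

static int fill_segment_list(struct dump_info *info)
{
	info->dest_buf = kmalloc(info->buf_len, GFP_KERNEL);
	if (!info->dest_buf)
		return -ENOMEM;

	if (fetch_segments(info)) {
		/* Error path: nothing downstream will ever see this
		 * buffer, so free it here and clear the stale pointer. */
		kfree(info->dest_buf);
		info->dest_buf = NULL;
		return -ENOBUFS;
	}

	/* Success path: ownership transfers to the caller, which (as
	 * in the driver) frees it after the coredump is collected. */
	return 0;
}
```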
--- /dev/null
+From 8b2fc72473069f3a3903076ca94ff1c96001a103 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:03 -0700
+Subject: bnxt_en: Fix ethtool -d byte order for 32-bit values
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 02e8be5a032cae0f4ca33c6053c44d83cf4acc93 ]
+
+For the version 1 register dump that includes the PCIe stats, the
+existing code incorrectly assumes that all PCIe stats are 64-bit
+values. Fix it
+by using an array containing the starting and ending index of the 32-bit
+values. The loop in bnxt_get_regs() will use the array to do proper
+endian swap for the 32-bit values.
+
+Fixes: b5d600b027eb ("bnxt_en: Add support for 'ethtool -d'")
+Reviewed-by: Shruti Parab <shruti.parab@broadcom.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 38 ++++++++++++++++---
+ 1 file changed, 32 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 8ebc1c522a05b..ad307df8d97ba 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1360,6 +1360,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
+ return reg_len;
+ }
+
++#define BNXT_PCIE_32B_ENTRY(start, end) \
++ { offsetof(struct pcie_ctx_hw_stats, start), \
++ offsetof(struct pcie_ctx_hw_stats, end) }
++
++static const struct {
++ u16 start;
++ u16 end;
++} bnxt_pcie_32b_entries[] = {
++ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
++};
++
+ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+ {
+@@ -1391,12 +1402,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+- __le64 *src = (__le64 *)hw_pcie_stats;
+- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+- int i;
+-
+- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+- dst[i] = le64_to_cpu(src[i]);
++ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
++ u8 *src = (u8 *)hw_pcie_stats;
++ int i, j;
++
++ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
++ if (i >= bnxt_pcie_32b_entries[j].start &&
++ i <= bnxt_pcie_32b_entries[j].end) {
++ u32 *dst32 = (u32 *)(dst + i);
++
++ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
++ i += 4;
++ if (i > bnxt_pcie_32b_entries[j].end &&
++ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
++ j++;
++ } else {
++ u64 *dst64 = (u64 *)(dst + i);
++
++ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
++ i += 8;
++ }
++ }
+ }
+ hwrm_req_drop(bp, req);
+ }
+--
+2.39.5
+
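To see why a blanket 64-bit swap corrupts adjacent 32-bit counters,
consider a big-endian host reading two little-endian 32-bit values. A
small, runnable userspace illustration (not driver code) that simulates
the big-endian loads:

```c
#include <stdint.h>
#include <stdio.h>

/* How a big-endian CPU reads eight bytes as one u64. */
static uint64_t be_load64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* le64_to_cpu() on a big-endian host is a full byte reversal. */
static uint64_t bswap64(uint64_t v)
{
	uint64_t r = 0;
	for (int i = 0; i < 8; i++, v >>= 8)
		r = (r << 8) | (v & 0xff);
	return r;
}

int main(void)
{
	/* Two LE32 counters as the firmware DMAs them: 1, then 2. */
	const uint8_t fw[8] = { 1, 0, 0, 0, 2, 0, 0, 0 };
	uint64_t wrong = bswap64(be_load64(fw));

	/* wrong == 0x0000000200000001: written back to memory on the
	 * big-endian host, the first 32-bit word of the dump reads 2
	 * and the second reads 1 - the counters are transposed. The
	 * bnxt_pcie_32b_entries[] table lets the loop convert each
	 * 32-bit counter individually instead. */
	printf("%#018llx\n", (unsigned long long)wrong);
	return 0;
}
```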
--- /dev/null
+From 82a1321321c21f8ff6ef0065cb8aff20679f3a31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 15:59:02 -0700
+Subject: bnxt_en: Fix out-of-bound memcpy() during ethtool -w
+
+From: Shruti Parab <shruti.parab@broadcom.com>
+
+[ Upstream commit 6b87bd94f34370bbf1dfa59352bed8efab5bf419 ]
+
+Retrieving the FW coredump using ethtool can sometimes cause
+memory corruption:
+
+BUG: KFENCE: memory corruption in __bnxt_get_coredump+0x3ef/0x670 [bnxt_en]
+Corrupted memory at 0x000000008f0f30e8 [ ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ] (in kfence-#45):
+__bnxt_get_coredump+0x3ef/0x670 [bnxt_en]
+ethtool_get_dump_data+0xdc/0x1a0
+__dev_ethtool+0xa1e/0x1af0
+dev_ethtool+0xa8/0x170
+dev_ioctl+0x1b5/0x580
+sock_do_ioctl+0xab/0xf0
+sock_ioctl+0x1ce/0x2e0
+__x64_sys_ioctl+0x87/0xc0
+do_syscall_64+0x5c/0xf0
+entry_SYSCALL_64_after_hwframe+0x78/0x80
+
+...
+
+This happens when copying the coredump segment list in
+bnxt_hwrm_dbg_dma_data() with the HWRM_DBG_COREDUMP_LIST FW command.
+The info->dest_buf buffer is allocated based on the number of coredump
+segments returned by the FW. The segment list is then DMA'ed by
+the FW, and the length of the DMA is returned by the FW. The driver then
+copies this DMA'ed segment list to info->dest_buf.
+
+In some cases, this DMA length may exceed the info->dest_buf length
+and cause the above BUG condition. Fix it by capping the copy
+length to not exceed the length of info->dest_buf. The extra
+DMA data contains no useful information.
+
+This code path is shared for the HWRM_DBG_COREDUMP_LIST and the
+HWRM_DBG_COREDUMP_RETRIEVE FW commands. The buffering is different
+for these two FW commands. To simplify the logic, we need to move
+the line that adjusts the buffer length for HWRM_DBG_COREDUMP_RETRIEVE
+up, so that the new check capping the copy length works for both
+commands.
+
+Fixes: c74751f4c392 ("bnxt_en: Return error if FW returns more data than dump length")
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Shruti Parab <shruti.parab@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/broadcom/bnxt/bnxt_coredump.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+index e0e7bfaf860b7..8716c924f3f50 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+@@ -66,10 +66,19 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ }
+ }
+
++ if (cmn_req->req_type ==
++ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
++ info->dest_buf_size += len;
++
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+- memcpy(info->dest_buf + off, dma_buf, len);
++ u16 copylen = min_t(u16, len,
++ info->dest_buf_size - off);
++
++ memcpy(info->dest_buf + off, dma_buf, copylen);
++ if (copylen < len)
++ break;
+ } else {
+ rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+@@ -81,10 +90,6 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ }
+ }
+
+- if (cmn_req->req_type ==
+- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+- info->dest_buf_size += len;
+-
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+--
+2.39.5
+
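The core of the fix is a defensive clamp: the DMA length reported by
the firmware is never trusted past the bounds of the destination
buffer. A sketch of the pattern, with the parameter list simplified
from the driver's bnxt_hwrm_dbg_dma_data():

```c
#include <linux/minmax.h>
#include <linux/string.h>

/* Assumes the caller has already ensured off <= dest_buf_size, as the
 * driver's seg_start/buf_len bounds check does. */
static u32 copy_fw_chunk(u8 *dest_buf, u32 dest_buf_size,
			 const u8 *dma_buf, u32 off, u32 len)
{
	/* The FW may report more data than the buffer was sized for;
	 * the excess carries no useful information, so cap the copy. */
	u32 copylen = min_t(u32, len, dest_buf_size - off);

	memcpy(dest_buf + off, dma_buf, copylen);

	/* A short copy (copylen < len) means the buffer is full; the
	 * driver breaks out of its retrieval loop in that case. */
	return copylen;
}
```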
--- /dev/null
+From 287f3428b2dac80de26b5b719acd881f44e4f18e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 15:26:32 -0700
+Subject: ice: Check VF VSI Pointer Value in ice_vc_add_fdir_fltr()
+
+From: Xuanqiang Luo <luoxuanqiang@kylinos.cn>
+
+[ Upstream commit 425c5f266b2edeee0ce16fedd8466410cdcfcfe3 ]
+
+As mentioned in commit baeb705fd6a7 ("ice: always check VF VSI
+pointer values"), we need to perform a null pointer check on the return
+value of ice_get_vf_vsi() before using it.
+
+Fixes: 6ebbe97a4881 ("ice: Add a per-VF limit on number of FDIR filters")
+Signed-off-by: Xuanqiang Luo <luoxuanqiang@kylinos.cn>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250425222636.3188441-3-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index 2ca8102e8f36e..3b87cc9dfd46e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -2079,6 +2079,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vf_vsi = ice_get_vf_vsi(vf);
++ if (!vf_vsi) {
++ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ goto err_exit;
++ }
+
+ #define ICE_VF_MAX_FDIR_FILTERS 128
+ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+--
+2.39.5
+
--- /dev/null
+From 31bb907a6319d45b2704152474047144ecf12cb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Mar 2021 10:15:34 -0800
+Subject: ice: Refactor promiscuous functions
+
+From: Brett Creeley <brett.creeley@intel.com>
+
+[ Upstream commit fabf480bf95d71c9cfe8a8d6307e0035df963a6a ]
+
+Some of the promiscuous mode functions take a boolean to indicate
+set/clear, which hurts readability. Refactor the promiscuous mode
+code and provide an interface with explicit set and clear
+operations.
+
+Signed-off-by: Brett Creeley <brett.creeley@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 425c5f266b2e ("ice: Check VF VSI Pointer Value in ice_vc_add_fdir_fltr()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_fltr.c | 58 ++++++++
+ drivers/net/ethernet/intel/ice/ice_fltr.h | 12 ++
+ drivers/net/ethernet/intel/ice/ice_main.c | 49 +++---
+ .../net/ethernet/intel/ice/ice_virtchnl_pf.c | 139 +++++++-----------
+ 4 files changed, 156 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
+index e27b4de7e7aa3..7536451cb09ef 100644
+--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
++++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
+@@ -46,6 +46,64 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
+ return 0;
+ }
+
++/**
++ * ice_fltr_set_vlan_vsi_promisc
++ * @hw: pointer to the hardware structure
++ * @vsi: the VSI being configured
++ * @promisc_mask: mask of promiscuous config bits
++ *
++ * Set VSI with all associated VLANs to given promiscuous mode(s)
++ */
++enum ice_status
++ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
++ u8 promisc_mask)
++{
++ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
++}
++
++/**
++ * ice_fltr_clear_vlan_vsi_promisc
++ * @hw: pointer to the hardware structure
++ * @vsi: the VSI being configured
++ * @promisc_mask: mask of promiscuous config bits
++ *
++ * Clear VSI with all associated VLANs to given promiscuous mode(s)
++ */
++enum ice_status
++ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
++ u8 promisc_mask)
++{
++ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
++}
++
++/**
++ * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s)
++ * @hw: pointer to the hardware structure
++ * @vsi_handle: VSI handle to clear mode
++ * @promisc_mask: mask of promiscuous config bits to clear
++ * @vid: VLAN ID to clear VLAN promiscuous
++ */
++enum ice_status
++ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
++ u16 vid)
++{
++ return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
++}
++
++/**
++ * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s)
++ * @hw: pointer to the hardware structure
++ * @vsi_handle: VSI handle to configure
++ * @promisc_mask: mask of promiscuous config bits
++ * @vid: VLAN ID to set VLAN promiscuous
++ */
++enum ice_status
++ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
++ u16 vid)
++{
++ return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
++}
++
+ /**
+ * ice_fltr_add_mac_list - add list of MAC filters
+ * @vsi: pointer to VSI struct
+diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
+index 361cb4da9b43b..a0e8226f64f61 100644
+--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
++++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
+@@ -6,6 +6,18 @@
+
+ void ice_fltr_free_list(struct device *dev, struct list_head *h);
+ enum ice_status
++ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
++ u8 promisc_mask);
++enum ice_status
++ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
++ u8 promisc_mask);
++enum ice_status
++ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
++ u16 vid);
++enum ice_status
++ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
++ u16 vid);
++enum ice_status
+ ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action);
+ enum ice_status
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 329bf24a3f0e5..735f8cef6bfa4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -222,32 +222,45 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
+ }
+
+ /**
+- * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
++ * ice_set_promisc - Enable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+- * @set_promisc: enable or disable promisc flag request
+ *
+ */
+-static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
++static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
+ {
+- struct ice_hw *hw = &vsi->back->hw;
+- enum ice_status status = 0;
++ enum ice_status status;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+- if (vsi->num_vlan > 1) {
+- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+- set_promisc);
+- } else {
+- if (set_promisc)
+- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+- 0);
+- else
+- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+- 0);
+- }
++ if (vsi->num_vlan > 1)
++ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
++ else
++ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
++ if (status)
++ return -EIO;
++
++ return 0;
++}
+
++/**
++ * ice_clear_promisc - Disable promiscuous mode for a given PF
++ * @vsi: the VSI being configured
++ * @promisc_m: mask of promiscuous config bits
++ *
++ */
++static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
++{
++ enum ice_status status;
++
++ if (vsi->type != ICE_VSI_PF)
++ return 0;
++
++ if (vsi->num_vlan > 1)
++ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
++ else
++ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ if (status)
+ return -EIO;
+
+@@ -343,7 +356,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+- err = ice_cfg_promisc(vsi, promisc_m, true);
++ err = ice_set_promisc(vsi, promisc_m);
+ if (err) {
+ netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+@@ -357,7 +370,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+- err = ice_cfg_promisc(vsi, promisc_m, false);
++ err = ice_clear_promisc(vsi, promisc_m);
+ if (err) {
+ netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 9d4d58757e040..e4e25f3ba8493 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -286,37 +286,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
+ return 0;
+ }
+
+-/**
+- * ice_err_to_virt_err - translate errors for VF return code
+- * @ice_err: error return code
+- */
+-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+-{
+- switch (ice_err) {
+- case ICE_SUCCESS:
+- return VIRTCHNL_STATUS_SUCCESS;
+- case ICE_ERR_BAD_PTR:
+- case ICE_ERR_INVAL_SIZE:
+- case ICE_ERR_DEVICE_NOT_SUPPORTED:
+- case ICE_ERR_PARAM:
+- case ICE_ERR_CFG:
+- return VIRTCHNL_STATUS_ERR_PARAM;
+- case ICE_ERR_NO_MEMORY:
+- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+- case ICE_ERR_NOT_READY:
+- case ICE_ERR_RESET_FAILED:
+- case ICE_ERR_FW_API_VER:
+- case ICE_ERR_AQ_ERROR:
+- case ICE_ERR_AQ_TIMEOUT:
+- case ICE_ERR_AQ_FULL:
+- case ICE_ERR_AQ_NO_WORK:
+- case ICE_ERR_AQ_EMPTY:
+- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+- default:
+- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+- }
+-}
+-
+ /**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+@@ -1301,45 +1270,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
+ ice_flush(hw);
+ }
+
+-/**
+- * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
+- * @vf: pointer to the VF info
+- * @vsi: the VSI being configured
+- * @promisc_m: mask of promiscuous config bits
+- * @rm_promisc: promisc flag request from the VF to remove or add filter
+- *
+- * This function configures VF VSI promiscuous mode, based on the VF requests,
+- * for Unicast, Multicast and VLAN
+- */
+-static enum ice_status
+-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
+- bool rm_promisc)
++static int
++ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+ {
+- struct ice_pf *pf = vf->pf;
+- enum ice_status status = 0;
+- struct ice_hw *hw;
++ struct ice_hw *hw = &vsi->back->hw;
++ enum ice_status status;
+
+- hw = &pf->hw;
+- if (vsi->num_vlan) {
+- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+- rm_promisc);
+- } else if (vf->port_vlan_info) {
+- if (rm_promisc)
+- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+- vf->port_vlan_info);
+- else
+- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+- vf->port_vlan_info);
+- } else {
+- if (rm_promisc)
+- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+- 0);
+- else
+- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+- 0);
++ if (vf->port_vlan_info)
++ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
++ vf->port_vlan_info & VLAN_VID_MASK);
++ else if (vsi->num_vlan > 1)
++ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
++ else
++ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
++
++ if (status && status != ICE_ERR_ALREADY_EXISTS) {
++ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %s\n",
++ vf->vf_id, ice_stat_str(status));
++ return ice_status_to_errno(status);
++ }
++
++ return 0;
++}
++
++static int
++ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
++{
++ struct ice_hw *hw = &vsi->back->hw;
++ enum ice_status status;
++
++ if (vf->port_vlan_info)
++ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
++ vf->port_vlan_info & VLAN_VID_MASK);
++ else if (vsi->num_vlan > 1)
++ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
++ else
++ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
++
++ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
++ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %s\n",
++ vf->vf_id, ice_stat_str(status));
++ return ice_status_to_errno(status);
+ }
+
+- return status;
++ return 0;
+ }
+
+ static void ice_vf_clear_counters(struct ice_vf *vf)
+@@ -1700,7 +1674,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+- if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
++ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
+ dev_err(dev, "disabling promiscuous mode failed\n");
+ }
+
+@@ -2952,10 +2926,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+ {
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+- enum ice_status mcast_status = 0, ucast_status = 0;
+ bool rm_promisc, alluni = false, allmulti = false;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
++ int mcast_err = 0, ucast_err = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+@@ -3052,24 +3026,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+ ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+
+- ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
+- !alluni);
+- if (ucast_status) {
+- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
+- alluni ? "en" : "dis", vf->vf_id);
+- v_ret = ice_err_to_virt_err(ucast_status);
+- }
++ if (alluni)
++ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
++ else
++ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+
+- mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
+- !allmulti);
+- if (mcast_status) {
+- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
+- allmulti ? "en" : "dis", vf->vf_id);
+- v_ret = ice_err_to_virt_err(mcast_status);
+- }
++ if (allmulti)
++ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
++ else
++ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
++
++ if (ucast_err || mcast_err)
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+- if (!mcast_status) {
++ if (!mcast_err) {
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
+@@ -3079,7 +3050,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+ vf->vf_id);
+ }
+
+- if (!ucast_status) {
++ if (!ucast_err) {
+ if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+--
+2.39.5
+
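The design choice is worth spelling out: a boolean mode parameter
forces every reader to decode what "true" means at each call site,
while paired verbs make the call sites self-describing. A simplified
before/after of the signatures (abbreviated from the patch):

```c
/* Before: one entry point, behaviour selected by a flag. A call such
 * as ice_cfg_promisc(vsi, mask, true) doesn't say what "true" does. */
int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc);

/* After: the verb is in the name, each call site documents itself,
 * and the error handling can differ per direction (the patch ignores
 * ICE_ERR_ALREADY_EXISTS on set and ICE_ERR_DOES_NOT_EXIST on clear). */
int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m);
int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m);
```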
--- /dev/null
+From 97100e7d23c3c1a0c9c95baf69c14196de3699b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 16:50:47 +0100
+Subject: net: dlink: Correct endianness handling of led_mode
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit e7e5ae71831c44d58627a991e603845a2fed2cab ]
+
+As its name suggests, parse_eeprom() parses EEPROM data.
+
+This is done by reading data, 16 bits at a time as follows:
+
+ for (i = 0; i < 128; i++)
+	((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
+sromdata is at the same memory location as psrom.
+And the type of psrom is a pointer to struct t_SROM.
+
+As can be seen in the loop above, data is stored in sromdata, and thus psrom,
+as 16-bit little-endian values.
+
+However, the integer fields of t_SROM are host byte order integers.
+And in the case of led_mode this leads to a little endian value
+being incorrectly treated as host byte order.
+
+Looking at rio_set_led_mode(), this does appear to be a bug, as that
+code masks led_mode with 0x1, 0x2 and 0x8: logic that would be
+affected by a reversed byte order.
+
+This problem would only manifest on big endian hosts.
+
+Found by inspection while investigating a sparse warning
+regarding the crc field of t_SROM.
+
+I believe that warning is a false positive. And although I plan
+to send a follow-up to use little-endian types for the other integer
+fields of PSROM_t, I do not believe that will involve any bug fixes.
+
+Compile tested only.
+
+Fixes: c3f45d322cbd ("dl2k: Add support for IP1000A-based cards")
+Signed-off-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250425-dlink-led-mode-v1-1-6bae3c36e736@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/dlink/dl2k.c | 2 +-
+ drivers/net/ethernet/dlink/dl2k.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 993bba0ffb161..af0b6fa296e56 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -353,7 +353,7 @@ parse_eeprom (struct net_device *dev)
+ dev->dev_addr[i] = psrom->mac_addr[i];
+
+ if (np->chip_id == CHIP_IP1000A) {
+- np->led_mode = psrom->led_mode;
++ np->led_mode = le16_to_cpu(psrom->led_mode);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index 195dc6cfd8955..0e33e2eaae960 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -335,7 +335,7 @@ typedef struct t_SROM {
+ u16 sub_system_id; /* 0x06 */
+ u16 pci_base_1; /* 0x08 (IP1000A only) */
+ u16 pci_base_2; /* 0x0a (IP1000A only) */
+- u16 led_mode; /* 0x0c (IP1000A only) */
++ __le16 led_mode; /* 0x0c (IP1000A only) */
+ u16 reserved1[9]; /* 0x0e-0x1f */
+ u8 mac_addr[6]; /* 0x20-0x25 */
+ u8 reserved2[10]; /* 0x26-0x2f */
+--
+2.39.5
+
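A small, runnable illustration (userspace, not driver code) of how the
misread flips the bit tests in rio_set_led_mode():

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* led_mode = 0x0003, stored little-endian in the EEPROM image. */
	const uint8_t eeprom[2] = { 0x03, 0x00 };

	/* A big-endian CPU loading the raw u16 sees the bytes in the
	 * opposite order... */
	uint16_t raw_be = (uint16_t)((eeprom[0] << 8) | eeprom[1]);
	/* ...while le16_to_cpu() recovers the intended value. */
	uint16_t fixed = (uint16_t)((eeprom[1] << 8) | eeprom[0]);

	/* rio_set_led_mode() masks led_mode with 0x1, 0x2 and 0x8: */
	printf("raw:   0x%04x, bit 0x1 set? %d\n", raw_be, !!(raw_be & 0x1));
	printf("fixed: 0x%04x, bit 0x1 set? %d\n", fixed, !!(fixed & 0x1));
	/* prints raw: 0x0300 -> 0, fixed: 0x0003 -> 1 */
	return 0;
}
```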
--- /dev/null
+From f2679cc8f91e6605e2b2549755d7fc5ef61f70d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 10:38:48 +0200
+Subject: net: ethernet: mtk-star-emac: fix spinlock recursion issues on rx/tx
+ poll
+
+From: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+
+[ Upstream commit 6fe0866014486736cc3ba1c6fd4606d3dbe55c9c ]
+
+Use spin_lock_irqsave() and spin_unlock_irqrestore() instead of
+spin_lock() and spin_unlock() in the mtk_star_emac driver to avoid
+the spinlock recursion that can occur when re-enabling the DMA
+interrupts in the rx/tx poll handlers.
+
+```
+BUG: spinlock recursion on CPU#0, swapper/0/0
+ lock: 0xffff00000db9cf20, .magic: dead4ead, .owner: swapper/0/0,
+ .owner_cpu: 0
+CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted
+ 6.15.0-rc2-next-20250417-00001-gf6a27738686c-dirty #28 PREEMPT
+Hardware name: MediaTek MT8365 Open Platform EVK (DT)
+Call trace:
+ show_stack+0x18/0x24 (C)
+ dump_stack_lvl+0x60/0x80
+ dump_stack+0x18/0x24
+ spin_dump+0x78/0x88
+ do_raw_spin_lock+0x11c/0x120
+ _raw_spin_lock+0x20/0x2c
+ mtk_star_handle_irq+0xc0/0x22c [mtk_star_emac]
+ __handle_irq_event_percpu+0x48/0x140
+ handle_irq_event+0x4c/0xb0
+ handle_fasteoi_irq+0xa0/0x1bc
+ handle_irq_desc+0x34/0x58
+ generic_handle_domain_irq+0x1c/0x28
+ gic_handle_irq+0x4c/0x120
+ do_interrupt_handler+0x50/0x84
+ el1_interrupt+0x34/0x68
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x6c/0x70
+ regmap_mmio_read32le+0xc/0x20 (P)
+ _regmap_bus_reg_read+0x6c/0xac
+ _regmap_read+0x60/0xdc
+ regmap_read+0x4c/0x80
+ mtk_star_rx_poll+0x2f4/0x39c [mtk_star_emac]
+ __napi_poll+0x38/0x188
+ net_rx_action+0x164/0x2c0
+ handle_softirqs+0x100/0x244
+ __do_softirq+0x14/0x20
+ ____do_softirq+0x10/0x20
+ call_on_irq_stack+0x24/0x64
+ do_softirq_own_stack+0x1c/0x40
+ __irq_exit_rcu+0xd4/0x10c
+ irq_exit_rcu+0x10/0x1c
+ el1_interrupt+0x38/0x68
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x6c/0x70
+ cpuidle_enter_state+0xac/0x320 (P)
+ cpuidle_enter+0x38/0x50
+ do_idle+0x1e4/0x260
+ cpu_startup_entry+0x34/0x3c
+ rest_init+0xdc/0xe0
+ console_on_rootfs+0x0/0x6c
+ __primary_switched+0x88/0x90
+```
+
+Fixes: 0a8bd81fd6aa ("net: ethernet: mtk-star-emac: separate tx/rx handling with two NAPIs")
+Signed-off-by: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Acked-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Link: https://patch.msgid.link/20250424-mtk_star_emac-fix-spinlock-recursion-issue-v2-1-f3fde2e529d8@collabora.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: e54b4db35e20 ("net: ethernet: mtk-star-emac: rearm interrupts in rx_poll only when advised")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_star_emac.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 209e79f2c3e8c..c7155e0102232 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1153,6 +1153,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ struct net_device *ndev = priv->ndev;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
++ unsigned long flags;
+
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
+ ret = mtk_star_tx_complete_one(priv);
+@@ -1172,9 +1173,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ netif_wake_queue(ndev);
+
+ if (napi_complete(napi)) {
+- spin_lock(&priv->lock);
++ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, false, true);
+- spin_unlock(&priv->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return 0;
+@@ -1331,6 +1332,7 @@ static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
+ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ {
+ struct mtk_star_priv *priv;
++ unsigned long flags;
+ int work_done = 0;
+
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+@@ -1338,9 +1340,9 @@ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+- spin_lock(&priv->lock);
++ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, true, false);
+- spin_unlock(&priv->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+--
+2.39.5
+
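The trace above decodes to a classic self-deadlock: the NAPI poll
callback takes priv->lock with plain spin_lock() in softirq context,
the device raises its interrupt on the same CPU, and the hard-IRQ
handler then spins on the same lock forever. A condensed sketch of the
two contexts (bodies abbreviated; rearm_rx_irq() is an illustrative
wrapper, not a driver function):

```c
/* Hard-IRQ context: the handler takes priv->lock to mask the DMA
 * interrupts (local interrupts are already disabled here). */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct mtk_star_priv *priv = netdev_priv(data);

	spin_lock(&priv->lock);
	/* ... mask rx/tx DMA interrupts, schedule NAPI ... */
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}

/* Softirq context: with plain spin_lock() the IRQ above could fire on
 * this CPU while the lock is held. Disabling local interrupts around
 * the critical section closes that window: */
static void rearm_rx_irq(struct mtk_star_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	mtk_star_enable_dma_irq(priv, true, false);
	spin_unlock_irqrestore(&priv->lock, flags);
}
```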
--- /dev/null
+From 9f70cabd181f9c50c8426bb37b1e679e110354ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 10:38:49 +0200
+Subject: net: ethernet: mtk-star-emac: rearm interrupts in rx_poll only when
+ advised
+
+From: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+
+[ Upstream commit e54b4db35e201a9173da9cb7abc8377e12abaf87 ]
+
+In the mtk_star_rx_poll() function, on completion of event
+processing, the mtk_star_emac driver calls napi_complete_done() but
+ignores its return code and re-enables RX DMA interrupts
+unconditionally. That return code indicates whether the device
+should avoid rearming its interrupts, so fix this behaviour by
+taking it into account.
+
+Fixes: 8c7bd5a454ff ("net: ethernet: mtk-star-emac: new driver")
+Signed-off-by: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+Acked-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Link: https://patch.msgid.link/20250424-mtk_star_emac-fix-spinlock-recursion-issue-v2-2-f3fde2e529d8@collabora.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_star_emac.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index c7155e0102232..639cf1c27dbd4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1338,8 +1338,7 @@ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+
+ work_done = mtk_star_rx(priv, budget);
+- if (work_done < budget) {
+- napi_complete_done(napi, work_done);
++ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+--
+2.39.5
+
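napi_complete_done() returns true only when it actually completed the
NAPI instance; it returns false when NAPI must stay scheduled (for
example under busy polling), in which case the driver must not rearm
its interrupt. A minimal sketch of the resulting canonical poll shape
(process_rx() is an illustrative helper):

```c
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv =
		container_of(napi, struct mtk_star_priv, rx_napi);
	unsigned long flags;
	int work_done;

	work_done = process_rx(priv, budget);	/* illustrative */

	/* Rearm only if (a) the budget was not exhausted and (b)
	 * napi_complete_done() confirms polling really stopped. */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&priv->lock, flags);
		mtk_star_enable_dma_irq(priv, true, false);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return work_done;
}
```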
--- /dev/null
+From 6f1dd0bd237a308f3d868b2705fb0fdf6b05a3f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jun 2022 11:17:42 +0800
+Subject: net: ethernet: mtk-star-emac: separate tx/rx handling with two NAPIs
+
+From: Biao Huang <biao.huang@mediatek.com>
+
+[ Upstream commit 0a8bd81fd6aaace14979152e0540da8ff158a00a ]
+
+The current driver may lose tx interrupts under a bidirectional
+iperf3 test, which leads to some unexpected issues.
+
+This patch lets the rx/tx interrupts be enabled and disabled
+separately, and handles rx and tx in two different NAPIs.
+
+Signed-off-by: Biao Huang <biao.huang@mediatek.com>
+Signed-off-by: Yinghua Pan <ot_yinghua.pan@mediatek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e54b4db35e20 ("net: ethernet: mtk-star-emac: rearm interrupts in rx_poll only when advised")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_star_emac.c | 340 ++++++++++--------
+ 1 file changed, 199 insertions(+), 141 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 392648246d8f4..209e79f2c3e8c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -32,6 +32,7 @@
+ #define MTK_STAR_SKB_ALIGNMENT 16
+ #define MTK_STAR_HASHTABLE_MC_LIMIT 256
+ #define MTK_STAR_HASHTABLE_SIZE_MAX 512
++#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+ /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
+ * work for this controller.
+@@ -216,7 +217,8 @@ struct mtk_star_ring_desc_data {
+ struct sk_buff *skb;
+ };
+
+-#define MTK_STAR_RING_NUM_DESCS 128
++#define MTK_STAR_RING_NUM_DESCS 512
++#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
+ #define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
+ #define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
+ #define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
+@@ -246,7 +248,8 @@ struct mtk_star_priv {
+ struct mtk_star_ring rx_ring;
+
+ struct mii_bus *mii;
+- struct napi_struct napi;
++ struct napi_struct tx_napi;
++ struct napi_struct rx_napi;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_intf;
+@@ -357,19 +360,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
+ mtk_star_ring_push_head(ring, desc_data, flags);
+ }
+
+-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
++static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
+ {
+- return abs(ring->head - ring->tail);
+-}
++ u32 avail;
+
+-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
+-{
+- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
+-}
++ if (ring->tail > ring->head)
++ avail = ring->tail - ring->head - 1;
++ else
++ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
+
+-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
+-{
+- return mtk_star_ring_num_used_descs(ring) > 0;
++ return avail;
+ }
+
+ static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
+@@ -414,6 +414,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
+ MTK_STAR_BIT_MAC_CFG_NIC_PD);
+ }
+
++static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
++ bool rx, bool tx)
++{
++ u32 value;
++
++ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
++
++ if (tx)
++ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
++ if (rx)
++ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
++
++ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
++}
++
++static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
++ bool rx, bool tx)
++{
++ u32 value;
++
++ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
++
++ if (tx)
++ value |= MTK_STAR_BIT_INT_STS_TNTC;
++ if (rx)
++ value |= MTK_STAR_BIT_INT_STS_FNRC;
++
++ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
++}
++
+ /* Unmask the three interrupts we care about, mask all others. */
+ static void mtk_star_intr_enable(struct mtk_star_priv *priv)
+ {
+@@ -429,20 +459,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
+ }
+
+-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
+-{
+- unsigned int val;
+-
+- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
+-
+- return val;
+-}
+-
+ static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
+ {
+ unsigned int val;
+
+- val = mtk_star_intr_read(priv);
++ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
+ regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
+
+ return val;
+@@ -714,25 +735,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
+ }
+
+-/* All processing for TX and RX happens in the napi poll callback.
+- *
+- * FIXME: The interrupt handling should be more fine-grained with each
+- * interrupt enabled/disabled independently when needed. Unfortunatly this
+- * turned out to impact the driver's stability and until we have something
+- * working properly, we're disabling all interrupts during TX & RX processing
+- * or when resetting the counter registers.
+- */
++/**
++ * mtk_star_handle_irq - Interrupt Handler.
++ * @irq: interrupt number.
++ * @data: pointer to a network interface device structure.
++ * Description : this is the driver interrupt service routine.
++ * it mainly handles:
++ * 1. tx complete interrupt for frame transmission.
++ * 2. rx complete interrupt for frame reception.
++ * 3. MAC Management Counter interrupt to avoid counter overflow.
++ **/
+ static irqreturn_t mtk_star_handle_irq(int irq, void *data)
+ {
+- struct mtk_star_priv *priv;
+- struct net_device *ndev;
+-
+- ndev = data;
+- priv = netdev_priv(ndev);
++ struct net_device *ndev = data;
++ struct mtk_star_priv *priv = netdev_priv(ndev);
++ unsigned int intr_status = mtk_star_intr_ack_all(priv);
++ bool rx, tx;
++
++ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
++ napi_schedule_prep(&priv->rx_napi);
++ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
++ napi_schedule_prep(&priv->tx_napi);
++
++ if (rx || tx) {
++ spin_lock(&priv->lock);
++ /* mask Rx and TX Complete interrupt */
++ mtk_star_disable_dma_irq(priv, rx, tx);
++ spin_unlock(&priv->lock);
++
++ if (rx)
++ __napi_schedule(&priv->rx_napi);
++ if (tx)
++ __napi_schedule(&priv->tx_napi);
++ }
+
+- if (netif_running(ndev)) {
+- mtk_star_intr_disable(priv);
+- napi_schedule(&priv->napi);
++ /* interrupt is triggered once any counters reach 0x8000000 */
++ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
++ mtk_star_update_stats(priv);
++ mtk_star_reset_counters(priv);
+ }
+
+ return IRQ_HANDLED;
+@@ -955,7 +995,8 @@ static int mtk_star_enable(struct net_device *ndev)
+ if (ret)
+ goto err_free_skbs;
+
+- napi_enable(&priv->napi);
++ napi_enable(&priv->tx_napi);
++ napi_enable(&priv->rx_napi);
+
+ mtk_star_intr_ack_all(priv);
+ mtk_star_intr_enable(priv);
+@@ -988,7 +1029,8 @@ static void mtk_star_disable(struct net_device *ndev)
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+- napi_disable(&priv->napi);
++ napi_disable(&priv->tx_napi);
++ napi_disable(&priv->rx_napi);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_disable(priv);
+ mtk_star_intr_ack_all(priv);
+@@ -1020,13 +1062,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
+ }
+
+-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
+- struct net_device *ndev)
++static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
++{
++ netif_stop_queue(priv->ndev);
++
++ /* Might race with mtk_star_tx_poll, check again */
++ smp_mb();
++ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
++ return -EBUSY;
++
++ netif_start_queue(priv->ndev);
++
++ return 0;
++}
++
++static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
++{
++ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
++ return 0;
++
++ return __mtk_star_maybe_stop_tx(priv, size);
++}
++
++static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
++ struct net_device *ndev)
+ {
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
++ int nfrags = skb_shinfo(skb)->nr_frags;
++
++ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
++ if (!netif_queue_stopped(ndev)) {
++ netif_stop_queue(ndev);
++ /* This is a hard error, log it. */
++ pr_err_ratelimited("Tx ring full when queue awake\n");
++ }
++ return NETDEV_TX_BUSY;
++ }
+
+ desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
+ if (dma_mapping_error(dev, desc_data.dma_addr))
+@@ -1034,17 +1108,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
+
+ desc_data.skb = skb;
+ desc_data.len = skb->len;
+-
+- spin_lock_bh(&priv->lock);
+-
+ mtk_star_ring_push_head_tx(ring, &desc_data);
+
+ netdev_sent_queue(ndev, skb->len);
+
+- if (mtk_star_ring_full(ring))
+- netif_stop_queue(ndev);
+-
+- spin_unlock_bh(&priv->lock);
++ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
+
+ mtk_star_dma_resume_tx(priv);
+
+@@ -1076,31 +1144,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
+ return ret;
+ }
+
+-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
++static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ {
++ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
++ tx_napi);
++ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct net_device *ndev = priv->ndev;
+- int ret, pkts_compl, bytes_compl;
+- bool wake = false;
+-
+- spin_lock(&priv->lock);
+-
+- for (pkts_compl = 0, bytes_compl = 0;;
+- pkts_compl++, bytes_compl += ret, wake = true) {
+- if (!mtk_star_ring_descs_available(ring))
+- break;
++ unsigned int head = ring->head;
++ unsigned int entry = ring->tail;
+
++ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
+ ret = mtk_star_tx_complete_one(priv);
+ if (ret < 0)
+ break;
++
++ count++;
++ pkts_compl++;
++ bytes_compl += ret;
++ entry = ring->tail;
+ }
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+- if (wake && netif_queue_stopped(ndev))
++ if (unlikely(netif_queue_stopped(ndev)) &&
++ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
+ netif_wake_queue(ndev);
+
+- spin_unlock(&priv->lock);
++ if (napi_complete(napi)) {
++ spin_lock(&priv->lock);
++ mtk_star_enable_dma_irq(priv, false, true);
++ spin_unlock(&priv->lock);
++ }
++
++ return 0;
+ }
+
+ static void mtk_star_netdev_get_stats64(struct net_device *ndev,
+@@ -1180,7 +1257,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ };
+
+-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
++static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
+ {
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+@@ -1188,107 +1265,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *curr_skb, *new_skb;
+ dma_addr_t new_dma_addr;
+- int ret;
++ int ret, count = 0;
+
+- spin_lock(&priv->lock);
+- ret = mtk_star_ring_pop_tail(ring, &desc_data);
+- spin_unlock(&priv->lock);
+- if (ret)
+- return -1;
++ while (count < budget) {
++ ret = mtk_star_ring_pop_tail(ring, &desc_data);
++ if (ret)
++ return -1;
+
+- curr_skb = desc_data.skb;
++ curr_skb = desc_data.skb;
+
+- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+- /* Error packet -> drop and reuse skb. */
+- new_skb = curr_skb;
+- goto push_new_skb;
+- }
++ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
++ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
++ /* Error packet -> drop and reuse skb. */
++ new_skb = curr_skb;
++ goto push_new_skb;
++ }
+
+- /* Prepare new skb before receiving the current one. Reuse the current
+- * skb if we fail at any point.
+- */
+- new_skb = mtk_star_alloc_skb(ndev);
+- if (!new_skb) {
+- ndev->stats.rx_dropped++;
+- new_skb = curr_skb;
+- goto push_new_skb;
+- }
++ /* Prepare new skb before receiving the current one.
++ * Reuse the current skb if we fail at any point.
++ */
++ new_skb = mtk_star_alloc_skb(ndev);
++ if (!new_skb) {
++ ndev->stats.rx_dropped++;
++ new_skb = curr_skb;
++ goto push_new_skb;
++ }
+
+- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+- if (dma_mapping_error(dev, new_dma_addr)) {
+- ndev->stats.rx_dropped++;
+- dev_kfree_skb(new_skb);
+- new_skb = curr_skb;
+- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+- goto push_new_skb;
+- }
++ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
++ if (dma_mapping_error(dev, new_dma_addr)) {
++ ndev->stats.rx_dropped++;
++ dev_kfree_skb(new_skb);
++ new_skb = curr_skb;
++ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
++ goto push_new_skb;
++ }
+
+- /* We can't fail anymore at this point: it's safe to unmap the skb. */
+- mtk_star_dma_unmap_rx(priv, &desc_data);
++ /* We can't fail anymore at this point:
++ * it's safe to unmap the skb.
++ */
++ mtk_star_dma_unmap_rx(priv, &desc_data);
+
+- skb_put(desc_data.skb, desc_data.len);
+- desc_data.skb->ip_summed = CHECKSUM_NONE;
+- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+- desc_data.skb->dev = ndev;
+- netif_receive_skb(desc_data.skb);
++ skb_put(desc_data.skb, desc_data.len);
++ desc_data.skb->ip_summed = CHECKSUM_NONE;
++ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
++ desc_data.skb->dev = ndev;
++ netif_receive_skb(desc_data.skb);
+
+- /* update dma_addr for new skb */
+- desc_data.dma_addr = new_dma_addr;
++ /* update dma_addr for new skb */
++ desc_data.dma_addr = new_dma_addr;
+
+ push_new_skb:
+- desc_data.len = skb_tailroom(new_skb);
+- desc_data.skb = new_skb;
+
+- spin_lock(&priv->lock);
+- mtk_star_ring_push_head_rx(ring, &desc_data);
+- spin_unlock(&priv->lock);
+-
+- return 0;
+-}
+-
+-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
+-{
+- int received, ret;
++ count++;
+
+- for (received = 0, ret = 0; received < budget && ret == 0; received++)
+- ret = mtk_star_receive_packet(priv);
++ desc_data.len = skb_tailroom(new_skb);
++ desc_data.skb = new_skb;
++ mtk_star_ring_push_head_rx(ring, &desc_data);
++ }
+
+ mtk_star_dma_resume_rx(priv);
+
+- return received;
++ return count;
+ }
+
+-static int mtk_star_poll(struct napi_struct *napi, int budget)
++static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ {
+ struct mtk_star_priv *priv;
+- unsigned int status;
+- int received = 0;
+-
+- priv = container_of(napi, struct mtk_star_priv, napi);
+-
+- status = mtk_star_intr_read(priv);
+- mtk_star_intr_ack_all(priv);
+-
+- if (status & MTK_STAR_BIT_INT_STS_TNTC)
+- /* Clean-up all TX descriptors. */
+- mtk_star_tx_complete_all(priv);
++ int work_done = 0;
+
+- if (status & MTK_STAR_BIT_INT_STS_FNRC)
+- /* Receive up to $budget packets. */
+- received = mtk_star_process_rx(priv, budget);
++ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+
+- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
+- mtk_star_update_stats(priv);
+- mtk_star_reset_counters(priv);
++ work_done = mtk_star_rx(priv, budget);
++ if (work_done < budget) {
++ napi_complete_done(napi, work_done);
++ spin_lock(&priv->lock);
++ mtk_star_enable_dma_irq(priv, true, false);
++ spin_unlock(&priv->lock);
+ }
+
+- if (received < budget)
+- napi_complete_done(napi, received);
+-
+- mtk_star_intr_enable(priv);
+-
+- return received;
++ return work_done;
+ }
+
+ static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
+@@ -1551,7 +1606,10 @@ static int mtk_star_probe(struct platform_device *pdev)
+ ndev->netdev_ops = &mtk_star_netdev_ops;
+ ndev->ethtool_ops = &mtk_star_ethtool_ops;
+
+- netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
++ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
++ NAPI_POLL_WEIGHT);
++ netif_tx_napi_add(ndev, &priv->tx_napi, mtk_star_tx_poll,
++ NAPI_POLL_WEIGHT);
+
+ phydev = of_phy_find_device(priv->phy_node);
+ if (phydev) {
+--
+2.39.5
+
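Among the refactor's pieces, mtk_star_tx_ring_avail() replaces the old
abs(head - tail) accounting with the standard circular-buffer rule:
keep one slot unused so that "full" and "empty" stay distinguishable.
A standalone sketch of the computation with worked examples:

```c
/* Free descriptors in a ring of num_descs slots, where head is the
 * producer index and tail the consumer index. One slot stays unused
 * so avail == 0 (full) never collides with head == tail (empty). */
static unsigned int tx_ring_avail(unsigned int head, unsigned int tail,
				  unsigned int num_descs)
{
	if (tail > head)
		return tail - head - 1;
	return num_descs - head + tail - 1;
}

/* With num_descs = 512 (MTK_STAR_RING_NUM_DESCS):
 *   head == tail         -> 511 free (empty ring)
 *   tail == head + 1     ->   0 free (full ring)
 *   head = 510, tail = 2 ->   3 free (wrapped case)
 */
```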
--- /dev/null
+From cee1921af05bbcea6fb88ce3a4d176129a68b1b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 11:08:26 +0200
+Subject: net: fec: ERR007885 Workaround for conventional TX
+
+From: Mattias Barthel <mattias.barthel@atlascopco.com>
+
+[ Upstream commit a179aad12badc43201cbf45d1e8ed2c1383c76b9 ]
+
+Also activate the TX hang workaround in fec_enet_txq_submit_skb()
+when TSO is not enabled.
+
+Errata: ERR007885
+
+Symptoms: NETDEV WATCHDOG: eth0 (fec): transmit queue 0 timed out
+
+As described in commit 37d6017b84f7 ("net: fec: Workaround for imx6sx
+enet tx hang when enable three queues"): there is a TDAR race
+condition for multiQ when the software sets TDAR and the uDMA clears
+TDAR simultaneously or within a small window (2-4 cycles). This will
+cause the udma_tx and udma_tx_arbiter state machines to hang.
+
+So the workaround is to check the TDAR status four times: if TDAR has
+been cleared by the hardware, write TDAR; otherwise, don't set TDAR.
+
+Fixes: 53bb20d1faba ("net: fec: add variable reg_desc_active to speed things up")
+Signed-off-by: Mattias Barthel <mattias.barthel@atlascopco.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250429090826.3101258-1-mattiasbarthel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 7b5585bc21d8f..5c860eef03007 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -635,7 +635,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+- writel(0, txq->bd.reg_desc_active);
++ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active) ||
++ !readl(txq->bd.reg_desc_active))
++ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+ }
+--
+2.39.5
+
--- /dev/null
+From 134918b697db53a983d3d80d67f881c9dde718e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 10:38:00 +0800
+Subject: net: hns3: add support for external loopback test
+
+From: Yonglong Liu <liuyonglong@huawei.com>
+
+[ Upstream commit 04b6ba143521f4485b7f2c36c655b262a79dae97 ]
+
+This patch adds support for an external loopback test.
+A successful test requires the link to be up with full duplex. The
+driver does the external loopback test first, and then the whole
+offline test.
+
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8e6b9c6ea5a5 ("net: hns3: fix an interrupt residual problem")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +
+ .../net/ethernet/hisilicon/hns3/hns3_enet.c | 51 ++++++++++++++++
+ .../net/ethernet/hisilicon/hns3/hns3_enet.h | 3 +
+ .../ethernet/hisilicon/hns3/hns3_ethtool.c | 61 +++++++++++++------
+ .../hisilicon/hns3/hns3pf/hclge_main.c | 26 +++++---
+ 5 files changed, 119 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index fa16cdcee10db..8d1b66281c095 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -178,6 +178,7 @@ struct hns3_mac_stats {
+
+ /* hnae3 loop mode */
+ enum hnae3_loop {
++ HNAE3_LOOP_EXTERNAL,
+ HNAE3_LOOP_APP,
+ HNAE3_LOOP_SERIAL_SERDES,
+ HNAE3_LOOP_PARALLEL_SERDES,
+@@ -802,6 +803,7 @@ struct hnae3_roce_private_info {
+ #define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
+ #define HNAE3_SUPPORT_VF BIT(3)
+ #define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
++#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
+
+ #define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+ #define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 60592e8ddf3b8..03fe5e0729f64 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5642,6 +5642,57 @@ int hns3_set_channels(struct net_device *netdev,
+ return 0;
+ }
+
++void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
++{
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = priv->ae_handle;
++ int i;
++
++ if (!if_running)
++ return;
++
++ netif_carrier_off(ndev);
++ netif_tx_disable(ndev);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_vector_disable(&priv->tqp_vector[i]);
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_disable(h->kinfo.tqp[i]);
++
++ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
++ * during reset process, because driver may not be able
++ * to disable the ring through firmware when downing the netdev.
++ */
++ if (!hns3_nic_resetting(ndev))
++ hns3_nic_reset_all_ring(priv->ae_handle);
++
++ hns3_reset_tx_queue(priv->ae_handle);
++}
++
++void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
++{
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = priv->ae_handle;
++ int i;
++
++ if (!if_running)
++ return;
++
++ hns3_nic_reset_all_ring(priv->ae_handle);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_vector_enable(&priv->tqp_vector[i]);
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_enable(h->kinfo.tqp[i]);
++
++ netif_tx_wake_all_queues(ndev);
++
++ if (h->ae_algo->ops->get_status(h))
++ netif_carrier_on(ndev);
++}
++
+ static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index f60ba2ee8b8b1..f3f7f370807f0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -729,4 +729,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
+ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode tx_mode,
+ enum dim_cq_period_mode rx_mode);
++
++void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
++void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 17fa4e7684cd2..b01ce4fd6bc43 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -67,7 +67,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
+
+ #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
+
+-#define HNS3_SELF_TEST_TYPE_NUM 4
+ #define HNS3_NIC_LB_TEST_PKT_NUM 1
+ #define HNS3_NIC_LB_TEST_RING_ID 0
+ #define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+@@ -93,6 +92,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
++ case HNAE3_LOOP_EXTERNAL:
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
+ break;
+ default:
+@@ -300,6 +300,10 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+
+ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+ {
++ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
++ st_param[HNAE3_LOOP_EXTERNAL][1] =
++ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
++
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
+@@ -318,17 +322,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
+-static void hns3_selftest_prepare(struct net_device *ndev,
+- bool if_running, int (*st_param)[2])
++static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+- if (netif_msg_ifdown(h))
+- netdev_info(ndev, "self test start\n");
+-
+- hns3_set_selftest_param(h, st_param);
+-
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+@@ -367,18 +365,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+
+ if (if_running)
+ ndev->netdev_ops->ndo_open(ndev);
+-
+- if (netif_msg_ifdown(h))
+- netdev_info(ndev, "self test end\n");
+ }
+
+ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ struct ethtool_test *eth_test, u64 *data)
+ {
+- int test_index = 0;
++ int test_index = HNAE3_LOOP_APP;
+ u32 i;
+
+- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
++ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+
+ if (!st_param[i][1])
+@@ -397,6 +392,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ }
+ }
+
++static void hns3_do_external_lb(struct net_device *ndev,
++ struct ethtool_test *eth_test, u64 *data)
++{
++ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
++ if (!data[HNAE3_LOOP_EXTERNAL])
++ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
++ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
++
++ if (data[HNAE3_LOOP_EXTERNAL])
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
++}
++
+ /**
+ * hns3_nic_self_test - self test
+ * @ndev: net device
+@@ -406,7 +415,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+ {
+- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = priv->ae_handle;
++ int st_param[HNAE3_LOOP_NONE][2];
+ bool if_running = netif_running(ndev);
+
+ if (hns3_nic_resetting(ndev)) {
+@@ -414,13 +425,29 @@ static void hns3_self_test(struct net_device *ndev,
+ return;
+ }
+
+- /* Only do offline selftest, or pass by default */
+- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
++ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
+ return;
+
+- hns3_selftest_prepare(ndev, if_running, st_param);
++ if (netif_msg_ifdown(h))
++ netdev_info(ndev, "self test start\n");
++
++ hns3_set_selftest_param(h, st_param);
++
++ /* external loopback test requires that the link is up and the duplex is
++ * full, do external test first to reduce the whole test time
++ */
++ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
++ hns3_external_lb_prepare(ndev, if_running);
++ hns3_do_external_lb(ndev, eth_test, data);
++ hns3_external_lb_restore(ndev, if_running);
++ }
++
++ hns3_selftest_prepare(ndev, if_running);
+ hns3_do_selftest(ndev, st_param, eth_test, data);
+ hns3_selftest_restore(ndev, if_running);
++
++ if (netif_msg_ifdown(h))
++ netdev_info(ndev, "self test end\n");
+ }
+
+ static void hns3_update_limit_promisc_mode(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 35411f9a14323..a0284a9d90e89 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -151,10 +151,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
+ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
+- "App Loopback test",
+- "Serdes serial Loopback test",
+- "Serdes parallel Loopback test",
+- "Phy Loopback test"
++ "External Loopback test",
++ "App Loopback test",
++ "Serdes serial Loopback test",
++ "Serdes parallel Loopback test",
++ "Phy Loopback test"
+ };
+
+ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
+@@ -754,7 +755,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+ #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
+ HNAE3_SUPPORT_PHY_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
+- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
++ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
++ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+@@ -776,9 +778,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
+ }
+
+- count += 2;
++ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
++ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
++ count += 1;
++ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
+ if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) ||
+@@ -806,6 +811,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
+ size, p);
+ p = hclge_tqps_get_strings(handle, p);
+ } else if (stringset == ETH_SS_TEST) {
++ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
++ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
++ ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
+ ETH_GSTRING_LEN);
+@@ -8060,7 +8070,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+- int ret;
++ int ret = 0;
+
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+@@ -8087,6 +8097,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
++ case HNAE3_LOOP_EXTERNAL:
++ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+--
+2.39.5
+
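+The new hns3_do_external_lb() helper above shows the ethtool self-test
+contract: one u64 result per test in data[] (zero meaning pass), with
+overall status signalled through eth_test->flags. A minimal hedged
+sketch of that contract, where run_external_loopback() is a
+hypothetical stand-in for the driver-specific work:
+
+	static void foo_self_test(struct net_device *ndev,
+				  struct ethtool_test *eth_test, u64 *data)
+	{
+		/* Run the external phase only when userspace asked for it. */
+		if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+			data[0] = run_external_loopback(ndev); /* 0 == pass */
+			if (data[0])
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+			/* Report that the external test actually ran. */
+			eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+		}
+	}
+
+Userspace requests this phase with "ethtool -t <dev> external_lb",
+which assumes the port is physically looped back.
+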
--- /dev/null
+From 7d9e1aeb0b157421ccda2aca158e0b52c6a2668e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:52 +0800
+Subject: net: hns3: defer calling ptp_clock_register()
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 4971394d9d624f91689d766f31ce668d169d9959 ]
+
+Currently, ptp_clock_register() is called before the related PTP
+resources are ready. This may cause unexpected results if an upper
+layer calls the PTP API during that window. Fix it by moving the
+ptp_clock_register() call to the end of the function.
+
+Fixes: 0bf5eb788512 ("net: hns3: add support for PTP")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250430093052.2400464-5-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 4d4cea1f50157..b7cf9fbf97183 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -452,6 +452,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
++
++ spin_lock_init(&ptp->lock);
++ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
++ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
++ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
++ hdev->ptp = ptp;
++
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+@@ -463,12 +470,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ return -ENODEV;
+ }
+
+- spin_lock_init(&ptp->lock);
+- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+- hdev->ptp = ptp;
+-
+ return 0;
+ }
+
+--
+2.39.5
+
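+This follows the publish-last rule for kernel registration APIs: once
+ptp_clock_register() returns, the clock is visible to user space and
+its callbacks may fire, so every field those callbacks touch must
+already be valid. A condensed sketch of the ordering (the driver's
+actual error handling differs slightly):
+
+	/* 1. Fully initialize private state first. */
+	spin_lock_init(&ptp->lock);
+	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+	hdev->ptp = ptp;
+
+	/* 2. Only then publish the object; callbacks are now safe. */
+	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+	if (IS_ERR(ptp->clock))
+		return PTR_ERR(ptp->clock);
+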
--- /dev/null
+From e067ed1f7adb979a1da9165b0c8d6b3690ee1da0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:50 +0800
+Subject: net: hns3: fix an interrupt residual problem
+
+From: Yonglong Liu <liuyonglong@huawei.com>
+
+[ Upstream commit 8e6b9c6ea5a55045eed6526d8ee49e93192d1a58 ]
+
+When a VF is passed through to a VM and the VM is killed, a reported
+interrupt may not have been handled; it will remain and will not be
+cleared by the NIC engine even with an FLR or TQP reset. When the VM
+restarts, the interrupt of the first vector may be dropped by the
+second enable_irq in vfio; see the issue below:
+https://gitlab.com/qemu-project/qemu/-/issues/2884#note_2423361621
+
+We note that vfio has always behaved this way and that the interrupt is
+residue left in the NIC engine, so fix the problem by moving the vector
+enable process out of the enable_irq loop.
+
+Fixes: 08a100689d4b ("net: hns3: re-organize vector handle")
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250430093052.2400464-3-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/hisilicon/hns3/hns3_enet.c | 82 +++++++++----------
+ 1 file changed, 39 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 03fe5e0729f64..af33074267ec9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
+ writel(mask_en, tqp_vector->mask_addr);
+ }
+
+-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+ napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
+-
+- /* enable vector */
+- hns3_mask_vector_irq(tqp_vector, 1);
+ }
+
+-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+- /* disable vector */
+- hns3_mask_vector_irq(tqp_vector, 0);
+-
+ disable_irq(tqp_vector->vector_irq);
+ napi_disable(&tqp_vector->napi);
+ cancel_work_sync(&tqp_vector->rx_group.dim.work);
+@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+ return 0;
+ }
+
++static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
++{
++ struct hns3_nic_priv *priv = netdev_priv(netdev);
++ struct hnae3_handle *h = priv->ae_handle;
++ u16 i;
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_irq_enable(&priv->tqp_vector[i]);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_enable(h->kinfo.tqp[i]);
++}
++
++static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
++{
++ struct hns3_nic_priv *priv = netdev_priv(netdev);
++ struct hnae3_handle *h = priv->ae_handle;
++ u16 i;
++
++ for (i = 0; i < h->kinfo.num_tqps; i++)
++ hns3_tqp_disable(h->kinfo.tqp[i]);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
++
++ for (i = 0; i < priv->vector_num; i++)
++ hns3_irq_disable(&priv->tqp_vector[i]);
++}
++
+ static int hns3_nic_net_up(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+- int i, j;
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+- /* enable the vectors */
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_enable(&priv->tqp_vector[i]);
+-
+- /* enable rcb */
+- for (j = 0; j < h->kinfo.num_tqps; j++)
+- hns3_tqp_enable(h->kinfo.tqp[j]);
++ hns3_enable_irqs_and_tqps(netdev);
+
+ /* start the ae_dev */
+ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+- while (j--)
+- hns3_tqp_disable(h->kinfo.tqp[j]);
+-
+- for (j = i - 1; j >= 0; j--)
+- hns3_vector_disable(&priv->tqp_vector[j]);
++ hns3_disable_irqs_and_tqps(netdev);
+ }
+
+ return ret;
+@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
+ static void hns3_nic_net_down(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+- struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops;
+- int i;
+
+- /* disable vectors */
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_disable(&priv->tqp_vector[i]);
+-
+- /* disable rcb */
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_disable(h->kinfo.tqp[i]);
++ hns3_disable_irqs_and_tqps(netdev);
+
+ /* stop ae_dev */
+ ops = priv->ae_handle->ae_algo->ops;
+@@ -5645,8 +5652,6 @@ int hns3_set_channels(struct net_device *netdev,
+ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+- struct hnae3_handle *h = priv->ae_handle;
+- int i;
+
+ if (!if_running)
+ return;
+@@ -5654,11 +5659,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_disable(&priv->tqp_vector[i]);
+-
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_disable(h->kinfo.tqp[i]);
++ hns3_disable_irqs_and_tqps(ndev);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+@@ -5674,18 +5675,13 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+- int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+- for (i = 0; i < priv->vector_num; i++)
+- hns3_vector_enable(&priv->tqp_vector[i]);
+-
+- for (i = 0; i < h->kinfo.num_tqps; i++)
+- hns3_tqp_enable(h->kinfo.tqp[i]);
++ hns3_enable_irqs_and_tqps(ndev);
+
+ netif_tx_wake_all_queues(ndev);
+
+--
+2.39.5
+
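+The essence of the fix is phase ordering. Previously each vector did
+enable_irq() and the hardware unmask back to back inside one loop, so
+(per the report) a residual interrupt delivered on unmask could race
+with the enable_irq of a later vector and be dropped by vfio. The new
+helpers split the work into full passes (condensed from the diff):
+
+	/* Phase 1: enable every Linux IRQ while HW vectors stay masked. */
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_irq_enable(&priv->tqp_vector[i]);
+
+	/* Phase 2: unmask the HW vectors; any residual interrupt now
+	 * lands on an IRQ that is already enabled and cannot be lost.
+	 */
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+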
--- /dev/null
+From 3a55534416df8e5d649dc914d1ec36a5d9b940f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:51 +0800
+Subject: net: hns3: fixed debugfs tm_qset size
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit e317aebeefcb3b0c71f2305af3c22871ca6b3833 ]
+
+The size of the debugfs tm_qset file is limited to 64 KB, which is
+too small in scenarios with 1280 qsets. Expand it to 1 MB.
+
+Fixes: 5e69ea7ee2a6 ("net: hns3: refactor the debugfs process")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Peiyang Wang <wangpeiyang1@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250430093052.2400464-4-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index bd801e35d51ea..d6fe09ca03d27 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ .name = "tm_qset",
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dentry = HNS3_DBG_DENTRY_TM,
+- .buf_len = HNS3_DBG_READ_LEN,
++ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+--
+2.39.5
+
--- /dev/null
+From a7ea783eba0602c5f7209ec33680bb39c4c44d84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 17:30:49 +0800
+Subject: net: hns3: store rx VLAN tag offload state for VF
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit ef2383d078edcbe3055032436b16cdf206f26de2 ]
+
+The VF driver fails to store the RX VLAN tag strip state when the
+user changes the RX VLAN tag offload state, and it defaults to
+enabling RX VLAN tag stripping when re-initializing the VF device
+after a reset. So if the user disables RX VLAN tag offload and then
+triggers a reset, the HW will still strip the VLAN tag from the packet
+and fill it into the RX BD, but the VF driver will ignore it because
+RX VLAN tag offload is disabled. This may cause the RX VLAN tag to be
+dropped.
+
+Fixes: b2641e2ad456 ("net: hns3: Add support of hardware rx-vlan-offload to HNS3 VF driver")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250430093052.2400464-2-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../hisilicon/hns3/hns3vf/hclgevf_main.c | 25 ++++++++++++++-----
+ .../hisilicon/hns3/hns3vf/hclgevf_main.h | 1 +
+ 2 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 7bb01eafba745..628d5c5ad75de 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1761,9 +1761,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ rtnl_unlock();
+ }
+
+-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
+ {
+- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1772,6 +1771,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ }
+
++static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++{
++ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++ int ret;
++
++ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
++ if (ret)
++ return ret;
++
++ hdev->rxvtag_strip_en = enable;
++ return 0;
++}
++
+ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
+ {
+ #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
+@@ -2684,12 +2696,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+ return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+ }
+
+-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
++static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
++ bool rxvtag_strip_en)
+ {
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
++ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+@@ -3359,7 +3372,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+ if (ret)
+ return ret;
+
+- ret = hclgevf_init_vlan_config(hdev);
++ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+@@ -3472,7 +3485,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ goto err_config;
+ }
+
+- ret = hclgevf_init_vlan_config(hdev);
++ ret = hclgevf_init_vlan_config(hdev, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 2b216ac96914c..a6468fe2ec326 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -315,6 +315,7 @@ struct hclgevf_dev {
+ int *vector_irq;
+
+ bool gro_en;
++ bool rxvtag_strip_en;
+
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
+--
+2.39.5
+
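+This is the standard cache-and-replay pattern for device features that
+a reset wipes: record the user's most recent request in the driver's
+private struct and replay that, rather than a hard-coded default, on
+every re-initialization. The shape of the fix, condensed:
+
+	/* User path: apply to HW, then remember what was asked for. */
+	ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+	if (ret)
+		return ret;
+	hdev->rxvtag_strip_en = enable;
+
+	/* Reset path: replay the cached state instead of "true". */
+	ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
+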
--- /dev/null
+From c06fd790da1954d9a094e5ba134ceed8e7aaaa91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Apr 2025 17:32:09 +0200
+Subject: net: ipv6: fix UDPv6 GSO segmentation with NAT
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit b936a9b8d4a585ccb6d454921c36286bfe63e01d ]
+
+If any address or port is changed, update it in all packets and
+recalculate the checksum.
+
+Fixes: 9fd1ff5d2ac7 ("udp: Support UDP fraglist GRO/GSO.")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250426153210.14044-1-nbd@nbd.name
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/udp_offload.c | 61 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 60 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 1a57dd8aa513b..612da8ec1081c 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -245,6 +245,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+ return segs;
+ }
+
++static void __udpv6_gso_segment_csum(struct sk_buff *seg,
++ struct in6_addr *oldip,
++ const struct in6_addr *newip,
++ __be16 *oldport, __be16 newport)
++{
++ struct udphdr *uh = udp_hdr(seg);
++
++ if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
++ return;
++
++ if (uh->check) {
++ inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
++ newip->s6_addr32, true);
++
++ inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
++ false);
++ if (!uh->check)
++ uh->check = CSUM_MANGLED_0;
++ }
++
++ *oldip = *newip;
++ *oldport = newport;
++}
++
++static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
++{
++ const struct ipv6hdr *iph;
++ const struct udphdr *uh;
++ struct ipv6hdr *iph2;
++ struct sk_buff *seg;
++ struct udphdr *uh2;
++
++ seg = segs;
++ uh = udp_hdr(seg);
++ iph = ipv6_hdr(seg);
++ uh2 = udp_hdr(seg->next);
++ iph2 = ipv6_hdr(seg->next);
++
++ if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
++ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
++ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
++ return segs;
++
++ while ((seg = seg->next)) {
++ uh2 = udp_hdr(seg);
++ iph2 = ipv6_hdr(seg);
++
++ __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
++ &uh2->source, uh->source);
++ __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
++ &uh2->dest, uh->dest);
++ }
++
++ return segs;
++}
++
+ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features,
+ bool is_ipv6)
+@@ -257,7 +313,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+
+ udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
+
+- return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
++ if (is_ipv6)
++ return __udpv6_gso_segment_list_csum(skb);
++ else
++ return __udpv4_gso_segment_list_csum(skb);
+ }
+
+ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+--
+2.39.5
+
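+The helpers used above do RFC 1624 incremental checksum updates:
+rather than re-summing the packet, the old 16-bit words are subtracted
+and the new ones added in one's-complement arithmetic,
+HC' = ~(~HC + ~m + m'). The CSUM_MANGLED_0 fixup exists because a
+transmitted UDP checksum of zero means "no checksum", which IPv6
+forbids, so a computed 0x0000 must go out as 0xffff. A hedged sketch
+of a single port rewrite, given skb and its struct udphdr *uh (the
+port value is an arbitrary example):
+
+	__be16 oldport = uh->dest, newport = htons(5353);
+
+	if (uh->check) {
+		/* pseudohdr=false: ports are not pseudo-header fields. */
+		inet_proto_csum_replace2(&uh->check, skb, oldport,
+					 newport, false);
+		if (!uh->check)
+			uh->check = CSUM_MANGLED_0;
+	}
+	uh->dest = newport;
+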
--- /dev/null
+From 3f489f4587d08ace26f1b78c8b3cbd4530d9c555 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:55:27 +0530
+Subject: net: lan743x: Fix memleak issue when GSO enabled
+
+From: Thangaraj Samynathan <thangaraj.s@microchip.com>
+
+[ Upstream commit 2d52e2e38b85c8b7bc00dca55c2499f46f8c8198 ]
+
+Always map the skb to the LS descriptor. Previously, the skb was
+mapped to the EXT descriptor when the number of fragments was zero
+with GSO enabled. Mapping the skb to the EXT descriptor prevents it
+from being freed, leading to a memory leak.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Thangaraj Samynathan <thangaraj.s@microchip.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250429052527.10031-1-thangaraj.s@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 8 ++++++--
+ drivers/net/ethernet/microchip/lan743x_main.h | 1 +
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index a3392c74372a8..fe919c1974505 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1448,6 +1448,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_first;
+ }
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+@@ -1517,6 +1518,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ tx->frame_first = 0;
+ tx->frame_data0 = 0;
+ tx->frame_tail = 0;
++ tx->frame_last = 0;
+ return -ENOMEM;
+ }
+
+@@ -1557,16 +1559,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++ tx->frame_last = tx->frame_tail;
+ }
+
+- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+- buffer_info = &tx->buffer_info[tx->frame_tail];
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
++ buffer_info = &tx->buffer_info[tx->frame_last];
+ buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
+ if (ignore_sync)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+
++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ tx->last_tail = tx->frame_tail;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 6080028c1df2c..a1226ab0fb421 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -658,6 +658,7 @@ struct lan743x_tx {
+ u32 frame_first;
+ u32 frame_data0;
+ u32 frame_tail;
++ u32 frame_last;
+
+ struct lan743x_tx_buffer_info *buffer_info;
+
+--
+2.39.5
+
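+The underlying rule: the skb pointer must live in the buffer_info slot
+of the descriptor that completes last, i.e. the one carrying
+TX_DESC_DATA0_LS_, because the TX-completion path frees the skb when
+that slot is cleaned. Recording frame_last at every point where LS_ is
+set makes the attachment explicit (condensed from the diff):
+
+	/* When closing the frame: */
+	tx->frame_data0 |= TX_DESC_DATA0_LS_;
+	tx->frame_last = tx->frame_tail;	/* remember the LS slot */
+
+	/* When handing over the skb: */
+	buffer_info = &tx->buffer_info[tx->frame_last];
+	buffer_info->skb = skb;		/* freed on LS completion */
+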
--- /dev/null
+From 93b6eff410237810839e25efea2c027aa6143654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 11:36:11 +0300
+Subject: net/mlx5: E-switch, Fix error handling for enabling roce
+
+From: Chris Mi <cmi@nvidia.com>
+
+[ Upstream commit 90538d23278a981e344d364e923162fce752afeb ]
+
+The cited commit assumes that enabling RoCE always succeeds, but that
+is not true. Add error handling for it.
+
+Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
+Signed-off-by: Chris Mi <cmi@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250423083611.324567-6-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 ++++-
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 9 +++++----
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.h | 4 ++--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 829f703233a9e..766a05f557fba 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3138,7 +3138,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ int err;
+
+ mutex_init(&esw->offloads.termtbl_mutex);
+- mlx5_rdma_enable_roce(esw->dev);
++ err = mlx5_rdma_enable_roce(esw->dev);
++ if (err)
++ goto err_roce;
+
+ err = mlx5_esw_host_number_init(esw);
+ if (err)
+@@ -3198,6 +3200,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ esw_offloads_metadata_uninit(esw);
+ err_metadata:
+ mlx5_rdma_disable_roce(esw->dev);
++err_roce:
+ mutex_destroy(&esw->offloads.termtbl_mutex);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index ab5afa6c5e0fd..e61a4fa46d772 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -152,17 +152,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
+ mlx5_nic_vport_disable_roce(dev);
+ }
+
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ {
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, roce))
+- return;
++ return 0;
+
+ err = mlx5_nic_vport_enable_roce(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+- return;
++ return err;
+ }
+
+ err = mlx5_rdma_add_roce_addr(dev);
+@@ -177,10 +177,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ goto del_roce_addr;
+ }
+
+- return;
++ return err;
+
+ del_roce_addr:
+ mlx5_rdma_del_roce_addr(dev);
+ disable_roce:
+ mlx5_nic_vport_disable_roce(dev);
++ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+index 750cff2a71a4b..3d9e76c3d42fb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+@@ -8,12 +8,12 @@
+
+ #ifdef CONFIG_MLX5_ESWITCH
+
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
+
+ #else /* CONFIG_MLX5_ESWITCH */
+
+-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
++static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
+ static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
+
+ #endif /* CONFIG_MLX5_ESWITCH */
+--
+2.39.5
+
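+Returning an error also keeps esw_offloads_enable()'s goto-unwind
+chain consistent: each step that can fail jumps to the label that
+undoes only what has completed so far. Reduced to its shape, with the
+intermediate steps collapsed into a hypothetical next_step():
+
+	err = mlx5_rdma_enable_roce(esw->dev);
+	if (err)
+		goto err_roce;		/* nothing to undo yet */
+
+	err = next_step(esw);
+	if (err)
+		goto err_next;		/* undo RoCE, then the mutex */
+
+	return 0;
+
+err_next:
+	mlx5_rdma_disable_roce(esw->dev);
+err_roce:
+	mutex_destroy(&esw->offloads.termtbl_mutex);
+	return err;
+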
--- /dev/null
+From 1dafb1c70e9dbcf6fa49c1a12773687ccbe669fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 11:36:08 +0300
+Subject: net/mlx5: E-Switch, Initialize MAC Address for Default GID
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit 5d1a04f347e6cbf5ffe74da409a5d71fbe8c5f19 ]
+
+Initialize the source MAC address when creating the default GID entry.
+Since this entry is used only for loopback traffic, it only needs to
+be a unicast address. A zeroed-out MAC address is sufficient for this
+purpose.
+Without this fix, random bits would be assigned as the source address.
+If these bits formed a multicast address, the firmware would return an
+error, preventing the user from switching to switchdev mode:
+
+Error: mlx5_core: Failed setting eswitch to offloads.
+kernel answers: Invalid argument
+
+Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250423083611.324567-3-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 540cf05f63739..ab5afa6c5e0fd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -130,8 +130,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
+
+ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+ {
++ u8 mac[ETH_ALEN] = {};
+ union ib_gid gid;
+- u8 mac[ETH_ALEN];
+
+ mlx5_rdma_make_default_gid(dev, &gid);
+ return mlx5_core_roce_gid_set(dev, 0,
+--
+2.39.5
+
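+The bug class here is simply an uninitialized stack array: whatever
+bytes happen to be on the stack become the source MAC, and a MAC is
+multicast exactly when bit 0 of its first octet is set (what
+is_multicast_ether_addr() tests). Zero-initialization therefore
+guarantees a unicast address:
+
+	u8 mac[ETH_ALEN] = {};	/* 00:00:00:00:00:00, unicast */
+
+	/* bit 0 of octet 0 is the multicast bit; zero here */
+	bool is_mcast = mac[0] & 0x01;	/* always false */
+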
--- /dev/null
+From 3d59b03c5680a5726cce2f994b565dee9d70f9c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:05 -0300
+Subject: net_sched: drr: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f99a3fbf023e20b626be4b0f042463d598050c9a ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of drr, there won't be a UAF, but the code will add the
+same class to the active list twice, which will cause memory
+corruption.
+
+In addition to checking for qlen being zero, this patch checks whether
+the class was already added to the active_list (cl_is_active) before
+adding it to the list, to cover the reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-2-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_drr.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index 80a88e208d2bc..e33a72c356c87 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -36,6 +36,11 @@ struct drr_sched {
+ struct Qdisc_class_hash clhash;
+ };
+
++static bool cl_is_active(struct drr_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct drr_sched *q = qdisc_priv(sch);
+@@ -345,7 +350,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct drr_sched *q = qdisc_priv(sch);
+ struct drr_class *cl;
+ int err = 0;
+- bool first;
+
+ cl = drr_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -355,7 +359,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+@@ -365,7 +368,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- if (first) {
++ if (!cl_is_active(cl)) {
+ list_add_tail(&cl->alist, &q->active);
+ cl->deficit = cl->quantum;
+ }
+--
+2.39.5
+
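+The reentrancy is easiest to see as a (simplified) call chain: netem's
+packet duplication re-enters the parent's enqueue before the outer
+call returns, so a "queue was empty" snapshot taken before
+qdisc_enqueue() goes stale:
+
+	drr_enqueue(skb, sch)            /* qlen == 0 -> first = true   */
+	  qdisc_enqueue(skb, cl->qdisc)  /* child is netem              */
+	    netem_enqueue(skb)           /* duplicates the packet ...   */
+	      drr_enqueue(dup, sch)      /* ... and re-enters the parent */
+	        list_add_tail(&cl->alist, &q->active)  /* cl now active */
+	  /* outer call resumes; "first" is stale */
+	  list_add_tail(&cl->alist, &q->active)        /* double add    */
+
+Testing list_empty(&cl->alist) at the insertion point is immune to
+this, which is why the same cl_is_active() guard appears in the ets
+and qfq fixes below, and an n_active check in the hfsc one.
+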
--- /dev/null
+From cf2c93bee815d309ec82d32ef2f6b48a54dfd824 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:07 -0300
+Subject: net_sched: ets: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 1a6d0c00fa07972384b0c308c72db091d49988b6 ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of ets, there won't be a UAF, but the code will add the
+same class to the active list twice, which will cause memory
+corruption.
+
+In addition to checking for qlen being zero, this patch checks whether
+the class was already added to the active_list (cl_is_active) before
+doing the addition to cater for the reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-4-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_ets.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index d686ea7e8db49..07fae45f58732 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
+ [TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
+ };
+
++static bool cl_is_active(struct ets_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
+ unsigned int *quantum,
+ struct netlink_ext_ack *extack)
+@@ -424,7 +429,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct ets_sched *q = qdisc_priv(sch);
+ struct ets_class *cl;
+ int err = 0;
+- bool first;
+
+ cl = ets_classify(skb, sch, &err);
+ if (!cl) {
+@@ -434,7 +438,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+@@ -444,7 +447,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return err;
+ }
+
+- if (first && !ets_class_is_strict(q, cl)) {
++ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
+ list_add_tail(&cl->alist, &q->active);
+ cl->deficit = cl->quantum;
+ }
+--
+2.39.5
+
--- /dev/null
+From 7b757921920fc9cf1484454b6aff4f13a3cf4e5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:06 -0300
+Subject: net_sched: hfsc: Fix a UAF vulnerability in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 141d34391abbb315d68556b7c67ad97885407547 ]
+
+As described in Gerrard's report [1], we have a UAF case when an hfsc class
+has a netem child qdisc. The crux of the issue is that hfsc is assuming
+that checking for cl->qdisc->q.qlen == 0 guarantees that it hasn't inserted
+the class in the vttree or eltree (which is not true for the netem
+duplicate case).
+
+This patch checks the n_active class variable to make sure that the code
+won't insert the class in the vttree or eltree twice, catering for the
+reentrant case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Reported-by: Gerrard Tai <gerrard.tai@starlabs.sg>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-3-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_hfsc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 85c296664c9ab..d6c5fc543f652 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1572,7 +1572,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ return err;
+ }
+
+- if (first) {
++ if (first && !cl->cl_nactive) {
+ if (cl->cl_flags & HFSC_RSC)
+ init_ed(cl, len);
+ if (cl->cl_flags & HFSC_FSC)
+--
+2.39.5
+
--- /dev/null
+From 00ddf4b5e653c9529dac25e80691ae5e610b7902 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 19:07:08 -0300
+Subject: net_sched: qfq: Fix double list add in class with netem as child
+ qdisc
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit f139f37dcdf34b67f5bf92bc8e0f7f6b3ac63aa4 ]
+
+As described in Gerrard's report [1], there are use cases where a netem
+child qdisc will make the parent qdisc's enqueue callback reentrant.
+In the case of qfq, there won't be a UAF, but the code will add the
+same class to the active list twice, which will cause memory
+corruption.
+
+This patch checks whether the class was already added to the agg->active
+list (cl_is_active) before doing the addition to cater for the reentrant
+case.
+
+[1] https://lore.kernel.org/netdev/CAHcdcOm+03OD2j6R0=YHKqmy=VgJ8xEOKuP6c7mSgnp-TEJJbw@mail.gmail.com/
+
+Fixes: 37d9cf1a3ce3 ("sched: Fix detection of empty queues in child qdiscs")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20250425220710.3964791-5-victor@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_qfq.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index b1dbe03dde1b5..a198145f1251f 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -204,6 +204,11 @@ struct qfq_sched {
+ */
+ enum update_reason {enqueue, requeue};
+
++static bool cl_is_active(struct qfq_class *cl)
++{
++ return !list_empty(&cl->alist);
++}
++
+ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ {
+ struct qfq_sched *q = qdisc_priv(sch);
+@@ -1223,7 +1228,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct qfq_class *cl;
+ struct qfq_aggregate *agg;
+ int err = 0;
+- bool first;
+
+ cl = qfq_classify(skb, sch, &err);
+ if (cl == NULL) {
+@@ -1245,7 +1249,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+- first = !cl->qdisc->q.qlen;
+ err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ pr_debug("qfq_enqueue: enqueue failed %d\n", err);
+@@ -1262,8 +1265,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ++sch->q.qlen;
+
+ agg = cl->agg;
+- /* if the queue was not empty, then done here */
+- if (!first) {
++ /* if the class is active, then done here */
++ if (cl_is_active(cl)) {
+ if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+ list_first_entry(&agg->active, struct qfq_class, alist)
+ == cl && cl->deficit < len)
+--
+2.39.5
+
--- /dev/null
+From af8f736d84ee7f3717e6576a6a6d40af72520156 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 10:42:01 -0600
+Subject: nvme-tcp: fix premature queue removal and I/O failover
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Liang <mliang@purestorage.com>
+
+[ Upstream commit 77e40bbce93059658aee02786a32c5c98a240a8a ]
+
+This patch addresses a data corruption issue observed in nvme-tcp during
+testing.
+
+In an NVMe native multipath setup, when an I/O timeout occurs, all
+inflight I/Os are canceled almost immediately after the kernel socket is
+shut down. These canceled I/Os are reported as host path errors,
+triggering a failover that succeeds on a different path.
+
+However, at this point, the original I/O may still be outstanding in the
+host's network transmission path (e.g., the NIC’s TX queue). From the
+user-space app's perspective, the buffer associated with the I/O is
+considered completed, since it was acknowledged on the other path, and
+may be reused for new I/O requests.
+
+Because nvme-tcp enables zero-copy by default in the transmission path,
+this can lead to corrupted data being sent to the original target,
+ultimately causing data corruption.
+
+We can reproduce this data corruption by injecting delay on one path
+and triggering an I/O timeout.
+
+To prevent this issue, this change ensures that all inflight
+transmissions are fully completed from the host's perspective before
+returning from queue stop. To handle concurrent I/O timeouts from
+multiple namespaces under the same controller, always wait in queue
+stop regardless of the queue's state.
+
+This aligns with the behavior of queue stopping in other NVMe fabric
+transports.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Michael Liang <mliang@purestorage.com>
+Reviewed-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Reviewed-by: Randy Jennings <randyj@purestorage.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 0fc5aba88bc15..99bf17f2dcfca 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1602,7 +1602,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ cancel_work_sync(&queue->io_work);
+ }
+
+-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
+ {
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+@@ -1613,6 +1613,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ mutex_unlock(&queue->queue_lock);
+ }
+
++static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
++ int timeout = 100;
++
++ while (timeout > 0) {
++ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
++ !sk_wmem_alloc_get(queue->sock->sk))
++ return;
++ msleep(2);
++ timeout -= 2;
++ }
++ dev_warn(nctrl->device,
++ "qid %d: timeout draining sock wmem allocation expired\n",
++ qid);
++}
++
++static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++{
++ nvme_tcp_stop_queue_nowait(nctrl, qid);
++ nvme_tcp_wait_queue(nctrl, qid);
++}
++
++
+ static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+@@ -1720,7 +1745,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+- nvme_tcp_stop_queue(ctrl, i);
++ nvme_tcp_stop_queue_nowait(ctrl, i);
++ for (i = 1; i < ctrl->queue_count; i++)
++ nvme_tcp_wait_queue(ctrl, i);
+ }
+
+ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+--
+2.39.5
+
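+sk_wmem_alloc_get() reports the transmit-skb memory still accounted to
+the socket; because nvme-tcp sends data pages by reference
+(zero-copy), "wmem drained" is the proxy for "the NIC no longer
+references the I/O's buffers". The wait is a deliberately simple
+bounded poll (condensed from the diff):
+
+	int timeout = 100;	/* ~100 ms budget, probed every 2 ms */
+
+	while (timeout > 0) {
+		if (!sk_wmem_alloc_get(queue->sock->sk))
+			return;		/* all TX pages released */
+		msleep(2);
+		timeout -= 2;
+	}
+	/* Expired: warn, but do not block failover forever. */
+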
tracing-fix-oob-write-in-trace_seq_to_buffer.patch
kvm-x86-load-dr6-with-guest-value-only-before-entering-.vcpu_run-loop.patch
net-sched-act_mirred-don-t-override-retval-if-we-already-lost-the-skb.patch
+net-mlx5-e-switch-initialize-mac-address-for-default.patch
+net-mlx5-e-switch-fix-error-handling-for-enabling-ro.patch
+bluetooth-l2cap-copy-rx-timestamp-to-new-fragments.patch
+net-ethernet-mtk-star-emac-separate-tx-rx-handling-w.patch
+net-ethernet-mtk-star-emac-fix-spinlock-recursion-is.patch
+net-ethernet-mtk-star-emac-rearm-interrupts-in-rx_po.patch
+net_sched-drr-fix-double-list-add-in-class-with-nete.patch
+net_sched-hfsc-fix-a-uaf-vulnerability-in-class-with.patch
+net_sched-ets-fix-double-list-add-in-class-with-nete.patch
+net_sched-qfq-fix-double-list-add-in-class-with-nete.patch
+ice-refactor-promiscuous-functions.patch
+ice-check-vf-vsi-pointer-value-in-ice_vc_add_fdir_fl.patch
+net-dlink-correct-endianness-handling-of-led_mode.patch
+net-ipv6-fix-udpv6-gso-segmentation-with-nat.patch
+bnxt_en-fix-coredump-logic-to-free-allocated-buffer.patch
+bnxt_en-fix-out-of-bound-memcpy-during-ethtool-w.patch
+bnxt_en-fix-ethtool-d-byte-order-for-32-bit-values.patch
+nvme-tcp-fix-premature-queue-removal-and-i-o-failove.patch
+net-lan743x-fix-memleak-issue-when-gso-enabled.patch
+net-fec-err007885-workaround-for-conventional-tx.patch
+net-hns3-store-rx-vlan-tag-offload-state-for-vf.patch
+net-hns3-add-support-for-external-loopback-test.patch
+net-hns3-fix-an-interrupt-residual-problem.patch
+net-hns3-fixed-debugfs-tm_qset-size.patch
+net-hns3-defer-calling-ptp_clock_register.patch