]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.12
authorSasha Levin <sashal@kernel.org>
Fri, 10 Jan 2025 14:26:04 +0000 (09:26 -0500)
committerSasha Levin <sashal@kernel.org>
Fri, 10 Jan 2025 14:26:04 +0000 (09:26 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
37 files changed:
queue-6.12/bluetooth-btmtk-fix-failed-to-send-func-ctrl-for-med.patch [new file with mode: 0644]
queue-6.12/bluetooth-btnxpuart-fix-driver-sending-truncated-dat.patch [new file with mode: 0644]
queue-6.12/bluetooth-hci_sync-fix-not-setting-random-address-wh.patch [new file with mode: 0644]
queue-6.12/bluetooth-mgmt-fix-add-device-to-responding-before-c.patch [new file with mode: 0644]
queue-6.12/bnxt_en-fix-dim-shutdown.patch [new file with mode: 0644]
queue-6.12/bnxt_en-fix-possible-memory-leak-when-hwrm_req_repla.patch [new file with mode: 0644]
queue-6.12/btrfs-avoid-null-pointer-dereference-if-no-valid-ext.patch [new file with mode: 0644]
queue-6.12/cxgb4-avoid-removal-of-uninserted-tid.patch [new file with mode: 0644]
queue-6.12/eth-gve-use-appropriate-helper-to-set-xdp_features.patch [new file with mode: 0644]
queue-6.12/ice-fix-incorrect-phy-settings-for-100-gb-s.patch [new file with mode: 0644]
queue-6.12/ice-fix-max-values-for-dpll-pin-phase-adjust.patch [new file with mode: 0644]
queue-6.12/ieee802154-ca8210-add-missing-check-for-kfifo_alloc-.patch [new file with mode: 0644]
queue-6.12/igc-return-early-when-failing-to-read-eecd-register.patch [new file with mode: 0644]
queue-6.12/ipvlan-fix-use-after-free-in-ipvlan_get_iflink.patch [new file with mode: 0644]
queue-6.12/mctp-i3c-fix-mctp-i3c-driver-multi-thread-issue.patch [new file with mode: 0644]
queue-6.12/net-802-llc-snap-oid-pid-lookup-on-start-of-skb-data.patch [new file with mode: 0644]
queue-6.12/net-don-t-dump-tx-and-uninitialized-napis.patch [new file with mode: 0644]
queue-6.12/net-hns3-don-t-auto-enable-misc-vector.patch [new file with mode: 0644]
queue-6.12/net-hns3-fix-kernel-crash-when-1588-is-sent-on-hip08.patch [new file with mode: 0644]
queue-6.12/net-hns3-fix-missing-features-due-to-dev-features-co.patch [new file with mode: 0644]
queue-6.12/net-hns3-fixed-hclge_fetch_pf_reg-accesses-bar-space.patch [new file with mode: 0644]
queue-6.12/net-hns3-fixed-reset-failure-issues-caused-by-the-in.patch [new file with mode: 0644]
queue-6.12/net-hns3-initialize-reset_timer-before-hclgevf_misc_.patch [new file with mode: 0644]
queue-6.12/net-hns3-resolved-the-issue-that-the-debugfs-query-r.patch [new file with mode: 0644]
queue-6.12/net-libwx-fix-firmware-mailbox-abnormal-return.patch [new file with mode: 0644]
queue-6.12/net-mlx5-fix-variable-not-being-completed-when-funct.patch [new file with mode: 0644]
queue-6.12/net-stmmac-dwmac-tegra-read-iommu-stream-id-from-dev.patch [new file with mode: 0644]
queue-6.12/net_sched-cls_flow-validate-tca_flow_rshift-attribut.patch [new file with mode: 0644]
queue-6.12/netfilter-conntrack-clamp-maximum-hashtable-size-to-.patch [new file with mode: 0644]
queue-6.12/netfilter-nf_tables-imbalance-in-flowtable-binding.patch [new file with mode: 0644]
queue-6.12/pds_core-limit-loop-over-fw-name-list.patch [new file with mode: 0644]
queue-6.12/rtase-fix-a-check-for-error-in-rtase_alloc_msix.patch [new file with mode: 0644]
queue-6.12/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-f.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/tcp-annotate-data-race-around-sk-sk_mark-in-tcp_v4_s.patch [new file with mode: 0644]
queue-6.12/tcp-dccp-allow-a-connection-when-sk_max_ack_backlog-.patch [new file with mode: 0644]
queue-6.12/tls-fix-tls_sw_sendmsg-error-handling.patch [new file with mode: 0644]

diff --git a/queue-6.12/bluetooth-btmtk-fix-failed-to-send-func-ctrl-for-med.patch b/queue-6.12/bluetooth-btmtk-fix-failed-to-send-func-ctrl-for-med.patch
new file mode 100644 (file)
index 0000000..460cdfd
--- /dev/null
@@ -0,0 +1,83 @@
+From 917c6bad424d8e453e9573915f7ef2c1ba13c8ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 17:50:28 +0800
+Subject: Bluetooth: btmtk: Fix failed to send func ctrl for MediaTek devices.
+
+From: Chris Lu <chris.lu@mediatek.com>
+
+[ Upstream commit 67dba2c28fe0af7e25ea1aeade677162ed05310a ]
+
+Use usb_autopm_get_interface() and usb_autopm_put_interface()
+in btmtk_usb_shutdown(), it could send func ctrl after enabling
+autosuspend.
+
+Bluetooth: btmtk_usb_hci_wmt_sync() hci0: Execution of wmt command
+           timed out
+Bluetooth: btmtk_usb_shutdown() hci0: Failed to send wmt func ctrl
+           (-110)
+
+Fixes: 5c5e8c52e3ca ("Bluetooth: btmtk: move btusb_mtk_[setup, shutdown] to btmtk.c")
+Signed-off-by: Chris Lu <chris.lu@mediatek.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btmtk.c  | 7 +++++++
+ net/bluetooth/rfcomm/tty.c | 4 ++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 85e99641eaae..af487abe9932 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
+ int btmtk_usb_shutdown(struct hci_dev *hdev)
+ {
++      struct btmtk_data *data = hci_get_priv(hdev);
+       struct btmtk_hci_wmt_params wmt_params;
+       u8 param = 0;
+       int err;
++      err = usb_autopm_get_interface(data->intf);
++      if (err < 0)
++              return err;
++
+       /* Disable the device */
+       wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+       wmt_params.flag = 0;
+@@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
+       err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+       if (err < 0) {
+               bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
++              usb_autopm_put_interface(data->intf);
+               return err;
+       }
++      usb_autopm_put_interface(data->intf);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index af80d599c337..21a5b5535ebc 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -201,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev,
+                           struct device_attribute *attr, char *buf)
+ {
+       struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
+-      return sprintf(buf, "%pMR\n", &dev->dst);
++      return sysfs_emit(buf, "%pMR\n", &dev->dst);
+ }
+ static ssize_t channel_show(struct device *tty_dev,
+                           struct device_attribute *attr, char *buf)
+ {
+       struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
+-      return sprintf(buf, "%d\n", dev->channel);
++      return sysfs_emit(buf, "%d\n", dev->channel);
+ }
+ static DEVICE_ATTR_RO(address);
+-- 
+2.39.5
+
diff --git a/queue-6.12/bluetooth-btnxpuart-fix-driver-sending-truncated-dat.patch b/queue-6.12/bluetooth-btnxpuart-fix-driver-sending-truncated-dat.patch
new file mode 100644 (file)
index 0000000..5e71910
--- /dev/null
@@ -0,0 +1,40 @@
+From 2be8178f5c2707fc2350aa1629bfff618c27aab1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 18:32:52 +0530
+Subject: Bluetooth: btnxpuart: Fix driver sending truncated data
+
+From: Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>
+
+[ Upstream commit 8023dd2204254a70887f5ee58d914bf70a060b9d ]
+
+This fixes the apparent controller hang issue seen during stress test
+where the host sends a truncated payload, followed by HCI commands. The
+controller treats these HCI commands as a part of previously truncated
+payload, leading to command timeouts.
+
+Adding a serdev_device_wait_until_sent() call after
+serdev_device_write_buf() fixed the issue.
+
+Fixes: 689ca16e5232 ("Bluetooth: NXP: Add protocol support for NXP Bluetooth chipsets")
+Signed-off-by: Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btnxpuart.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index 5ea0d23e88c0..a028984f2782 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -1336,6 +1336,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
+       while ((skb = nxp_dequeue(nxpdev))) {
+               len = serdev_device_write_buf(serdev, skb->data, skb->len);
++              serdev_device_wait_until_sent(serdev, 0);
+               hdev->stat.byte_tx += len;
+               skb_pull(skb, len);
+-- 
+2.39.5
+
diff --git a/queue-6.12/bluetooth-hci_sync-fix-not-setting-random-address-wh.patch b/queue-6.12/bluetooth-hci_sync-fix-not-setting-random-address-wh.patch
new file mode 100644 (file)
index 0000000..28eb11c
--- /dev/null
@@ -0,0 +1,76 @@
+From 5fab62a218af4fb932bbf3a2e649854aa7c9467d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 15:42:09 -0500
+Subject: Bluetooth: hci_sync: Fix not setting Random Address when required
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit c2994b008492db033d40bd767be1620229a3035e ]
+
+This fixes errors such as the following when Own address type is set to
+Random Address but it has not been programmed yet due to either be
+advertising or connecting:
+
+< HCI Command: LE Set Exte.. (0x08|0x0041) plen 13
+        Own address type: Random (0x03)
+        Filter policy: Ignore not in accept list (0x01)
+        PHYs: 0x05
+        Entry 0: LE 1M
+          Type: Passive (0x00)
+          Interval: 60.000 msec (0x0060)
+          Window: 30.000 msec (0x0030)
+        Entry 1: LE Coded
+          Type: Passive (0x00)
+          Interval: 180.000 msec (0x0120)
+          Window: 90.000 msec (0x0090)
+> HCI Event: Command Complete (0x0e) plen 4
+      LE Set Extended Scan Parameters (0x08|0x0041) ncmd 1
+        Status: Success (0x00)
+< HCI Command: LE Set Exten.. (0x08|0x0042) plen 6
+        Extended scan: Enabled (0x01)
+        Filter duplicates: Enabled (0x01)
+        Duration: 0 msec (0x0000)
+        Period: 0.00 sec (0x0000)
+> HCI Event: Command Complete (0x0e) plen 4
+      LE Set Extended Scan Enable (0x08|0x0042) ncmd 1
+        Status: Invalid HCI Command Parameters (0x12)
+
+Fixes: c45074d68a9b ("Bluetooth: Fix not generating RPA when required")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index c86f4e42e69c..7b2b04d6b856 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1031,9 +1031,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
+ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+ {
+-      /* If we're advertising or initiating an LE connection we can't
+-       * go ahead and change the random address at this time. This is
+-       * because the eventual initiator address used for the
++      /* If a random_addr has been set we're advertising or initiating an LE
++       * connection we can't go ahead and change the random address at this
++       * time. This is because the eventual initiator address used for the
+        * subsequently created connection will be undefined (some
+        * controllers use the new address and others the one we had
+        * when the operation started).
+@@ -1041,8 +1041,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+        * In this kind of scenario skip the update and let the random
+        * address be updated at the next cycle.
+        */
+-      if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+-          hci_lookup_le_connect(hdev)) {
++      if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
++          (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
++          hci_lookup_le_connect(hdev))) {
+               bt_dev_dbg(hdev, "Deferring random address update");
+               hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+               return 0;
+-- 
+2.39.5
+
diff --git a/queue-6.12/bluetooth-mgmt-fix-add-device-to-responding-before-c.patch b/queue-6.12/bluetooth-mgmt-fix-add-device-to-responding-before-c.patch
new file mode 100644 (file)
index 0000000..b27dffa
--- /dev/null
@@ -0,0 +1,89 @@
+From e57e77864de5000132c8d1b4944ee6d433d8670c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 15:42:10 -0500
+Subject: Bluetooth: MGMT: Fix Add Device to responding before completing
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit a182d9c84f9c52fb5db895ecceeee8b3a1bf661e ]
+
+Add Device with LE type requires updating resolving/accept list which
+requires quite a number of commands to complete and each of them may
+fail, so instead of pretending it would always work this checks the
+return of hci_update_passive_scan_sync which indicates if everything
+worked as intended.
+
+Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/mgmt.c | 38 ++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 36 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 2343e15f8938..7dc315c1658e 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7596,6 +7596,24 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
+       mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
+ }
++static void add_device_complete(struct hci_dev *hdev, void *data, int err)
++{
++      struct mgmt_pending_cmd *cmd = data;
++      struct mgmt_cp_add_device *cp = cmd->param;
++
++      if (!err) {
++              device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
++                           cp->action);
++              device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
++                                   cp->addr.type, hdev->conn_flags,
++                                   PTR_UINT(cmd->user_data));
++      }
++
++      mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
++                        mgmt_status(err), &cp->addr, sizeof(cp->addr));
++      mgmt_pending_free(cmd);
++}
++
+ static int add_device_sync(struct hci_dev *hdev, void *data)
+ {
+       return hci_update_passive_scan_sync(hdev);
+@@ -7604,6 +7622,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data)
+ static int add_device(struct sock *sk, struct hci_dev *hdev,
+                     void *data, u16 len)
+ {
++      struct mgmt_pending_cmd *cmd;
+       struct mgmt_cp_add_device *cp = data;
+       u8 auto_conn, addr_type;
+       struct hci_conn_params *params;
+@@ -7684,9 +7703,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
+                       current_flags = params->flags;
+       }
+-      err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
+-      if (err < 0)
++      cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
++      if (!cmd) {
++              err = -ENOMEM;
+               goto unlock;
++      }
++
++      cmd->user_data = UINT_PTR(current_flags);
++
++      err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
++                               add_device_complete);
++      if (err < 0) {
++              err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
++                                      MGMT_STATUS_FAILED, &cp->addr,
++                                      sizeof(cp->addr));
++              mgmt_pending_free(cmd);
++      }
++
++      goto unlock;
+ added:
+       device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
+-- 
+2.39.5
+
diff --git a/queue-6.12/bnxt_en-fix-dim-shutdown.patch b/queue-6.12/bnxt_en-fix-dim-shutdown.patch
new file mode 100644 (file)
index 0000000..1bc66ba
--- /dev/null
@@ -0,0 +1,133 @@
+From e86b9ac1deb96cf08deacc1ffbf6418c1bb8cede Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 20:38:48 -0800
+Subject: bnxt_en: Fix DIM shutdown
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 40452969a50652e3cbf89dac83d54eebf2206d27 ]
+
+DIM work will call the firmware to adjust the coalescing parameters on
+the RX rings.  We should cancel DIM work before we call the firmware
+to free the RX rings.  Otherwise, FW will reject the call from DIM
+work if the RX ring has been freed.  This will generate an error
+message like this:
+
+bnxt_en 0000:21:00.1 ens2f1np1: hwrm req_type 0x53 seq id 0x6fca error 0x2
+
+and cause unnecessary concern for the user.  It is also possible to
+modify the coalescing parameters of the wrong ring if the ring has
+been re-allocated.
+
+To prevent this, cancel DIM work right before freeing the RX rings.
+We also have to add a check in NAPI poll to not schedule DIM if the
+RX rings are shutting down.  Check that the VNIC is active before we
+schedule DIM.  The VNIC is always disabled before we free the RX rings.
+
+Fixes: 0bc0b97fca73 ("bnxt_en: cleanup DIM work on device shutdown")
+Reviewed-by: Hongguang Gao <hongguang.gao@broadcom.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://patch.msgid.link/20250104043849.3482067-3-michael.chan@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38 ++++++++++++++++++++---
+ 1 file changed, 33 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index dafc5a4039cd..c255445e97f3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2826,6 +2826,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+       return 0;
+ }
++static bool bnxt_vnic_is_active(struct bnxt *bp)
++{
++      struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
++
++      return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
++}
++
+ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+ {
+       struct bnxt_napi *bnapi = dev_instance;
+@@ -3093,7 +3100,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
+                       break;
+               }
+       }
+-      if (bp->flags & BNXT_FLAG_DIM) {
++      if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
+               struct dim_sample dim_sample = {};
+               dim_update_sample(cpr->event_ctr,
+@@ -3224,7 +3231,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
+ poll_done:
+       cpr_rx = &cpr->cp_ring_arr[0];
+       if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+-          (bp->flags & BNXT_FLAG_DIM)) {
++          (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
+               struct dim_sample dim_sample = {};
+               dim_update_sample(cpr->event_ctr,
+@@ -7116,6 +7123,26 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+       return rc;
+ }
++static void bnxt_cancel_dim(struct bnxt *bp)
++{
++      int i;
++
++      /* DIM work is initialized in bnxt_enable_napi().  Proceed only
++       * if NAPI is enabled.
++       */
++      if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
++              return;
++
++      /* Make sure NAPI sees that the VNIC is disabled */
++      synchronize_net();
++      for (i = 0; i < bp->rx_nr_rings; i++) {
++              struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
++              struct bnxt_napi *bnapi = rxr->bnapi;
++
++              cancel_work_sync(&bnapi->cp_ring.dim.work);
++      }
++}
++
+ static int hwrm_ring_free_send_msg(struct bnxt *bp,
+                                  struct bnxt_ring_struct *ring,
+                                  u32 ring_type, int cmpl_ring_id)
+@@ -7216,6 +7243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+               }
+       }
++      bnxt_cancel_dim(bp);
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+               bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
+@@ -11012,8 +11040,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
+               if (bnapi->in_reset)
+                       cpr->sw_stats->rx.rx_resets++;
+               napi_disable(&bnapi->napi);
+-              if (bnapi->rx_ring)
+-                      cancel_work_sync(&cpr->dim.work);
+       }
+ }
+@@ -15269,8 +15295,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+               bnxt_hwrm_vnic_update(bp, vnic,
+                                     VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+       }
+-
++      /* Make sure NAPI sees that the VNIC is disabled */
++      synchronize_net();
+       rxr = &bp->rx_ring[idx];
++      cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+       bnxt_hwrm_rx_ring_free(bp, rxr, false);
+       bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+       rxr->rx_next_cons = 0;
+-- 
+2.39.5
+
diff --git a/queue-6.12/bnxt_en-fix-possible-memory-leak-when-hwrm_req_repla.patch b/queue-6.12/bnxt_en-fix-possible-memory-leak-when-hwrm_req_repla.patch
new file mode 100644 (file)
index 0000000..b2346a6
--- /dev/null
@@ -0,0 +1,47 @@
+From 81209c0613312fc72eafc060149d82685c0373df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 20:38:47 -0800
+Subject: bnxt_en: Fix possible memory leak when hwrm_req_replace fails
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit c8dafb0e4398dacc362832098a04b97da3b0395b ]
+
+When hwrm_req_replace() fails, the driver is not invoking bnxt_req_drop()
+which could cause a memory leak.
+
+Fixes: bbf33d1d9805 ("bnxt_en: update all firmware calls to use the new APIs")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://patch.msgid.link/20250104043849.3482067-2-michael.chan@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index fdd6356f21ef..546d9a3d7efe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
+       rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
+       if (rc)
+-              return rc;
++              goto drop_req;
+       hwrm_req_timeout(bp, req, fw_msg->timeout);
+       resp = hwrm_req_hold(bp, req);
+@@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
+               memcpy(fw_msg->resp, resp, resp_len);
+       }
++drop_req:
+       hwrm_req_drop(bp, req);
+       return rc;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/btrfs-avoid-null-pointer-dereference-if-no-valid-ext.patch b/queue-6.12/btrfs-avoid-null-pointer-dereference-if-no-valid-ext.patch
new file mode 100644 (file)
index 0000000..bec9db1
--- /dev/null
@@ -0,0 +1,91 @@
+From 3342f0ed8b56177c11d360aceb2d39d458220aec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jan 2025 14:44:16 +1030
+Subject: btrfs: avoid NULL pointer dereference if no valid extent tree
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 6aecd91a5c5b68939cf4169e32bc49f3cd2dd329 ]
+
+[BUG]
+Syzbot reported a crash with the following call trace:
+
+  BTRFS info (device loop0): scrub: started on devid 1
+  BUG: kernel NULL pointer dereference, address: 0000000000000208
+  #PF: supervisor read access in kernel mode
+  #PF: error_code(0x0000) - not-present page
+  PGD 106e70067 P4D 106e70067 PUD 107143067 PMD 0
+  Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+  CPU: 1 UID: 0 PID: 689 Comm: repro Kdump: loaded Tainted: G           O       6.13.0-rc4-custom+ #206
+  Tainted: [O]=OOT_MODULE
+  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS unknown 02/02/2022
+  RIP: 0010:find_first_extent_item+0x26/0x1f0 [btrfs]
+  Call Trace:
+   <TASK>
+   scrub_find_fill_first_stripe+0x13d/0x3b0 [btrfs]
+   scrub_simple_mirror+0x175/0x260 [btrfs]
+   scrub_stripe+0x5d4/0x6c0 [btrfs]
+   scrub_chunk+0xbb/0x170 [btrfs]
+   scrub_enumerate_chunks+0x2f4/0x5f0 [btrfs]
+   btrfs_scrub_dev+0x240/0x600 [btrfs]
+   btrfs_ioctl+0x1dc8/0x2fa0 [btrfs]
+   ? do_sys_openat2+0xa5/0xf0
+   __x64_sys_ioctl+0x97/0xc0
+   do_syscall_64+0x4f/0x120
+   entry_SYSCALL_64_after_hwframe+0x76/0x7e
+   </TASK>
+
+[CAUSE]
+The reproducer is using a corrupted image where extent tree root is
+corrupted, thus forcing to use "rescue=all,ro" mount option to mount the
+image.
+
+Then it triggered a scrub, but since scrub relies on extent tree to find
+where the data/metadata extents are, scrub_find_fill_first_stripe()
+relies on an non-empty extent root.
+
+But unfortunately scrub_find_fill_first_stripe() doesn't really expect
+an NULL pointer for extent root, it use extent_root to grab fs_info and
+triggered a NULL pointer dereference.
+
+[FIX]
+Add an extra check for a valid extent root at the beginning of
+scrub_find_fill_first_stripe().
+
+The new error path is introduced by 42437a6386ff ("btrfs: introduce
+mount option rescue=ignorebadroots"), but that's pretty old, and later
+commit b979547513ff ("btrfs: scrub: introduce helper to find and fill
+sector info for a scrub_stripe") changed how we do scrub.
+
+So for kernels older than 6.6, the fix will need manual backport.
+
+Reported-by: syzbot+339e9dbe3a2ca419b85d@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/linux-btrfs/67756935.050a0220.25abdd.0a12.GAE@google.com/
+Fixes: 42437a6386ff ("btrfs: introduce mount option rescue=ignorebadroots")
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/scrub.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 3a3427428074..c73a41b1ad56 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1541,6 +1541,10 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+       u64 extent_gen;
+       int ret;
++      if (unlikely(!extent_root)) {
++              btrfs_err(fs_info, "no valid extent root for scrub");
++              return -EUCLEAN;
++      }
+       memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
+                                  stripe->nr_sectors);
+       scrub_stripe_reset_bitmaps(stripe);
+-- 
+2.39.5
+
diff --git a/queue-6.12/cxgb4-avoid-removal-of-uninserted-tid.patch b/queue-6.12/cxgb4-avoid-removal-of-uninserted-tid.patch
new file mode 100644 (file)
index 0000000..64d62dc
--- /dev/null
@@ -0,0 +1,42 @@
+From 5794bbe50ce6e2a9ecd1e1c5ed75c8ab5bc94d71 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 14:53:27 +0530
+Subject: cxgb4: Avoid removal of uninserted tid
+
+From: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+
+[ Upstream commit 4c1224501e9d6c5fd12d83752f1c1b444e0e3418 ]
+
+During ARP failure, tid is not inserted but _c4iw_free_ep()
+attempts to remove tid which results in error.
+This patch fixes the issue by avoiding removal of uninserted tid.
+
+Fixes: 59437d78f088 ("cxgb4/chtls: fix ULD connection failures due to wrong TID base")
+Signed-off-by: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Link: https://patch.msgid.link/20250103092327.1011925-1-anumula@chelsio.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index fb3933fbb842..757c6484f535 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+       struct adapter *adap = container_of(t, struct adapter, tids);
+       struct sk_buff *skb;
+-      WARN_ON(tid_out_of_range(&adap->tids, tid));
++      if (tid_out_of_range(&adap->tids, tid)) {
++              dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
++              return;
++      }
+       if (t->tid_tab[tid - adap->tids.tid_base]) {
+               t->tid_tab[tid - adap->tids.tid_base] = NULL;
+-- 
+2.39.5
+
diff --git a/queue-6.12/eth-gve-use-appropriate-helper-to-set-xdp_features.patch b/queue-6.12/eth-gve-use-appropriate-helper-to-set-xdp_features.patch
new file mode 100644 (file)
index 0000000..a86f3ee
--- /dev/null
@@ -0,0 +1,59 @@
+From e02702660642da1c8003d275e7999205840309d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 10:02:10 -0800
+Subject: eth: gve: use appropriate helper to set xdp_features
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit db78475ba0d3c66d430f7ded2388cc041078a542 ]
+
+Commit f85949f98206 ("xdp: add xdp_set_features_flag utility routine")
+added routines to inform the core about XDP flag changes.
+GVE support was added around the same time and missed using them.
+
+GVE only changes the flags on error recover or resume.
+Presumably the flags may change during resume if VM migrated.
+User would not get the notification and upper devices would
+not get a chance to recalculate their flags.
+
+Fixes: 75eaae158b1b ("gve: Add XDP DROP and TX support for GQI-QPL format")
+Reviewed-By: Jeroen de Borst <jeroendb@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250106180210.1861784-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index d404819ebc9b..f985a3cf2b11 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2224,14 +2224,18 @@ static void gve_service_task(struct work_struct *work)
+ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+ {
++      xdp_features_t xdp_features;
++
+       if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+-              priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+-              priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+-              priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+-              priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
++              xdp_features = NETDEV_XDP_ACT_BASIC;
++              xdp_features |= NETDEV_XDP_ACT_REDIRECT;
++              xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
++              xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+       } else {
+-              priv->dev->xdp_features = 0;
++              xdp_features = 0;
+       }
++
++      xdp_set_features_flag(priv->dev, xdp_features);
+ }
+ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
+-- 
+2.39.5
+
diff --git a/queue-6.12/ice-fix-incorrect-phy-settings-for-100-gb-s.patch b/queue-6.12/ice-fix-incorrect-phy-settings-for-100-gb-s.patch
new file mode 100644 (file)
index 0000000..a667fbd
--- /dev/null
@@ -0,0 +1,65 @@
+From 0154eacf84c0f545d182e74c04d774865a342ea4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2024 14:22:18 +0100
+Subject: ice: fix incorrect PHY settings for 100 GB/s
+
+From: Przemyslaw Korba <przemyslaw.korba@intel.com>
+
+[ Upstream commit 6c5b989116083a98f45aada548ff54e7a83a9c2d ]
+
+ptp4l application reports too high offset when run on E823 device
+with a 100GB/s link. Those values cannot go under 100ns, like in a
+working case when using 100 GB/s cable.
+
+This is due to incorrect frequency settings on the PHY clocks for
+100 GB/s speed. Changes are introduced to align with the internal
+hardware documentation, and correctly initialize frequency in PHY
+clocks with the frequency values that are in our HW spec.
+
+To reproduce the issue run ptp4l as a Time Receiver on E823 device,
+and observe the offset, which will never approach values seen
+in the PTP working case.
+
+Reproduction output:
+ptp4l -i enp137s0f3 -m -2 -s -f /etc/ptp4l_8275.conf
+ptp4l[5278.775]: master offset      12470 s2 freq  +41288 path delay -3002
+ptp4l[5278.837]: master offset      10525 s2 freq  +39202 path delay -3002
+ptp4l[5278.900]: master offset     -24840 s2 freq  -20130 path delay -3002
+ptp4l[5278.963]: master offset      10597 s2 freq  +37908 path delay -3002
+ptp4l[5279.025]: master offset       8883 s2 freq  +36031 path delay -3002
+ptp4l[5279.088]: master offset       7267 s2 freq  +34151 path delay -3002
+ptp4l[5279.150]: master offset       5771 s2 freq  +32316 path delay -3002
+ptp4l[5279.213]: master offset       4388 s2 freq  +30526 path delay -3002
+ptp4l[5279.275]: master offset     -30434 s2 freq  -28485 path delay -3002
+ptp4l[5279.338]: master offset     -28041 s2 freq  -27412 path delay -3002
+ptp4l[5279.400]: master offset       7870 s2 freq  +31118 path delay -3002
+
+Fixes: 3a7496234d17 ("ice: implement basic E822 PTP support")
+Reviewed-by: Milena Olech <milena.olech@intel.com>
+Signed-off-by: Przemyslaw Korba <przemyslaw.korba@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+index e6980b94a6c1..3005dd252a10 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+@@ -761,9 +761,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+               /* rx_desk_rsgb_par */
+               644531250, /* 644.53125 MHz Reed Solomon gearbox */
+               /* tx_desk_rsgb_pcs */
+-              644531250, /* 644.53125 MHz Reed Solomon gearbox */
++              390625000, /* 390.625 MHz Reed Solomon gearbox */
+               /* rx_desk_rsgb_pcs */
+-              644531250, /* 644.53125 MHz Reed Solomon gearbox */
++              390625000, /* 390.625 MHz Reed Solomon gearbox */
+               /* tx_fixed_delay */
+               1620,
+               /* pmd_adj_divisor */
+-- 
+2.39.5
+
diff --git a/queue-6.12/ice-fix-max-values-for-dpll-pin-phase-adjust.patch b/queue-6.12/ice-fix-max-values-for-dpll-pin-phase-adjust.patch
new file mode 100644 (file)
index 0000000..9d0cc3f
--- /dev/null
@@ -0,0 +1,151 @@
+From 587d244eaa31d035ef90272f32a3fdb5cc342f5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Nov 2024 08:51:12 +0100
+Subject: ice: fix max values for dpll pin phase adjust
+
+From: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+
+[ Upstream commit 65104599b3a8ed42d85b3f8f27be650afe1f3a7e ]
+
+Mask admin command returned max phase adjust value for both input and
+output pins. Only 31 bits are relevant, last released data sheet wrongly
+points that 32 bits are valid - see [1] 3.2.6.4.1 Get CCU Capabilities
+Command for reference. Fix of the datasheet itself is in progress.
+
+Fix the min/max assignment logic, previously the value was wrongly
+considered as negative value due to most significant bit being set.
+
+Example of previous broken behavior:
+$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+--do pin-get --json '{"id":1}'| grep phase-adjust
+ 'phase-adjust': 0,
+ 'phase-adjust-max': 16723,
+ 'phase-adjust-min': -16723,
+
+Correct behavior with the fix:
+$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+--do pin-get --json '{"id":1}'| grep phase-adjust
+ 'phase-adjust': 0,
+ 'phase-adjust-max': 2147466925,
+ 'phase-adjust-min': -2147466925,
+
+[1] https://cdrdv2.intel.com/v1/dl/getContent/613875?explicitVersion=true
+
+Fixes: 90e1c90750d7 ("ice: dpll: implement phase related callbacks")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/intel/ice/ice_adminq_cmd.h   |  2 ++
+ drivers/net/ethernet/intel/ice/ice_dpll.c     | 35 ++++++++++++-------
+ 2 files changed, 25 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 0be1a98d7cc1..79a6edd0be0e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -2238,6 +2238,8 @@ struct ice_aqc_get_pkg_info_resp {
+       struct ice_aqc_get_pkg_info pkg_info[];
+ };
++#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
++
+ /* Get CGU abilities command response data structure (indirect 0x0C61) */
+ struct ice_aqc_get_cgu_abilities {
+       u8 num_inputs;
+diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
+index d5ad6d84007c..38e151c7ea23 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
++++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
+@@ -2064,6 +2064,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
+       return 0;
+ }
++/**
++ * ice_dpll_phase_range_set - initialize phase adjust range helper
++ * @range: pointer to phase adjust range struct to be initialized
++ * @phase_adj: a value to be used as min(-)/max(+) boundary
++ */
++static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
++                                   u32 phase_adj)
++{
++      range->min = -phase_adj;
++      range->max = phase_adj;
++}
++
+ /**
+  * ice_dpll_init_info_pins_generic - initializes generic pins info
+  * @pf: board private structure
+@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
+       for (i = 0; i < pin_num; i++) {
+               pins[i].idx = i;
+               pins[i].prop.board_label = labels[i];
+-              pins[i].prop.phase_range.min = phase_adj_max;
+-              pins[i].prop.phase_range.max = -phase_adj_max;
++              ice_dpll_phase_range_set(&pins[i].prop.phase_range,
++                                       phase_adj_max);
+               pins[i].prop.capabilities = cap;
+               pins[i].pf = pf;
+               ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+       struct ice_hw *hw = &pf->hw;
+       struct ice_dpll_pin *pins;
+       unsigned long caps;
++      u32 phase_adj_max;
+       u8 freq_supp_num;
+       bool input;
+@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+       case ICE_DPLL_PIN_TYPE_INPUT:
+               pins = pf->dplls.inputs;
+               num_pins = pf->dplls.num_inputs;
++              phase_adj_max = pf->dplls.input_phase_adj_max;
+               input = true;
+               break;
+       case ICE_DPLL_PIN_TYPE_OUTPUT:
+               pins = pf->dplls.outputs;
+               num_pins = pf->dplls.num_outputs;
++              phase_adj_max = pf->dplls.output_phase_adj_max;
+               input = false;
+               break;
+       default:
+@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+                               return ret;
+                       caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+                                DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
+-                      pins[i].prop.phase_range.min =
+-                              pf->dplls.input_phase_adj_max;
+-                      pins[i].prop.phase_range.max =
+-                              -pf->dplls.input_phase_adj_max;
+               } else {
+-                      pins[i].prop.phase_range.min =
+-                              pf->dplls.output_phase_adj_max;
+-                      pins[i].prop.phase_range.max =
+-                              -pf->dplls.output_phase_adj_max;
+                       ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
+                       if (ret)
+                               return ret;
+               }
++              ice_dpll_phase_range_set(&pins[i].prop.phase_range,
++                                       phase_adj_max);
+               pins[i].prop.capabilities = caps;
+               ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+               if (ret)
+@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
+       dp->dpll_idx = abilities.pps_dpll_idx;
+       d->num_inputs = abilities.num_inputs;
+       d->num_outputs = abilities.num_outputs;
+-      d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
+-      d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
++      d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
++              ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
++      d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
++              ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+       alloc_size = sizeof(*d->inputs) * d->num_inputs;
+       d->inputs = kzalloc(alloc_size, GFP_KERNEL);
+-- 
+2.39.5
+
diff --git a/queue-6.12/ieee802154-ca8210-add-missing-check-for-kfifo_alloc-.patch b/queue-6.12/ieee802154-ca8210-add-missing-check-for-kfifo_alloc-.patch
new file mode 100644 (file)
index 0000000..9ad50b1
--- /dev/null
@@ -0,0 +1,45 @@
+From 83f641895506260aac90420fd07bdcf4fb5b52b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2024 19:27:12 +0100
+Subject: ieee802154: ca8210: Add missing check for kfifo_alloc() in
+ ca8210_probe()
+
+From: Keisuke Nishimura <keisuke.nishimura@inria.fr>
+
+[ Upstream commit 2c87309ea741341c6722efdf1fb3f50dd427c823 ]
+
+ca8210_test_interface_init() returns the result of kfifo_alloc(),
+which can be non-zero in case of an error. The caller, ca8210_probe(),
+should check the return value and do error-handling if it fails.
+
+Fixes: ded845a781a5 ("ieee802154: Add CA8210 IEEE 802.15.4 device driver")
+Signed-off-by: Keisuke Nishimura <keisuke.nishimura@inria.fr>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/20241029182712.318271-1-keisuke.nishimura@inria.fr
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ieee802154/ca8210.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index e685a7f946f0..753215ebc67c 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -3072,7 +3072,11 @@ static int ca8210_probe(struct spi_device *spi_device)
+       spi_set_drvdata(priv->spi, priv);
+       if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
+               cascoda_api_upstream = ca8210_test_int_driver_write;
+-              ca8210_test_interface_init(priv);
++              ret = ca8210_test_interface_init(priv);
++              if (ret) {
++                      dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
++                      goto error;
++              }
+       } else {
+               cascoda_api_upstream = NULL;
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.12/igc-return-early-when-failing-to-read-eecd-register.patch b/queue-6.12/igc-return-early-when-failing-to-read-eecd-register.patch
new file mode 100644 (file)
index 0000000..fc364d7
--- /dev/null
@@ -0,0 +1,78 @@
+From 0c1f95108dbc02e9db6e84b199b50366d3366e23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2024 10:37:42 +0800
+Subject: igc: return early when failing to read EECD register
+
+From: En-Wei Wu <en-wei.wu@canonical.com>
+
+[ Upstream commit bd2776e39c2a82ef4681d02678bb77b3d41e79be ]
+
+When booting with a dock connected, the igc driver may get stuck for ~40
+seconds if PCIe link is lost during initialization.
+
+This happens because the driver accesses the device after EECD register reads
+return all F's, indicating failed reads. Consequently, hw->hw_addr is set
+to NULL, which impacts subsequent rd32() reads. This leads to the driver
+hanging in igc_get_hw_semaphore_i225(), as the invalid hw->hw_addr
+prevents retrieving the expected value.
+
+To address this, a validation check and a corresponding return value
+catch is added for the EECD register read result. If all F's are
+returned, indicating PCIe link loss, the driver will return -ENXIO
+immediately. This avoids the 40-second hang and significantly improves
+boot time when using a dock with an igc NIC.
+
+Log before the patch:
+[    0.911913] igc 0000:70:00.0: enabling device (0000 -> 0002)
+[    0.912386] igc 0000:70:00.0: PTM enabled, 4ns granularity
+[    1.571098] igc 0000:70:00.0 (unnamed net_device) (uninitialized): PCIe link lost, device now detached
+[   43.449095] igc_get_hw_semaphore_i225: igc 0000:70:00.0 (unnamed net_device) (uninitialized): Driver can't access device - SMBI bit is set.
+[   43.449186] igc 0000:70:00.0: probe with driver igc failed with error -13
+[   46.345701] igc 0000:70:00.0: enabling device (0000 -> 0002)
+[   46.345777] igc 0000:70:00.0: PTM enabled, 4ns granularity
+
+Log after the patch:
+[    1.031000] igc 0000:70:00.0: enabling device (0000 -> 0002)
+[    1.032097] igc 0000:70:00.0: PTM enabled, 4ns granularity
+[    1.642291] igc 0000:70:00.0 (unnamed net_device) (uninitialized): PCIe link lost, device now detached
+[    5.480490] igc 0000:70:00.0: enabling device (0000 -> 0002)
+[    5.480516] igc 0000:70:00.0: PTM enabled, 4ns granularity
+
+Fixes: ab4056126813 ("igc: Add NVM support")
+Cc: Chia-Lin Kao (AceLan) <acelan.kao@canonical.com>
+Signed-off-by: En-Wei Wu <en-wei.wu@canonical.com>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Tested-by: Mor Bar-Gabay <morx.bar.gabay@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_base.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
+index 9fae8bdec2a7..1613b562d17c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.c
++++ b/drivers/net/ethernet/intel/igc/igc_base.c
+@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
+       u32 eecd = rd32(IGC_EECD);
+       u16 size;
++      /* failed to read reg and got all F's */
++      if (!(~eecd))
++              return -ENXIO;
++
+       size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
+       /* Added to a constant, "size" becomes the left-shift value
+@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
+       /* NVM initialization */
+       ret_val = igc_init_nvm_params_base(hw);
++      if (ret_val)
++              goto out;
+       switch (hw->mac.type) {
+       case igc_i225:
+               ret_val = igc_init_nvm_params_i225(hw);
+-- 
+2.39.5
+
diff --git a/queue-6.12/ipvlan-fix-use-after-free-in-ipvlan_get_iflink.patch b/queue-6.12/ipvlan-fix-use-after-free-in-ipvlan_get_iflink.patch
new file mode 100644 (file)
index 0000000..0145f32
--- /dev/null
@@ -0,0 +1,205 @@
+From 0cf0a5f6824b4f0a9bda10dd7dd73842535cbfdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 16:19:11 +0900
+Subject: ipvlan: Fix use-after-free in ipvlan_get_iflink().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit cb358ff94154774d031159b018adf45e17673941 ]
+
+syzbot presented a use-after-free report [0] regarding ipvlan and
+linkwatch.
+
+ipvlan does not hold a refcnt of the lower device unlike vlan and
+macvlan.
+
+If the linkwatch work is triggered for the ipvlan dev, the lower dev
+might have already been freed, resulting in UAF of ipvlan->phy_dev in
+ipvlan_get_iflink().
+
+We can delay the lower dev unregistration like vlan and macvlan by
+holding the lower dev's refcnt in dev->netdev_ops->ndo_init() and
+releasing it in dev->priv_destructor().
+
+Jakub pointed out calling .ndo_XXX after unregister_netdevice() has
+returned is error prone and suggested [1] addressing this UAF in the
+core by taking commit 750e51603395 ("net: avoid potential UAF in
+default_operstate()") further.
+
+Let's assume unregistering devices DOWN and use RCU protection in
+default_operstate() not to race with the device unregistration.
+
+[0]:
+BUG: KASAN: slab-use-after-free in ipvlan_get_iflink+0x84/0x88 drivers/net/ipvlan/ipvlan_main.c:353
+Read of size 4 at addr ffff0000d768c0e0 by task kworker/u8:35/6944
+
+CPU: 0 UID: 0 PID: 6944 Comm: kworker/u8:35 Not tainted 6.13.0-rc2-g9bc5c9515b48 #12 4c3cb9e8b4565456f6a355f312ff91f4f29b3c47
+Hardware name: linux,dummy-virt (DT)
+Workqueue: events_unbound linkwatch_event
+Call trace:
+ show_stack+0x38/0x50 arch/arm64/kernel/stacktrace.c:484 (C)
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0xbc/0x108 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:378 [inline]
+ print_report+0x16c/0x6f0 mm/kasan/report.c:489
+ kasan_report+0xc0/0x120 mm/kasan/report.c:602
+ __asan_report_load4_noabort+0x20/0x30 mm/kasan/report_generic.c:380
+ ipvlan_get_iflink+0x84/0x88 drivers/net/ipvlan/ipvlan_main.c:353
+ dev_get_iflink+0x7c/0xd8 net/core/dev.c:674
+ default_operstate net/core/link_watch.c:45 [inline]
+ rfc2863_policy+0x144/0x360 net/core/link_watch.c:72
+ linkwatch_do_dev+0x60/0x228 net/core/link_watch.c:175
+ __linkwatch_run_queue+0x2f4/0x5b8 net/core/link_watch.c:239
+ linkwatch_event+0x64/0xa8 net/core/link_watch.c:282
+ process_one_work+0x700/0x1398 kernel/workqueue.c:3229
+ process_scheduled_works kernel/workqueue.c:3310 [inline]
+ worker_thread+0x8c4/0xe10 kernel/workqueue.c:3391
+ kthread+0x2b0/0x360 kernel/kthread.c:389
+ ret_from_fork+0x10/0x20 arch/arm64/kernel/entry.S:862
+
+Allocated by task 9303:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x30/0x68 mm/kasan/common.c:68
+ kasan_save_alloc_info+0x44/0x58 mm/kasan/generic.c:568
+ poison_kmalloc_redzone mm/kasan/common.c:377 [inline]
+ __kasan_kmalloc+0x84/0xa0 mm/kasan/common.c:394
+ kasan_kmalloc include/linux/kasan.h:260 [inline]
+ __do_kmalloc_node mm/slub.c:4283 [inline]
+ __kmalloc_node_noprof+0x2a0/0x560 mm/slub.c:4289
+ __kvmalloc_node_noprof+0x9c/0x230 mm/util.c:650
+ alloc_netdev_mqs+0xb4/0x1118 net/core/dev.c:11209
+ rtnl_create_link+0x2b8/0xb60 net/core/rtnetlink.c:3595
+ rtnl_newlink_create+0x19c/0x868 net/core/rtnetlink.c:3771
+ __rtnl_newlink net/core/rtnetlink.c:3896 [inline]
+ rtnl_newlink+0x122c/0x15c0 net/core/rtnetlink.c:4011
+ rtnetlink_rcv_msg+0x61c/0x918 net/core/rtnetlink.c:6901
+ netlink_rcv_skb+0x1dc/0x398 net/netlink/af_netlink.c:2542
+ rtnetlink_rcv+0x34/0x50 net/core/rtnetlink.c:6928
+ netlink_unicast_kernel net/netlink/af_netlink.c:1321 [inline]
+ netlink_unicast+0x618/0x838 net/netlink/af_netlink.c:1347
+ netlink_sendmsg+0x5fc/0x8b0 net/netlink/af_netlink.c:1891
+ sock_sendmsg_nosec net/socket.c:711 [inline]
+ __sock_sendmsg net/socket.c:726 [inline]
+ __sys_sendto+0x2ec/0x438 net/socket.c:2197
+ __do_sys_sendto net/socket.c:2204 [inline]
+ __se_sys_sendto net/socket.c:2200 [inline]
+ __arm64_sys_sendto+0xe4/0x110 net/socket.c:2200
+ __invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
+ invoke_syscall+0x90/0x278 arch/arm64/kernel/syscall.c:49
+ el0_svc_common+0x13c/0x250 arch/arm64/kernel/syscall.c:132
+ do_el0_svc+0x54/0x70 arch/arm64/kernel/syscall.c:151
+ el0_svc+0x4c/0xa8 arch/arm64/kernel/entry-common.c:744
+ el0t_64_sync_handler+0x78/0x108 arch/arm64/kernel/entry-common.c:762
+ el0t_64_sync+0x198/0x1a0 arch/arm64/kernel/entry.S:600
+
+Freed by task 10200:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x30/0x68 mm/kasan/common.c:68
+ kasan_save_free_info+0x58/0x70 mm/kasan/generic.c:582
+ poison_slab_object mm/kasan/common.c:247 [inline]
+ __kasan_slab_free+0x48/0x68 mm/kasan/common.c:264
+ kasan_slab_free include/linux/kasan.h:233 [inline]
+ slab_free_hook mm/slub.c:2338 [inline]
+ slab_free mm/slub.c:4598 [inline]
+ kfree+0x140/0x420 mm/slub.c:4746
+ kvfree+0x4c/0x68 mm/util.c:693
+ netdev_release+0x94/0xc8 net/core/net-sysfs.c:2034
+ device_release+0x98/0x1c0
+ kobject_cleanup lib/kobject.c:689 [inline]
+ kobject_release lib/kobject.c:720 [inline]
+ kref_put include/linux/kref.h:65 [inline]
+ kobject_put+0x2b0/0x438 lib/kobject.c:737
+ netdev_run_todo+0xdd8/0xf48 net/core/dev.c:10924
+ rtnl_unlock net/core/rtnetlink.c:152 [inline]
+ rtnl_net_unlock net/core/rtnetlink.c:209 [inline]
+ rtnl_dellink+0x484/0x680 net/core/rtnetlink.c:3526
+ rtnetlink_rcv_msg+0x61c/0x918 net/core/rtnetlink.c:6901
+ netlink_rcv_skb+0x1dc/0x398 net/netlink/af_netlink.c:2542
+ rtnetlink_rcv+0x34/0x50 net/core/rtnetlink.c:6928
+ netlink_unicast_kernel net/netlink/af_netlink.c:1321 [inline]
+ netlink_unicast+0x618/0x838 net/netlink/af_netlink.c:1347
+ netlink_sendmsg+0x5fc/0x8b0 net/netlink/af_netlink.c:1891
+ sock_sendmsg_nosec net/socket.c:711 [inline]
+ __sock_sendmsg net/socket.c:726 [inline]
+ ____sys_sendmsg+0x410/0x708 net/socket.c:2583
+ ___sys_sendmsg+0x178/0x1d8 net/socket.c:2637
+ __sys_sendmsg net/socket.c:2669 [inline]
+ __do_sys_sendmsg net/socket.c:2674 [inline]
+ __se_sys_sendmsg net/socket.c:2672 [inline]
+ __arm64_sys_sendmsg+0x12c/0x1c8 net/socket.c:2672
+ __invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
+ invoke_syscall+0x90/0x278 arch/arm64/kernel/syscall.c:49
+ el0_svc_common+0x13c/0x250 arch/arm64/kernel/syscall.c:132
+ do_el0_svc+0x54/0x70 arch/arm64/kernel/syscall.c:151
+ el0_svc+0x4c/0xa8 arch/arm64/kernel/entry-common.c:744
+ el0t_64_sync_handler+0x78/0x108 arch/arm64/kernel/entry-common.c:762
+ el0t_64_sync+0x198/0x1a0 arch/arm64/kernel/entry.S:600
+
+The buggy address belongs to the object at ffff0000d768c000
+ which belongs to the cache kmalloc-cg-4k of size 4096
+The buggy address is located 224 bytes inside of
+ freed 4096-byte region [ffff0000d768c000, ffff0000d768d000)
+
+The buggy address belongs to the physical page:
+page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x117688
+head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
+memcg:ffff0000c77ef981
+flags: 0xbfffe0000000040(head|node=0|zone=2|lastcpupid=0x1ffff)
+page_type: f5(slab)
+raw: 0bfffe0000000040 ffff0000c000f500 dead000000000100 dead000000000122
+raw: 0000000000000000 0000000000040004 00000001f5000000 ffff0000c77ef981
+head: 0bfffe0000000040 ffff0000c000f500 dead000000000100 dead000000000122
+head: 0000000000000000 0000000000040004 00000001f5000000 ffff0000c77ef981
+head: 0bfffe0000000003 fffffdffc35da201 ffffffffffffffff 0000000000000000
+head: 0000000000000008 0000000000000000 00000000ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff0000d768bf80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff0000d768c000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+>ffff0000d768c080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+                                                       ^
+ ffff0000d768c100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff0000d768c180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+Fixes: 8c55facecd7a ("net: linkwatch: only report IF_OPER_LOWERLAYERDOWN if iflink is actually down")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/netdev/20250102174400.085fd8ac@kernel.org/ [1]
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250106071911.64355-1-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/link_watch.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index 1b4d39e38084..cb04ef2b9807 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -42,14 +42,18 @@ static unsigned int default_operstate(const struct net_device *dev)
+        * first check whether lower is indeed the source of its down state.
+        */
+       if (!netif_carrier_ok(dev)) {
+-              int iflink = dev_get_iflink(dev);
+               struct net_device *peer;
++              int iflink;
+               /* If called from netdev_run_todo()/linkwatch_sync_dev(),
+                * dev_net(dev) can be already freed, and RTNL is not held.
+                */
+-              if (dev->reg_state == NETREG_UNREGISTERED ||
+-                  iflink == dev->ifindex)
++              if (dev->reg_state <= NETREG_REGISTERED)
++                      iflink = dev_get_iflink(dev);
++              else
++                      iflink = dev->ifindex;
++
++              if (iflink == dev->ifindex)
+                       return IF_OPER_DOWN;
+               ASSERT_RTNL();
+-- 
+2.39.5
+
diff --git a/queue-6.12/mctp-i3c-fix-mctp-i3c-driver-multi-thread-issue.patch b/queue-6.12/mctp-i3c-fix-mctp-i3c-driver-multi-thread-issue.patch
new file mode 100644 (file)
index 0000000..e99392f
--- /dev/null
@@ -0,0 +1,68 @@
+From c02f8c26af718ca06db85126c7b2e14586622664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 11:15:30 +0800
+Subject: mctp i3c: fix MCTP I3C driver multi-thread issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Leo Yang <leo.yang.sy0@gmail.com>
+
+[ Upstream commit 2d2d4f60ed266a8f340a721102d035252606980b ]
+
+We found a timeout problem with the pldm command on our system.  The
+reason is that the MCTP-I3C driver has a race condition when receiving
+multiple-packet messages in multi-thread, resulting in a wrong packet
+order problem.
+
+We identified this problem by adding a debug message to the
+mctp_i3c_read function.
+
+According to the MCTP spec, a multiple-packet message must be composed
+in sequence, and if there is a wrong sequence, the whole message will be
+discarded and wait for the next SOM.
+For example, SOM → Pkt Seq #2 → Pkt Seq #1 → Pkt Seq #3 → EOM.
+
+Therefore, we try to solve this problem by adding a mutex to the
+mctp_i3c_read function.  Before the modification, when a command
+requesting a multiple-packet message response is sent consecutively, an
+error usually occurs within 100 loops.  After the mutex, it can go
+through 40000 loops without any error, and it seems to run well.
+
+Fixes: c8755b29b58e ("mctp i3c: MCTP I3C driver")
+Signed-off-by: Leo Yang <Leo-Yang@quantatw.com>
+Link: https://patch.msgid.link/20250107031529.3296094-1-Leo-Yang@quantatw.com
+[pabeni@redhat.com: dropped already answered question from changelog]
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mctp/mctp-i3c.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
+index 1bc87a062686..ee9d562f0817 100644
+--- a/drivers/net/mctp/mctp-i3c.c
++++ b/drivers/net/mctp/mctp-i3c.c
+@@ -125,6 +125,8 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
+       xfer.data.in = skb_put(skb, mi->mrl);
++      /* Make sure netif_rx() is read in the same order as i3c. */
++      mutex_lock(&mi->lock);
+       rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
+       if (rc < 0)
+               goto err;
+@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
+               stats->rx_dropped++;
+       }
++      mutex_unlock(&mi->lock);
+       return 0;
+ err:
++      mutex_unlock(&mi->lock);
+       kfree_skb(skb);
+       return rc;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-802-llc-snap-oid-pid-lookup-on-start-of-skb-data.patch b/queue-6.12/net-802-llc-snap-oid-pid-lookup-on-start-of-skb-data.patch
new file mode 100644 (file)
index 0000000..99ae023
--- /dev/null
@@ -0,0 +1,56 @@
+From 42232bf70d5e88710dd0ec85d35b3a4e64fbd3f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jan 2025 20:23:00 -0500
+Subject: net: 802: LLC+SNAP OID:PID lookup on start of skb data
+
+From: Antonio Pastor <antonio.pastor@gmail.com>
+
+[ Upstream commit 1e9b0e1c550c42c13c111d1a31e822057232abc4 ]
+
+802.2+LLC+SNAP frames received by napi_complete_done() with GRO and DSA
+have skb->transport_header set two bytes short, or pointing 2 bytes
+before network_header & skb->data. This was an issue as snap_rcv()
+expected offset to point to SNAP header (OID:PID), causing packet to
+be dropped.
+
+A fix at llc_fixup_skb() (a024e377efed) resets transport_header for any
+LLC consumers that may care about it, and stops SNAP packets from being
+dropped, but doesn't fix the problem which is that LLC and SNAP should
+not use transport_header offset.
+
+This patch eliminates the use of transport_header offset for SNAP lookup
+of OID:PID so that SNAP does not rely on the offset at all.
+The offset is reset after pull for any SNAP packet consumers that may
+(but shouldn't) use it.
+
+Fixes: fda55eca5a33 ("net: introduce skb_transport_header_was_set()")
+Signed-off-by: Antonio Pastor <antonio.pastor@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250103012303.746521-1-antonio.pastor@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/802/psnap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/802/psnap.c b/net/802/psnap.c
+index fca9d454905f..389df460c8c4 100644
+--- a/net/802/psnap.c
++++ b/net/802/psnap.c
+@@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
+               goto drop;
+       rcu_read_lock();
+-      proto = find_snap_client(skb_transport_header(skb));
++      proto = find_snap_client(skb->data);
+       if (proto) {
+               /* Pass the frame on. */
+-              skb->transport_header += 5;
+               skb_pull_rcsum(skb, 5);
++              skb_reset_transport_header(skb);
+               rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
+       }
+       rcu_read_unlock();
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-don-t-dump-tx-and-uninitialized-napis.patch b/queue-6.12/net-don-t-dump-tx-and-uninitialized-napis.patch
new file mode 100644 (file)
index 0000000..e7ff088
--- /dev/null
@@ -0,0 +1,52 @@
+From 3552059d7d839be6a98942ae86eafd41f57c315e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 10:32:07 -0800
+Subject: net: don't dump Tx and uninitialized NAPIs
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit fd48f071a3d6d51e737e953bb43fe69785cf59a9 ]
+
+We use NAPI ID as the key for continuing dumps. We also depend
+on the NAPIs being sorted by ID within the driver list. Tx NAPIs
+(which don't have an ID assigned) break this expectation, it's
+not currently possible to dump them reliably. Since Tx NAPIs
+are relatively rare, and can't be used in doit (GET or SET)
+hide them from the dump API as well.
+
+Fixes: 27f91aaf49b3 ("netdev-genl: Add netlink framework functions for napi")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250103183207.1216004-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netdev-genl.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index d58270b48cb2..c639acb5abfd 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -173,8 +173,7 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
+       if (!hdr)
+               return -EMSGSIZE;
+-      if (napi->napi_id >= MIN_NAPI_ID &&
+-          nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
++      if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+               goto nla_put_failure;
+       if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
+@@ -254,6 +253,8 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+               return err;
+       list_for_each_entry(napi, &netdev->napi_list, dev_list) {
++              if (napi->napi_id < MIN_NAPI_ID)
++                      continue;
+               if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
+                       continue;
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-don-t-auto-enable-misc-vector.patch b/queue-6.12/net-hns3-don-t-auto-enable-misc-vector.patch
new file mode 100644 (file)
index 0000000..5ae7e58
--- /dev/null
@@ -0,0 +1,98 @@
+From 86d786e3fc9328ffb5e346760408a724f7ca8f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:39 +0800
+Subject: net: hns3: don't auto enable misc vector
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 98b1e3b27734139c76295754b6c317aa4df6d32e ]
+
+Currently, there is a time window between misc irq enabled
+and service task inited. If an interrupt is reported at
+this time, it will cause warning like below:
+
+[   16.324639] Call trace:
+[   16.324641]  __queue_delayed_work+0xb8/0xe0
+[   16.324643]  mod_delayed_work_on+0x78/0xd0
+[   16.324655]  hclge_errhand_task_schedule+0x58/0x90 [hclge]
+[   16.324662]  hclge_misc_irq_handle+0x168/0x240 [hclge]
+[   16.324666]  __handle_irq_event_percpu+0x64/0x1e0
+[   16.324667]  handle_irq_event+0x80/0x170
+[   16.324670]  handle_fasteoi_edge_irq+0x110/0x2bc
+[   16.324671]  __handle_domain_irq+0x84/0xfc
+[   16.324673]  gic_handle_irq+0x88/0x2c0
+[   16.324674]  el1_irq+0xb8/0x140
+[   16.324677]  arch_cpu_idle+0x18/0x40
+[   16.324679]  default_idle_call+0x5c/0x1bc
+[   16.324682]  cpuidle_idle_call+0x18c/0x1c4
+[   16.324684]  do_idle+0x174/0x17c
+[   16.324685]  cpu_startup_entry+0x30/0x6c
+[   16.324687]  secondary_start_kernel+0x1a4/0x280
+[   16.324688] ---[ end trace 6aa0bff672a964aa ]---
+
+So don't auto enable misc vector when requesting the irq.
+
+Fixes: 7be1b9f3e99f ("net: hns3: make hclge_service use delayed workqueue")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250106143642.539698-5-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 35c618c794be..9a67fe0554a5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6,6 +6,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+@@ -3780,7 +3781,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
+       snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+                HCLGE_NAME, pci_name(hdev->pdev));
+       ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+-                        0, hdev->misc_vector.name, hdev);
++                        IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
+       if (ret) {
+               hclge_free_vector(hdev, 0);
+               dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
+@@ -11916,9 +11917,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+       hclge_init_rxd_adv_layout(hdev);
+-      /* Enable MISC vector(vector0) */
+-      hclge_enable_vector(&hdev->misc_vector, true);
+-
+       ret = hclge_init_wol(hdev);
+       if (ret)
+               dev_warn(&pdev->dev,
+@@ -11931,6 +11929,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+       hclge_state_init(hdev);
+       hdev->last_reset_time = jiffies;
++      /* Enable MISC vector(vector0) */
++      enable_irq(hdev->misc_vector.vector_irq);
++      hclge_enable_vector(&hdev->misc_vector, true);
++
+       dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+                HCLGE_DRIVER_NAME);
+@@ -12336,7 +12338,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+       /* Disable MISC vector(vector0) */
+       hclge_enable_vector(&hdev->misc_vector, false);
+-      synchronize_irq(hdev->misc_vector.vector_irq);
++      disable_irq(hdev->misc_vector.vector_irq);
+       /* Disable all hw interrupts */
+       hclge_config_mac_tnl_int(hdev, false);
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-fix-kernel-crash-when-1588-is-sent-on-hip08.patch b/queue-6.12/net-hns3-fix-kernel-crash-when-1588-is-sent-on-hip08.patch
new file mode 100644 (file)
index 0000000..c9d39eb
--- /dev/null
@@ -0,0 +1,98 @@
+From 52f6908e30f50376de13a3695b89b4fb79168f7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:42 +0800
+Subject: net: hns3: fix kernel crash when 1588 is sent on HIP08 devices
+
+From: Jie Wang <wangjie125@huawei.com>
+
+[ Upstream commit 9741e72b2286de8b38de9db685588ac421a95c87 ]
+
+Currently, HIP08 devices does not register the ptp devices, so the
+hdev->ptp is NULL. But the tx process would still try to set hardware time
+stamp info with SKBTX_HW_TSTAMP flag and cause a kernel crash.
+
+[  128.087798] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000018
+...
+[  128.280251] pc : hclge_ptp_set_tx_info+0x2c/0x140 [hclge]
+[  128.286600] lr : hclge_ptp_set_tx_info+0x20/0x140 [hclge]
+[  128.292938] sp : ffff800059b93140
+[  128.297200] x29: ffff800059b93140 x28: 0000000000003280
+[  128.303455] x27: ffff800020d48280 x26: ffff0cb9dc814080
+[  128.309715] x25: ffff0cb9cde93fa0 x24: 0000000000000001
+[  128.315969] x23: 0000000000000000 x22: 0000000000000194
+[  128.322219] x21: ffff0cd94f986000 x20: 0000000000000000
+[  128.328462] x19: ffff0cb9d2a166c0 x18: 0000000000000000
+[  128.334698] x17: 0000000000000000 x16: ffffcf1fc523ed24
+[  128.340934] x15: 0000ffffd530a518 x14: 0000000000000000
+[  128.347162] x13: ffff0cd6bdb31310 x12: 0000000000000368
+[  128.353388] x11: ffff0cb9cfbc7070 x10: ffff2cf55dd11e02
+[  128.359606] x9 : ffffcf1f85a212b4 x8 : ffff0cd7cf27dab0
+[  128.365831] x7 : 0000000000000a20 x6 : ffff0cd7cf27d000
+[  128.372040] x5 : 0000000000000000 x4 : 000000000000ffff
+[  128.378243] x3 : 0000000000000400 x2 : ffffcf1f85a21294
+[  128.384437] x1 : ffff0cb9db520080 x0 : ffff0cb9db500080
+[  128.390626] Call trace:
+[  128.393964]  hclge_ptp_set_tx_info+0x2c/0x140 [hclge]
+[  128.399893]  hns3_nic_net_xmit+0x39c/0x4c4 [hns3]
+[  128.405468]  xmit_one.constprop.0+0xc4/0x200
+[  128.410600]  dev_hard_start_xmit+0x54/0xf0
+[  128.415556]  sch_direct_xmit+0xe8/0x634
+[  128.420246]  __dev_queue_xmit+0x224/0xc70
+[  128.425101]  dev_queue_xmit+0x1c/0x40
+[  128.429608]  ovs_vport_send+0xac/0x1a0 [openvswitch]
+[  128.435409]  do_output+0x60/0x17c [openvswitch]
+[  128.440770]  do_execute_actions+0x898/0x8c4 [openvswitch]
+[  128.446993]  ovs_execute_actions+0x64/0xf0 [openvswitch]
+[  128.453129]  ovs_dp_process_packet+0xa0/0x224 [openvswitch]
+[  128.459530]  ovs_vport_receive+0x7c/0xfc [openvswitch]
+[  128.465497]  internal_dev_xmit+0x34/0xb0 [openvswitch]
+[  128.471460]  xmit_one.constprop.0+0xc4/0x200
+[  128.476561]  dev_hard_start_xmit+0x54/0xf0
+[  128.481489]  __dev_queue_xmit+0x968/0xc70
+[  128.486330]  dev_queue_xmit+0x1c/0x40
+[  128.490856]  ip_finish_output2+0x250/0x570
+[  128.495810]  __ip_finish_output+0x170/0x1e0
+[  128.500832]  ip_finish_output+0x3c/0xf0
+[  128.505504]  ip_output+0xbc/0x160
+[  128.509654]  ip_send_skb+0x58/0xd4
+[  128.513892]  udp_send_skb+0x12c/0x354
+[  128.518387]  udp_sendmsg+0x7a8/0x9c0
+[  128.522793]  inet_sendmsg+0x4c/0x8c
+[  128.527116]  __sock_sendmsg+0x48/0x80
+[  128.531609]  __sys_sendto+0x124/0x164
+[  128.536099]  __arm64_sys_sendto+0x30/0x5c
+[  128.540935]  invoke_syscall+0x50/0x130
+[  128.545508]  el0_svc_common.constprop.0+0x10c/0x124
+[  128.551205]  do_el0_svc+0x34/0xdc
+[  128.555347]  el0_svc+0x20/0x30
+[  128.559227]  el0_sync_handler+0xb8/0xc0
+[  128.563883]  el0_sync+0x160/0x180
+
+Fixes: 0bf5eb788512 ("net: hns3: add support for PTP")
+Signed-off-by: Jie Wang <wangjie125@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250106143642.539698-8-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 5505caea88e9..bab16c2191b2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_ptp *ptp = hdev->ptp;
++      if (!ptp)
++              return false;
++
+       if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+           test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+               ptp->tx_skipped++;
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-fix-missing-features-due-to-dev-features-co.patch b/queue-6.12/net-hns3-fix-missing-features-due-to-dev-features-co.patch
new file mode 100644 (file)
index 0000000..061efe1
--- /dev/null
@@ -0,0 +1,41 @@
+From 51d641d15b831dbcb65b89f11054ef6ddc53c56d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:37 +0800
+Subject: net: hns3: fix missing features due to dev->features configuration
+ too early
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit ac1e2836fe294c2007ca81cf7006862c3bdf0510 ]
+
+Currently, the netdev->features is configured in hns3_nic_set_features.
+As a result, __netdev_update_features considers that there is no feature
+difference, and the procedures of the real features are missing.
+
+Fixes: 2a7556bb2b73 ("net: hns3: implement ndo_features_check ops for hns3 driver")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250106143642.539698-3-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 4cbc4d069a1f..73825b6bd485 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
+                       return ret;
+       }
+-      netdev->features = features;
+       return 0;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-fixed-hclge_fetch_pf_reg-accesses-bar-space.patch b/queue-6.12/net-hns3-fixed-hclge_fetch_pf_reg-accesses-bar-space.patch
new file mode 100644 (file)
index 0000000..939336a
--- /dev/null
@@ -0,0 +1,116 @@
+From 714b143d3b7e0019e13d77d8848cfc57d570864c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:41 +0800
+Subject: net: hns3: fixed hclge_fetch_pf_reg accesses bar space out of bounds
+ issue
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit 7997ddd46c54408bcba5e37fe18b4d832e45d4d4 ]
+
+The TQP BAR space is divided into two segments. TQPs 0-1023 and TQPs
+1024-1279 are in different BAR space addresses. However,
+hclge_fetch_pf_reg does not distinguish the tqp space information when
+reading the tqp space information. When the number of TQPs is greater
+than 1024, access bar space overwriting occurs.
+The problem of different segments has been considered during the
+initialization of tqp.io_base. Therefore, tqp.io_base is directly used
+when the queue is read in hclge_fetch_pf_reg.
+
+The error message:
+
+Unable to handle kernel paging request at virtual address ffff800037200000
+pc : hclge_fetch_pf_reg+0x138/0x250 [hclge]
+lr : hclge_get_regs+0x84/0x1d0 [hclge]
+Call trace:
+ hclge_fetch_pf_reg+0x138/0x250 [hclge]
+ hclge_get_regs+0x84/0x1d0 [hclge]
+ hns3_get_regs+0x2c/0x50 [hns3]
+ ethtool_get_regs+0xf4/0x270
+ dev_ethtool+0x674/0x8a0
+ dev_ioctl+0x270/0x36c
+ sock_do_ioctl+0x110/0x2a0
+ sock_ioctl+0x2ac/0x530
+ __arm64_sys_ioctl+0xa8/0x100
+ invoke_syscall+0x4c/0x124
+ el0_svc_common.constprop.0+0x140/0x15c
+ do_el0_svc+0x30/0xd0
+ el0_svc+0x1c/0x2c
+ el0_sync_handler+0xb0/0xb4
+ el0_sync+0x168/0x180
+
+Fixes: 939ccd107ffc ("net: hns3: move dump regs function to a separate file")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250106143642.539698-7-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c  | 9 +++++----
+ .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c    | 9 +++++----
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+index 43c1c18fa81f..8c057192aae6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+@@ -510,9 +510,9 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+                             struct hnae3_knic_private_info *kinfo)
+ {
+-#define HCLGE_RING_REG_OFFSET         0x200
+ #define HCLGE_RING_INT_REG_OFFSET     0x4
++      struct hnae3_queue *tqp;
+       int i, j, reg_num;
+       int data_num_sum;
+       u32 *reg = data;
+@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+       reg_num = ARRAY_SIZE(ring_reg_addr_list);
+       for (j = 0; j < kinfo->num_tqps; j++) {
+               reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
++              tqp = kinfo->tqp[j];
+               for (i = 0; i < reg_num; i++)
+-                      *reg++ = hclge_read_dev(&hdev->hw,
+-                                              ring_reg_addr_list[i] +
+-                                              HCLGE_RING_REG_OFFSET * j);
++                      *reg++ = readl_relaxed(tqp->io_base -
++                                             HCLGE_TQP_REG_OFFSET +
++                                             ring_reg_addr_list[i]);
+       }
+       data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+index 6db415d8b917..7d9d9dbc7560 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
+ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+                     void *data)
+ {
+-#define HCLGEVF_RING_REG_OFFSET               0x200
+ #define HCLGEVF_RING_INT_REG_OFFSET   0x4
+       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++      struct hnae3_queue *tqp;
+       int i, j, reg_um;
+       u32 *reg = data;
+@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+       reg_um = ARRAY_SIZE(ring_reg_addr_list);
+       for (j = 0; j < hdev->num_tqps; j++) {
+               reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
++              tqp = &hdev->htqp[j].q;
+               for (i = 0; i < reg_um; i++)
+-                      *reg++ = hclgevf_read_dev(&hdev->hw,
+-                                                ring_reg_addr_list[i] +
+-                                                HCLGEVF_RING_REG_OFFSET * j);
++                      *reg++ = readl_relaxed(tqp->io_base -
++                                             HCLGEVF_TQP_REG_OFFSET +
++                                             ring_reg_addr_list[i]);
+       }
+       reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-fixed-reset-failure-issues-caused-by-the-in.patch b/queue-6.12/net-hns3-fixed-reset-failure-issues-caused-by-the-in.patch
new file mode 100644 (file)
index 0000000..f719711
--- /dev/null
@@ -0,0 +1,243 @@
+From dd7841e54b06ef3c838d2fbe9306eb2f7a1607c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:36 +0800
+Subject: net: hns3: fixed reset failure issues caused by the incorrect reset
+ type
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit 5a4b584c67699a69981f0740618a144965a63237 ]
+
+When a reset type that is not supported by the driver is input, a reset
+pending flag bit of the HNAE3_NONE_RESET type is generated in
+reset_pending. The driver does not have a mechanism to clear this type
+of error. As a result, the driver considers that the reset is not
+complete. This patch provides a mechanism to clear the
+HNAE3_NONE_RESET flag and the parameter of
+hnae3_ae_ops.set_default_reset_request is verified.
+
+The error message:
+hns3 0000:39:01.0: cmd failed -16
+hns3 0000:39:01.0: hclge device re-init failed, VF is disabled!
+hns3 0000:39:01.0: failed to reset VF stack
+hns3 0000:39:01.0: failed to reset VF(4)
+hns3 0000:39:01.0: prepare reset(2) wait done
+hns3 0000:39:01.0 eth4: already uninitialized
+
+Use the crash tool to view struct hclgevf_dev:
+struct hclgevf_dev {
+...
+       default_reset_request = 0x20,
+       reset_level = HNAE3_NONE_RESET,
+       reset_pending = 0x100,
+       reset_type = HNAE3_NONE_RESET,
+...
+};
+
+Fixes: 720bd5837e37 ("net: hns3: add set_default_reset_request in the hnae3_ae_ops")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250106143642.539698-2-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../hisilicon/hns3/hns3pf/hclge_main.c        | 33 ++++++++++++++--
+ .../hisilicon/hns3/hns3vf/hclgevf_main.c      | 38 ++++++++++++++++---
+ 2 files changed, 61 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index bd86efd92a5a..35c618c794be 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3584,6 +3584,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+       return ret;
+ }
++static void hclge_set_reset_pending(struct hclge_dev *hdev,
++                                  enum hnae3_reset_type reset_type)
++{
++      /* When an incorrect reset type is executed, the get_reset_level
++       * function generates the HNAE3_NONE_RESET flag. As a result, this
++       * type do not need to pending.
++       */
++      if (reset_type != HNAE3_NONE_RESET)
++              set_bit(reset_type, &hdev->reset_pending);
++}
++
+ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ {
+       u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
+@@ -3604,7 +3615,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+        */
+       if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
+               dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+-              set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
++              hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
+               set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+               *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+               hdev->rst_stats.imp_rst_cnt++;
+@@ -3614,7 +3625,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+       if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
+               dev_info(&hdev->pdev->dev, "global reset interrupt\n");
+               set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+-              set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
++              hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
+               *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+               hdev->rst_stats.global_rst_cnt++;
+               return HCLGE_VECTOR0_EVENT_RST;
+@@ -4062,7 +4073,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
+       case HNAE3_FUNC_RESET:
+               dev_info(&pdev->dev, "PF reset requested\n");
+               /* schedule again to check later */
+-              set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
++              hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
+               hclge_reset_task_schedule(hdev);
+               break;
+       default:
+@@ -4096,6 +4107,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+               clear_bit(HNAE3_FLR_RESET, addr);
+       }
++      clear_bit(HNAE3_NONE_RESET, addr);
++
+       if (hdev->reset_type != HNAE3_NONE_RESET &&
+           rst_level < hdev->reset_type)
+               return HNAE3_NONE_RESET;
+@@ -4237,7 +4250,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
+               return false;
+       } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+               hdev->rst_stats.reset_fail_cnt++;
+-              set_bit(hdev->reset_type, &hdev->reset_pending);
++              hclge_set_reset_pending(hdev, hdev->reset_type);
+               dev_info(&hdev->pdev->dev,
+                        "re-schedule reset task(%u)\n",
+                        hdev->rst_stats.reset_fail_cnt);
+@@ -4480,8 +4493,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
+ static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+                                       enum hnae3_reset_type rst_type)
+ {
++#define HCLGE_SUPPORT_RESET_TYPE \
++      (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
++      BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
++
+       struct hclge_dev *hdev = ae_dev->priv;
++      if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
++              /* To prevent reset triggered by hclge_reset_event */
++              set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
++              dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
++                       rst_type);
++              return;
++      }
++
+       set_bit(rst_type, &hdev->default_reset_request);
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 094a7c7b5592..ab54e6155e93 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1395,6 +1395,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+       return ret;
+ }
++static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
++                                    enum hnae3_reset_type reset_type)
++{
++      /* When an incorrect reset type is executed, the get_reset_level
++       * function generates the HNAE3_NONE_RESET flag. As a result, this
++       * type do not need to pending.
++       */
++      if (reset_type != HNAE3_NONE_RESET)
++              set_bit(reset_type, &hdev->reset_pending);
++}
++
+ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+ {
+ #define HCLGEVF_RESET_WAIT_US 20000
+@@ -1544,7 +1555,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+               hdev->rst_stats.rst_fail_cnt);
+       if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+-              set_bit(hdev->reset_type, &hdev->reset_pending);
++              hclgevf_set_reset_pending(hdev, hdev->reset_type);
+       if (hclgevf_is_reset_pending(hdev)) {
+               set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+@@ -1664,6 +1675,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
+               clear_bit(HNAE3_FLR_RESET, addr);
+       }
++      clear_bit(HNAE3_NONE_RESET, addr);
++
+       return rst_level;
+ }
+@@ -1673,14 +1686,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+       struct hclgevf_dev *hdev = ae_dev->priv;
+-      dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+-
+       if (hdev->default_reset_request)
+               hdev->reset_level =
+                       hclgevf_get_reset_level(&hdev->default_reset_request);
+       else
+               hdev->reset_level = HNAE3_VF_FUNC_RESET;
++      dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
++               hdev->reset_level);
++
+       /* reset of this VF requested */
+       set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
+       hclgevf_reset_task_schedule(hdev);
+@@ -1691,8 +1705,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
+ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+                                         enum hnae3_reset_type rst_type)
+ {
++#define HCLGEVF_SUPPORT_RESET_TYPE \
++      (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
++      BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
++      BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
++
+       struct hclgevf_dev *hdev = ae_dev->priv;
++      if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
++              /* To prevent reset triggered by hclge_reset_event */
++              set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
++              dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
++                       rst_type);
++              return;
++      }
+       set_bit(rst_type, &hdev->default_reset_request);
+ }
+@@ -1849,14 +1875,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
+                */
+               if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
+                       /* prepare for full reset of stack + pcie interface */
+-                      set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
++                      hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
+                       /* "defer" schedule the reset task again */
+                       set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+               } else {
+                       hdev->reset_attempts++;
+-                      set_bit(hdev->reset_level, &hdev->reset_pending);
++                      hclgevf_set_reset_pending(hdev, hdev->reset_level);
+                       set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+               }
+               hclgevf_reset_task_schedule(hdev);
+@@ -1979,7 +2005,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+               rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+               dev_info(&hdev->pdev->dev,
+                        "receive reset interrupt 0x%x!\n", rst_ing_reg);
+-              set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
++              hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
+               set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+               set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+               *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-initialize-reset_timer-before-hclgevf_misc_.patch b/queue-6.12/net-hns3-initialize-reset_timer-before-hclgevf_misc_.patch
new file mode 100644 (file)
index 0000000..ca38a76
--- /dev/null
@@ -0,0 +1,47 @@
+From 34ff64427f84bcaccf25068d43a2868295d47c7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:40 +0800
+Subject: net: hns3: initialize reset_timer before hclgevf_misc_irq_init()
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 247fd1e33e1cd156aabe444e932d2648d33f1245 ]
+
+Currently the misc irq is initialized before reset_timer setup. But
+it will access the reset_timer in the irq handler. So initialize
+the reset_timer earlier.
+
+Fixes: ff200099d271 ("net: hns3: remove unnecessary work in hclgevf_main")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Link: https://patch.msgid.link/20250106143642.539698-6-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index ab54e6155e93..d47bd8d6145f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2315,6 +2315,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
+       clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+       INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
++      /* timer needs to be initialized before misc irq */
++      timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+       mutex_init(&hdev->mbx_resp.mbx_mutex);
+       sema_init(&hdev->reset_sem, 1);
+@@ -3014,7 +3016,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+                HCLGEVF_DRIVER_NAME);
+       hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+-      timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+       return 0;
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-hns3-resolved-the-issue-that-the-debugfs-query-r.patch b/queue-6.12/net-hns3-resolved-the-issue-that-the-debugfs-query-r.patch
new file mode 100644 (file)
index 0000000..8a13811
--- /dev/null
@@ -0,0 +1,208 @@
+From 1bd45b08d3f965059e3f56316526c24867be9439 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 22:36:38 +0800
+Subject: net: hns3: Resolved the issue that the debugfs query result is
+ inconsistent.
+
+From: Hao Lan <lanhao@huawei.com>
+
+[ Upstream commit 5191a8d3c2ab5bc01930ea3425e06a739af5b0e9 ]
+
+This patch modifies the implementation of debugfs:
+
+When the user process stops unexpectedly, not all data of the file system
+is read. In this case, the save_buf pointer is not released. When the
+user process is called next time, save_buf is used to copy the cached
+data to the user space. As a result, the queried data is stale.
+
+To solve this problem, this patch implements .open() and .release() handler
+for debugfs file_operations. moving allocation buffer and execution
+of the cmd to the .open() handler and freeing in to the .release() handler.
+Allocate separate buffer for each reader and associate the buffer
+with the file pointer.
+When different user read processes no longer share the buffer,
+the stale data problem is fixed.
+
+Fixes: 5e69ea7ee2a6 ("net: hns3: refactor the debugfs process")
+Signed-off-by: Hao Lan <lanhao@huawei.com>
+Signed-off-by: Guangwei Zhang <zhangwangwei6@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250106143642.539698-4-shaojijie@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h   |  3 -
+ .../ethernet/hisilicon/hns3/hns3_debugfs.c    | 96 ++++++-------------
+ 2 files changed, 31 insertions(+), 68 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 27dbe367f3d3..d873523e84f2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -916,9 +916,6 @@ struct hnae3_handle {
+       u8 netdev_flags;
+       struct dentry *hnae3_dbgfs;
+-      /* protects concurrent contention between debugfs commands */
+-      struct mutex dbgfs_lock;
+-      char **dbgfs_buf;
+       /* Network interface message level enabled bits */
+       u32 msg_enable;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 807eb3bbb11c..9bbece25552b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
+ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+                            size_t count, loff_t *ppos)
+ {
+-      struct hns3_dbg_data *dbg_data = filp->private_data;
++      char *buf = filp->private_data;
++
++      return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
++}
++
++static int hns3_dbg_open(struct inode *inode, struct file *filp)
++{
++      struct hns3_dbg_data *dbg_data = inode->i_private;
+       struct hnae3_handle *handle = dbg_data->handle;
+       struct hns3_nic_priv *priv = handle->priv;
+-      ssize_t size = 0;
+-      char **save_buf;
+-      char *read_buf;
+       u32 index;
++      char *buf;
+       int ret;
++      if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
++          test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
++              return -EBUSY;
++
+       ret = hns3_dbg_get_cmd_index(dbg_data, &index);
+       if (ret)
+               return ret;
+-      mutex_lock(&handle->dbgfs_lock);
+-      save_buf = &handle->dbgfs_buf[index];
+-
+-      if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+-          test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
+-              ret = -EBUSY;
+-              goto out;
+-      }
+-
+-      if (*save_buf) {
+-              read_buf = *save_buf;
+-      } else {
+-              read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+-              if (!read_buf) {
+-                      ret = -ENOMEM;
+-                      goto out;
+-              }
+-
+-              /* save the buffer addr until the last read operation */
+-              *save_buf = read_buf;
+-
+-              /* get data ready for the first time to read */
+-              ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+-                                      read_buf, hns3_dbg_cmd[index].buf_len);
+-              if (ret)
+-                      goto out;
+-      }
++      buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
++      if (!buf)
++              return -ENOMEM;
+-      size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+-                                     strlen(read_buf));
+-      if (size > 0) {
+-              mutex_unlock(&handle->dbgfs_lock);
+-              return size;
++      ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
++                              buf, hns3_dbg_cmd[index].buf_len);
++      if (ret) {
++              kvfree(buf);
++              return ret;
+       }
+-out:
+-      /* free the buffer for the last read operation */
+-      if (*save_buf) {
+-              kvfree(*save_buf);
+-              *save_buf = NULL;
+-      }
++      filp->private_data = buf;
++      return 0;
++}
+-      mutex_unlock(&handle->dbgfs_lock);
+-      return ret;
++static int hns3_dbg_release(struct inode *inode, struct file *filp)
++{
++      kvfree(filp->private_data);
++      filp->private_data = NULL;
++      return 0;
+ }
+ static const struct file_operations hns3_dbg_fops = {
+       .owner = THIS_MODULE,
+-      .open  = simple_open,
++      .open  = hns3_dbg_open,
+       .read  = hns3_dbg_read,
++      .release = hns3_dbg_release,
+ };
+ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
+@@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+       int ret;
+       u32 i;
+-      handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+-                                       ARRAY_SIZE(hns3_dbg_cmd),
+-                                       sizeof(*handle->dbgfs_buf),
+-                                       GFP_KERNEL);
+-      if (!handle->dbgfs_buf)
+-              return -ENOMEM;
+-
+       hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
+                               debugfs_create_dir(name, hns3_dbgfs_root);
+       handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
+@@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+                       debugfs_create_dir(hns3_dbg_dentry[i].name,
+                                          handle->hnae3_dbgfs);
+-      mutex_init(&handle->dbgfs_lock);
+-
+       for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+               if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
+                    ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
+@@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ out:
+       debugfs_remove_recursive(handle->hnae3_dbgfs);
+       handle->hnae3_dbgfs = NULL;
+-      mutex_destroy(&handle->dbgfs_lock);
+       return ret;
+ }
+ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ {
+-      u32 i;
+-
+       debugfs_remove_recursive(handle->hnae3_dbgfs);
+       handle->hnae3_dbgfs = NULL;
+-
+-      for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+-              if (handle->dbgfs_buf[i]) {
+-                      kvfree(handle->dbgfs_buf[i]);
+-                      handle->dbgfs_buf[i] = NULL;
+-              }
+-
+-      mutex_destroy(&handle->dbgfs_lock);
+ }
+ void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-libwx-fix-firmware-mailbox-abnormal-return.patch b/queue-6.12/net-libwx-fix-firmware-mailbox-abnormal-return.patch
new file mode 100644 (file)
index 0000000..73561b5
--- /dev/null
@@ -0,0 +1,76 @@
+From 66912070306358dffe733c185ea2098446809569 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 16:10:13 +0800
+Subject: net: libwx: fix firmware mailbox abnormal return
+
+From: Jiawen Wu <jiawenwu@trustnetic.com>
+
+[ Upstream commit 8ce4f287524c74a118b0af1eebd4b24a8efca57a ]
+
+The existing SW-FW interaction flow on the driver is wrong. Follow this
+wrong flow, driver would never return error if there is a unknown command.
+Since firmware writes back 'firmware ready' and 'unknown command' in the
+mailbox message if there is an unknown command sent by driver. So reading
+'firmware ready' does not timeout. Then driver would mistakenly believe
+that the interaction has completed successfully.
+
+It tends to happen with the use of custom firmware. Move the check for
+'unknown command' out of the poll timeout for 'firmware ready'. And adjust
+the debug log so that mailbox messages are always printed when commands
+timeout.
+
+Fixes: 1efa9bfe58c5 ("net: libwx: Implement interaction with firmware")
+Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
+Link: https://patch.msgid.link/20250103081013.1995939-1-jiawenwu@trustnetic.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/wangxun/libwx/wx_hw.c | 24 ++++++++++------------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index 1bf9c38e4125..deaf670c160e 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
+       status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
+                                  timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
++      buf[0] = rd32(wx, WX_MNG_MBOX);
++      if ((buf[0] & 0xff0000) >> 16 == 0x80) {
++              wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
++              status = -EINVAL;
++              goto rel_out;
++      }
++
+       /* Check command completion */
+       if (status) {
+-              wx_dbg(wx, "Command has failed with no status valid.\n");
+-
+-              buf[0] = rd32(wx, WX_MNG_MBOX);
+-              if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
+-                      status = -EINVAL;
+-                      goto rel_out;
+-              }
+-              if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+-                      wx_dbg(wx, "It's unknown cmd.\n");
+-                      status = -EINVAL;
+-                      goto rel_out;
+-              }
+-
++              wx_err(wx, "Command has failed with no status valid.\n");
+               wx_dbg(wx, "write value:\n");
+               for (i = 0; i < dword_len; i++)
+                       wx_dbg(wx, "%x ", buffer[i]);
+               wx_dbg(wx, "read value:\n");
+               for (i = 0; i < dword_len; i++)
+                       wx_dbg(wx, "%x ", buf[i]);
++              wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
++
++              goto rel_out;
+       }
+       if (!return_data)
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-mlx5-fix-variable-not-being-completed-when-funct.patch b/queue-6.12/net-mlx5-fix-variable-not-being-completed-when-funct.patch
new file mode 100644 (file)
index 0000000..8e6bdc5
--- /dev/null
@@ -0,0 +1,62 @@
+From 5e1e8e218c3ef408c3c5fa6b08c93f02b3952ee0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 11:00:09 +0800
+Subject: net/mlx5: Fix variable not being completed when function returns
+
+From: Chenguang Zhao <zhaochenguang@kylinos.cn>
+
+[ Upstream commit 0e2909c6bec9048f49d0c8e16887c63b50b14647 ]
+
+When cmd_alloc_index(), fails cmd_work_handler() needs
+to complete ent->slotted before returning early.
+Otherwise the task which issued the command may hang:
+
+   mlx5_core 0000:01:00.0: cmd_work_handler:877:(pid 3880418): failed to allocate command entry
+   INFO: task kworker/13:2:4055883 blocked for more than 120 seconds.
+         Not tainted 4.19.90-25.44.v2101.ky10.aarch64 #1
+   "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+   kworker/13:2    D    0 4055883      2 0x00000228
+   Workqueue: events mlx5e_tx_dim_work [mlx5_core]
+   Call trace:
+      __switch_to+0xe8/0x150
+      __schedule+0x2a8/0x9b8
+      schedule+0x2c/0x88
+      schedule_timeout+0x204/0x478
+      wait_for_common+0x154/0x250
+      wait_for_completion+0x28/0x38
+      cmd_exec+0x7a0/0xa00 [mlx5_core]
+      mlx5_cmd_exec+0x54/0x80 [mlx5_core]
+      mlx5_core_modify_cq+0x6c/0x80 [mlx5_core]
+      mlx5_core_modify_cq_moderation+0xa0/0xb8 [mlx5_core]
+      mlx5e_tx_dim_work+0x54/0x68 [mlx5_core]
+      process_one_work+0x1b0/0x448
+      worker_thread+0x54/0x468
+      kthread+0x134/0x138
+      ret_from_fork+0x10/0x18
+
+Fixes: 485d65e13571 ("net/mlx5: Add a timeout to acquire the command queue semaphore")
+Signed-off-by: Chenguang Zhao <zhaochenguang@kylinos.cn>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Acked-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/20250108030009.68520-1-zhaochenguang@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 6bd8a18e3af3..e733b81e18a2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work)
+                               complete(&ent->done);
+                       }
+                       up(&cmd->vars.sem);
++                      complete(&ent->slotted);
+                       return;
+               }
+       } else {
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-stmmac-dwmac-tegra-read-iommu-stream-id-from-dev.patch b/queue-6.12/net-stmmac-dwmac-tegra-read-iommu-stream-id-from-dev.patch
new file mode 100644 (file)
index 0000000..8348f4c
--- /dev/null
@@ -0,0 +1,217 @@
+From 51eb95c62ff836aaa32e692b6519f8b6803344bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 16:24:59 -0500
+Subject: net: stmmac: dwmac-tegra: Read iommu stream id from device tree
+
+From: Parker Newman <pnewman@connecttech.com>
+
+[ Upstream commit 426046e2d62dd19533808661e912b8e8a9eaec16 ]
+
+Nvidia's Tegra MGBE controllers require the IOMMU "Stream ID" (SID) to be
+written to the MGBE_WRAP_AXI_ASID0_CTRL register.
+
+The current driver is hard coded to use MGBE0's SID for all controllers.
+This causes softirq time outs and kernel panics when using controllers
+other than MGBE0.
+
+Example dmesg errors when an ethernet cable is connected to MGBE1:
+
+[  116.133290] tegra-mgbe 6910000.ethernet eth1: Link is Up - 1Gbps/Full - flow control rx/tx
+[  121.851283] tegra-mgbe 6910000.ethernet eth1: NETDEV WATCHDOG: CPU: 5: transmit queue 0 timed out 5690 ms
+[  121.851782] tegra-mgbe 6910000.ethernet eth1: Reset adapter.
+[  121.892464] tegra-mgbe 6910000.ethernet eth1: Register MEM_TYPE_PAGE_POOL RxQ-0
+[  121.905920] tegra-mgbe 6910000.ethernet eth1: PHY [stmmac-1:00] driver [Aquantia AQR113] (irq=171)
+[  121.907356] tegra-mgbe 6910000.ethernet eth1: Enabling Safety Features
+[  121.907578] tegra-mgbe 6910000.ethernet eth1: IEEE 1588-2008 Advanced Timestamp supported
+[  121.908399] tegra-mgbe 6910000.ethernet eth1: registered PTP clock
+[  121.908582] tegra-mgbe 6910000.ethernet eth1: configuring for phy/10gbase-r link mode
+[  125.961292] tegra-mgbe 6910000.ethernet eth1: Link is Up - 1Gbps/Full - flow control rx/tx
+[  181.921198] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
+[  181.921404] rcu:    7-....: (1 GPs behind) idle=540c/1/0x4000000000000002 softirq=1748/1749 fqs=2337
+[  181.921684] rcu:    (detected by 4, t=6002 jiffies, g=1357, q=1254 ncpus=8)
+[  181.921878] Sending NMI from CPU 4 to CPUs 7:
+[  181.921886] NMI backtrace for cpu 7
+[  181.922131] CPU: 7 UID: 0 PID: 0 Comm: swapper/7 Kdump: loaded Not tainted 6.13.0-rc3+ #6
+[  181.922390] Hardware name: NVIDIA CTI Forge + Orin AGX/Jetson, BIOS 202402.1-Unknown 10/28/2024
+[  181.922658] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[  181.922847] pc : handle_softirqs+0x98/0x368
+[  181.922978] lr : __do_softirq+0x18/0x20
+[  181.923095] sp : ffff80008003bf50
+[  181.923189] x29: ffff80008003bf50 x28: 0000000000000008 x27: 0000000000000000
+[  181.923379] x26: ffffce78ea277000 x25: 0000000000000000 x24: 0000001c61befda0
+[  181.924486] x23: 0000000060400009 x22: ffffce78e99918bc x21: ffff80008018bd70
+[  181.925568] x20: ffffce78e8bb00d8 x19: ffff80008018bc20 x18: 0000000000000000
+[  181.926655] x17: ffff318ebe7d3000 x16: ffff800080038000 x15: 0000000000000000
+[  181.931455] x14: ffff000080816680 x13: ffff318ebe7d3000 x12: 000000003464d91d
+[  181.938628] x11: 0000000000000040 x10: ffff000080165a70 x9 : ffffce78e8bb0160
+[  181.945804] x8 : ffff8000827b3160 x7 : f9157b241586f343 x6 : eeb6502a01c81c74
+[  181.953068] x5 : a4acfcdd2e8096bb x4 : ffffce78ea277340 x3 : 00000000ffffd1e1
+[  181.960329] x2 : 0000000000000101 x1 : ffffce78ea277340 x0 : ffff318ebe7d3000
+[  181.967591] Call trace:
+[  181.970043]  handle_softirqs+0x98/0x368 (P)
+[  181.974240]  __do_softirq+0x18/0x20
+[  181.977743]  ____do_softirq+0x14/0x28
+[  181.981415]  call_on_irq_stack+0x24/0x30
+[  181.985180]  do_softirq_own_stack+0x20/0x30
+[  181.989379]  __irq_exit_rcu+0x114/0x140
+[  181.993142]  irq_exit_rcu+0x14/0x28
+[  181.996816]  el1_interrupt+0x44/0xb8
+[  182.000316]  el1h_64_irq_handler+0x14/0x20
+[  182.004343]  el1h_64_irq+0x80/0x88
+[  182.007755]  cpuidle_enter_state+0xc4/0x4a8 (P)
+[  182.012305]  cpuidle_enter+0x3c/0x58
+[  182.015980]  cpuidle_idle_call+0x128/0x1c0
+[  182.020005]  do_idle+0xe0/0xf0
+[  182.023155]  cpu_startup_entry+0x3c/0x48
+[  182.026917]  secondary_start_kernel+0xdc/0x120
+[  182.031379]  __secondary_switched+0x74/0x78
+[  212.971162] rcu: INFO: rcu_preempt detected expedited stalls on CPUs/tasks: { 7-.... } 6103 jiffies s: 417 root: 0x80/.
+[  212.985935] rcu: blocking rcu_node structures (internal RCU debug):
+[  212.992758] Sending NMI from CPU 0 to CPUs 7:
+[  212.998539] NMI backtrace for cpu 7
+[  213.004304] CPU: 7 UID: 0 PID: 0 Comm: swapper/7 Kdump: loaded Not tainted 6.13.0-rc3+ #6
+[  213.016116] Hardware name: NVIDIA CTI Forge + Orin AGX/Jetson, BIOS 202402.1-Unknown 10/28/2024
+[  213.030817] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[  213.040528] pc : handle_softirqs+0x98/0x368
+[  213.046563] lr : __do_softirq+0x18/0x20
+[  213.051293] sp : ffff80008003bf50
+[  213.055839] x29: ffff80008003bf50 x28: 0000000000000008 x27: 0000000000000000
+[  213.067304] x26: ffffce78ea277000 x25: 0000000000000000 x24: 0000001c61befda0
+[  213.077014] x23: 0000000060400009 x22: ffffce78e99918bc x21: ffff80008018bd70
+[  213.087339] x20: ffffce78e8bb00d8 x19: ffff80008018bc20 x18: 0000000000000000
+[  213.097313] x17: ffff318ebe7d3000 x16: ffff800080038000 x15: 0000000000000000
+[  213.107201] x14: ffff000080816680 x13: ffff318ebe7d3000 x12: 000000003464d91d
+[  213.116651] x11: 0000000000000040 x10: ffff000080165a70 x9 : ffffce78e8bb0160
+[  213.127500] x8 : ffff8000827b3160 x7 : 0a37b344852820af x6 : 3f049caedd1ff608
+[  213.138002] x5 : cff7cfdbfaf31291 x4 : ffffce78ea277340 x3 : 00000000ffffde04
+[  213.150428] x2 : 0000000000000101 x1 : ffffce78ea277340 x0 : ffff318ebe7d3000
+[  213.162063] Call trace:
+[  213.165494]  handle_softirqs+0x98/0x368 (P)
+[  213.171256]  __do_softirq+0x18/0x20
+[  213.177291]  ____do_softirq+0x14/0x28
+[  213.182017]  call_on_irq_stack+0x24/0x30
+[  213.186565]  do_softirq_own_stack+0x20/0x30
+[  213.191815]  __irq_exit_rcu+0x114/0x140
+[  213.196891]  irq_exit_rcu+0x14/0x28
+[  213.202401]  el1_interrupt+0x44/0xb8
+[  213.207741]  el1h_64_irq_handler+0x14/0x20
+[  213.213519]  el1h_64_irq+0x80/0x88
+[  213.217541]  cpuidle_enter_state+0xc4/0x4a8 (P)
+[  213.224364]  cpuidle_enter+0x3c/0x58
+[  213.228653]  cpuidle_idle_call+0x128/0x1c0
+[  213.233993]  do_idle+0xe0/0xf0
+[  213.237928]  cpu_startup_entry+0x3c/0x48
+[  213.243791]  secondary_start_kernel+0xdc/0x120
+[  213.249830]  __secondary_switched+0x74/0x78
+
+This bug has existed since the dwmac-tegra driver was added in Dec 2022
+(See Fixes tag below for commit hash).
+
+The Tegra234 SOC has 4 MGBE controllers, however Nvidia's Developer Kit
+only uses MGBE0 which is why the bug was not found previously. Connect Tech
+has many products that use 2 (or more) MGBE controllers.
+
+The solution is to read the controller's SID from the existing "iommus"
+device tree property. The 2nd field of the "iommus" device tree property
+is the controller's SID.
+
+Device tree snippet from tegra234.dtsi showing MGBE1's "iommus" property:
+
+smmu_niso0: iommu@12000000 {
+        compatible = "nvidia,tegra234-smmu", "nvidia,smmu-500";
+...
+}
+
+/* MGBE1 */
+ethernet@6900000 {
+       compatible = "nvidia,tegra234-mgbe";
+...
+       iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF1>;
+...
+}
+
+Nvidia's arm-smmu driver reads the "iommus" property and stores the SID in
+the MGBE device's "fwspec" struct. The dwmac-tegra driver can access the
+SID using the tegra_dev_iommu_get_stream_id() helper function found in
+linux/iommu.h.
+
+Calling tegra_dev_iommu_get_stream_id() should not fail unless the "iommus"
+property is removed from the device tree or the IOMMU is disabled.
+
+While the Tegra234 SOC technically supports bypassing the IOMMU, it is not
+supported by the current firmware, has not been tested and not recommended.
+More detailed discussion with Thierry Reding from Nvidia linked below.
+
+Fixes: d8ca113724e7 ("net: stmmac: tegra: Add MGBE support")
+Link: https://lore.kernel.org/netdev/cover.1731685185.git.pnewman@connecttech.com
+Signed-off-by: Parker Newman <pnewman@connecttech.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/6fb97f32cf4accb4f7cf92846f6b60064ba0a3bd.1736284360.git.pnewman@connecttech.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+index 6fdd94c8919e..2996bcdea9a2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++#include <linux/iommu.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+ #include <linux/module.h>
+@@ -19,6 +20,8 @@ struct tegra_mgbe {
+       struct reset_control *rst_mac;
+       struct reset_control *rst_pcs;
++      u32 iommu_sid;
++
+       void __iomem *hv;
+       void __iomem *regs;
+       void __iomem *xpcs;
+@@ -50,7 +53,6 @@ struct tegra_mgbe {
+ #define MGBE_WRAP_COMMON_INTR_ENABLE  0x8704
+ #define MAC_SBD_INTR                  BIT(2)
+ #define MGBE_WRAP_AXI_ASID0_CTRL      0x8400
+-#define MGBE_SID                      0x6
+ static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
+ {
+@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
+       writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+       /* Program SID */
+-      writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
++      writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+       value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
+       if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
+@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
+       if (IS_ERR(mgbe->xpcs))
+               return PTR_ERR(mgbe->xpcs);
++      /* get controller's stream id from iommu property in device tree */
++      if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
++              dev_err(mgbe->dev, "failed to get iommu stream id\n");
++              return -EINVAL;
++      }
++
+       res.addr = mgbe->regs;
+       res.irq = irq;
+@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
+       writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+       /* Program SID */
+-      writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
++      writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+       plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
+-- 
+2.39.5
+
diff --git a/queue-6.12/net_sched-cls_flow-validate-tca_flow_rshift-attribut.patch b/queue-6.12/net_sched-cls_flow-validate-tca_flow_rshift-attribut.patch
new file mode 100644 (file)
index 0000000..16d0895
--- /dev/null
@@ -0,0 +1,74 @@
+From 90f0d79e2aa907b5f499881e4fa13fe7efb8744f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 10:45:46 +0000
+Subject: net_sched: cls_flow: validate TCA_FLOW_RSHIFT attribute
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a039e54397c6a75b713b9ce7894a62e06956aa92 ]
+
+syzbot found that TCA_FLOW_RSHIFT attribute was not validated.
+Right shitfing a 32bit integer is undefined for large shift values.
+
+UBSAN: shift-out-of-bounds in net/sched/cls_flow.c:329:23
+shift exponent 9445 is too large for 32-bit type 'u32' (aka 'unsigned int')
+CPU: 1 UID: 0 PID: 54 Comm: kworker/u8:3 Not tainted 6.13.0-rc3-syzkaller-00180-g4f619d518db9 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+Workqueue: ipv6_addrconf addrconf_dad_work
+Call Trace:
+ <TASK>
+  __dump_stack lib/dump_stack.c:94 [inline]
+  dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
+  ubsan_epilogue lib/ubsan.c:231 [inline]
+  __ubsan_handle_shift_out_of_bounds+0x3c8/0x420 lib/ubsan.c:468
+  flow_classify+0x24d5/0x25b0 net/sched/cls_flow.c:329
+  tc_classify include/net/tc_wrapper.h:197 [inline]
+  __tcf_classify net/sched/cls_api.c:1771 [inline]
+  tcf_classify+0x420/0x1160 net/sched/cls_api.c:1867
+  sfb_classify net/sched/sch_sfb.c:260 [inline]
+  sfb_enqueue+0x3ad/0x18b0 net/sched/sch_sfb.c:318
+  dev_qdisc_enqueue+0x4b/0x290 net/core/dev.c:3793
+  __dev_xmit_skb net/core/dev.c:3889 [inline]
+  __dev_queue_xmit+0xf0e/0x3f50 net/core/dev.c:4400
+  dev_queue_xmit include/linux/netdevice.h:3168 [inline]
+  neigh_hh_output include/net/neighbour.h:523 [inline]
+  neigh_output include/net/neighbour.h:537 [inline]
+  ip_finish_output2+0xd41/0x1390 net/ipv4/ip_output.c:236
+  iptunnel_xmit+0x55d/0x9b0 net/ipv4/ip_tunnel_core.c:82
+  udp_tunnel_xmit_skb+0x262/0x3b0 net/ipv4/udp_tunnel_core.c:173
+  geneve_xmit_skb drivers/net/geneve.c:916 [inline]
+  geneve_xmit+0x21dc/0x2d00 drivers/net/geneve.c:1039
+  __netdev_start_xmit include/linux/netdevice.h:5002 [inline]
+  netdev_start_xmit include/linux/netdevice.h:5011 [inline]
+  xmit_one net/core/dev.c:3590 [inline]
+  dev_hard_start_xmit+0x27a/0x7d0 net/core/dev.c:3606
+  __dev_queue_xmit+0x1b73/0x3f50 net/core/dev.c:4434
+
+Fixes: e5dfb815181f ("[NET_SCHED]: Add flow classifier")
+Reported-by: syzbot+1dbb57d994e54aaa04d2@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/6777bf49.050a0220.178762.0040.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250103104546.3714168-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_flow.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
+index 5502998aace7..5c2580a07530 100644
+--- a/net/sched/cls_flow.c
++++ b/net/sched/cls_flow.c
+@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
+       [TCA_FLOW_KEYS]         = { .type = NLA_U32 },
+       [TCA_FLOW_MODE]         = { .type = NLA_U32 },
+       [TCA_FLOW_BASECLASS]    = { .type = NLA_U32 },
+-      [TCA_FLOW_RSHIFT]       = { .type = NLA_U32 },
++      [TCA_FLOW_RSHIFT]       = NLA_POLICY_MAX(NLA_U32,
++                                               31 /* BITS_PER_U32 - 1 */),
+       [TCA_FLOW_ADDEND]       = { .type = NLA_U32 },
+       [TCA_FLOW_MASK]         = { .type = NLA_U32 },
+       [TCA_FLOW_XOR]          = { .type = NLA_U32 },
+-- 
+2.39.5
+
diff --git a/queue-6.12/netfilter-conntrack-clamp-maximum-hashtable-size-to-.patch b/queue-6.12/netfilter-conntrack-clamp-maximum-hashtable-size-to-.patch
new file mode 100644 (file)
index 0000000..c53c7ed
--- /dev/null
@@ -0,0 +1,48 @@
+From 8819e3d2a08b07084945c54b456ee062b67cf87e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 22:56:33 +0100
+Subject: netfilter: conntrack: clamp maximum hashtable size to INT_MAX
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit b541ba7d1f5a5b7b3e2e22dc9e40e18a7d6dbc13 ]
+
+Use INT_MAX as maximum size for the conntrack hashtable. Otherwise, it
+is possible to hit WARN_ON_ONCE in __kvmalloc_node_noprof() when
+resizing hashtable because __GFP_NOWARN is unset. See:
+
+  0708a0afe291 ("mm: Consider __GFP_NOWARN flag for oversized kvmalloc() calls")
+
+Note: hashtable resize is only possible from init_netns.
+
+Fixes: 9cc1c73ad666 ("netfilter: conntrack: avoid integer overflow when resizing")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 9db3e2b0b1c3..456446d7af20 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2517,12 +2517,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+       struct hlist_nulls_head *hash;
+       unsigned int nr_slots, i;
+-      if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
++      if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+       BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+       nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
++      if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
++              return NULL;
++
+       hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
+       if (hash && nulls)
+-- 
+2.39.5
+
diff --git a/queue-6.12/netfilter-nf_tables-imbalance-in-flowtable-binding.patch b/queue-6.12/netfilter-nf_tables-imbalance-in-flowtable-binding.patch
new file mode 100644 (file)
index 0000000..85116e8
--- /dev/null
@@ -0,0 +1,117 @@
+From 12571b70ba97ccaf6eacaf42866967232aa4425e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jan 2025 13:01:13 +0100
+Subject: netfilter: nf_tables: imbalance in flowtable binding
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 13210fc63f353fe78584048079343413a3cdf819 ]
+
+All these cases cause imbalance between BIND and UNBIND calls:
+
+- Delete an interface from a flowtable with multiple interfaces
+
+- Add a (device to a) flowtable with --check flag
+
+- Delete a netns containing a flowtable
+
+- In an interactive nft session, create a table with owner flag and
+  flowtable inside, then quit.
+
+Fix it by calling FLOW_BLOCK_UNBIND when unregistering hooks, then
+remove late FLOW_BLOCK_UNBIND call when destroying flowtable.
+
+Fixes: ff4bf2f42a40 ("netfilter: nf_tables: add nft_unregister_flowtable_hook()")
+Reported-by: Phil Sutter <phil@nwl.cc>
+Tested-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0c5ff4afc370..42dc8cc721ff 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8565,6 +8565,7 @@ static void nft_unregister_flowtable_hook(struct net *net,
+ }
+ static void __nft_unregister_flowtable_net_hooks(struct net *net,
++                                               struct nft_flowtable *flowtable,
+                                                struct list_head *hook_list,
+                                                bool release_netdev)
+ {
+@@ -8572,6 +8573,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
+       list_for_each_entry_safe(hook, next, hook_list, list) {
+               nf_unregister_net_hook(net, &hook->ops);
++              flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
++                                          FLOW_BLOCK_UNBIND);
+               if (release_netdev) {
+                       list_del(&hook->list);
+                       kfree_rcu(hook, rcu);
+@@ -8580,9 +8583,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ }
+ static void nft_unregister_flowtable_net_hooks(struct net *net,
++                                             struct nft_flowtable *flowtable,
+                                              struct list_head *hook_list)
+ {
+-      __nft_unregister_flowtable_net_hooks(net, hook_list, false);
++      __nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false);
+ }
+ static int nft_register_flowtable_net_hooks(struct net *net,
+@@ -9223,8 +9227,6 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+       flowtable->data.type->free(&flowtable->data);
+       list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+-              flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+-                                          FLOW_BLOCK_UNBIND);
+               list_del_rcu(&hook->list);
+               kfree_rcu(hook, rcu);
+       }
+@@ -10622,6 +10624,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+                                                          &nft_trans_flowtable_hooks(trans),
+                                                          trans->msg_type);
+                               nft_unregister_flowtable_net_hooks(net,
++                                                                 nft_trans_flowtable(trans),
+                                                                  &nft_trans_flowtable_hooks(trans));
+                       } else {
+                               list_del_rcu(&nft_trans_flowtable(trans)->list);
+@@ -10630,6 +10633,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+                                                          NULL,
+                                                          trans->msg_type);
+                               nft_unregister_flowtable_net_hooks(net,
++                                              nft_trans_flowtable(trans),
+                                               &nft_trans_flowtable(trans)->hook_list);
+                       }
+                       break;
+@@ -10901,11 +10905,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+               case NFT_MSG_NEWFLOWTABLE:
+                       if (nft_trans_flowtable_update(trans)) {
+                               nft_unregister_flowtable_net_hooks(net,
++                                              nft_trans_flowtable(trans),
+                                               &nft_trans_flowtable_hooks(trans));
+                       } else {
+                               nft_use_dec_restore(&table->use);
+                               list_del_rcu(&nft_trans_flowtable(trans)->list);
+                               nft_unregister_flowtable_net_hooks(net,
++                                              nft_trans_flowtable(trans),
+                                               &nft_trans_flowtable(trans)->hook_list);
+                       }
+                       break;
+@@ -11498,7 +11504,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
+       list_for_each_entry(chain, &table->chains, list)
+               __nf_tables_unregister_hook(net, table, chain, true);
+       list_for_each_entry(flowtable, &table->flowtables, list)
+-              __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
++              __nft_unregister_flowtable_net_hooks(net, flowtable,
++                                                   &flowtable->hook_list,
+                                                    true);
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/pds_core-limit-loop-over-fw-name-list.patch b/queue-6.12/pds_core-limit-loop-over-fw-name-list.patch
new file mode 100644 (file)
index 0000000..1112ca0
--- /dev/null
@@ -0,0 +1,42 @@
+From 2a8cd49e1f4c32edd9bfeabaec28e6fb04824e68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 11:51:47 -0800
+Subject: pds_core: limit loop over fw name list
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit 8c817eb26230dc0ae553cee16ff43a4a895f6756 ]
+
+Add an array size limit to the for-loop to be sure we don't try
+to reference a fw_version string off the end of the fw info names
+array.  We know that our firmware only has a limited number
+of firmware slot names, but we shouldn't leave this unchecked.
+
+Fixes: 45d76f492938 ("pds_core: set up device and adminq")
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Brett Creeley <brett.creeley@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250103195147.7408-1-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/pds_core/devlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index 2681889162a2..44971e71991f 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+       if (err && err != -EIO)
+               return err;
+-      listlen = fw_list.num_fw_slots;
++      listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
+       for (i = 0; i < listlen; i++) {
+               if (i < ARRAY_SIZE(fw_slotnames))
+                       strscpy(buf, fw_slotnames[i], sizeof(buf));
+-- 
+2.39.5
+
diff --git a/queue-6.12/rtase-fix-a-check-for-error-in-rtase_alloc_msix.patch b/queue-6.12/rtase-fix-a-check-for-error-in-rtase_alloc_msix.patch
new file mode 100644 (file)
index 0000000..0a049bd
--- /dev/null
@@ -0,0 +1,40 @@
+From 321156240136e86d559c3f5be53e1a21296a7cf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 12:15:53 +0300
+Subject: rtase: Fix a check for error in rtase_alloc_msix()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 2055272e3ae01a954e41a5afb437c5d76f758e0b ]
+
+The pci_irq_vector() function never returns zero.  It returns negative
+error codes or a positive non-zero IRQ number.  Fix the error checking to
+test for negatives.
+
+Fixes: a36e9f5cfe9e ("rtase: Add support for a pci table in this module")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/f2ecc88d-af13-4651-9820-7cc665230019@stanley.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/rtase/rtase_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+index 1bfe5ef40c52..14ffd45e9a25 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
++++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+@@ -1827,7 +1827,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
+       for (i = 0; i < tp->int_nums; i++) {
+               irq = pci_irq_vector(pdev, i);
+-              if (!irq) {
++              if (irq < 0) {
+                       pci_disable_msix(pdev);
+                       return irq;
+               }
+-- 
+2.39.5
+
diff --git a/queue-6.12/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-f.patch b/queue-6.12/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-f.patch
new file mode 100644 (file)
index 0000000..85ed41f
--- /dev/null
@@ -0,0 +1,290 @@
+From b176f96d70b38bb6f58c5234586966e475f2c3f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 13:01:05 +0100
+Subject: sched: sch_cake: add bounds checks to host bulk flow fairness counts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 737d4d91d35b5f7fa5bb442651472277318b0bfd ]
+
+Even though we fixed a logic error in the commit cited below, syzbot
+still managed to trigger an underflow of the per-host bulk flow
+counters, leading to an out of bounds memory access.
+
+To avoid any such logic errors causing out of bounds memory accesses,
+this commit factors out all accesses to the per-host bulk flow counters
+to a series of helpers that perform bounds-checking before any
+increments and decrements. This also has the benefit of improving
+readability by moving the conditional checks for the flow mode into
+these helpers, instead of having them spread out throughout the
+code (which was the cause of the original logic error).
+
+As part of this change, the flow quantum calculation is consolidated
+into a helper function, which means that the dithering applied to the
+host load scaling is now applied both in the DRR rotation and when a
+sparse flow's quantum is first initiated. The only user-visible effect
+of this is that the maximum packet size that can be sent while a flow
+stays sparse will now vary with +/- one byte in some cases. This should
+not make a noticeable difference in practice, and thus it's not worth
+complicating the code to preserve the old behaviour.
+
+Fixes: 546ea84d07e3 ("sched: sch_cake: fix bulk flow accounting logic for host fairness")
+Reported-by: syzbot+f63600d288bfb7057424@syzkaller.appspotmail.com
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Dave Taht <dave.taht@gmail.com>
+Link: https://patch.msgid.link/20250107120105.70685-1-toke@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 140 +++++++++++++++++++++++--------------------
+ 1 file changed, 75 insertions(+), 65 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 8d8b2db4653c..2c2e2a67f3b2 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
+       return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+ }
++static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_dsrc(flow_mode) &&
++                 q->hosts[flow->srchost].srchost_bulk_flow_count))
++              q->hosts[flow->srchost].srchost_bulk_flow_count--;
++}
++
++static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_dsrc(flow_mode) &&
++                 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
++              q->hosts[flow->srchost].srchost_bulk_flow_count++;
++}
++
++static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_ddst(flow_mode) &&
++                 q->hosts[flow->dsthost].dsthost_bulk_flow_count))
++              q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
++}
++
++static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_ddst(flow_mode) &&
++                 q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
++              q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
++}
++
++static u16 cake_get_flow_quantum(struct cake_tin_data *q,
++                               struct cake_flow *flow,
++                               int flow_mode)
++{
++      u16 host_load = 1;
++
++      if (cake_dsrc(flow_mode))
++              host_load = max(host_load,
++                              q->hosts[flow->srchost].srchost_bulk_flow_count);
++
++      if (cake_ddst(flow_mode))
++              host_load = max(host_load,
++                              q->hosts[flow->dsthost].dsthost_bulk_flow_count);
++
++      /* The get_random_u16() is a way to apply dithering to avoid
++       * accumulating roundoff errors
++       */
++      return (q->flow_quantum * quantum_div[host_load] +
++              get_random_u16()) >> 16;
++}
++
+ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+                    int flow_mode, u16 flow_override, u16 host_override)
+ {
+@@ -773,10 +830,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+               allocate_dst = cake_ddst(flow_mode);
+               if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+-                      if (allocate_src)
+-                              q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+-                      if (allocate_dst)
+-                              q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++                      cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
++                      cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+               }
+ found:
+               /* reserve queue for future packets in same flow */
+@@ -801,9 +856,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+                       q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+ found_src:
+                       srchost_idx = outer_hash + k;
+-                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-                              q->hosts[srchost_idx].srchost_bulk_flow_count++;
+                       q->flows[reduced_hash].srchost = srchost_idx;
++
++                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++                              cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+               }
+               if (allocate_dst) {
+@@ -824,9 +880,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+                       q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+ found_dst:
+                       dsthost_idx = outer_hash + k;
+-                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-                              q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
+                       q->flows[reduced_hash].dsthost = dsthost_idx;
++
++                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++                              cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+               }
+       }
+@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+       /* flowchain */
+       if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+-              struct cake_host *srchost = &b->hosts[flow->srchost];
+-              struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-              u16 host_load = 1;
+-
+               if (!flow->set) {
+                       list_add_tail(&flow->flowchain, &b->new_flows);
+               } else {
+@@ -1852,18 +1905,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               flow->set = CAKE_SET_SPARSE;
+               b->sparse_flow_count++;
+-              if (cake_dsrc(q->flow_mode))
+-                      host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-              if (cake_ddst(q->flow_mode))
+-                      host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-              flow->deficit = (b->flow_quantum *
+-                               quantum_div[host_load]) >> 16;
++              flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
+       } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+-              struct cake_host *srchost = &b->hosts[flow->srchost];
+-              struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-
+               /* this flow was empty, accounted as a sparse flow, but actually
+                * in the bulk rotation.
+                */
+@@ -1871,12 +1914,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               b->sparse_flow_count--;
+               b->bulk_flow_count++;
+-              if (cake_dsrc(q->flow_mode))
+-                      srchost->srchost_bulk_flow_count++;
+-
+-              if (cake_ddst(q->flow_mode))
+-                      dsthost->dsthost_bulk_flow_count++;
+-
++              cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++              cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+       }
+       if (q->buffer_used > q->buffer_max_used)
+@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ {
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct cake_tin_data *b = &q->tins[q->cur_tin];
+-      struct cake_host *srchost, *dsthost;
+       ktime_t now = ktime_get();
+       struct cake_flow *flow;
+       struct list_head *head;
+       bool first_flow = true;
+       struct sk_buff *skb;
+-      u16 host_load;
+       u64 delay;
+       u32 len;
+@@ -2039,11 +2076,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+       q->cur_flow = flow - b->flows;
+       first_flow = false;
+-      /* triple isolation (modified DRR++) */
+-      srchost = &b->hosts[flow->srchost];
+-      dsthost = &b->hosts[flow->dsthost];
+-      host_load = 1;
+-
+       /* flow isolation (DRR++) */
+       if (flow->deficit <= 0) {
+               /* Keep all flows with deficits out of the sparse and decaying
+@@ -2055,11 +2087,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+                               b->sparse_flow_count--;
+                               b->bulk_flow_count++;
+-                              if (cake_dsrc(q->flow_mode))
+-                                      srchost->srchost_bulk_flow_count++;
+-
+-                              if (cake_ddst(q->flow_mode))
+-                                      dsthost->dsthost_bulk_flow_count++;
++                              cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                              cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                               flow->set = CAKE_SET_BULK;
+                       } else {
+@@ -2071,19 +2100,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+                       }
+               }
+-              if (cake_dsrc(q->flow_mode))
+-                      host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-              if (cake_ddst(q->flow_mode))
+-                      host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-              WARN_ON(host_load > CAKE_QUEUES);
+-
+-              /* The get_random_u16() is a way to apply dithering to avoid
+-               * accumulating roundoff errors
+-               */
+-              flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+-                                get_random_u16()) >> 16;
++              flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
+               list_move_tail(&flow->flowchain, &b->old_flows);
+               goto retry;
+@@ -2107,11 +2124,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+                               if (flow->set == CAKE_SET_BULK) {
+                                       b->bulk_flow_count--;
+-                                      if (cake_dsrc(q->flow_mode))
+-                                              srchost->srchost_bulk_flow_count--;
+-
+-                                      if (cake_ddst(q->flow_mode))
+-                                              dsthost->dsthost_bulk_flow_count--;
++                                      cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                                      cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                                       b->decaying_flow_count++;
+                               } else if (flow->set == CAKE_SET_SPARSE ||
+@@ -2129,12 +2143,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+                               else if (flow->set == CAKE_SET_BULK) {
+                                       b->bulk_flow_count--;
+-                                      if (cake_dsrc(q->flow_mode))
+-                                              srchost->srchost_bulk_flow_count--;
+-
+-                                      if (cake_ddst(q->flow_mode))
+-                                              dsthost->dsthost_bulk_flow_count--;
+-
++                                      cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                                      cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                               } else
+                                       b->decaying_flow_count--;
+-- 
+2.39.5
+
index e21373cc10bb3a2c017ecdcf83fba315e48a30b4..b552fb322c611698e4f0693b6e4d9ce895fb57f1 100644 (file)
@@ -21,3 +21,39 @@ ovl-support-encoding-fid-from-inode-with-no-alias.patch
 asoc-rt722-add-delay-time-to-wait-for-the-calibratio.patch
 asoc-mediatek-disable-buffer-pre-allocation.patch
 selftests-alsa-fix-circular-dependency-involving-glo.patch
+ieee802154-ca8210-add-missing-check-for-kfifo_alloc-.patch
+net-802-llc-snap-oid-pid-lookup-on-start-of-skb-data.patch
+tcp-dccp-allow-a-connection-when-sk_max_ack_backlog-.patch
+net_sched-cls_flow-validate-tca_flow_rshift-attribut.patch
+net-libwx-fix-firmware-mailbox-abnormal-return.patch
+btrfs-avoid-null-pointer-dereference-if-no-valid-ext.patch
+pds_core-limit-loop-over-fw-name-list.patch
+bnxt_en-fix-possible-memory-leak-when-hwrm_req_repla.patch
+bnxt_en-fix-dim-shutdown.patch
+cxgb4-avoid-removal-of-uninserted-tid.patch
+net-don-t-dump-tx-and-uninitialized-napis.patch
+ice-fix-max-values-for-dpll-pin-phase-adjust.patch
+ice-fix-incorrect-phy-settings-for-100-gb-s.patch
+igc-return-early-when-failing-to-read-eecd-register.patch
+tls-fix-tls_sw_sendmsg-error-handling.patch
+ipvlan-fix-use-after-free-in-ipvlan_get_iflink.patch
+eth-gve-use-appropriate-helper-to-set-xdp_features.patch
+bluetooth-hci_sync-fix-not-setting-random-address-wh.patch
+bluetooth-mgmt-fix-add-device-to-responding-before-c.patch
+bluetooth-btnxpuart-fix-driver-sending-truncated-dat.patch
+bluetooth-btmtk-fix-failed-to-send-func-ctrl-for-med.patch
+tcp-annotate-data-race-around-sk-sk_mark-in-tcp_v4_s.patch
+net-hns3-fixed-reset-failure-issues-caused-by-the-in.patch
+net-hns3-fix-missing-features-due-to-dev-features-co.patch
+net-hns3-resolved-the-issue-that-the-debugfs-query-r.patch
+net-hns3-don-t-auto-enable-misc-vector.patch
+net-hns3-initialize-reset_timer-before-hclgevf_misc_.patch
+net-hns3-fixed-hclge_fetch_pf_reg-accesses-bar-space.patch
+net-hns3-fix-kernel-crash-when-1588-is-sent-on-hip08.patch
+mctp-i3c-fix-mctp-i3c-driver-multi-thread-issue.patch
+netfilter-nf_tables-imbalance-in-flowtable-binding.patch
+netfilter-conntrack-clamp-maximum-hashtable-size-to-.patch
+sched-sch_cake-add-bounds-checks-to-host-bulk-flow-f.patch
+net-stmmac-dwmac-tegra-read-iommu-stream-id-from-dev.patch
+rtase-fix-a-check-for-error-in-rtase_alloc_msix.patch
+net-mlx5-fix-variable-not-being-completed-when-funct.patch
diff --git a/queue-6.12/tcp-annotate-data-race-around-sk-sk_mark-in-tcp_v4_s.patch b/queue-6.12/tcp-annotate-data-race-around-sk-sk_mark-in-tcp_v4_s.patch
new file mode 100644 (file)
index 0000000..cdc5a4a
--- /dev/null
@@ -0,0 +1,40 @@
+From f40f1a5c0699e94b1e711981f0d8ed7d40248817 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 11:14:39 +0100
+Subject: tcp: Annotate data-race around sk->sk_mark in tcp_v4_send_reset
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 80fb40baba19e25a1b6f3ecff6fc5c0171806bde ]
+
+This is a follow-up to 3c5b4d69c358 ("net: annotate data-races around
+sk->sk_mark"). sk->sk_mark can be read and written without holding
+the socket lock. IPv6 equivalent is already covered with READ_ONCE()
+annotation in tcp_v6_send_response().
+
+Fixes: 3c5b4d69c358 ("net: annotate data-races around sk->sk_mark")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/f459d1fc44f205e13f6d8bdca2c8bfb9902ffac9.1736244569.git.daniel@iogearbox.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_ipv4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index a7cd433a54c9..bcc2f1e090c7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -896,7 +896,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
+       sock_net_set(ctl_sk, net);
+       if (sk) {
+               ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+-                                 inet_twsk(sk)->tw_mark : sk->sk_mark;
++                                 inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
+               ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+                                  inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
+               transmit_time = tcp_transmit_time(sk);
+-- 
+2.39.5
+
diff --git a/queue-6.12/tcp-dccp-allow-a-connection-when-sk_max_ack_backlog-.patch b/queue-6.12/tcp-dccp-allow-a-connection-when-sk_max_ack_backlog-.patch
new file mode 100644 (file)
index 0000000..2e31a5c
--- /dev/null
@@ -0,0 +1,47 @@
+From 588b8aa2079a2114ea34601a969bf65924add2f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jan 2025 17:14:26 +0000
+Subject: tcp/dccp: allow a connection when sk_max_ack_backlog is zero
+
+From: Zhongqiu Duan <dzq.aishenghu0@gmail.com>
+
+[ Upstream commit 3479c7549fb1dfa7a1db4efb7347c7b8ef50de4b ]
+
+If the backlog of listen() is set to zero, sk_acceptq_is_full() allows
+one connection to be made, but inet_csk_reqsk_queue_is_full() does not.
+When the net.ipv4.tcp_syncookies is zero, inet_csk_reqsk_queue_is_full()
+will cause an immediate drop before the sk_acceptq_is_full() check in
+tcp_conn_request(), resulting in no connection can be made.
+
+This patch tries to keep consistent with 64a146513f8f ("[NET]: Revert
+incorrect accept queue backlog changes.").
+
+Link: https://lore.kernel.org/netdev/20250102080258.53858-1-kuniyu@amazon.com/
+Fixes: ef547f2ac16b ("tcp: remove max_qlen_log")
+Signed-off-by: Zhongqiu Duan <dzq.aishenghu0@gmail.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250102171426.915276-1-dzq.aishenghu0@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/inet_connection_sock.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index c0deaafebfdc..4bd93571e6c1 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -281,7 +281,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
+ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+ {
+-      return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
++      return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
+ }
+ bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+-- 
+2.39.5
+
diff --git a/queue-6.12/tls-fix-tls_sw_sendmsg-error-handling.patch b/queue-6.12/tls-fix-tls_sw_sendmsg-error-handling.patch
new file mode 100644 (file)
index 0000000..21e19d7
--- /dev/null
@@ -0,0 +1,46 @@
+From 1835e4256bc93dc5ebf084f0b4e07dd0b6dc0acc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Jan 2025 10:29:45 -0500
+Subject: tls: Fix tls_sw_sendmsg error handling
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit b341ca51d2679829d26a3f6a4aa9aee9abd94f92 ]
+
+We've noticed that NFS can hang when using RPC over TLS on an unstable
+connection, and investigation shows that the RPC layer is stuck in a tight
+loop attempting to transmit, but forever getting -EBADMSG back from the
+underlying network.  The loop begins when tcp_sendmsg_locked() returns
+-EPIPE to tls_tx_records(), but that error is converted to -EBADMSG when
+calling the socket's error reporting handler.
+
+Instead of converting errors from tcp_sendmsg_locked(), let's pass them
+along in this path.  The RPC layer handles -EPIPE by reconnecting the
+transport, which prevents the endless attempts to transmit on a broken
+connection.
+
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Link: https://patch.msgid.link/9594185559881679d81f071b181a10eb07cd079f.1736004079.git.bcodding@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index bbf26cc4f6ee..7bcc9b4408a2 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -458,7 +458,7 @@ int tls_tx_records(struct sock *sk, int flags)
+ tx_err:
+       if (rc < 0 && rc != -EAGAIN)
+-              tls_err_abort(sk, -EBADMSG);
++              tls_err_abort(sk, rc);
+       return rc;
+ }
+-- 
+2.39.5
+