git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.1
author: Sasha Levin <sashal@kernel.org>
Sun, 11 Jun 2023 02:02:38 +0000 (22:02 -0400)
committer: Sasha Levin <sashal@kernel.org>
Sun, 11 Jun 2023 02:02:38 +0000 (22:02 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
56 files changed:
queue-6.1/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch [new file with mode: 0644]
queue-6.1/bluetooth-fix-l2cap_disconnect_req-deadlock.patch [new file with mode: 0644]
queue-6.1/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch [new file with mode: 0644]
queue-6.1/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch [new file with mode: 0644]
queue-6.1/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch [new file with mode: 0644]
queue-6.1/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch [new file with mode: 0644]
queue-6.1/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch [new file with mode: 0644]
queue-6.1/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch [new file with mode: 0644]
queue-6.1/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch [new file with mode: 0644]
queue-6.1/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch [new file with mode: 0644]
queue-6.1/bpf-add-extra-path-pointer-check-to-d_path-helper.patch [new file with mode: 0644]
queue-6.1/bpf-fix-elem_size-not-being-set-for-inner-maps.patch [new file with mode: 0644]
queue-6.1/bpf-fix-uaf-in-task-local-storage.patch [new file with mode: 0644]
queue-6.1/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch [new file with mode: 0644]
queue-6.1/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch [new file with mode: 0644]
queue-6.1/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch [new file with mode: 0644]
queue-6.1/drm-i915-selftests-add-some-missing-error-propagatio.patch [new file with mode: 0644]
queue-6.1/drm-i915-selftests-stop-using-kthread_stop.patch [new file with mode: 0644]
queue-6.1/drm-i915-use-18-fast-wake-aux-sync-len.patch [new file with mode: 0644]
queue-6.1/ipv6-rpl-fix-route-of-death.patch [new file with mode: 0644]
queue-6.1/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch [new file with mode: 0644]
queue-6.1/neighbour-fix-unaligned-access-to-pneigh_entry.patch [new file with mode: 0644]
queue-6.1/net-bcmgenet-fix-eee-implementation.patch [new file with mode: 0644]
queue-6.1/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch [new file with mode: 0644]
queue-6.1/net-enetc-correct-rx_bytes-statistics-of-xdp.patch [new file with mode: 0644]
queue-6.1/net-enetc-correct-the-statistics-of-rx-bytes.patch [new file with mode: 0644]
queue-6.1/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch [new file with mode: 0644]
queue-6.1/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch [new file with mode: 0644]
queue-6.1/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch [new file with mode: 0644]
queue-6.1/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch [new file with mode: 0644]
queue-6.1/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch [new file with mode: 0644]
queue-6.1/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch [new file with mode: 0644]
queue-6.1/net-sched-move-rtm_tca_policy-declaration-to-include.patch [new file with mode: 0644]
queue-6.1/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch [new file with mode: 0644]
queue-6.1/netfilter-conntrack-fix-null-pointer-dereference-in-.patch [new file with mode: 0644]
queue-6.1/netfilter-ipset-add-schedule-point-in-call_ad.patch [new file with mode: 0644]
queue-6.1/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_bitwise-fix-register-tracking.patch [new file with mode: 0644]
queue-6.1/platform-surface-aggregator-allow-completion-work-it.patch [new file with mode: 0644]
queue-6.1/platform-surface-aggregator_tabletsw-add-support-for.patch [new file with mode: 0644]
queue-6.1/qed-qede-fix-scheduling-while-atomic.patch [new file with mode: 0644]
queue-6.1/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch [new file with mode: 0644]
queue-6.1/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch [new file with mode: 0644]
queue-6.1/selftests-bpf-fix-sockopt_sk-selftest.patch [new file with mode: 0644]
queue-6.1/selftests-bpf-verify-optval-null-case.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/spi-mt65xx-make-sure-operations-completed-before-unl.patch [new file with mode: 0644]
queue-6.1/spi-qup-request-dma-before-enabling-clocks.patch [new file with mode: 0644]
queue-6.1/tcp-gso-really-support-big-tcp.patch [new file with mode: 0644]
queue-6.1/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch [new file with mode: 0644]
queue-6.1/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch [new file with mode: 0644]
queue-6.1/wifi-cfg80211-reject-bad-ap-mld-address.patch [new file with mode: 0644]
queue-6.1/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch [new file with mode: 0644]
queue-6.1/wifi-mac80211-mlme-fix-non-inheritence-element.patch [new file with mode: 0644]
queue-6.1/wifi-mac80211-use-correct-iftype-he-cap.patch [new file with mode: 0644]
queue-6.1/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch [new file with mode: 0644]

diff --git a/queue-6.1/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch b/queue-6.1/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch
new file mode 100644 (file)
index 0000000..e74a10f
--- /dev/null
@@ -0,0 +1,61 @@
+From f458e45a067d8f6551151fe876f24fad1ecfcb4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 09:47:13 +0100
+Subject: afs: Fix setting of mtime when creating a file/dir/symlink
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a27648c742104a833a01c54becc24429898d85bf ]
+
+kafs incorrectly passes a zero mtime (ie. 1st Jan 1970) to the server when
+creating a file, dir or symlink because the mtime recorded in the
+afs_operation struct gets passed to the server by the marshalling routines,
+but the afs_mkdir(), afs_create() and afs_symlink() functions don't set it.
+
+This gets masked if a file or directory is subsequently modified.
+
+Fix this by filling in op->mtime before calling the create op.
+
+Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/dir.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index f73b2f62afaae..07dc4ec73520c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1357,6 +1357,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+       op->dentry      = dentry;
+       op->create.mode = S_IFDIR | mode;
+       op->create.reason = afs_edit_dir_for_mkdir;
++      op->mtime       = current_time(dir);
+       op->ops         = &afs_mkdir_operation;
+       return afs_do_sync_operation(op);
+ }
+@@ -1660,6 +1661,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+       op->dentry      = dentry;
+       op->create.mode = S_IFREG | mode;
+       op->create.reason = afs_edit_dir_for_create;
++      op->mtime       = current_time(dir);
+       op->ops         = &afs_create_operation;
+       return afs_do_sync_operation(op);
+@@ -1795,6 +1797,7 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+       op->ops                 = &afs_symlink_operation;
+       op->create.reason       = afs_edit_dir_for_symlink;
+       op->create.symlink      = content;
++      op->mtime               = current_time(dir);
+       return afs_do_sync_operation(op);
+ error:
+-- 
+2.39.2
+
diff --git a/queue-6.1/bluetooth-fix-l2cap_disconnect_req-deadlock.patch b/queue-6.1/bluetooth-fix-l2cap_disconnect_req-deadlock.patch
new file mode 100644 (file)
index 0000000..dd68220
--- /dev/null
@@ -0,0 +1,61 @@
+From 11e7b837dca6709d37e78ed7bdcf43446ca6acb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 03:44:56 +0000
+Subject: Bluetooth: Fix l2cap_disconnect_req deadlock
+
+From: Ying Hsu <yinghsu@chromium.org>
+
+[ Upstream commit 02c5ea5246a44d6ffde0fddebfc1d56188052976 ]
+
+L2CAP assumes that the locks conn->chan_lock and chan->lock are
+acquired in the order conn->chan_lock, chan->lock to avoid
+potential deadlock.
+For example, l2sock_shutdown acquires these locks in the order:
+  mutex_lock(&conn->chan_lock)
+  l2cap_chan_lock(chan)
+
+However, l2cap_disconnect_req acquires chan->lock in
+l2cap_get_chan_by_scid first and then acquires conn->chan_lock
+before calling l2cap_chan_del. This means that these locks are
+acquired in unexpected order, which leads to potential deadlock:
+  l2cap_chan_lock(c)
+  mutex_lock(&conn->chan_lock)
+
+This patch releases chan->lock before acquiring conn->chan_lock
+to avoid the potential deadlock.
+
+Fixes: a2a9339e1c9d ("Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}")
+Signed-off-by: Ying Hsu <yinghsu@chromium.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index ee8f806534dfb..1287de387bc53 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4664,7 +4664,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+       chan->ops->set_shutdown(chan);
++      l2cap_chan_unlock(chan);
+       mutex_lock(&conn->chan_lock);
++      l2cap_chan_lock(chan);
+       l2cap_chan_del(chan, ECONNRESET);
+       mutex_unlock(&conn->chan_lock);
+@@ -4703,7 +4705,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+               return 0;
+       }
++      l2cap_chan_unlock(chan);
+       mutex_lock(&conn->chan_lock);
++      l2cap_chan_lock(chan);
+       l2cap_chan_del(chan, 0);
+       mutex_unlock(&conn->chan_lock);
+-- 
+2.39.2
+
diff --git a/queue-6.1/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch b/queue-6.1/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch
new file mode 100644 (file)
index 0000000..958a9a0
--- /dev/null
@@ -0,0 +1,100 @@
+From 4752c0cb4e72eeeb45c97e6385e85c876e65dd70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 17:11:58 -0700
+Subject: Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER
+
+From: Zhengping Jiang <jiangzp@google.com>
+
+[ Upstream commit 1857c19941c87eb36ad47f22a406be5dfe5eff9f ]
+
+When the HCI_UNREGISTER flag is set, no jobs should be scheduled. Fix
+potential race when HCI_UNREGISTER is set after the flag is tested in
+hci_cmd_sync_queue.
+
+Fixes: 0b94f2651f56 ("Bluetooth: hci_sync: Fix queuing commands when HCI_UNREGISTER is set")
+Signed-off-by: Zhengping Jiang <jiangzp@google.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci_core.h |  1 +
+ net/bluetooth/hci_core.c         |  2 ++
+ net/bluetooth/hci_sync.c         | 20 ++++++++++++++------
+ 3 files changed, 17 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 061fec6fd0152..84c5ce57eab69 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -513,6 +513,7 @@ struct hci_dev {
+       struct work_struct      cmd_sync_work;
+       struct list_head        cmd_sync_work_list;
+       struct mutex            cmd_sync_work_lock;
++      struct mutex            unregister_lock;
+       struct work_struct      cmd_sync_cancel_work;
+       struct work_struct      reenable_adv_work;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 334e308451f53..ac36e7ae70b21 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2685,7 +2685,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ {
+       BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
++      mutex_lock(&hdev->unregister_lock);
+       hci_dev_set_flag(hdev, HCI_UNREGISTER);
++      mutex_unlock(&hdev->unregister_lock);
+       write_lock(&hci_dev_list_lock);
+       list_del(&hdev->list);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index e8b78104a4071..40a6cfa2f9a07 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
+       INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+       INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+       mutex_init(&hdev->cmd_sync_work_lock);
++      mutex_init(&hdev->unregister_lock);
+       INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
+       INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
+@@ -688,14 +689,19 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+                      void *data, hci_cmd_sync_work_destroy_t destroy)
+ {
+       struct hci_cmd_sync_work_entry *entry;
++      int err = 0;
+-      if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+-              return -ENODEV;
++      mutex_lock(&hdev->unregister_lock);
++      if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
++              err = -ENODEV;
++              goto unlock;
++      }
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+-      if (!entry)
+-              return -ENOMEM;
+-
++      if (!entry) {
++              err = -ENOMEM;
++              goto unlock;
++      }
+       entry->func = func;
+       entry->data = data;
+       entry->destroy = destroy;
+@@ -706,7 +712,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+       queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+-      return 0;
++unlock:
++      mutex_unlock(&hdev->unregister_lock);
++      return err;
+ }
+ EXPORT_SYMBOL(hci_cmd_sync_queue);
+-- 
+2.39.2
+
diff --git a/queue-6.1/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch b/queue-6.1/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch
new file mode 100644 (file)
index 0000000..425169f
--- /dev/null
@@ -0,0 +1,37 @@
+From 69d8a8dc439a8b17f1b8bd43ab6398f9745db343 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 09:34:44 +0300
+Subject: Bluetooth: ISO: don't try to remove CIG if there are bound CIS left
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 6c242c64a09e78349fb0a5f0a6f8076a3d7c0bb4 ]
+
+Consider existing BOUND & CONNECT state CIS to block CIG removal.
+Otherwise, under suitable timing conditions we may attempt to remove CIG
+while Create CIS is pending, which fails.
+
+Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c2c6dea01cc91..ab9f00252dc2a 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -966,6 +966,8 @@ static void cis_cleanup(struct hci_conn *conn)
+       /* Check if ISO connection is a CIS and remove CIG if there are
+        * no other connections using it.
+        */
++      hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
++      hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
+       hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
+       if (d.count)
+               return;
+-- 
+2.39.2
+
diff --git a/queue-6.1/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch b/queue-6.1/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch
new file mode 100644 (file)
index 0000000..013e493
--- /dev/null
@@ -0,0 +1,53 @@
+From 5e7e1f628d8c6aaf12fb7d787faf0bfa1f5aa550 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Jun 2023 08:28:09 -0400
+Subject: Bluetooth: L2CAP: Add missing checks for invalid DCID
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit 75767213f3d9b97f63694d02260b6a49a2271876 ]
+
+When receiving a connect response we should make sure that the DCID is
+within the valid range and that we don't already have another channel
+allocated for the same DCID.
+Missing checks may violate the specification (BLUETOOTH CORE SPECIFICATION
+Version 5.4 | Vol 3, Part A, Page 1046).
+
+Fixes: 40624183c202 ("Bluetooth: L2CAP: Add missing checks for invalid LE DCID")
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 1287de387bc53..02fc9961464cf 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4307,6 +4307,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+       result = __le16_to_cpu(rsp->result);
+       status = __le16_to_cpu(rsp->status);
++      if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
++                                         dcid > L2CAP_CID_DYN_END))
++              return -EPROTO;
++
+       BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+              dcid, scid, result, status);
+@@ -4338,6 +4342,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+       switch (result) {
+       case L2CAP_CR_SUCCESS:
++              if (__l2cap_get_chan_by_dcid(conn, dcid)) {
++                      err = -EBADSLT;
++                      break;
++              }
++
+               l2cap_state_change(chan, BT_CONFIG);
+               chan->ident = 0;
+               chan->dcid = dcid;
+-- 
+2.39.2
+
diff --git a/queue-6.1/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch b/queue-6.1/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch
new file mode 100644 (file)
index 0000000..167ee83
--- /dev/null
@@ -0,0 +1,43 @@
+From 07671f00697a0a868b6a5f799bc1b32746655813 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:05 -0700
+Subject: bnxt_en: Don't issue AP reset during ethtool's reset operation
+
+From: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+
+[ Upstream commit 1d997801c7cc6a7f542e46d5a6bf16f893ad3fe9 ]
+
+Only older NIC controller's firmware uses the PROC AP reset type.
+Firmware on 5731X/5741X and newer chips does not support this reset
+type.  When bnxt_reset() issues a series of resets, this PROC AP
+reset may actually fail on these newer chips because the firmware
+is not ready to accept this unsupported command yet.  Avoid this
+unnecessary error by skipping this reset type on chips that don't
+support it.
+
+Fixes: 7a13240e3718 ("bnxt_en: fix ethtool_reset_flags ABI violations")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index b2d531e014c57..89f046ce1373c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -3727,7 +3727,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
+               }
+       }
+-      if (req & BNXT_FW_RESET_AP) {
++      if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
+               /* This feature is not supported in older firmware versions */
+               if (bp->hwrm_spec_code >= 0x10803) {
+                       if (!bnxt_firmware_reset_ap(dev)) {
+-- 
+2.39.2
+
diff --git a/queue-6.1/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch b/queue-6.1/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch
new file mode 100644 (file)
index 0000000..3e61e1b
--- /dev/null
@@ -0,0 +1,81 @@
+From 745b3114894f0d35542eb982728fa967328fe6f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:09 -0700
+Subject: bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks
+
+From: Somnath Kotur <somnath.kotur@broadcom.com>
+
+[ Upstream commit 1eb4ef12591348c440ac9d6efcf7521e73cf2b10 ]
+
+As per the new udp tunnel framework, drivers which need to know the
+details of a port entry (i.e. port type) when it gets deleted should
+use the .set_port / .unset_port callbacks.
+
+Implementing the current .udp_tunnel_sync callback would mean that the
+deleted tunnel port entry would be all zeros.  This used to work on
+older firmware because it would not check the input when deleting a
+tunnel port.  With newer firmware, the delete will now fail and
+subsequent tunnel port allocation will fail as a result.
+
+Fixes: 442a35a5a7aa ("bnxt: convert to new udp_tunnel_nic infra")
+Reviewed-by: Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 37a31f684938a..6469fb8a42a89 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -13008,26 +13008,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+ #endif /* CONFIG_RFS_ACCEL */
+-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
++static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
++                                  unsigned int entry, struct udp_tunnel_info *ti)
+ {
+       struct bnxt *bp = netdev_priv(netdev);
+-      struct udp_tunnel_info ti;
+       unsigned int cmd;
+-      udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+-      if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
++      if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+       else
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+-      if (ti.port)
+-              return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
++      return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
++}
++
++static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
++                                    unsigned int entry, struct udp_tunnel_info *ti)
++{
++      struct bnxt *bp = netdev_priv(netdev);
++      unsigned int cmd;
++
++      if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
++              cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
++      else
++              cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+       return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
+ }
+ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+-      .sync_table     = bnxt_udp_tunnel_sync,
++      .set_port       = bnxt_udp_tunnel_set_port,
++      .unset_port     = bnxt_udp_tunnel_unset_port,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+-- 
+2.39.2
+
diff --git a/queue-6.1/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch b/queue-6.1/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch
new file mode 100644 (file)
index 0000000..fb117d5
--- /dev/null
@@ -0,0 +1,67 @@
+From 87ef8f88191219a61facdb919fda8b171f3abb5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:08 -0700
+Subject: bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE
+ event
+
+From: Pavan Chebbi <pavan.chebbi@broadcom.com>
+
+[ Upstream commit 319a7827df9784048abe072afe6b4fb4501d8de4 ]
+
+The firmware can send PHC_RTC_UPDATE async event on a PF that may not
+have PTP registered. In such a case, there will be a null pointer
+deference for bp->ptp_cfg when we try to handle the event.
+
+Fix it by not registering for this event with the firmware if !bp->ptp_cfg.
+Also, check that bp->ptp_cfg is valid before proceeding when we receive
+the event.
+
+Fixes: 8bcf6f04d4a5 ("bnxt_en: Handle async event when the PHC is updated in RTC mode")
+Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 6 ++++++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 1 +
+ 2 files changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f7b2c4e94e898..37a31f684938a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2389,6 +2389,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
+                               struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+                               u64 ns;
++                              if (!ptp)
++                                      goto async_event_process_exit;
++
+                               spin_lock_bh(&ptp->ptp_lock);
+                               bnxt_ptp_update_current_time(bp);
+                               ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+@@ -4787,6 +4790,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+               if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+                   !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+                       continue;
++              if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
++                  !bp->ptp_cfg)
++                      continue;
+               __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+       }
+       if (bmap && bmap_size) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 2132ce63193ce..4faaa9a50f4bc 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -929,6 +929,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+       } else {
+               bnxt_ptp_timecounter_init(bp, true);
+       }
++      bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
+       ptp->ptp_info = bnxt_ptp_caps;
+       if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+-- 
+2.39.2
+
diff --git a/queue-6.1/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch b/queue-6.1/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch
new file mode 100644 (file)
index 0000000..77c2e9d
--- /dev/null
@@ -0,0 +1,51 @@
+From 38f31ab3f54d30eab55b03e40bd97340f02cc9db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:06 -0700
+Subject: bnxt_en: Query default VLAN before VNIC setup on a VF
+
+From: Somnath Kotur <somnath.kotur@broadcom.com>
+
+[ Upstream commit 1a9e4f501bc6ff1b6ecb60df54fbf2b54db43bfe ]
+
+We need to call bnxt_hwrm_func_qcfg() on a VF to query the default
+VLAN that may be setup by the PF.  If a default VLAN is enabled,
+the VF cannot support VLAN acceleration on the receive side and
+the VNIC must be setup to strip out the default VLAN tag.  If a
+default VLAN is not enabled, the VF can support VLAN acceleration
+on the receive side.  The VNIC should be set up to strip or not
+strip the VLAN based on the RX VLAN acceleration setting.
+
+Without this call to determine the default VLAN before calling
+bnxt_setup_vnic(), the VNIC may not be set up correctly.  For
+example, bnxt_setup_vnic() may set up to strip the VLAN tag based
+on stale default VLAN information.  If RX VLAN acceleration is
+not enabled, the VLAN tag will be incorrectly stripped and the
+RX data path will not work correctly.
+
+Fixes: cf6645f8ebc6 ("bnxt_en: Add function for VF driver to query default VLAN.")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e3e5a427222f6..93c3b8316c46a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8808,6 +8808,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+               goto err_out;
+       }
++      if (BNXT_VF(bp))
++              bnxt_hwrm_func_qcfg(bp);
++
+       rc = bnxt_setup_vnic(bp, 0);
+       if (rc)
+               goto err_out;
+-- 
+2.39.2
+
diff --git a/queue-6.1/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch b/queue-6.1/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch
new file mode 100644 (file)
index 0000000..bd78b99
--- /dev/null
@@ -0,0 +1,60 @@
+From d7fecdbca7813c1eb6c6dd195b3ebc8f920b96c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:07 -0700
+Subject: bnxt_en: Skip firmware fatal error recovery if chip is not accessible
+
+From: Vikas Gupta <vikas.gupta@broadcom.com>
+
+[ Upstream commit 83474a9b252ab23e6003865c2775024344cb9c09 ]
+
+Driver starts firmware fatal error recovery by detecting
+heartbeat failure or fw reset count register changing.  But
+these checks are not reliable if the device is not accessible.
+This can happen while DPC (Downstream Port containment) is in
+progress.  Skip firmware fatal recovery if pci_device_is_present()
+returns false.
+
+Fixes: acfb50e4e773 ("bnxt_en: Add FW fatal devlink_health_reporter.")
+Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 93c3b8316c46a..f7b2c4e94e898 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11576,6 +11576,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ static void bnxt_fw_health_check(struct bnxt *bp)
+ {
+       struct bnxt_fw_health *fw_health = bp->fw_health;
++      struct pci_dev *pdev = bp->pdev;
+       u32 val;
+       if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+@@ -11589,7 +11590,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+       }
+       val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+-      if (val == fw_health->last_fw_heartbeat) {
++      if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
+               fw_health->arrests++;
+               goto fw_reset;
+       }
+@@ -11597,7 +11598,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+       fw_health->last_fw_heartbeat = val;
+       val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+-      if (val != fw_health->last_fw_reset_cnt) {
++      if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
+               fw_health->discoveries++;
+               goto fw_reset;
+       }
+-- 
+2.39.2
+
diff --git a/queue-6.1/bpf-add-extra-path-pointer-check-to-d_path-helper.patch b/queue-6.1/bpf-add-extra-path-pointer-check-to-d_path-helper.patch
new file mode 100644 (file)
index 0000000..4c8871b
--- /dev/null
@@ -0,0 +1,98 @@
+From 0789b8f9eaf757986679f414b08ed840ac78c8c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:17:14 -0700
+Subject: bpf: Add extra path pointer check to d_path helper
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit f46fab0e36e611a2389d3843f34658c849b6bd60 ]
+
+Anastasios reported crash on stable 5.15 kernel with following
+BPF attached to lsm hook:
+
+  SEC("lsm.s/bprm_creds_for_exec")
+  int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
+  {
+          struct path *path = &bprm->executable->f_path;
+          char p[128] = { 0 };
+
+          bpf_d_path(path, p, 128);
+          return 0;
+  }
+
+But bprm->executable can be NULL, so bpf_d_path call will crash:
+
+  BUG: kernel NULL pointer dereference, address: 0000000000000018
+  #PF: supervisor read access in kernel mode
+  #PF: error_code(0x0000) - not-present page
+  PGD 0 P4D 0
+  Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC NOPTI
+  ...
+  RIP: 0010:d_path+0x22/0x280
+  ...
+  Call Trace:
+   <TASK>
+   bpf_d_path+0x21/0x60
+   bpf_prog_db9cf176e84498d9_bprm_creds_for_exec+0x94/0x99
+   bpf_trampoline_6442506293_0+0x55/0x1000
+   bpf_lsm_bprm_creds_for_exec+0x5/0x10
+   security_bprm_creds_for_exec+0x29/0x40
+   bprm_execve+0x1c1/0x900
+   do_execveat_common.isra.0+0x1af/0x260
+   __x64_sys_execve+0x32/0x40
+
+It's problem for all stable trees with bpf_d_path helper, which was
+added in 5.9.
+
+This issue is fixed in current bpf code, where we identify and mark
+trusted pointers, so the above code would fail even to load.
+
+For the sake of the stable trees and to workaround potentially broken
+verifier in the future, adding the code that reads the path object from
+the passed pointer and verifies it's valid in kernel space.
+
+Fixes: 6e22ab9da793 ("bpf: Add d_path helper")
+Reported-by: Anastasios Papagiannis <tasos.papagiannnis@gmail.com>
+Suggested-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Stanislav Fomichev <sdf@google.com>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20230606181714.532998-1-jolsa@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/bpf_trace.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9d4163abadf4e..1642548892a8e 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -903,13 +903,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
+ {
++      struct path copy;
+       long len;
+       char *p;
+       if (!sz)
+               return 0;
+-      p = d_path(path, buf, sz);
++      /*
++       * The path pointer is verified as trusted and safe to use,
++       * but let's double check it's valid anyway to workaround
++       * potentially broken verifier.
++       */
++      len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
++      if (len < 0)
++              return len;
++
++      p = d_path(&copy, buf, sz);
+       if (IS_ERR(p)) {
+               len = PTR_ERR(p);
+       } else {
+-- 
+2.39.2
+
diff --git a/queue-6.1/bpf-fix-elem_size-not-being-set-for-inner-maps.patch b/queue-6.1/bpf-fix-elem_size-not-being-set-for-inner-maps.patch
new file mode 100644 (file)
index 0000000..2a72e68
--- /dev/null
@@ -0,0 +1,56 @@
+From 6811d6288f67588d44a8605de1b665edf41feffa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 19:02:02 +0000
+Subject: bpf: Fix elem_size not being set for inner maps
+
+From: Rhys Rustad-Elliott <me@rhysre.net>
+
+[ Upstream commit cba41bb78d70aad98d8e61e019fd48c561f7f396 ]
+
+Commit d937bc3449fa ("bpf: make uniform use of array->elem_size
+everywhere in arraymap.c") changed array_map_gen_lookup to use
+array->elem_size instead of round_up(map->value_size, 8) as the element
+size when generating code to access a value in an array map.
+
+array->elem_size, however, is not set by bpf_map_meta_alloc when
+initializing an BPF_MAP_TYPE_ARRAY_OF_MAPS or BPF_MAP_TYPE_HASH_OF_MAPS.
+This results in array_map_gen_lookup incorrectly outputting code that
+always accesses index 0 in the array (as the index will be calculated
+via a multiplication with the element size, which is incorrectly set to
+0).
+
+Set elem_size on the bpf_array object when allocating an array or hash
+of maps to fix this.
+
+Fixes: d937bc3449fa ("bpf: make uniform use of array->elem_size everywhere in arraymap.c")
+Signed-off-by: Rhys Rustad-Elliott <me@rhysre.net>
+Link: https://lore.kernel.org/r/20230602190110.47068-2-me@rhysre.net
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/map_in_map.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index 135205d0d5607..8e87f69aae60d 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -61,9 +61,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
+       /* Misc members not needed in bpf_map_meta_equal() check. */
+       inner_map_meta->ops = inner_map->ops;
+       if (inner_map->ops == &array_map_ops) {
++              struct bpf_array *inner_array_meta =
++                      container_of(inner_map_meta, struct bpf_array, map);
++              struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
++
++              inner_array_meta->index_mask = inner_array->index_mask;
++              inner_array_meta->elem_size = inner_array->elem_size;
+               inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
+-              container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+-                   container_of(inner_map, struct bpf_array, map)->index_mask;
+       }
+       fdput(f);
+-- 
+2.39.2
+
diff --git a/queue-6.1/bpf-fix-uaf-in-task-local-storage.patch b/queue-6.1/bpf-fix-uaf-in-task-local-storage.patch
new file mode 100644 (file)
index 0000000..7b6f0a2
--- /dev/null
@@ -0,0 +1,56 @@
+From c6bb829ea714a7a9ce96dc2e1c2b23331b1081f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 02:26:12 +0200
+Subject: bpf: Fix UAF in task local storage
+
+From: KP Singh <kpsingh@kernel.org>
+
+[ Upstream commit b0fd1852bcc21accca6260ef245356d5c141ff66 ]
+
+When task local storage was generalized for tracing programs, the
+bpf_task_local_storage callback was moved from a BPF LSM hook
+callback for security_task_free LSM hook to it's own callback. But a
+failure case in bad_fork_cleanup_security was missed which, when
+triggered, led to a dangling task owner pointer and a subsequent
+use-after-free. Move the bpf_task_storage_free to the very end of
+free_task to handle all failure cases.
+
+This issue was noticed when a BPF LSM program was attached to the
+task_alloc hook on a kernel with KASAN enabled. The program used
+bpf_task_storage_get to copy the task local storage from the current
+task to the new task being created.
+
+Fixes: a10787e6d58c ("bpf: Enable task local storage for tracing programs")
+Reported-by: Kuba Piecuch <jpiecuch@google.com>
+Signed-off-by: KP Singh <kpsingh@kernel.org>
+Acked-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230602002612.1117381-1-kpsingh@kernel.org
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/fork.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ec913b13c5edb..6bb91fbbf73cc 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -559,6 +559,7 @@ void free_task(struct task_struct *tsk)
+       arch_release_task_struct(tsk);
+       if (tsk->flags & PF_KTHREAD)
+               free_kthread_struct(tsk);
++      bpf_task_storage_free(tsk);
+       free_task_struct(tsk);
+ }
+ EXPORT_SYMBOL(free_task);
+@@ -843,7 +844,6 @@ void __put_task_struct(struct task_struct *tsk)
+       cgroup_free(tsk);
+       task_numa_free(tsk, true);
+       security_task_free(tsk);
+-      bpf_task_storage_free(tsk);
+       exit_creds(tsk);
+       delayacct_tsk_free(tsk);
+       put_signal_struct(tsk->signal);
+-- 
+2.39.2
+
diff --git a/queue-6.1/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch b/queue-6.1/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch
new file mode 100644 (file)
index 0000000..9f86c14
--- /dev/null
@@ -0,0 +1,92 @@
+From 0ae2769ab3958371531306c48f6bd8fae02be80b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 19:51:49 +0000
+Subject: bpf, sockmap: Avoid potential NULL dereference in
+ sk_psock_verdict_data_ready()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b320a45638296b63be8d9a901ca8bc43716b1ae1 ]
+
+syzbot found sk_psock(sk) could return NULL when called
+from sk_psock_verdict_data_ready().
+
+Just make sure to handle this case.
+
+[1]
+general protection fault, probably for non-canonical address 0xdffffc000000005c: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x00000000000002e0-0x00000000000002e7]
+CPU: 0 PID: 15 Comm: ksoftirqd/0 Not tainted 6.4.0-rc3-syzkaller-00588-g4781e965e655 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023
+RIP: 0010:sk_psock_verdict_data_ready+0x19f/0x3c0 net/core/skmsg.c:1213
+Code: 4c 89 e6 e8 63 70 5e f9 4d 85 e4 75 75 e8 19 74 5e f9 48 8d bb e0 02 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 07 02 00 00 48 89 ef ff 93 e0 02 00 00 e8 29 fd
+RSP: 0018:ffffc90000147688 EFLAGS: 00010206
+RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000100
+RDX: 000000000000005c RSI: ffffffff8825ceb7 RDI: 00000000000002e0
+RBP: ffff888076518c40 R08: 0000000000000007 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000
+R13: 0000000000000000 R14: 0000000000008000 R15: ffff888076518c40
+FS: 0000000000000000(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f901375bab0 CR3: 000000004bf26000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+<TASK>
+tcp_data_ready+0x10a/0x520 net/ipv4/tcp_input.c:5006
+tcp_data_queue+0x25d3/0x4c50 net/ipv4/tcp_input.c:5080
+tcp_rcv_established+0x829/0x1f90 net/ipv4/tcp_input.c:6019
+tcp_v4_do_rcv+0x65a/0x9c0 net/ipv4/tcp_ipv4.c:1726
+tcp_v4_rcv+0x2cbf/0x3340 net/ipv4/tcp_ipv4.c:2148
+ip_protocol_deliver_rcu+0x9f/0x480 net/ipv4/ip_input.c:205
+ip_local_deliver_finish+0x2ec/0x520 net/ipv4/ip_input.c:233
+NF_HOOK include/linux/netfilter.h:303 [inline]
+NF_HOOK include/linux/netfilter.h:297 [inline]
+ip_local_deliver+0x1ae/0x200 net/ipv4/ip_input.c:254
+dst_input include/net/dst.h:468 [inline]
+ip_rcv_finish+0x1cf/0x2f0 net/ipv4/ip_input.c:449
+NF_HOOK include/linux/netfilter.h:303 [inline]
+NF_HOOK include/linux/netfilter.h:297 [inline]
+ip_rcv+0xae/0xd0 net/ipv4/ip_input.c:569
+__netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5491
+__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5605
+process_backlog+0x101/0x670 net/core/dev.c:5933
+__napi_poll+0xb7/0x6f0 net/core/dev.c:6499
+napi_poll net/core/dev.c:6566 [inline]
+net_rx_action+0x8a9/0xcb0 net/core/dev.c:6699
+__do_softirq+0x1d4/0x905 kernel/softirq.c:571
+run_ksoftirqd kernel/softirq.c:939 [inline]
+run_ksoftirqd+0x31/0x60 kernel/softirq.c:931
+smpboot_thread_fn+0x659/0x9e0 kernel/smpboot.c:164
+kthread+0x344/0x440 kernel/kthread.c:379
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308
+</TASK>
+
+Fixes: 6df7f764cd3c ("bpf, sockmap: Wake up polling after data copy")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20230530195149.68145-1-edumazet@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 9e0f694515636..65fb6f5b21b28 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1205,7 +1205,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+               rcu_read_lock();
+               psock = sk_psock(sk);
+-              psock->saved_data_ready(sk);
++              if (psock)
++                      psock->saved_data_ready(sk);
+               rcu_read_unlock();
+       }
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.1/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch b/queue-6.1/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch
new file mode 100644 (file)
index 0000000..5b06c55
--- /dev/null
@@ -0,0 +1,78 @@
+From 61eacceec3d75f88b29908797fa4344dc1683d73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 May 2023 14:23:37 -0400
+Subject: drm/amdgpu: fix Null pointer dereference error in
+ amdgpu_device_recover_vram
+
+From: Horatio Zhang <Hongkun.Zhang@amd.com>
+
+[ Upstream commit 2a1eb1a343208ce7d6839b73d62aece343e693ff ]
+
+Use the function of amdgpu_bo_vm_destroy to handle the resource release
+of shadow bo. During the amdgpu_mes_self_test, shadow bo released, but
+vmbo->shadow_list was not, which caused a null pointer reference error
+in amdgpu_device_recover_vram when GPU reset.
+
+Fixes: 6c032c37ac3e ("drm/amdgpu: Fix vram recover doesn't work after whole GPU reset (v2)")
+Signed-off-by: xinhui pan <xinhui.pan@amd.com>
+Signed-off-by: Horatio Zhang <Hongkun.Zhang@amd.com>
+Acked-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 ++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c  |  1 -
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4feedf518a191..ad8cb9e6d1ab0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
+ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
+ {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+-      struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
++      struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
+       struct amdgpu_bo_vm *vmbo;
++      bo = shadow_bo->parent;
+       vmbo = to_amdgpu_bo_vm(bo);
+       /* in case amdgpu_device_recover_vram got NULL of bo->parent */
+       if (!list_empty(&vmbo->shadow_list)) {
+@@ -691,11 +692,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+               return r;
+       *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+-      INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+-      /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+-       * is initialized.
+-       */
+-      bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
+       return r;
+ }
+@@ -712,6 +708,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
+       mutex_lock(&adev->shadow_list_lock);
+       list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
++      vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
++      vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
+       mutex_unlock(&adev->shadow_list_lock);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index 01e42bdd8e4e8..4642cff0e1a4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+               return r;
+       }
+-      (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+       amdgpu_bo_add_to_shadow_list(*vmbo);
+       return 0;
+-- 
+2.39.2
+
diff --git a/queue-6.1/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch b/queue-6.1/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch
new file mode 100644 (file)
index 0000000..60239c0
--- /dev/null
@@ -0,0 +1,88 @@
+From 4ab09dc7a39143679b741de19b145e8cebe3608f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Mar 2023 20:24:34 +0300
+Subject: drm/i915: Explain the magic numbers for AUX SYNC/precharge length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 26bfc3f36f2104c174dfc72415547d5c28ef3f1c ]
+
+Replace the hardcoded final numbers in the AUX SYNC/precharge
+setup, and derive those from numbers from the (e)DP specs.
+
+The new functions can serve as the single point of truth for
+the number of SYNC pulses we use.
+
+Cc: Jouni Högander <jouni.hogander@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230329172434.18744-2-ville.syrjala@linux.intel.com
+Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
+Stable-dep-of: 2d6f2f79e065 ("drm/i915: Use 18 fast wake AUX sync len")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp_aux.c | 32 +++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 7f3f2d50e6cde..8325868770994 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -119,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+       return index ? 0 : 1;
+ }
++static int intel_dp_aux_sync_len(void)
++{
++      int precharge = 16; /* 10-16 */
++      int preamble = 16;
++
++      return precharge + preamble;
++}
++
++static int intel_dp_aux_fw_sync_len(void)
++{
++      int precharge = 16; /* 10-16 */
++      int preamble = 8;
++
++      return precharge + preamble;
++}
++
++static int g4x_dp_aux_precharge_len(void)
++{
++      int precharge_min = 10;
++      int preamble = 16;
++
++      /* HW wants the length of the extra precharge in 2us units */
++      return (intel_dp_aux_sync_len() -
++              precharge_min - preamble) / 2;
++}
++
+ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+                               int send_bytes,
+                               u32 aux_clock_divider)
+@@ -141,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+              timeout |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-             (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
++             (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+              (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+ }
+@@ -165,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+             DP_AUX_CH_CTL_TIME_OUT_MAX |
+             DP_AUX_CH_CTL_RECEIVE_ERROR |
+             (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-            DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
+-            DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
++            DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
++            DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
+       if (intel_tc_port_in_tbt_alt_mode(dig_port))
+               ret |= DP_AUX_CH_CTL_TBT_IO;
+-- 
+2.39.2
+
diff --git a/queue-6.1/drm-i915-selftests-add-some-missing-error-propagatio.patch b/queue-6.1/drm-i915-selftests-add-some-missing-error-propagatio.patch
new file mode 100644 (file)
index 0000000..b1eb796
--- /dev/null
@@ -0,0 +1,73 @@
+From b38128a1862f092d5c41d9fbd6868bd1928eb718 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 14:11:35 +0100
+Subject: drm/i915/selftests: Add some missing error propagation
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+[ Upstream commit 79d0150d2d983a4f6efee676cea06027f586fcd0 ]
+
+Add some missing error propagation in live_parallel_switch.
+
+To avoid needlessly burdening the various backport processes, note I am
+not marking it as a fix against any patches and not copying stable since
+it is debug/selftests only code.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Cc: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Fixes: 50d16d44cce4 ("drm/i915/selftests: Exercise context switching in parallel")
+Fixes: 6407cf533217 ("drm/i915/selftests: Stop using kthread_stop()")
+Link: https://patchwork.freedesktop.org/patch/msgid/20230605131135.396854-1-tvrtko.ursulin@linux.intel.com
+(cherry picked from commit 412fa1f097f48c8c1321806dd25e46618e0da147)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/i915/gem/selftests/i915_gem_context.c  | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+index d8864444432b7..a4858be12ee76 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg)
+                               continue;
+                       ce = intel_context_create(data[m].ce[0]->engine);
+-                      if (IS_ERR(ce))
++                      if (IS_ERR(ce)) {
++                              err = PTR_ERR(ce);
+                               goto out;
++                      }
+                       err = intel_context_pin(ce);
+                       if (err) {
+@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg)
+               worker = kthread_create_worker(0, "igt/parallel:%s",
+                                              data[n].ce[0]->engine->name);
+-              if (IS_ERR(worker))
++              if (IS_ERR(worker)) {
++                      err = PTR_ERR(worker);
+                       goto out;
++              }
+               data[n].worker = worker;
+       }
+@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg)
+                       }
+               }
+-              if (igt_live_test_end(&t))
+-                      err = -EIO;
++              if (igt_live_test_end(&t)) {
++                      err = err ?: -EIO;
++                      break;
++              }
+       }
+ out:
+-- 
+2.39.2
+
diff --git a/queue-6.1/drm-i915-selftests-stop-using-kthread_stop.patch b/queue-6.1/drm-i915-selftests-stop-using-kthread_stop.patch
new file mode 100644 (file)
index 0000000..331e6fa
--- /dev/null
@@ -0,0 +1,1075 @@
+From 615b3c88cd6e43bdc78af4a42a130bce008aee48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 14:08:41 +0100
+Subject: drm/i915/selftests: Stop using kthread_stop()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+[ Upstream commit 6407cf533217e09dfd895e64984c3f1ee3802373 ]
+
+Since a7c01fa93aeb ("signal: break out of wait loops on kthread_stop()")
+kthread_stop() started asserting a pending signal which wreaks havoc with
+a few of our selftests. Mainly because they are not fully expecting to
+handle signals, but also cutting the intended test runtimes short due
+signal_pending() now returning true (via __igt_timeout), which therefore
+breaks both the patterns of:
+
+  kthread_run()
+  ..sleep for igt_timeout_ms to allow test to exercise stuff..
+  kthread_stop()
+
+And check for errors recorded in the thread.
+
+And also:
+
+    Main thread  |   Test thread
+  ---------------+------------------------------
+  kthread_run()  |
+  kthread_stop() |  do stuff until __igt_timeout
+                |  -- exits early due signal --
+
+Where this kthread_stop() was assume would have a "join" semantics, which
+it would have had if not the new signal assertion issue.
+
+To recap, threads are now likely to catch a previously impossible
+ERESTARTSYS or EINTR, marking the test as failed, or have a pointlessly
+short run time.
+
+To work around this start using kthread_work(er) API which provides
+an explicit way of waiting for threads to exit. And for cases where
+parent controls the test duration we add explicit signaling which threads
+will now use instead of relying on kthread_should_stop().
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221020130841.3845791-1-tvrtko.ursulin@linux.intel.com
+Stable-dep-of: 79d0150d2d98 ("drm/i915/selftests: Add some missing error propagation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/i915/gem/selftests/i915_gem_context.c | 118 ++++----
+ drivers/gpu/drm/i915/gt/selftest_execlists.c  |  48 ++--
+ drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  51 ++--
+ drivers/gpu/drm/i915/selftests/i915_request.c | 252 +++++++++++-------
+ 4 files changed, 281 insertions(+), 188 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+index c6ad67b90e8af..d8864444432b7 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+@@ -179,97 +179,108 @@ static int live_nop_switch(void *arg)
+ }
+ struct parallel_switch {
+-      struct task_struct *tsk;
++      struct kthread_worker *worker;
++      struct kthread_work work;
+       struct intel_context *ce[2];
++      int result;
+ };
+-static int __live_parallel_switch1(void *data)
++static void __live_parallel_switch1(struct kthread_work *work)
+ {
+-      struct parallel_switch *arg = data;
++      struct parallel_switch *arg =
++              container_of(work, typeof(*arg), work);
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       count = 0;
++      arg->result = 0;
+       do {
+               struct i915_request *rq = NULL;
+-              int err, n;
++              int n;
+-              err = 0;
+-              for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
++              for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
+                       struct i915_request *prev = rq;
+                       rq = i915_request_create(arg->ce[n]);
+                       if (IS_ERR(rq)) {
+                               i915_request_put(prev);
+-                              return PTR_ERR(rq);
++                              arg->result = PTR_ERR(rq);
++                              break;
+                       }
+                       i915_request_get(rq);
+                       if (prev) {
+-                              err = i915_request_await_dma_fence(rq, &prev->fence);
++                              arg->result =
++                                      i915_request_await_dma_fence(rq,
++                                                                   &prev->fence);
+                               i915_request_put(prev);
+                       }
+                       i915_request_add(rq);
+               }
++
++              if (IS_ERR_OR_NULL(rq))
++                      break;
++
+               if (i915_request_wait(rq, 0, HZ) < 0)
+-                      err = -ETIME;
++                      arg->result = -ETIME;
++
+               i915_request_put(rq);
+-              if (err)
+-                      return err;
+               count++;
+-      } while (!__igt_timeout(end_time, NULL));
++      } while (!arg->result && !__igt_timeout(end_time, NULL));
+-      pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+-      return 0;
++      pr_info("%s: %lu switches (sync) <%d>\n",
++              arg->ce[0]->engine->name, count, arg->result);
+ }
+-static int __live_parallel_switchN(void *data)
++static void __live_parallel_switchN(struct kthread_work *work)
+ {
+-      struct parallel_switch *arg = data;
++      struct parallel_switch *arg =
++              container_of(work, typeof(*arg), work);
+       struct i915_request *rq = NULL;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       int n;
+       count = 0;
++      arg->result = 0;
+       do {
+-              for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
++              for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
+                       struct i915_request *prev = rq;
+-                      int err = 0;
+                       rq = i915_request_create(arg->ce[n]);
+                       if (IS_ERR(rq)) {
+                               i915_request_put(prev);
+-                              return PTR_ERR(rq);
++                              arg->result = PTR_ERR(rq);
++                              break;
+                       }
+                       i915_request_get(rq);
+                       if (prev) {
+-                              err = i915_request_await_dma_fence(rq, &prev->fence);
++                              arg->result =
++                                      i915_request_await_dma_fence(rq,
++                                                                   &prev->fence);
+                               i915_request_put(prev);
+                       }
+                       i915_request_add(rq);
+-                      if (err) {
+-                              i915_request_put(rq);
+-                              return err;
+-                      }
+               }
+               count++;
+-      } while (!__igt_timeout(end_time, NULL));
+-      i915_request_put(rq);
++      } while (!arg->result && !__igt_timeout(end_time, NULL));
+-      pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+-      return 0;
++      if (!IS_ERR_OR_NULL(rq))
++              i915_request_put(rq);
++
++      pr_info("%s: %lu switches (many) <%d>\n",
++              arg->ce[0]->engine->name, count, arg->result);
+ }
+ static int live_parallel_switch(void *arg)
+ {
+       struct drm_i915_private *i915 = arg;
+-      static int (* const func[])(void *arg) = {
++      static void (* const func[])(struct kthread_work *) = {
+               __live_parallel_switch1,
+               __live_parallel_switchN,
+               NULL,
+@@ -277,7 +288,7 @@ static int live_parallel_switch(void *arg)
+       struct parallel_switch *data = NULL;
+       struct i915_gem_engines *engines;
+       struct i915_gem_engines_iter it;
+-      int (* const *fn)(void *arg);
++      void (* const *fn)(struct kthread_work *);
+       struct i915_gem_context *ctx;
+       struct intel_context *ce;
+       struct file *file;
+@@ -348,9 +359,22 @@ static int live_parallel_switch(void *arg)
+               }
+       }
++      for (n = 0; n < count; n++) {
++              struct kthread_worker *worker;
++
++              if (!data[n].ce[0])
++                      continue;
++
++              worker = kthread_create_worker(0, "igt/parallel:%s",
++                                             data[n].ce[0]->engine->name);
++              if (IS_ERR(worker))
++                      goto out;
++
++              data[n].worker = worker;
++      }
++
+       for (fn = func; !err && *fn; fn++) {
+               struct igt_live_test t;
+-              int n;
+               err = igt_live_test_begin(&t, i915, __func__, "");
+               if (err)
+@@ -360,30 +384,17 @@ static int live_parallel_switch(void *arg)
+                       if (!data[n].ce[0])
+                               continue;
+-                      data[n].tsk = kthread_run(*fn, &data[n],
+-                                                "igt/parallel:%s",
+-                                                data[n].ce[0]->engine->name);
+-                      if (IS_ERR(data[n].tsk)) {
+-                              err = PTR_ERR(data[n].tsk);
+-                              break;
+-                      }
+-                      get_task_struct(data[n].tsk);
++                      data[n].result = 0;
++                      kthread_init_work(&data[n].work, *fn);
++                      kthread_queue_work(data[n].worker, &data[n].work);
+               }
+-              yield(); /* start all threads before we kthread_stop() */
+-
+               for (n = 0; n < count; n++) {
+-                      int status;
+-
+-                      if (IS_ERR_OR_NULL(data[n].tsk))
+-                              continue;
+-
+-                      status = kthread_stop(data[n].tsk);
+-                      if (status && !err)
+-                              err = status;
+-
+-                      put_task_struct(data[n].tsk);
+-                      data[n].tsk = NULL;
++                      if (data[n].ce[0]) {
++                              kthread_flush_work(&data[n].work);
++                              if (data[n].result && !err)
++                                      err = data[n].result;
++                      }
+               }
+               if (igt_live_test_end(&t))
+@@ -399,6 +410,9 @@ static int live_parallel_switch(void *arg)
+                       intel_context_unpin(data[n].ce[m]);
+                       intel_context_put(data[n].ce[m]);
+               }
++
++              if (data[n].worker)
++                      kthread_destroy_worker(data[n].worker);
+       }
+       kfree(data);
+ out_file:
+diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
+index 1e08b2473b993..6312f42daef48 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
++++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
+@@ -3475,12 +3475,14 @@ static int random_priority(struct rnd_state *rnd)
+ struct preempt_smoke {
+       struct intel_gt *gt;
++      struct kthread_work work;
+       struct i915_gem_context **contexts;
+       struct intel_engine_cs *engine;
+       struct drm_i915_gem_object *batch;
+       unsigned int ncontext;
+       struct rnd_state prng;
+       unsigned long count;
++      int result;
+ };
+ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+@@ -3540,34 +3542,31 @@ static int smoke_submit(struct preempt_smoke *smoke,
+       return err;
+ }
+-static int smoke_crescendo_thread(void *arg)
++static void smoke_crescendo_work(struct kthread_work *work)
+ {
+-      struct preempt_smoke *smoke = arg;
++      struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       count = 0;
+       do {
+               struct i915_gem_context *ctx = smoke_context(smoke);
+-              int err;
+-              err = smoke_submit(smoke,
+-                                 ctx, count % I915_PRIORITY_MAX,
+-                                 smoke->batch);
+-              if (err)
+-                      return err;
++              smoke->result = smoke_submit(smoke, ctx,
++                                           count % I915_PRIORITY_MAX,
++                                           smoke->batch);
+               count++;
+-      } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
++      } while (!smoke->result && count < smoke->ncontext &&
++               !__igt_timeout(end_time, NULL));
+       smoke->count = count;
+-      return 0;
+ }
+ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+ #define BATCH BIT(0)
+ {
+-      struct task_struct *tsk[I915_NUM_ENGINES] = {};
++      struct kthread_worker *worker[I915_NUM_ENGINES] = {};
+       struct preempt_smoke *arg;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+@@ -3578,6 +3577,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+       if (!arg)
+               return -ENOMEM;
++      memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
++
+       for_each_engine(engine, smoke->gt, id) {
+               arg[id] = *smoke;
+               arg[id].engine = engine;
+@@ -3585,31 +3586,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+                       arg[id].batch = NULL;
+               arg[id].count = 0;
+-              tsk[id] = kthread_run(smoke_crescendo_thread, arg,
+-                                    "igt/smoke:%d", id);
+-              if (IS_ERR(tsk[id])) {
+-                      err = PTR_ERR(tsk[id]);
++              worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
++              if (IS_ERR(worker[id])) {
++                      err = PTR_ERR(worker[id]);
+                       break;
+               }
+-              get_task_struct(tsk[id]);
+-      }
+-      yield(); /* start all threads before we kthread_stop() */
++              kthread_init_work(&arg[id].work, smoke_crescendo_work);
++              kthread_queue_work(worker[id], &arg[id].work);
++      }
+       count = 0;
+       for_each_engine(engine, smoke->gt, id) {
+-              int status;
+-
+-              if (IS_ERR_OR_NULL(tsk[id]))
++              if (IS_ERR_OR_NULL(worker[id]))
+                       continue;
+-              status = kthread_stop(tsk[id]);
+-              if (status && !err)
+-                      err = status;
++              kthread_flush_work(&arg[id].work);
++              if (arg[id].result && !err)
++                      err = arg[id].result;
+               count += arg[id].count;
+-              put_task_struct(tsk[id]);
++              kthread_destroy_worker(worker[id]);
+       }
+       pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+index 7f3bb1d34dfbf..71263058a7b05 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
++++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+@@ -866,10 +866,13 @@ static int igt_reset_active_engine(void *arg)
+ }
+ struct active_engine {
+-      struct task_struct *task;
++      struct kthread_worker *worker;
++      struct kthread_work work;
+       struct intel_engine_cs *engine;
+       unsigned long resets;
+       unsigned int flags;
++      bool stop;
++      int result;
+ };
+ #define TEST_ACTIVE   BIT(0)
+@@ -900,10 +903,10 @@ static int active_request_put(struct i915_request *rq)
+       return err;
+ }
+-static int active_engine(void *data)
++static void active_engine(struct kthread_work *work)
+ {
+       I915_RND_STATE(prng);
+-      struct active_engine *arg = data;
++      struct active_engine *arg = container_of(work, typeof(*arg), work);
+       struct intel_engine_cs *engine = arg->engine;
+       struct i915_request *rq[8] = {};
+       struct intel_context *ce[ARRAY_SIZE(rq)];
+@@ -913,16 +916,17 @@ static int active_engine(void *data)
+       for (count = 0; count < ARRAY_SIZE(ce); count++) {
+               ce[count] = intel_context_create(engine);
+               if (IS_ERR(ce[count])) {
+-                      err = PTR_ERR(ce[count]);
+-                      pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
++                      arg->result = PTR_ERR(ce[count]);
++                      pr_err("[%s] Create context #%ld failed: %d!\n",
++                             engine->name, count, arg->result);
+                       while (--count)
+                               intel_context_put(ce[count]);
+-                      return err;
++                      return;
+               }
+       }
+       count = 0;
+-      while (!kthread_should_stop()) {
++      while (!READ_ONCE(arg->stop)) {
+               unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
+               struct i915_request *old = rq[idx];
+               struct i915_request *new;
+@@ -967,7 +971,7 @@ static int active_engine(void *data)
+               intel_context_put(ce[count]);
+       }
+-      return err;
++      arg->result = err;
+ }
+ static int __igt_reset_engines(struct intel_gt *gt,
+@@ -1022,7 +1026,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
+               memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
+               for_each_engine(other, gt, tmp) {
+-                      struct task_struct *tsk;
++                      struct kthread_worker *worker;
+                       threads[tmp].resets =
+                               i915_reset_engine_count(global, other);
+@@ -1036,19 +1040,21 @@ static int __igt_reset_engines(struct intel_gt *gt,
+                       threads[tmp].engine = other;
+                       threads[tmp].flags = flags;
+-                      tsk = kthread_run(active_engine, &threads[tmp],
+-                                        "igt/%s", other->name);
+-                      if (IS_ERR(tsk)) {
+-                              err = PTR_ERR(tsk);
+-                              pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
++                      worker = kthread_create_worker(0, "igt/%s",
++                                                     other->name);
++                      if (IS_ERR(worker)) {
++                              err = PTR_ERR(worker);
++                              pr_err("[%s] Worker create failed: %d!\n",
++                                     engine->name, err);
+                               goto unwind;
+                       }
+-                      threads[tmp].task = tsk;
+-                      get_task_struct(tsk);
+-              }
++                      threads[tmp].worker = worker;
+-              yield(); /* start all threads before we begin */
++                      kthread_init_work(&threads[tmp].work, active_engine);
++                      kthread_queue_work(threads[tmp].worker,
++                                         &threads[tmp].work);
++              }
+               st_engine_heartbeat_disable_no_pm(engine);
+               GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+@@ -1197,17 +1203,20 @@ static int __igt_reset_engines(struct intel_gt *gt,
+               for_each_engine(other, gt, tmp) {
+                       int ret;
+-                      if (!threads[tmp].task)
++                      if (!threads[tmp].worker)
+                               continue;
+-                      ret = kthread_stop(threads[tmp].task);
++                      WRITE_ONCE(threads[tmp].stop, true);
++                      kthread_flush_work(&threads[tmp].work);
++                      ret = READ_ONCE(threads[tmp].result);
+                       if (ret) {
+                               pr_err("kthread for other engine %s failed, err=%d\n",
+                                      other->name, ret);
+                               if (!err)
+                                       err = ret;
+                       }
+-                      put_task_struct(threads[tmp].task);
++
++                      kthread_destroy_worker(threads[tmp].worker);
+                       /* GuC based resets are not logged per engine */
+                       if (!using_guc) {
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index 818a4909c1f35..a46350c37e9d4 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -299,9 +299,18 @@ __live_request_alloc(struct intel_context *ce)
+       return intel_context_create_request(ce);
+ }
+-static int __igt_breadcrumbs_smoketest(void *arg)
++struct smoke_thread {
++      struct kthread_worker *worker;
++      struct kthread_work work;
++      struct smoketest *t;
++      bool stop;
++      int result;
++};
++
++static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
+ {
+-      struct smoketest *t = arg;
++      struct smoke_thread *thread = container_of(work, typeof(*thread), work);
++      struct smoketest *t = thread->t;
+       const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
+       const unsigned int total = 4 * t->ncontexts + 1;
+       unsigned int num_waits = 0, num_fences = 0;
+@@ -320,8 +329,10 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+        */
+       requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
+-      if (!requests)
+-              return -ENOMEM;
++      if (!requests) {
++              thread->result = -ENOMEM;
++              return;
++      }
+       order = i915_random_order(total, &prng);
+       if (!order) {
+@@ -329,7 +340,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+               goto out_requests;
+       }
+-      while (!kthread_should_stop()) {
++      while (!READ_ONCE(thread->stop)) {
+               struct i915_sw_fence *submit, *wait;
+               unsigned int n, count;
+@@ -437,7 +448,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+       kfree(order);
+ out_requests:
+       kfree(requests);
+-      return err;
++      thread->result = err;
+ }
+ static int mock_breadcrumbs_smoketest(void *arg)
+@@ -450,7 +461,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
+               .request_alloc = __mock_request_alloc
+       };
+       unsigned int ncpus = num_online_cpus();
+-      struct task_struct **threads;
++      struct smoke_thread *threads;
+       unsigned int n;
+       int ret = 0;
+@@ -479,28 +490,37 @@ static int mock_breadcrumbs_smoketest(void *arg)
+       }
+       for (n = 0; n < ncpus; n++) {
+-              threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
+-                                       &t, "igt/%d", n);
+-              if (IS_ERR(threads[n])) {
+-                      ret = PTR_ERR(threads[n]);
++              struct kthread_worker *worker;
++
++              worker = kthread_create_worker(0, "igt/%d", n);
++              if (IS_ERR(worker)) {
++                      ret = PTR_ERR(worker);
+                       ncpus = n;
+                       break;
+               }
+-              get_task_struct(threads[n]);
++              threads[n].worker = worker;
++              threads[n].t = &t;
++              threads[n].stop = false;
++              threads[n].result = 0;
++
++              kthread_init_work(&threads[n].work,
++                                __igt_breadcrumbs_smoketest);
++              kthread_queue_work(worker, &threads[n].work);
+       }
+-      yield(); /* start all threads before we begin */
+       msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+       for (n = 0; n < ncpus; n++) {
+               int err;
+-              err = kthread_stop(threads[n]);
++              WRITE_ONCE(threads[n].stop, true);
++              kthread_flush_work(&threads[n].work);
++              err = READ_ONCE(threads[n].result);
+               if (err < 0 && !ret)
+                       ret = err;
+-              put_task_struct(threads[n]);
++              kthread_destroy_worker(threads[n].worker);
+       }
+       pr_info("Completed %lu waits for %lu fence across %d cpus\n",
+               atomic_long_read(&t.num_waits),
+@@ -1419,9 +1439,18 @@ static int live_sequential_engines(void *arg)
+       return err;
+ }
+-static int __live_parallel_engine1(void *arg)
++struct parallel_thread {
++      struct kthread_worker *worker;
++      struct kthread_work work;
++      struct intel_engine_cs *engine;
++      int result;
++};
++
++static void __live_parallel_engine1(struct kthread_work *work)
+ {
+-      struct intel_engine_cs *engine = arg;
++      struct parallel_thread *thread =
++              container_of(work, typeof(*thread), work);
++      struct intel_engine_cs *engine = thread->engine;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       int err = 0;
+@@ -1452,12 +1481,14 @@ static int __live_parallel_engine1(void *arg)
+       intel_engine_pm_put(engine);
+       pr_info("%s: %lu request + sync\n", engine->name, count);
+-      return err;
++      thread->result = err;
+ }
+-static int __live_parallel_engineN(void *arg)
++static void __live_parallel_engineN(struct kthread_work *work)
+ {
+-      struct intel_engine_cs *engine = arg;
++      struct parallel_thread *thread =
++              container_of(work, typeof(*thread), work);
++      struct intel_engine_cs *engine = thread->engine;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       int err = 0;
+@@ -1479,7 +1510,7 @@ static int __live_parallel_engineN(void *arg)
+       intel_engine_pm_put(engine);
+       pr_info("%s: %lu requests\n", engine->name, count);
+-      return err;
++      thread->result = err;
+ }
+ static bool wake_all(struct drm_i915_private *i915)
+@@ -1505,9 +1536,11 @@ static int wait_for_all(struct drm_i915_private *i915)
+       return -ETIME;
+ }
+-static int __live_parallel_spin(void *arg)
++static void __live_parallel_spin(struct kthread_work *work)
+ {
+-      struct intel_engine_cs *engine = arg;
++      struct parallel_thread *thread =
++              container_of(work, typeof(*thread), work);
++      struct intel_engine_cs *engine = thread->engine;
+       struct igt_spinner spin;
+       struct i915_request *rq;
+       int err = 0;
+@@ -1520,7 +1553,8 @@ static int __live_parallel_spin(void *arg)
+       if (igt_spinner_init(&spin, engine->gt)) {
+               wake_all(engine->i915);
+-              return -ENOMEM;
++              thread->result = -ENOMEM;
++              return;
+       }
+       intel_engine_pm_get(engine);
+@@ -1553,22 +1587,22 @@ static int __live_parallel_spin(void *arg)
+ out_spin:
+       igt_spinner_fini(&spin);
+-      return err;
++      thread->result = err;
+ }
+ static int live_parallel_engines(void *arg)
+ {
+       struct drm_i915_private *i915 = arg;
+-      static int (* const func[])(void *arg) = {
++      static void (* const func[])(struct kthread_work *) = {
+               __live_parallel_engine1,
+               __live_parallel_engineN,
+               __live_parallel_spin,
+               NULL,
+       };
+       const unsigned int nengines = num_uabi_engines(i915);
++      struct parallel_thread *threads;
+       struct intel_engine_cs *engine;
+-      int (* const *fn)(void *arg);
+-      struct task_struct **tsk;
++      void (* const *fn)(struct kthread_work *);
+       int err = 0;
+       /*
+@@ -1576,8 +1610,8 @@ static int live_parallel_engines(void *arg)
+        * tests that we load up the system maximally.
+        */
+-      tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+-      if (!tsk)
++      threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
++      if (!threads)
+               return -ENOMEM;
+       for (fn = func; !err && *fn; fn++) {
+@@ -1594,37 +1628,44 @@ static int live_parallel_engines(void *arg)
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+-                      tsk[idx] = kthread_run(*fn, engine,
+-                                             "igt/parallel:%s",
+-                                             engine->name);
+-                      if (IS_ERR(tsk[idx])) {
+-                              err = PTR_ERR(tsk[idx]);
++                      struct kthread_worker *worker;
++
++                      worker = kthread_create_worker(0, "igt/parallel:%s",
++                                                     engine->name);
++                      if (IS_ERR(worker)) {
++                              err = PTR_ERR(worker);
+                               break;
+                       }
+-                      get_task_struct(tsk[idx++]);
+-              }
+-              yield(); /* start all threads before we kthread_stop() */
++                      threads[idx].worker = worker;
++                      threads[idx].result = 0;
++                      threads[idx].engine = engine;
++
++                      kthread_init_work(&threads[idx].work, *fn);
++                      kthread_queue_work(worker, &threads[idx].work);
++                      idx++;
++              }
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+                       int status;
+-                      if (IS_ERR(tsk[idx]))
++                      if (!threads[idx].worker)
+                               break;
+-                      status = kthread_stop(tsk[idx]);
++                      kthread_flush_work(&threads[idx].work);
++                      status = READ_ONCE(threads[idx].result);
+                       if (status && !err)
+                               err = status;
+-                      put_task_struct(tsk[idx++]);
++                      kthread_destroy_worker(threads[idx++].worker);
+               }
+               if (igt_live_test_end(&t))
+                       err = -EIO;
+       }
+-      kfree(tsk);
++      kfree(threads);
+       return err;
+ }
+@@ -1672,7 +1713,7 @@ static int live_breadcrumbs_smoketest(void *arg)
+       const unsigned int ncpus = num_online_cpus();
+       unsigned long num_waits, num_fences;
+       struct intel_engine_cs *engine;
+-      struct task_struct **threads;
++      struct smoke_thread *threads;
+       struct igt_live_test live;
+       intel_wakeref_t wakeref;
+       struct smoketest *smoke;
+@@ -1746,23 +1787,26 @@ static int live_breadcrumbs_smoketest(void *arg)
+                        smoke[idx].max_batch, engine->name);
+               for (n = 0; n < ncpus; n++) {
+-                      struct task_struct *tsk;
++                      unsigned int i = idx * ncpus + n;
++                      struct kthread_worker *worker;
+-                      tsk = kthread_run(__igt_breadcrumbs_smoketest,
+-                                        &smoke[idx], "igt/%d.%d", idx, n);
+-                      if (IS_ERR(tsk)) {
+-                              ret = PTR_ERR(tsk);
++                      worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
++                      if (IS_ERR(worker)) {
++                              ret = PTR_ERR(worker);
+                               goto out_flush;
+                       }
+-                      get_task_struct(tsk);
+-                      threads[idx * ncpus + n] = tsk;
++                      threads[i].worker = worker;
++                      threads[i].t = &smoke[idx];
++
++                      kthread_init_work(&threads[i].work,
++                                        __igt_breadcrumbs_smoketest);
++                      kthread_queue_work(worker, &threads[i].work);
+               }
+               idx++;
+       }
+-      yield(); /* start all threads before we begin */
+       msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+ out_flush:
+@@ -1771,17 +1815,19 @@ static int live_breadcrumbs_smoketest(void *arg)
+       num_fences = 0;
+       for_each_uabi_engine(engine, i915) {
+               for (n = 0; n < ncpus; n++) {
+-                      struct task_struct *tsk = threads[idx * ncpus + n];
++                      unsigned int i = idx * ncpus + n;
+                       int err;
+-                      if (!tsk)
++                      if (!threads[i].worker)
+                               continue;
+-                      err = kthread_stop(tsk);
++                      WRITE_ONCE(threads[i].stop, true);
++                      kthread_flush_work(&threads[i].work);
++                      err = READ_ONCE(threads[i].result);
+                       if (err < 0 && !ret)
+                               ret = err;
+-                      put_task_struct(tsk);
++                      kthread_destroy_worker(threads[i].worker);
+               }
+               num_waits += atomic_long_read(&smoke[idx].num_waits);
+@@ -2891,9 +2937,18 @@ static int perf_series_engines(void *arg)
+       return err;
+ }
+-static int p_sync0(void *arg)
++struct p_thread {
++      struct perf_stats p;
++      struct kthread_worker *worker;
++      struct kthread_work work;
++      struct intel_engine_cs *engine;
++      int result;
++};
++
++static void p_sync0(struct kthread_work *work)
+ {
+-      struct perf_stats *p = arg;
++      struct p_thread *thread = container_of(work, typeof(*thread), work);
++      struct perf_stats *p = &thread->p;
+       struct intel_engine_cs *engine = p->engine;
+       struct intel_context *ce;
+       IGT_TIMEOUT(end_time);
+@@ -2902,13 +2957,16 @@ static int p_sync0(void *arg)
+       int err = 0;
+       ce = intel_context_create(engine);
+-      if (IS_ERR(ce))
+-              return PTR_ERR(ce);
++      if (IS_ERR(ce)) {
++              thread->result = PTR_ERR(ce);
++              return;
++      }
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+-              return err;
++              thread->result = err;
++              return;
+       }
+       if (intel_engine_supports_stats(engine)) {
+@@ -2958,12 +3016,13 @@ static int p_sync0(void *arg)
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+-      return err;
++      thread->result = err;
+ }
+-static int p_sync1(void *arg)
++static void p_sync1(struct kthread_work *work)
+ {
+-      struct perf_stats *p = arg;
++      struct p_thread *thread = container_of(work, typeof(*thread), work);
++      struct perf_stats *p = &thread->p;
+       struct intel_engine_cs *engine = p->engine;
+       struct i915_request *prev = NULL;
+       struct intel_context *ce;
+@@ -2973,13 +3032,16 @@ static int p_sync1(void *arg)
+       int err = 0;
+       ce = intel_context_create(engine);
+-      if (IS_ERR(ce))
+-              return PTR_ERR(ce);
++      if (IS_ERR(ce)) {
++              thread->result = PTR_ERR(ce);
++              return;
++      }
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+-              return err;
++              thread->result = err;
++              return;
+       }
+       if (intel_engine_supports_stats(engine)) {
+@@ -3031,12 +3093,13 @@ static int p_sync1(void *arg)
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+-      return err;
++      thread->result = err;
+ }
+-static int p_many(void *arg)
++static void p_many(struct kthread_work *work)
+ {
+-      struct perf_stats *p = arg;
++      struct p_thread *thread = container_of(work, typeof(*thread), work);
++      struct perf_stats *p = &thread->p;
+       struct intel_engine_cs *engine = p->engine;
+       struct intel_context *ce;
+       IGT_TIMEOUT(end_time);
+@@ -3045,13 +3108,16 @@ static int p_many(void *arg)
+       bool busy;
+       ce = intel_context_create(engine);
+-      if (IS_ERR(ce))
+-              return PTR_ERR(ce);
++      if (IS_ERR(ce)) {
++              thread->result = PTR_ERR(ce);
++              return;
++      }
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+-              return err;
++              thread->result = err;
++              return;
+       }
+       if (intel_engine_supports_stats(engine)) {
+@@ -3092,26 +3158,23 @@ static int p_many(void *arg)
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+-      return err;
++      thread->result = err;
+ }
+ static int perf_parallel_engines(void *arg)
+ {
+       struct drm_i915_private *i915 = arg;
+-      static int (* const func[])(void *arg) = {
++      static void (* const func[])(struct kthread_work *) = {
+               p_sync0,
+               p_sync1,
+               p_many,
+               NULL,
+       };
+       const unsigned int nengines = num_uabi_engines(i915);
++      void (* const *fn)(struct kthread_work *);
+       struct intel_engine_cs *engine;
+-      int (* const *fn)(void *arg);
+       struct pm_qos_request qos;
+-      struct {
+-              struct perf_stats p;
+-              struct task_struct *tsk;
+-      } *engines;
++      struct p_thread *engines;
+       int err = 0;
+       engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+@@ -3134,36 +3197,45 @@ static int perf_parallel_engines(void *arg)
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
++                      struct kthread_worker *worker;
++
+                       intel_engine_pm_get(engine);
+                       memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+-                      engines[idx].p.engine = engine;
+-                      engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+-                                                     "igt:%s", engine->name);
+-                      if (IS_ERR(engines[idx].tsk)) {
+-                              err = PTR_ERR(engines[idx].tsk);
++                      worker = kthread_create_worker(0, "igt:%s",
++                                                     engine->name);
++                      if (IS_ERR(worker)) {
++                              err = PTR_ERR(worker);
+                               intel_engine_pm_put(engine);
+                               break;
+                       }
+-                      get_task_struct(engines[idx++].tsk);
+-              }
++                      engines[idx].worker = worker;
++                      engines[idx].result = 0;
++                      engines[idx].p.engine = engine;
++                      engines[idx].engine = engine;
+-              yield(); /* start all threads before we kthread_stop() */
++                      kthread_init_work(&engines[idx].work, *fn);
++                      kthread_queue_work(worker, &engines[idx].work);
++                      idx++;
++              }
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+                       int status;
+-                      if (IS_ERR(engines[idx].tsk))
++                      if (!engines[idx].worker)
+                               break;
+-                      status = kthread_stop(engines[idx].tsk);
++                      kthread_flush_work(&engines[idx].work);
++                      status = READ_ONCE(engines[idx].result);
+                       if (status && !err)
+                               err = status;
+                       intel_engine_pm_put(engine);
+-                      put_task_struct(engines[idx++].tsk);
++
++                      kthread_destroy_worker(engines[idx].worker);
++                      idx++;
+               }
+               if (igt_live_test_end(&t))
+-- 
+2.39.2
+
diff --git a/queue-6.1/drm-i915-use-18-fast-wake-aux-sync-len.patch b/queue-6.1/drm-i915-use-18-fast-wake-aux-sync-len.patch
new file mode 100644 (file)
index 0000000..89e408c
--- /dev/null
@@ -0,0 +1,47 @@
+From a2fb42fd186c1071db9528ed94fcde5b8095d4ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 13:16:49 +0300
+Subject: drm/i915: Use 18 fast wake AUX sync len
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+[ Upstream commit 2d6f2f79e06571d41eb1223abebe9097511c9544 ]
+
+HW default for wake sync pulses is 18. 10 precharge and 8 preamble. There
+is no reason to change this especially as it is causing problems with
+certain eDP panels.
+
+v3: Change "Fixes:" commit
+v2: Remove "fast wake" repeat from subject
+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Fixes: e1c71f8f9180 ("drm/i915: Fix fast wake AUX sync len")
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8475
+Reviewed-by: Luca Coelho <luciano.coelho@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230530101649.2549949-1-jouni.hogander@intel.com
+(cherry picked from commit b29a20f7c4995a059ed764ce42389857426397c7)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp_aux.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 8325868770994..ab357161ccc3a 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void)
+ static int intel_dp_aux_fw_sync_len(void)
+ {
+-      int precharge = 16; /* 10-16 */
++      int precharge = 10; /* 10-16 */
+       int preamble = 8;
+       return precharge + preamble;
+-- 
+2.39.2
+
diff --git a/queue-6.1/ipv6-rpl-fix-route-of-death.patch b/queue-6.1/ipv6-rpl-fix-route-of-death.patch
new file mode 100644 (file)
index 0000000..016d441
--- /dev/null
@@ -0,0 +1,195 @@
+From 04e5cf8eb9ff1d6a6df6da32f40480aec460ec02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 11:06:17 -0700
+Subject: ipv6: rpl: Fix Route of Death.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit a2f4c143d76b1a47c91ef9bc46907116b111da0b ]
+
+A remote DoS vulnerability of RPL Source Routing is assigned CVE-2023-2156.
+
+The Source Routing Header (SRH) has the following format:
+
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  |  Next Header  |  Hdr Ext Len  | Routing Type  | Segments Left |
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  | CmprI | CmprE |  Pad  |               Reserved                |
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  |                                                               |
+  .                                                               .
+  .                        Addresses[1..n]                        .
+  .                                                               .
+  |                                                               |
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+The originator of an SRH places the first hop's IPv6 address in the IPv6
+header's IPv6 Destination Address and the second hop's IPv6 address as
+the first address in Addresses[1..n].
+
+The CmprI and CmprE fields indicate the number of prefix octets that are
+shared with the IPv6 Destination Address.  When CmprI or CmprE is not 0,
+Addresses[1..n] are compressed as follows:
+
+  1..n-1 : (16 - CmprI) bytes
+       n : (16 - CmprE) bytes
+
+Segments Left indicates the number of route segments remaining.  When the
+value is not zero, the SRH is forwarded to the next hop.  Its address
+is extracted from Addresses[n - Segment Left + 1] and swapped with IPv6
+Destination Address.
+
+When Segment Left is greater than or equal to 2, the size of SRH is not
+changed because Addresses[1..n-1] are decompressed and recompressed with
+CmprI.
+
+OTOH, when Segment Left changes from 1 to 0, the new SRH could have a
+different size because Addresses[1..n-1] are decompressed with CmprI and
+recompressed with CmprE.
+
+Let's say CmprI is 15 and CmprE is 0.  When we receive SRH with Segment
+Left >= 2, Addresses[1..n-1] have 1 byte for each, and Addresses[n] has
+16 bytes.  When Segment Left is 1, Addresses[1..n-1] is decompressed to
+16 bytes and not recompressed.  Finally, the new SRH will need more room
+in the header, and the size is (16 - 1) * (n - 1) bytes.
+
+Here the max value of n is 255 as Segment Left is u8, so in the worst case,
+we have to allocate 3825 bytes in the skb headroom.  However, now we only
+allocate a small fixed buffer that is IPV6_RPL_SRH_WORST_SWAP_SIZE (16 + 7
+bytes).  If the decompressed size overflows the room, skb_push() hits BUG()
+below [0].
+
+Instead of allocating the fixed buffer for every packet, let's allocate
+enough headroom only when we receive SRH with Segment Left 1.
+
+[0]:
+skbuff: skb_under_panic: text:ffffffff81c9f6e2 len:576 put:576 head:ffff8880070b5180 data:ffff8880070b4fb0 tail:0x70 end:0x140 dev:lo
+kernel BUG at net/core/skbuff.c:200!
+invalid opcode: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 154 Comm: python3 Not tainted 6.4.0-rc4-00190-gc308e9ec0047 #7
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+RIP: 0010:skb_panic (net/core/skbuff.c:200)
+Code: 4f 70 50 8b 87 bc 00 00 00 50 8b 87 b8 00 00 00 50 ff b7 c8 00 00 00 4c 8b 8f c0 00 00 00 48 c7 c7 80 6e 77 82 e8 ad 8b 60 ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90
+RSP: 0018:ffffc90000003da0 EFLAGS: 00000246
+RAX: 0000000000000085 RBX: ffff8880058a6600 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: ffff88807dc1c540 RDI: ffff88807dc1c540
+RBP: ffffc90000003e48 R08: ffffffff82b392c8 R09: 00000000ffffdfff
+R10: ffffffff82a592e0 R11: ffffffff82b092e0 R12: ffff888005b1c800
+R13: ffff8880070b51b8 R14: ffff888005b1ca18 R15: ffff8880070b5190
+FS:  00007f4539f0b740(0000) GS:ffff88807dc00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055670baf3000 CR3: 0000000005b0e000 CR4: 00000000007506f0
+PKRU: 55555554
+Call Trace:
+ <IRQ>
+ skb_push (net/core/skbuff.c:210)
+ ipv6_rthdr_rcv (./include/linux/skbuff.h:2880 net/ipv6/exthdrs.c:634 net/ipv6/exthdrs.c:718)
+ ip6_protocol_deliver_rcu (net/ipv6/ip6_input.c:437 (discriminator 5))
+ ip6_input_finish (./include/linux/rcupdate.h:805 net/ipv6/ip6_input.c:483)
+ __netif_receive_skb_one_core (net/core/dev.c:5494)
+ process_backlog (./include/linux/rcupdate.h:805 net/core/dev.c:5934)
+ __napi_poll (net/core/dev.c:6496)
+ net_rx_action (net/core/dev.c:6565 net/core/dev.c:6696)
+ __do_softirq (./arch/x86/include/asm/jump_label.h:27 ./include/linux/jump_label.h:207 ./include/trace/events/irq.h:142 kernel/softirq.c:572)
+ do_softirq (kernel/softirq.c:472 kernel/softirq.c:459)
+ </IRQ>
+ <TASK>
+ __local_bh_enable_ip (kernel/softirq.c:396)
+ __dev_queue_xmit (net/core/dev.c:4272)
+ ip6_finish_output2 (./include/net/neighbour.h:544 net/ipv6/ip6_output.c:134)
+ rawv6_sendmsg (./include/net/dst.h:458 ./include/linux/netfilter.h:303 net/ipv6/raw.c:656 net/ipv6/raw.c:914)
+ sock_sendmsg (net/socket.c:724 net/socket.c:747)
+ __sys_sendto (net/socket.c:2144)
+ __x64_sys_sendto (net/socket.c:2156 net/socket.c:2152 net/socket.c:2152)
+ do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
+RIP: 0033:0x7f453a138aea
+Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89
+RSP: 002b:00007ffcc212a1c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 00007ffcc212a288 RCX: 00007f453a138aea
+RDX: 0000000000000060 RSI: 00007f4539084c20 RDI: 0000000000000003
+RBP: 00007f4538308e80 R08: 00007ffcc212a300 R09: 000000000000001c
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: ffffffffc4653600 R14: 0000000000000001 R15: 00007f4539712d1b
+ </TASK>
+Modules linked in:
+
+Fixes: 8610c7c6e3bd ("net: ipv6: add support for rpl sr exthdr")
+Reported-by: Max VA
+Closes: https://www.interruptlabs.co.uk/articles/linux-ipv6-route-of-death
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230605180617.67284-1-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/rpl.h  |  3 ---
+ net/ipv6/exthdrs.c | 29 +++++++++++------------------
+ 2 files changed, 11 insertions(+), 21 deletions(-)
+
+diff --git a/include/net/rpl.h b/include/net/rpl.h
+index 308ef0a05caef..30fe780d1e7c8 100644
+--- a/include/net/rpl.h
++++ b/include/net/rpl.h
+@@ -23,9 +23,6 @@ static inline int rpl_init(void)
+ static inline void rpl_exit(void) {}
+ #endif
+-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
+-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
+-
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+                        unsigned char cmpre);
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index a8d961d3a477f..5fa0e37305d9d 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -569,24 +569,6 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
+               return -1;
+       }
+-      if (skb_cloned(skb)) {
+-              if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
+-                                   GFP_ATOMIC)) {
+-                      __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                      IPSTATS_MIB_OUTDISCARDS);
+-                      kfree_skb(skb);
+-                      return -1;
+-              }
+-      } else {
+-              err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
+-              if (unlikely(err)) {
+-                      kfree_skb(skb);
+-                      return -1;
+-              }
+-      }
+-
+-      hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
+-
+       if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
+                                                 hdr->cmpre))) {
+               kfree_skb(skb);
+@@ -630,6 +612,17 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
+       skb_pull(skb, ((hdr->hdrlen + 1) << 3));
+       skb_postpull_rcsum(skb, oldhdr,
+                          sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
++      if (unlikely(!hdr->segments_left)) {
++              if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
++                                   GFP_ATOMIC)) {
++                      __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
++                      kfree_skb(skb);
++                      kfree(buf);
++                      return -1;
++              }
++
++              oldhdr = ipv6_hdr(skb);
++      }
+       skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
+       skb_reset_network_header(skb);
+       skb_mac_header_rebuild(skb);
+-- 
+2.39.2
+
diff --git a/queue-6.1/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch b/queue-6.1/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch
new file mode 100644 (file)
index 0000000..ddd9a68
--- /dev/null
@@ -0,0 +1,40 @@
+From 8019a056b4068f636833ad83864bbaf7e9161eac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 20:28:15 +0200
+Subject: lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+[ Upstream commit 7c5d4801ecf0564c860033d89726b99723c55146 ]
+
+irq_cpu_rmap_release() calls cpu_rmap_put(), which may free the rmap.
+So we need to clear the pointer to our glue structure in rmap before
+doing that, not after.
+
+Fixes: 4e0473f1060a ("lib: cpu_rmap: Avoid use after free on rmap->obj array entries")
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/ZHo0vwquhOy3FaXc@decadent.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/cpu_rmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index e77f12bb3c774..1833ad73de6fc 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -268,8 +268,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
+       struct irq_glue *glue =
+               container_of(ref, struct irq_glue, notify.kref);
+-      cpu_rmap_put(glue->rmap);
+       glue->rmap->obj[glue->index] = NULL;
++      cpu_rmap_put(glue->rmap);
+       kfree(glue);
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.1/neighbour-fix-unaligned-access-to-pneigh_entry.patch b/queue-6.1/neighbour-fix-unaligned-access-to-pneigh_entry.patch
new file mode 100644 (file)
index 0000000..e8b247f
--- /dev/null
@@ -0,0 +1,41 @@
+From 91e3af3f7ec6df3c7b7dbb0e5a2535caa71a0a6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 09:54:32 +0800
+Subject: neighbour: fix unaligned access to pneigh_entry
+
+From: Qingfang DENG <qingfang.deng@siflower.com.cn>
+
+[ Upstream commit ed779fe4c9b5a20b4ab4fd6f3e19807445bb78c7 ]
+
+After the blamed commit, the member key is longer 4-byte aligned. On
+platforms that do not support unaligned access, e.g., MIPS32R2 with
+unaligned_action set to 1, this will trigger a crash when accessing
+an IPv6 pneigh_entry, as the key is cast to an in6_addr pointer.
+
+Change the type of the key to u32 to make it aligned.
+
+Fixes: 62dd93181aaa ("[IPV6] NDISC: Set per-entry is_router flag in Proxy NA.")
+Signed-off-by: Qingfang DENG <qingfang.deng@siflower.com.cn>
+Link: https://lore.kernel.org/r/20230601015432.159066-1-dqfext@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/neighbour.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 2f2a6023fb0e5..94a1599824d8f 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -180,7 +180,7 @@ struct pneigh_entry {
+       netdevice_tracker       dev_tracker;
+       u32                     flags;
+       u8                      protocol;
+-      u8                      key[];
++      u32                     key[];
+ };
+ /*
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-bcmgenet-fix-eee-implementation.patch b/queue-6.1/net-bcmgenet-fix-eee-implementation.patch
new file mode 100644 (file)
index 0000000..d773165
--- /dev/null
@@ -0,0 +1,141 @@
+From ebd18735891ab6bbe719122dfda13ef5daa313ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 14:43:47 -0700
+Subject: net: bcmgenet: Fix EEE implementation
+
+From: Florian Fainelli <florian.fainelli@broadcom.com>
+
+[ Upstream commit a9f31047baca57d47440c879cf259b86f900260c ]
+
+We had a number of short comings:
+
+- EEE must be re-evaluated whenever the state machine detects a link
+  change as wight be switching from a link partner with EEE
+  enabled/disabled
+
+- tx_lpi_enabled controls whether EEE should be enabled/disabled for the
+  transmit path, which applies to the TBUF block
+
+- We do not need to forcibly enable EEE upon system resume, as the PHY
+  state machine will trigger a link event that will do that, too
+
+Fixes: 6ef398ea60d9 ("net: bcmgenet: add EEE support")
+Signed-off-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://lore.kernel.org/r/20230606214348.2408018-1-florian.fainelli@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/broadcom/genet/bcmgenet.c    | 22 +++++++------------
+ .../net/ethernet/broadcom/genet/bcmgenet.h    |  3 +++
+ drivers/net/ethernet/broadcom/genet/bcmmii.c  |  5 +++++
+ 3 files changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 4da2becfa950c..1ae082eb9e905 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1290,7 +1290,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+       }
+ }
+-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
++                           bool tx_lpi_enabled)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+@@ -1310,7 +1311,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+       /* Enable EEE and switch to a 27Mhz clock automatically */
+       reg = bcmgenet_readl(priv->base + off);
+-      if (enable)
++      if (tx_lpi_enabled)
+               reg |= TBUF_EEE_EN | TBUF_PM_EN;
+       else
+               reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+@@ -1331,6 +1332,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+       priv->eee.eee_enabled = enable;
+       priv->eee.eee_active = enable;
++      priv->eee.tx_lpi_enabled = tx_lpi_enabled;
+ }
+ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+@@ -1346,6 +1348,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+       e->eee_enabled = p->eee_enabled;
+       e->eee_active = p->eee_active;
++      e->tx_lpi_enabled = p->tx_lpi_enabled;
+       e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+       return phy_ethtool_get_eee(dev->phydev, e);
+@@ -1355,7 +1358,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct ethtool_eee *p = &priv->eee;
+-      int ret = 0;
+       if (GENET_IS_V1(priv))
+               return -EOPNOTSUPP;
+@@ -1366,16 +1368,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+       p->eee_enabled = e->eee_enabled;
+       if (!p->eee_enabled) {
+-              bcmgenet_eee_enable_set(dev, false);
++              bcmgenet_eee_enable_set(dev, false, false);
+       } else {
+-              ret = phy_init_eee(dev->phydev, false);
+-              if (ret) {
+-                      netif_err(priv, hw, dev, "EEE initialization failed\n");
+-                      return ret;
+-              }
+-
++              p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+               bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+-              bcmgenet_eee_enable_set(dev, true);
++              bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+       }
+       return phy_ethtool_set_eee(dev->phydev, e);
+@@ -4274,9 +4271,6 @@ static int bcmgenet_resume(struct device *d)
+       if (!device_may_wakeup(d))
+               phy_resume(dev->phydev);
+-      if (priv->eee.eee_enabled)
+-              bcmgenet_eee_enable_set(dev, true);
+-
+       bcmgenet_netif_start(dev);
+       netif_device_attach(dev);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 946f6e283c4e6..1985c0ec4da2a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+                              enum bcmgenet_power_mode mode);
++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
++                           bool tx_lpi_enabled);
++
+ #endif /* __BCMGENET_H__ */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index ded0e64a9f6a1..bf9e246784b6e 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -88,6 +88,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
+               reg |= CMD_TX_EN | CMD_RX_EN;
+       }
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++
++      priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
++      bcmgenet_eee_enable_set(dev,
++                              priv->eee.eee_enabled && priv->eee.eee_active,
++                              priv->eee.tx_lpi_enabled);
+ }
+ /* setup netdev link state when PHY link status change and
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch b/queue-6.1/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch
new file mode 100644 (file)
index 0000000..1c0ea30
--- /dev/null
@@ -0,0 +1,56 @@
+From 8f45e638f0bdd40abc8c1a07d39b58c88d06fcda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 16:38:26 +0200
+Subject: net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+[ Upstream commit 5a59a58ec25d44f853c26bdbfda47d73b3067435 ]
+
+LAN9303 doesn't associate FDB (ALR) entries with VLANs, it has just one
+global Address Logic Resolution table [1].
+
+Ignore VID in port_fdb_{add|del} methods, go on with the global table. This
+is the same semantics as hellcreek or RZ/N1 implement.
+
+Visible symptoms:
+LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to delete 00:xx:xx:xx:xx:cf vid 1 from fdb: -2
+LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to add 00:xx:xx:xx:xx:cf vid 1 to fdb: -95
+
+[1] https://ww1.microchip.com/downloads/en/DeviceDoc/00002308A.pdf
+
+Fixes: 0620427ea0d6 ("net: dsa: lan9303: Add fdb/mdb manipulation")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://lore.kernel.org/r/20230531143826.477267-1-alexander.sverdlin@siemens.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/lan9303-core.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index 2e270b4791432..a08e7cbb23c91 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1199,8 +1199,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
+       struct lan9303 *chip = ds->priv;
+       dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+-      if (vid)
+-              return -EOPNOTSUPP;
+       return lan9303_alr_add_port(chip, addr, port, false);
+ }
+@@ -1212,8 +1210,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
+       struct lan9303 *chip = ds->priv;
+       dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+-      if (vid)
+-              return -EOPNOTSUPP;
+       lan9303_alr_del_port(chip, addr, port);
+       return 0;
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-enetc-correct-rx_bytes-statistics-of-xdp.patch b/queue-6.1/net-enetc-correct-rx_bytes-statistics-of-xdp.patch
new file mode 100644 (file)
index 0000000..9ed42c2
--- /dev/null
@@ -0,0 +1,43 @@
+From 2f7ff0c44b377895f8e8aba24603b1a5d6672a4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 17:46:59 +0800
+Subject: net: enetc: correct rx_bytes statistics of XDP
+
+From: Wei Fang <wei.fang@nxp.com>
+
+[ Upstream commit fdebd850cc065495abf1d64756496050bb22db67 ]
+
+The rx_bytes statistics of XDP are always zero, because rx_byte_cnt
+is not updated after it is initialized to 0. So fix it.
+
+Fixes: d1b15102dd16 ("net: enetc: add support for XDP_DROP and XDP_PASS")
+Signed-off-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index df7747e49bb84..25c303406e6b4 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1538,6 +1538,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+               enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+                                    &cleaned_cnt, &xdp_buff);
++              /* When set, the outer VLAN header is extracted and reported
++               * in the receive buffer descriptor. So rx_byte_cnt should
++               * add the length of the extracted VLAN header.
++               */
++              if (bd_status & ENETC_RXBD_FLAG_VLAN)
++                      rx_byte_cnt += VLAN_HLEN;
++              rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
++
+               xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+               switch (xdp_act) {
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-enetc-correct-the-statistics-of-rx-bytes.patch b/queue-6.1/net-enetc-correct-the-statistics-of-rx-bytes.patch
new file mode 100644 (file)
index 0000000..ddeba6d
--- /dev/null
@@ -0,0 +1,50 @@
+From 69e3698605a17fedfcb99ad88a9b4103100ffed3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 17:46:58 +0800
+Subject: net: enetc: correct the statistics of rx bytes
+
+From: Wei Fang <wei.fang@nxp.com>
+
+[ Upstream commit 7190d0ff0e17690a9b1279d84a06473600ba2060 ]
+
+The rx_bytes of struct net_device_stats should count the length of
+ethernet frames excluding the FCS. However, there are two problems
+with the rx_bytes statistics of the current enetc driver. one is
+that the length of VLAN header is not counted if the VLAN extraction
+feature is enabled. The other is that the length of L2 header is not
+counted, because eth_type_trans() is invoked before updating rx_bytes
+which will subtract the length of L2 header from skb->len.
+BTW, the rx_bytes statistics of XDP path also have similar problem,
+I will fix it in another patch.
+
+Fixes: a800abd3ecb9 ("net: enetc: move skb creation into enetc_build_skb")
+Signed-off-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index e96449eedfb54..df7747e49bb84 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1209,7 +1209,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+               if (!skb)
+                       break;
+-              rx_byte_cnt += skb->len;
++              /* When set, the outer VLAN header is extracted and reported
++               * in the receive buffer descriptor. So rx_byte_cnt should
++               * add the length of the extracted VLAN header.
++               */
++              if (bd_status & ENETC_RXBD_FLAG_VLAN)
++                      rx_byte_cnt += VLAN_HLEN;
++              rx_byte_cnt += skb->len + ETH_HLEN;
+               rx_frm_cnt++;
+               napi_gro_receive(napi, skb);
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch b/queue-6.1/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch
new file mode 100644 (file)
index 0000000..f6cba4a
--- /dev/null
@@ -0,0 +1,100 @@
+From b4b798cb7fc925ec7aa2dc6b6e8a4d27c465e9c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 12:13:05 +0900
+Subject: net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294
+
+From: Akihiro Suda <suda.gitsendemail@gmail.com>
+
+[ Upstream commit e209fee4118fe9a449d4d805361eb2de6796be39 ]
+
+With this commit, all the GIDs ("0 4294967294") can be written to the
+"net.ipv4.ping_group_range" sysctl.
+
+Note that 4294967295 (0xffffffff) is an invalid GID (see gid_valid() in
+include/linux/uidgid.h), and an attempt to register this number will cause
+-EINVAL.
+
+Prior to this commit, only up to GID 2147483647 could be covered.
+Documentation/networking/ip-sysctl.rst had "0 4294967295" as an example
+value, but this example was wrong and causing -EINVAL.
+
+Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind")
+Co-developed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/ip-sysctl.rst | 4 ++--
+ include/net/ping.h                     | 6 +-----
+ net/ipv4/sysctl_net_ipv4.c             | 8 ++++----
+ 3 files changed, 7 insertions(+), 11 deletions(-)
+
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 4ecb549fd052e..3301288a7c692 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -1247,8 +1247,8 @@ ping_group_range - 2 INTEGERS
+       Restrict ICMP_PROTO datagram sockets to users in the group range.
+       The default is "1 0", meaning, that nobody (not even root) may
+       create ping sockets.  Setting it to "100 100" would grant permissions
+-      to the single group. "0 4294967295" would enable it for the world, "100
+-      4294967295" would enable it for the users, but not daemons.
++      to the single group. "0 4294967294" would enable it for the world, "100
++      4294967294" would enable it for the users, but not daemons.
+ tcp_early_demux - BOOLEAN
+       Enable early demux for established TCP sockets.
+diff --git a/include/net/ping.h b/include/net/ping.h
+index 9233ad3de0ade..bc7779262e603 100644
+--- a/include/net/ping.h
++++ b/include/net/ping.h
+@@ -16,11 +16,7 @@
+ #define PING_HTABLE_SIZE      64
+ #define PING_HTABLE_MASK      (PING_HTABLE_SIZE-1)
+-/*
+- * gid_t is either uint or ushort.  We want to pass it to
+- * proc_dointvec_minmax(), so it must not be larger than MAX_INT
+- */
+-#define GID_T_MAX (((gid_t)~0U) >> 1)
++#define GID_T_MAX (((gid_t)~0U) - 1)
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ struct pingv6_ops {
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 39dbeb6071965..f68762ce4d8a3 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -34,8 +34,8 @@ static int ip_ttl_min = 1;
+ static int ip_ttl_max = 255;
+ static int tcp_syn_retries_min = 1;
+ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
+-static int ip_ping_group_range_min[] = { 0, 0 };
+-static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
++static unsigned long ip_ping_group_range_min[] = { 0, 0 };
++static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+ static u32 u32_max_div_HZ = UINT_MAX / HZ;
+ static int one_day_secs = 24 * 3600;
+ static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
+@@ -162,7 +162,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
+ {
+       struct user_namespace *user_ns = current_user_ns();
+       int ret;
+-      gid_t urange[2];
++      unsigned long urange[2];
+       kgid_t low, high;
+       struct ctl_table tmp = {
+               .data = &urange,
+@@ -175,7 +175,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
+       inet_get_ping_group_range_table(table, &low, &high);
+       urange[0] = from_kgid_munged(user_ns, low);
+       urange[1] = from_kgid_munged(user_ns, high);
+-      ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
++      ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (write && ret == 0) {
+               low = make_kgid(user_ns, urange[0]);
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch b/queue-6.1/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch
new file mode 100644 (file)
index 0000000..e78cb04
--- /dev/null
@@ -0,0 +1,43 @@
+From 549ecdda50d537b3e9feeb824d583857f64f8a9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 16:04:44 +0000
+Subject: net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit edf2e1d2019b2730d6076dbe4c040d37d7c10bbe ]
+
+skip_notify_on_dev_down ctl table expects this field
+to be an int (4 bytes), not a bool (1 byte).
+
+Because proc_dou8vec_minmax() was added in 5.13,
+this patch converts skip_notify_on_dev_down to an int.
+
+Following patch then converts the field to u8 and use proc_dou8vec_minmax().
+
+Fixes: 7c6bb7d2faaf ("net/ipv6: Add knob to skip DELROUTE message on device down")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Acked-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netns/ipv6.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index b4af4837d80b4..f6e6a3ab91489 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -53,7 +53,7 @@ struct netns_sysctl_ipv6 {
+       int seg6_flowlabel;
+       u32 ioam6_id;
+       u64 ioam6_id_wide;
+-      bool skip_notify_on_dev_down;
++      int skip_notify_on_dev_down;
+       u8 fib_notify_on_flag_change;
+ };
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch b/queue-6.1/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch
new file mode 100644 (file)
index 0000000..894aec4
--- /dev/null
@@ -0,0 +1,66 @@
+From 2e56db4160e1d33363cf89dfde4b53ab7aeff663 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 13:13:04 +0000
+Subject: net: sched: act_police: fix sparse errors in tcf_police_dump()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 682881ee45c81daa883dcd4fe613b0b0d988bb22 ]
+
+Fixes following sparse errors:
+
+net/sched/act_police.c:360:28: warning: dereference of noderef expression
+net/sched/act_police.c:362:45: warning: dereference of noderef expression
+net/sched/act_police.c:362:45: warning: dereference of noderef expression
+net/sched/act_police.c:368:28: warning: dereference of noderef expression
+net/sched/act_police.c:370:45: warning: dereference of noderef expression
+net/sched/act_police.c:370:45: warning: dereference of noderef expression
+net/sched/act_police.c:376:45: warning: dereference of noderef expression
+net/sched/act_police.c:376:45: warning: dereference of noderef expression
+
+Fixes: d1967e495a8d ("net_sched: act_police: add 2 new attributes to support police 64bit rate and peakrate")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_police.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index 0adb26e366a7b..94be21378e7ca 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -355,23 +355,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
+       opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
+       if (p->rate_present) {
+               psched_ratecfg_getrate(&opt.rate, &p->rate);
+-              if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
++              if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
+                   nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
+-                                    police->params->rate.rate_bytes_ps,
++                                    p->rate.rate_bytes_ps,
+                                     TCA_POLICE_PAD))
+                       goto nla_put_failure;
+       }
+       if (p->peak_present) {
+               psched_ratecfg_getrate(&opt.peakrate, &p->peak);
+-              if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
++              if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
+                   nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
+-                                    police->params->peak.rate_bytes_ps,
++                                    p->peak.rate_bytes_ps,
+                                     TCA_POLICE_PAD))
+                       goto nla_put_failure;
+       }
+       if (p->pps_present) {
+               if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
+-                                    police->params->ppsrate.rate_pkts_ps,
++                                    p->ppsrate.rate_pkts_ps,
+                                     TCA_POLICE_PAD))
+                       goto nla_put_failure;
+               if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch b/queue-6.1/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch
new file mode 100644 (file)
index 0000000..3b5e8ed
--- /dev/null
@@ -0,0 +1,571 @@
+From 013ddf678052e09c19838620cb030e6ed3183439 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:19:29 +0000
+Subject: net: sched: add rcu annotations around qdisc->qdisc_sleeping
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d636fc5dd692c8f4e00ae6e0359c0eceeb5d9bdb ]
+
+syzbot reported a race around qdisc->qdisc_sleeping [1]
+
+It is time we add proper annotations to reads and writes to/from
+qdisc->qdisc_sleeping.
+
+[1]
+BUG: KCSAN: data-race in dev_graft_qdisc / qdisc_lookup_rcu
+
+read to 0xffff8881286fc618 of 8 bytes by task 6928 on cpu 1:
+qdisc_lookup_rcu+0x192/0x2c0 net/sched/sch_api.c:331
+__tcf_qdisc_find+0x74/0x3c0 net/sched/cls_api.c:1174
+tc_get_tfilter+0x18f/0x990 net/sched/cls_api.c:2547
+rtnetlink_rcv_msg+0x7af/0x8c0 net/core/rtnetlink.c:6386
+netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
+rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
+netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
+netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
+sock_sendmsg_nosec net/socket.c:724 [inline]
+sock_sendmsg net/socket.c:747 [inline]
+____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
+___sys_sendmsg net/socket.c:2557 [inline]
+__sys_sendmsg+0x1e3/0x270 net/socket.c:2586
+__do_sys_sendmsg net/socket.c:2595 [inline]
+__se_sys_sendmsg net/socket.c:2593 [inline]
+__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+write to 0xffff8881286fc618 of 8 bytes by task 6912 on cpu 0:
+dev_graft_qdisc+0x4f/0x80 net/sched/sch_generic.c:1115
+qdisc_graft+0x7d0/0xb60 net/sched/sch_api.c:1103
+tc_modify_qdisc+0x712/0xf10 net/sched/sch_api.c:1693
+rtnetlink_rcv_msg+0x807/0x8c0 net/core/rtnetlink.c:6395
+netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
+rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
+netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
+netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
+sock_sendmsg_nosec net/socket.c:724 [inline]
+sock_sendmsg net/socket.c:747 [inline]
+____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
+___sys_sendmsg net/socket.c:2557 [inline]
+__sys_sendmsg+0x1e3/0x270 net/socket.c:2586
+__do_sys_sendmsg net/socket.c:2595 [inline]
+__se_sys_sendmsg net/socket.c:2593 [inline]
+__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 6912 Comm: syz-executor.5 Not tainted 6.4.0-rc3-syzkaller-00190-g0d85b27b0cc6 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023
+
+Fixes: 3a7d0d07a386 ("net: sched: extend Qdisc with rcu")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Vlad Buslov <vladbu@nvidia.com>
+Acked-by: Jamal Hadi Salim<jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h |  2 +-
+ include/net/sch_generic.h |  6 ++++--
+ net/core/dev.c            |  2 +-
+ net/sched/sch_api.c       | 26 ++++++++++++++++----------
+ net/sched/sch_fq_pie.c    |  2 ++
+ net/sched/sch_generic.c   | 30 +++++++++++++++---------------
+ net/sched/sch_mq.c        |  8 ++++----
+ net/sched/sch_mqprio.c    |  8 ++++----
+ net/sched/sch_pie.c       |  5 ++++-
+ net/sched/sch_red.c       |  5 ++++-
+ net/sched/sch_sfq.c       |  5 ++++-
+ net/sched/sch_taprio.c    |  6 +++---
+ net/sched/sch_teql.c      |  2 +-
+ 13 files changed, 63 insertions(+), 44 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 74e05b82f1bf7..d5eb3ab8e38f2 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -609,7 +609,7 @@ struct netdev_queue {
+       netdevice_tracker       dev_tracker;
+       struct Qdisc __rcu      *qdisc;
+-      struct Qdisc            *qdisc_sleeping;
++      struct Qdisc __rcu      *qdisc_sleeping;
+ #ifdef CONFIG_SYSFS
+       struct kobject          kobj;
+ #endif
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index af4aa66aaa4eb..989eb972fcaec 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -543,7 +543,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
+ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
+ {
+-      return qdisc->dev_queue->qdisc_sleeping;
++      return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
+ }
+ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+@@ -752,7 +752,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+-              if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
++
++              if (rcu_access_pointer(txq->qdisc) !=
++                  rcu_access_pointer(txq->qdisc_sleeping))
+                       return true;
+       }
+       return false;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ee00d3cfcb564..a2e3c6470ab3f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10518,7 +10518,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
+               return NULL;
+       netdev_init_one_queue(dev, queue, NULL);
+       RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
+-      queue->qdisc_sleeping = &noop_qdisc;
++      RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
+       rcu_assign_pointer(dev->ingress_queue, queue);
+ #endif
+       return queue;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index f6a7b876d5954..6fb345ec22641 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -308,7 +308,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+       if (dev_ingress_queue(dev))
+               q = qdisc_match_from_root(
+-                      dev_ingress_queue(dev)->qdisc_sleeping,
++                      rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
+                       handle);
+ out:
+       return q;
+@@ -327,7 +327,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
+       nq = dev_ingress_queue_rcu(dev);
+       if (nq)
+-              q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
++              q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
++                                        handle);
+ out:
+       return q;
+ }
+@@ -633,8 +634,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
+ void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+                                     u64 delta_ns)
+ {
+-      if (test_bit(__QDISC_STATE_DEACTIVATED,
+-                   &qdisc_root_sleeping(wd->qdisc)->state))
++      bool deactivated;
++
++      rcu_read_lock();
++      deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
++                             &qdisc_root_sleeping(wd->qdisc)->state);
++      rcu_read_unlock();
++      if (deactivated)
+               return;
+       if (hrtimer_is_queued(&wd->timer)) {
+@@ -1473,7 +1479,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                               }
+                               q = qdisc_leaf(p, clid);
+                       } else if (dev_ingress_queue(dev)) {
+-                              q = dev_ingress_queue(dev)->qdisc_sleeping;
++                              q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+                       }
+               } else {
+                       q = rtnl_dereference(dev->qdisc);
+@@ -1559,7 +1565,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                               }
+                               q = qdisc_leaf(p, clid);
+                       } else if (dev_ingress_queue_create(dev)) {
+-                              q = dev_ingress_queue(dev)->qdisc_sleeping;
++                              q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+                       }
+               } else {
+                       q = rtnl_dereference(dev->qdisc);
+@@ -1800,8 +1806,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
+               dev_queue = dev_ingress_queue(dev);
+               if (dev_queue &&
+-                  tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
+-                                     &q_idx, s_q_idx, false,
++                  tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
++                                     skb, cb, &q_idx, s_q_idx, false,
+                                      tca[TCA_DUMP_INVISIBLE]) < 0)
+                       goto done;
+@@ -2239,8 +2245,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+       dev_queue = dev_ingress_queue(dev);
+       if (dev_queue &&
+-          tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+-                              &t, s_t, false) < 0)
++          tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
++                              skb, tcm, cb, &t, s_t, false) < 0)
+               goto done;
+ done:
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index c699e5095607d..591d87d5e5c0f 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -379,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
+       spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+       u32 idx;
++      rcu_read_lock();
+       root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spin_lock(root_lock);
+@@ -391,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
+               mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
+       spin_unlock(root_lock);
++      rcu_read_unlock();
+ }
+ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index a9aadc4e68581..ee43e8ac039ed 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
+ static struct netdev_queue noop_netdev_queue = {
+       RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
+-      .qdisc_sleeping =       &noop_qdisc,
++      RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
+ };
+ struct Qdisc noop_qdisc = {
+@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+                             struct Qdisc *qdisc)
+ {
+-      struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
++      struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+       spinlock_t *root_lock;
+       root_lock = qdisc_lock(oqdisc);
+@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+       /* ... and graft new one */
+       if (qdisc == NULL)
+               qdisc = &noop_qdisc;
+-      dev_queue->qdisc_sleeping = qdisc;
++      rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+       rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+       spin_unlock_bh(root_lock);
+@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
+                                    struct netdev_queue *dev_queue,
+                                    void *_qdisc_default)
+ {
+-      struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
++      struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+       struct Qdisc *qdisc_default = _qdisc_default;
+       if (qdisc) {
+               rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+-              dev_queue->qdisc_sleeping = qdisc_default;
++              rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
+               qdisc_put(qdisc);
+       }
+@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
+       if (!netif_is_multiqueue(dev))
+               qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+-      dev_queue->qdisc_sleeping = qdisc;
++      rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+ }
+ static void attach_default_qdiscs(struct net_device *dev)
+@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev)
+       if (!netif_is_multiqueue(dev) ||
+           dev->priv_flags & IFF_NO_QUEUE) {
+               netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+-              qdisc = txq->qdisc_sleeping;
++              qdisc = rtnl_dereference(txq->qdisc_sleeping);
+               rcu_assign_pointer(dev->qdisc, qdisc);
+               qdisc_refcount_inc(qdisc);
+       } else {
+@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev)
+               netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+               dev->priv_flags |= IFF_NO_QUEUE;
+               netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+-              qdisc = txq->qdisc_sleeping;
++              qdisc = rtnl_dereference(txq->qdisc_sleeping);
+               rcu_assign_pointer(dev->qdisc, qdisc);
+               qdisc_refcount_inc(qdisc);
+               dev->priv_flags ^= IFF_NO_QUEUE;
+@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev,
+                                struct netdev_queue *dev_queue,
+                                void *_need_watchdog)
+ {
+-      struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
++      struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+       int *need_watchdog_p = _need_watchdog;
+       if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev,
+       struct Qdisc *qdisc;
+       bool nolock;
+-      qdisc = dev_queue->qdisc_sleeping;
++      qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+       if (!qdisc)
+               return;
+@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
+               int val;
+               dev_queue = netdev_get_tx_queue(dev, i);
+-              q = dev_queue->qdisc_sleeping;
++              q = rtnl_dereference(dev_queue->qdisc_sleeping);
+               root_lock = qdisc_lock(q);
+               spin_lock_bh(root_lock);
+@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate);
+ static int qdisc_change_tx_queue_len(struct net_device *dev,
+                                    struct netdev_queue *dev_queue)
+ {
+-      struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
++      struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+       const struct Qdisc_ops *ops = qdisc->ops;
+       if (ops->change_tx_queue_len)
+@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
+       unsigned int i;
+       for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
+-              qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
++              qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
+               /* Only update the default qdiscs we created,
+                * qdiscs with handles are always hashed.
+                */
+@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
+                       qdisc_hash_del(qdisc);
+       }
+       for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
+-              qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
++              qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
+               if (qdisc != &noop_qdisc && !qdisc->handle)
+                       qdisc_hash_add(qdisc, false);
+       }
+@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
+       struct Qdisc *qdisc = _qdisc;
+       rcu_assign_pointer(dev_queue->qdisc, qdisc);
+-      dev_queue->qdisc_sleeping = qdisc;
++      rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+ }
+ void dev_init_scheduler(struct net_device *dev)
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index d0bc660d7401f..c860119a8f091 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
+        * qdisc totals are added at end.
+        */
+       for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+-              qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++              qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
+               spin_lock_bh(qdisc_lock(qdisc));
+               gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
+ {
+       struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+-      return dev_queue->qdisc_sleeping;
++      return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ static unsigned long mq_find(struct Qdisc *sch, u32 classid)
+@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
+       tcm->tcm_parent = TC_H_ROOT;
+       tcm->tcm_handle |= TC_H_MIN(cl);
+-      tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++      tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+       return 0;
+ }
+@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ {
+       struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+-      sch = dev_queue->qdisc_sleeping;
++      sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+       if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
+           qdisc_qstats_copy(d, sch) < 0)
+               return -1;
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 4c68abaa289bd..9f26fb7d5823c 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -399,7 +399,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+        * qdisc totals are added at end.
+        */
+       for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+-              qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++              qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
+               spin_lock_bh(qdisc_lock(qdisc));
+               gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+@@ -449,7 +449,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+       if (!dev_queue)
+               return NULL;
+-      return dev_queue->qdisc_sleeping;
++      return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
+@@ -482,7 +482,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+               tcm->tcm_parent = (tc < 0) ? 0 :
+                       TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                 TC_H_MIN(tc + TC_H_MIN_PRIORITY));
+-              tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++              tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+       } else {
+               tcm->tcm_parent = TC_H_ROOT;
+               tcm->tcm_info = 0;
+@@ -538,7 +538,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+       } else {
+               struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+-              sch = dev_queue->qdisc_sleeping;
++              sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+               if (gnet_stats_copy_basic(d, sch->cpu_bstats,
+                                         &sch->bstats, true) < 0 ||
+                   qdisc_qstats_copy(d, sch) < 0)
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index 265c238047a42..b60b31ef71cc5 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
+ {
+       struct pie_sched_data *q = from_timer(q, t, adapt_timer);
+       struct Qdisc *sch = q->sch;
+-      spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++      spinlock_t *root_lock;
++      rcu_read_lock();
++      root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spin_lock(root_lock);
+       pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
+@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
+       if (q->params.tupdate)
+               mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
+       spin_unlock(root_lock);
++      rcu_read_unlock();
+ }
+ static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 98129324e1573..16277b6a0238d 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
+ {
+       struct red_sched_data *q = from_timer(q, t, adapt_timer);
+       struct Qdisc *sch = q->sch;
+-      spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++      spinlock_t *root_lock;
++      rcu_read_lock();
++      root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spin_lock(root_lock);
+       red_adaptative_algo(&q->parms, &q->vars);
+       mod_timer(&q->adapt_timer, jiffies + HZ/2);
+       spin_unlock(root_lock);
++      rcu_read_unlock();
+ }
+ static int red_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index abd436307d6a8..66dcb18638fea 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
+ {
+       struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
+       struct Qdisc *sch = q->sch;
+-      spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++      spinlock_t *root_lock;
+       siphash_key_t nkey;
+       get_random_bytes(&nkey, sizeof(nkey));
++      rcu_read_lock();
++      root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spin_lock(root_lock);
+       q->perturbation = nkey;
+       if (!q->filter_list && q->tail)
+@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)
+       if (q->perturb_period)
+               mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
++      rcu_read_unlock();
+ }
+ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index c322a61eaeeac..a274a9332f333 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -2050,7 +2050,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+       if (!dev_queue)
+               return NULL;
+-      return dev_queue->qdisc_sleeping;
++      return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+@@ -2069,7 +2069,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
+       tcm->tcm_parent = TC_H_ROOT;
+       tcm->tcm_handle |= TC_H_MIN(cl);
+-      tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++      tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+       return 0;
+ }
+@@ -2081,7 +2081,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ {
+       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+-      sch = dev_queue->qdisc_sleeping;
++      sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+       if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
+           qdisc_qstats_copy(d, sch) < 0)
+               return -1;
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index 16f9238aa51d1..7721239c185fb 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -297,7 +297,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
+               struct net_device *slave = qdisc_dev(q);
+               struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+-              if (slave_txq->qdisc_sleeping != q)
++              if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
+                       continue;
+               if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
+                   !netif_running(slave)) {
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch b/queue-6.1/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch
new file mode 100644 (file)
index 0000000..632c131
--- /dev/null
@@ -0,0 +1,37 @@
+From 864869b7e378af630bc22605f40d7aab2a9f5b83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 10:23:01 +0800
+Subject: net: sched: fix possible refcount leak in tc_chain_tmplt_add()
+
+From: Hangyu Hua <hbh25y@gmail.com>
+
+[ Upstream commit 44f8baaf230c655c249467ca415b570deca8df77 ]
+
+try_module_get will be called in tcf_proto_lookup_ops. So module_put needs
+to be called to drop the refcount if ops don't implement the required
+function.
+
+Fixes: 9f407f1768d3 ("net: sched: introduce chain templates")
+Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index b51d80a2fece3..abaf75300497d 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2780,6 +2780,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+               return PTR_ERR(ops);
+       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+               NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
++              module_put(ops->owner);
+               return -EOPNOTSUPP;
+       }
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch b/queue-6.1/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch
new file mode 100644 (file)
index 0000000..5f2eb54
--- /dev/null
@@ -0,0 +1,129 @@
+From 1a2c7c6d385424016463d6d5376d80063922ef66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 12:37:47 +0000
+Subject: net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cd2b8113c2e8b9f5a88a942e1eaca61eba401b85 ]
+
+We got multiple syzbot reports, all duplicates of the following [1]
+
+syzbot managed to install fq_pie with a zero TCA_FQ_PIE_QUANTUM,
+thus triggering infinite loops.
+
+Use limits similar to sch_fq, with commits
+3725a269815b ("pkt_sched: fq: avoid hang when quantum 0") and
+d9e15a273306 ("pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM")
+
+[1]
+watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [swapper/0:0]
+Modules linked in:
+irq event stamp: 172817
+hardirqs last enabled at (172816): [<ffff80001242fde4>] __el1_irq arch/arm64/kernel/entry-common.c:476 [inline]
+hardirqs last enabled at (172816): [<ffff80001242fde4>] el1_interrupt+0x58/0x68 arch/arm64/kernel/entry-common.c:486
+hardirqs last disabled at (172817): [<ffff80001242fdb0>] __el1_irq arch/arm64/kernel/entry-common.c:468 [inline]
+hardirqs last disabled at (172817): [<ffff80001242fdb0>] el1_interrupt+0x24/0x68 arch/arm64/kernel/entry-common.c:486
+softirqs last enabled at (167634): [<ffff800008020c1c>] softirq_handle_end kernel/softirq.c:414 [inline]
+softirqs last enabled at (167634): [<ffff800008020c1c>] __do_softirq+0xac0/0xd54 kernel/softirq.c:600
+softirqs last disabled at (167701): [<ffff80000802a660>] ____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.4.0-rc3-syzkaller-geb0f1697d729 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/28/2023
+pstate: 80400005 (Nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246
+lr : fq_pie_qdisc_dequeue+0xe4/0x8ac net/sched/sch_fq_pie.c:240
+sp : ffff800008007210
+x29: ffff800008007280 x28: ffff0000c86f7890 x27: ffff0000cb20c2e8
+x26: ffff0000cb20c2f0 x25: dfff800000000000 x24: ffff0000cb20c2e0
+x23: ffff0000c86f7880 x22: 0000000000000040 x21: 1fffe000190def10
+x20: ffff0000cb20c2e0 x19: ffff0000cb20c2e0 x18: ffff800008006e60
+x17: 0000000000000000 x16: ffff80000850af6c x15: 0000000000000302
+x14: 0000000000000100 x13: 0000000000000000 x12: 0000000000000001
+x11: 0000000000000302 x10: 0000000000000100 x9 : 0000000000000000
+x8 : 0000000000000000 x7 : ffff80000841c468 x6 : 0000000000000000
+x5 : 0000000000000001 x4 : 0000000000000001 x3 : 0000000000000000
+x2 : ffff0000cb20c2e0 x1 : ffff0000cb20c2e0 x0 : 0000000000000001
+Call trace:
+fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246
+dequeue_skb net/sched/sch_generic.c:292 [inline]
+qdisc_restart net/sched/sch_generic.c:397 [inline]
+__qdisc_run+0x1fc/0x231c net/sched/sch_generic.c:415
+__dev_xmit_skb net/core/dev.c:3868 [inline]
+__dev_queue_xmit+0xc80/0x3318 net/core/dev.c:4210
+dev_queue_xmit include/linux/netdevice.h:3085 [inline]
+neigh_connected_output+0x2f8/0x38c net/core/neighbour.c:1581
+neigh_output include/net/neighbour.h:544 [inline]
+ip6_finish_output2+0xd60/0x1a1c net/ipv6/ip6_output.c:134
+__ip6_finish_output net/ipv6/ip6_output.c:195 [inline]
+ip6_finish_output+0x538/0x8c8 net/ipv6/ip6_output.c:206
+NF_HOOK_COND include/linux/netfilter.h:292 [inline]
+ip6_output+0x270/0x594 net/ipv6/ip6_output.c:227
+dst_output include/net/dst.h:458 [inline]
+NF_HOOK include/linux/netfilter.h:303 [inline]
+ndisc_send_skb+0xc30/0x1790 net/ipv6/ndisc.c:508
+ndisc_send_rs+0x47c/0x5d4 net/ipv6/ndisc.c:718
+addrconf_rs_timer+0x300/0x58c net/ipv6/addrconf.c:3936
+call_timer_fn+0x19c/0x8cc kernel/time/timer.c:1700
+expire_timers kernel/time/timer.c:1751 [inline]
+__run_timers+0x55c/0x734 kernel/time/timer.c:2022
+run_timer_softirq+0x7c/0x114 kernel/time/timer.c:2035
+__do_softirq+0x2d0/0xd54 kernel/softirq.c:571
+____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80
+call_on_irq_stack+0x24/0x4c arch/arm64/kernel/entry.S:882
+do_softirq_own_stack+0x20/0x2c arch/arm64/kernel/irq.c:85
+invoke_softirq kernel/softirq.c:452 [inline]
+__irq_exit_rcu+0x28c/0x534 kernel/softirq.c:650
+irq_exit_rcu+0x14/0x84 kernel/softirq.c:662
+__el1_irq arch/arm64/kernel/entry-common.c:472 [inline]
+el1_interrupt+0x38/0x68 arch/arm64/kernel/entry-common.c:486
+el1h_64_irq_handler+0x18/0x24 arch/arm64/kernel/entry-common.c:491
+el1h_64_irq+0x64/0x68 arch/arm64/kernel/entry.S:587
+__daif_local_irq_enable arch/arm64/include/asm/irqflags.h:33 [inline]
+arch_local_irq_enable+0x8/0xc arch/arm64/include/asm/irqflags.h:55
+cpuidle_idle_call kernel/sched/idle.c:170 [inline]
+do_idle+0x1f0/0x4e8 kernel/sched/idle.c:282
+cpu_startup_entry+0x24/0x28 kernel/sched/idle.c:379
+rest_init+0x2dc/0x2f4 init/main.c:735
+start_kernel+0x0/0x55c init/main.c:834
+start_kernel+0x3f0/0x55c init/main.c:1088
+__primary_switched+0xb8/0xc0 arch/arm64/kernel/head.S:523
+
+Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_fq_pie.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 6980796d435d9..c699e5095607d 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -201,6 +201,11 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+       return NET_XMIT_CN;
+ }
++static struct netlink_range_validation fq_pie_q_range = {
++      .min = 1,
++      .max = 1 << 20,
++};
++
+ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+       [TCA_FQ_PIE_LIMIT]              = {.type = NLA_U32},
+       [TCA_FQ_PIE_FLOWS]              = {.type = NLA_U32},
+@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+       [TCA_FQ_PIE_TUPDATE]            = {.type = NLA_U32},
+       [TCA_FQ_PIE_ALPHA]              = {.type = NLA_U32},
+       [TCA_FQ_PIE_BETA]               = {.type = NLA_U32},
+-      [TCA_FQ_PIE_QUANTUM]            = {.type = NLA_U32},
++      [TCA_FQ_PIE_QUANTUM]            =
++                      NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
+       [TCA_FQ_PIE_MEMORY_LIMIT]       = {.type = NLA_U32},
+       [TCA_FQ_PIE_ECN_PROB]           = {.type = NLA_U32},
+       [TCA_FQ_PIE_ECN]                = {.type = NLA_U32},
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-sched-move-rtm_tca_policy-declaration-to-include.patch b/queue-6.1/net-sched-move-rtm_tca_policy-declaration-to-include.patch
new file mode 100644 (file)
index 0000000..eec22f8
--- /dev/null
@@ -0,0 +1,54 @@
+From 5308463c28753e45ebfe71c4f7df9da838cda6c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:42:33 +0000
+Subject: net: sched: move rtm_tca_policy declaration to include file
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 886bc7d6ed3357975c5f1d3c784da96000d4bbb4 ]
+
+rtm_tca_policy is used from net/sched/sch_api.c and net/sched/cls_api.c,
+thus should be declared in an include file.
+
+This fixes the following sparse warning:
+net/sched/sch_api.c:1434:25: warning: symbol 'rtm_tca_policy' was not declared. Should it be static?
+
+Fixes: e331473fee3d ("net/sched: cls_api: add missing validation of netlink attributes")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/pkt_sched.h | 2 ++
+ net/sched/cls_api.c     | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 38207873eda69..8ab75128512ab 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -128,6 +128,8 @@ static inline void qdisc_run(struct Qdisc *q)
+       }
+ }
++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
++
+ /* Calculate maximal size of packet seen by hard_start_xmit
+    routine of this device.
+  */
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 7b2aa04a7cdfd..b51d80a2fece3 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -41,8 +41,6 @@
+ #include <net/tc_act/tc_gate.h>
+ #include <net/flow_offload.h>
+-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+-
+ /* The list of all installed classifier types */
+ static LIST_HEAD(tcf_proto_base);
+-- 
+2.39.2
+
diff --git a/queue-6.1/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch b/queue-6.1/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch
new file mode 100644 (file)
index 0000000..bad6c2e
--- /dev/null
@@ -0,0 +1,89 @@
+From 2623b5953e076f94077e6a74291dfc2be26ede33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 16:41:52 +0800
+Subject: net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT
+
+From: Wen Gu <guwen@linux.alibaba.com>
+
+[ Upstream commit c308e9ec004721a656c193243eab61a8be324657 ]
+
+SMCRv1 has a similar issue to SMCRv2 (see link below) that may access
+invalid MRs of RMBs when construct LLC ADD LINK CONT messages.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000014
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 5 PID: 48 Comm: kworker/5:0 Kdump: loaded Tainted: G W   E      6.4.0-rc3+ #49
+ Workqueue: events smc_llc_add_link_work [smc]
+ RIP: 0010:smc_llc_add_link_cont+0x160/0x270 [smc]
+ RSP: 0018:ffffa737801d3d50 EFLAGS: 00010286
+ RAX: ffff964f82144000 RBX: ffffa737801d3dd8 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff964f81370c30
+ RBP: ffffa737801d3dd4 R08: ffff964f81370000 R09: ffffa737801d3db0
+ R10: 0000000000000001 R11: 0000000000000060 R12: ffff964f82e70000
+ R13: ffff964f81370c38 R14: ffffa737801d3dd3 R15: 0000000000000001
+ FS:  0000000000000000(0000) GS:ffff9652bfd40000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000014 CR3: 000000008fa20004 CR4: 00000000003706e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+  <TASK>
+  smc_llc_srv_rkey_exchange+0xa7/0x190 [smc]
+  smc_llc_srv_add_link+0x3ae/0x5a0 [smc]
+  smc_llc_add_link_work+0xb8/0x140 [smc]
+  process_one_work+0x1e5/0x3f0
+  worker_thread+0x4d/0x2f0
+  ? __pfx_worker_thread+0x10/0x10
+  kthread+0xe5/0x120
+  ? __pfx_kthread+0x10/0x10
+  ret_from_fork+0x2c/0x50
+  </TASK>
+
+When an alernate RNIC is available in system, SMC will try to add a new
+link based on the RNIC for resilience. All the RMBs in use will be mapped
+to the new link. Then the RMBs' MRs corresponding to the new link will
+be filled into LLC messages. For SMCRv1, they are ADD LINK CONT messages.
+
+However smc_llc_add_link_cont() may mistakenly access to unused RMBs which
+haven't been mapped to the new link and have no valid MRs, thus causing a
+crash. So this patch fixes it.
+
+Fixes: 87f88cda2128 ("net/smc: rkey processing for a new link as SMC client")
+Link: https://lore.kernel.org/r/1685101741-74826-3-git-send-email-guwen@linux.alibaba.com
+Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Reviewed-by: Tony Lu <tonylu@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_llc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 3008dfdf7c55e..760f8bbff822e 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -851,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+       addc_llc->num_rkeys = *num_rkeys_todo;
+       n = *num_rkeys_todo;
+       for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
++              while (*buf_pos && !(*buf_pos)->used)
++                      *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+               if (!*buf_pos) {
+                       addc_llc->num_rkeys = addc_llc->num_rkeys -
+                                             *num_rkeys_todo;
+@@ -867,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+               (*num_rkeys_todo)--;
+               *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+-              while (*buf_pos && !(*buf_pos)->used)
+-                      *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+       }
+       addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
+       addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
+-- 
+2.39.2
+
diff --git a/queue-6.1/netfilter-conntrack-fix-null-pointer-dereference-in-.patch b/queue-6.1/netfilter-conntrack-fix-null-pointer-dereference-in-.patch
new file mode 100644 (file)
index 0000000..7763828
--- /dev/null
@@ -0,0 +1,59 @@
+From 2d451b88e38b4838eb668e9d8b20d3e08cdfa728 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 12:25:26 +0200
+Subject: netfilter: conntrack: fix NULL pointer dereference in
+ nf_confirm_cthelper
+
+From: Tijs Van Buggenhout <tijs.van.buggenhout@axsguard.com>
+
+[ Upstream commit e1f543dc660b44618a1bd72ddb4ca0828a95f7ad ]
+
+An nf_conntrack_helper from nf_conn_help may become NULL after DNAT.
+
+Observed when TCP port 1720 (Q931_PORT), associated with h323 conntrack
+helper, is DNAT'ed to another destination port (e.g. 1730), while
+nfqueue is being used for final acceptance (e.g. snort).
+
+This happenned after transition from kernel 4.14 to 5.10.161.
+
+Workarounds:
+ * keep the same port (1720) in DNAT
+ * disable nfqueue
+ * disable/unload h323 NAT helper
+
+$ linux-5.10/scripts/decode_stacktrace.sh vmlinux < /tmp/kernel.log
+BUG: kernel NULL pointer dereference, address: 0000000000000084
+[..]
+RIP: 0010:nf_conntrack_update (net/netfilter/nf_conntrack_core.c:2080 net/netfilter/nf_conntrack_core.c:2134) nf_conntrack
+[..]
+nfqnl_reinject (net/netfilter/nfnetlink_queue.c:237) nfnetlink_queue
+nfqnl_recv_verdict (net/netfilter/nfnetlink_queue.c:1230) nfnetlink_queue
+nfnetlink_rcv_msg (net/netfilter/nfnetlink.c:241) nfnetlink
+[..]
+
+Fixes: ee04805ff54a ("netfilter: conntrack: make conntrack userspace helpers work again")
+Signed-off-by: Tijs Van Buggenhout <tijs.van.buggenhout@axsguard.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index a0e9c7af08467..7960262966094 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2277,6 +2277,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+               return 0;
+       helper = rcu_dereference(help->helper);
++      if (!helper)
++              return 0;
++
+       if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+               return 0;
+-- 
+2.39.2
+
diff --git a/queue-6.1/netfilter-ipset-add-schedule-point-in-call_ad.patch b/queue-6.1/netfilter-ipset-add-schedule-point-in-call_ad.patch
new file mode 100644 (file)
index 0000000..4e5e9e3
--- /dev/null
@@ -0,0 +1,100 @@
+From 0835eef789b29b08839ebdd63f958fc4f715d9cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 May 2023 10:33:00 -0700
+Subject: netfilter: ipset: Add schedule point in call_ad().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 24e227896bbf003165e006732dccb3516f87f88e ]
+
+syzkaller found a repro that causes Hung Task [0] with ipset.  The repro
+first creates an ipset and then tries to delete a large number of IPs
+from the ipset concurrently:
+
+  IPSET_ATTR_IPADDR_IPV4 : 172.20.20.187
+  IPSET_ATTR_CIDR        : 2
+
+The first deleting thread hogs a CPU with nfnl_lock(NFNL_SUBSYS_IPSET)
+held, and other threads wait for it to be released.
+
+Previously, the same issue existed in set->variant->uadt() that could run
+so long under ip_set_lock(set).  Commit 5e29dc36bd5e ("netfilter: ipset:
+Rework long task execution when adding/deleting entries") tried to fix it,
+but the issue still exists in the caller with another mutex.
+
+While adding/deleting many IPs, we should release the CPU periodically to
+prevent someone from abusing ipset to hang the system.
+
+Note we need to increment the ipset's refcnt to prevent the ipset from
+being destroyed while rescheduling.
+
+[0]:
+INFO: task syz-executor174:268 blocked for more than 143 seconds.
+      Not tainted 6.4.0-rc1-00145-gba79e9a73284 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+task:syz-executor174 state:D stack:0     pid:268   ppid:260    flags:0x0000000d
+Call trace:
+ __switch_to+0x308/0x714 arch/arm64/kernel/process.c:556
+ context_switch kernel/sched/core.c:5343 [inline]
+ __schedule+0xd84/0x1648 kernel/sched/core.c:6669
+ schedule+0xf0/0x214 kernel/sched/core.c:6745
+ schedule_preempt_disabled+0x58/0xf0 kernel/sched/core.c:6804
+ __mutex_lock_common kernel/locking/mutex.c:679 [inline]
+ __mutex_lock+0x6fc/0xdb0 kernel/locking/mutex.c:747
+ __mutex_lock_slowpath+0x14/0x20 kernel/locking/mutex.c:1035
+ mutex_lock+0x98/0xf0 kernel/locking/mutex.c:286
+ nfnl_lock net/netfilter/nfnetlink.c:98 [inline]
+ nfnetlink_rcv_msg+0x480/0x70c net/netfilter/nfnetlink.c:295
+ netlink_rcv_skb+0x1c0/0x350 net/netlink/af_netlink.c:2546
+ nfnetlink_rcv+0x18c/0x199c net/netfilter/nfnetlink.c:658
+ netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+ netlink_unicast+0x664/0x8cc net/netlink/af_netlink.c:1365
+ netlink_sendmsg+0x6d0/0xa4c net/netlink/af_netlink.c:1913
+ sock_sendmsg_nosec net/socket.c:724 [inline]
+ sock_sendmsg net/socket.c:747 [inline]
+ ____sys_sendmsg+0x4b8/0x810 net/socket.c:2503
+ ___sys_sendmsg net/socket.c:2557 [inline]
+ __sys_sendmsg+0x1f8/0x2a4 net/socket.c:2586
+ __do_sys_sendmsg net/socket.c:2595 [inline]
+ __se_sys_sendmsg net/socket.c:2593 [inline]
+ __arm64_sys_sendmsg+0x80/0x94 net/socket.c:2593
+ __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
+ invoke_syscall+0x84/0x270 arch/arm64/kernel/syscall.c:52
+ el0_svc_common+0x134/0x24c arch/arm64/kernel/syscall.c:142
+ do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:193
+ el0_svc+0x2c/0x7c arch/arm64/kernel/entry-common.c:637
+ el0t_64_sync_handler+0x84/0xf0 arch/arm64/kernel/entry-common.c:655
+ el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591
+
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Fixes: a7b4f989a629 ("netfilter: ipset: IP set core support")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Acked-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_core.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 46ebee9400dab..9a6b64779e644 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1694,6 +1694,14 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+       bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
+       do {
++              if (retried) {
++                      __ip_set_get(set);
++                      nfnl_unlock(NFNL_SUBSYS_IPSET);
++                      cond_resched();
++                      nfnl_lock(NFNL_SUBSYS_IPSET);
++                      __ip_set_put(set);
++              }
++
+               ip_set_lock(set);
+               ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
+               ip_set_unlock(set);
+-- 
+2.39.2
+
diff --git a/queue-6.1/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch b/queue-6.1/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch
new file mode 100644 (file)
index 0000000..0b695ec
--- /dev/null
@@ -0,0 +1,34 @@
+From 113dbd23df0f6d81ac68370b52991ae278991df3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 16:32:44 +0200
+Subject: netfilter: nf_tables: out-of-bound check in chain blob
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 08e42a0d3ad30f276f9597b591f975971a1b0fcf ]
+
+Add current size of rule expressions to the boundary check.
+
+Fixes: 2c865a8a28a1 ("netfilter: nf_tables: add rule blob layout")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 31775d54f4b40..437891cb8c417 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8723,7 +8723,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
+                               continue;
+                       }
+-                      if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
++                      if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary))
+                               return -ENOMEM;
+                       memcpy(data + size, expr, expr->ops->size);
+-- 
+2.39.2
+
diff --git a/queue-6.1/netfilter-nft_bitwise-fix-register-tracking.patch b/queue-6.1/netfilter-nft_bitwise-fix-register-tracking.patch
new file mode 100644 (file)
index 0000000..bc2d28a
--- /dev/null
@@ -0,0 +1,39 @@
+From 562c65a29f55694dc85f14b34b2d885ee0e0d70d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 15:07:24 +0100
+Subject: netfilter: nft_bitwise: fix register tracking
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit 14e8b293903785590a0ef168745ac84250cb1f4c ]
+
+At the end of `nft_bitwise_reduce`, there is a loop which is intended to
+update the bitwise expression associated with each tracked destination
+register.  However, currently, it just updates the first register
+repeatedly.  Fix it.
+
+Fixes: 34cc9e52884a ("netfilter: nf_tables: cancel tracking for clobbered destination registers")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_bitwise.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index e6e402b247d09..b84312df9aa1e 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -322,7 +322,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
+       dreg = priv->dreg;
+       regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
+       for (i = 0; i < regcount; i++, dreg++)
+-              track->regs[priv->dreg].bitwise = expr;
++              track->regs[dreg].bitwise = expr;
+       return false;
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.1/platform-surface-aggregator-allow-completion-work-it.patch b/queue-6.1/platform-surface-aggregator-allow-completion-work-it.patch
new file mode 100644 (file)
index 0000000..b87e864
--- /dev/null
@@ -0,0 +1,67 @@
+From a12eb36ca44a9f7fcd3def4a52b22d1da58c0e7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 23:01:10 +0200
+Subject: platform/surface: aggregator: Allow completion work-items to be
+ executed in parallel
+
+From: Maximilian Luz <luzmaximilian@gmail.com>
+
+[ Upstream commit 539e0a7f9105d19c00629c3f4da00330488e8c60 ]
+
+Currently, event completion work-items are restricted to be run strictly
+in non-parallel fashion by the respective workqueue. However, this has
+lead to some problems:
+
+In some instances, the event notifier function called inside this
+completion workqueue takes a non-negligible amount of time to execute.
+One such example is the battery event handling code (surface_battery.c),
+which can result in a full battery information refresh, involving
+further synchronous communication with the EC inside the event handler.
+This is made worse if the communication fails spuriously, generally
+incurring a multi-second timeout.
+
+Since the event completions are run strictly non-parallel, this blocks
+other events from being propagated to the respective subsystems. This
+becomes especially noticeable for keyboard and touchpad input, which
+also funnel their events through this system. Here, users have reported
+occasional multi-second "freezes".
+
+Note, however, that the event handling system was never intended to run
+purely sequentially. Instead, we have one work struct per EC/SAM
+subsystem, processing the event queue for that subsystem. These work
+structs were intended to run in parallel, allowing sequential processing
+of work items for each subsystem but parallel processing of work items
+across subsystems.
+
+The only restriction to this is the way the workqueue is created.
+Therefore, replace create_workqueue() with alloc_workqueue() and do not
+restrict the maximum number of parallel work items to be executed on
+that queue, resolving any cross-subsystem blockage.
+
+Fixes: c167b9c7e3d6 ("platform/surface: Add Surface Aggregator subsystem")
+Link: https://github.com/linux-surface/linux-surface/issues/1026
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20230525210110.2785470-1-luzmaximilian@gmail.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/surface/aggregator/controller.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index c6537a1b3a2ec..30cea324ff95f 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+       cplt->dev = dev;
+-      cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
++      cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+       if (!cplt->wq)
+               return -ENOMEM;
+-- 
+2.39.2
+
diff --git a/queue-6.1/platform-surface-aggregator_tabletsw-add-support-for.patch b/queue-6.1/platform-surface-aggregator_tabletsw-add-support-for.patch
new file mode 100644 (file)
index 0000000..934d53a
--- /dev/null
@@ -0,0 +1,61 @@
+From ed4d40caae435a63b98979505fd7813859facad8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 23:32:17 +0200
+Subject: platform/surface: aggregator_tabletsw: Add support for book mode in
+ KIP subsystem
+
+From: Maximilian Luz <luzmaximilian@gmail.com>
+
+[ Upstream commit 9bed667033e66083d363a11e9414ad401ecc242c ]
+
+Devices with a type-cover have an additional "book" mode, deactivating
+type-cover input and turning off its backlight. This is currently
+unsupported, leading to the warning
+
+  surface_aggregator_tablet_mode_switch 01:0e:01:00:01: unknown KIP cover state: 6
+
+Therefore, add support for this state and map it to enable tablet-mode.
+
+Fixes: 9f794056db5b ("platform/surface: Add KIP/POS tablet-mode switch driver")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20230525213218.2797480-2-luzmaximilian@gmail.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/surface/surface_aggregator_tabletsw.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
+index 27d95a6a78513..af8b547cffdc6 100644
+--- a/drivers/platform/surface/surface_aggregator_tabletsw.c
++++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
+@@ -201,6 +201,7 @@ enum ssam_kip_cover_state {
+       SSAM_KIP_COVER_STATE_LAPTOP        = 0x03,
+       SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+       SSAM_KIP_COVER_STATE_FOLDED_BACK   = 0x05,
++      SSAM_KIP_COVER_STATE_BOOK          = 0x06,
+ };
+ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+@@ -221,6 +222,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 stat
+       case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+               return "folded-back";
++      case SSAM_KIP_COVER_STATE_BOOK:
++              return "book";
++
+       default:
+               dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+               return "<unknown>";
+@@ -233,6 +237,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 s
+       case SSAM_KIP_COVER_STATE_DISCONNECTED:
+       case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+       case SSAM_KIP_COVER_STATE_FOLDED_BACK:
++      case SSAM_KIP_COVER_STATE_BOOK:
+               return true;
+       case SSAM_KIP_COVER_STATE_CLOSED:
+-- 
+2.39.2
+
diff --git a/queue-6.1/qed-qede-fix-scheduling-while-atomic.patch b/queue-6.1/qed-qede-fix-scheduling-while-atomic.patch
new file mode 100644 (file)
index 0000000..8cd1fff
--- /dev/null
@@ -0,0 +1,275 @@
+From 1417b05300715684f70b70aa5e5d2db7698ba681 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 16:56:00 +0530
+Subject: qed/qede: Fix scheduling while atomic
+
+From: Manish Chopra <manishc@marvell.com>
+
+[ Upstream commit 42510dffd0e2c27046905f742172ed6662af5557 ]
+
+Statistics read through bond interface via sysfs causes
+below bug and traces as it triggers the bonding module to
+collect the slave device statistics while holding the spinlock,
+beneath that qede->qed driver statistics flow gets scheduled out
+due to usleep_range() used in PTT acquire logic
+
+[ 3673.988874] Hardware name: HPE ProLiant DL365 Gen10 Plus/ProLiant DL365 Gen10 Plus, BIOS A42 10/29/2021
+[ 3673.988878] Call Trace:
+[ 3673.988891]  dump_stack_lvl+0x34/0x44
+[ 3673.988908]  __schedule_bug.cold+0x47/0x53
+[ 3673.988918]  __schedule+0x3fb/0x560
+[ 3673.988929]  schedule+0x43/0xb0
+[ 3673.988932]  schedule_hrtimeout_range_clock+0xbf/0x1b0
+[ 3673.988937]  ? __hrtimer_init+0xc0/0xc0
+[ 3673.988950]  usleep_range+0x5e/0x80
+[ 3673.988955]  qed_ptt_acquire+0x2b/0xd0 [qed]
+[ 3673.988981]  _qed_get_vport_stats+0x141/0x240 [qed]
+[ 3673.989001]  qed_get_vport_stats+0x18/0x80 [qed]
+[ 3673.989016]  qede_fill_by_demand_stats+0x37/0x400 [qede]
+[ 3673.989028]  qede_get_stats64+0x19/0xe0 [qede]
+[ 3673.989034]  dev_get_stats+0x5c/0xc0
+[ 3673.989045]  netstat_show.constprop.0+0x52/0xb0
+[ 3673.989055]  dev_attr_show+0x19/0x40
+[ 3673.989065]  sysfs_kf_seq_show+0x9b/0xf0
+[ 3673.989076]  seq_read_iter+0x120/0x4b0
+[ 3673.989087]  new_sync_read+0x118/0x1a0
+[ 3673.989095]  vfs_read+0xf3/0x180
+[ 3673.989099]  ksys_read+0x5f/0xe0
+[ 3673.989102]  do_syscall_64+0x3b/0x90
+[ 3673.989109]  entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 3673.989115] RIP: 0033:0x7f8467d0b082
+[ 3673.989119] Code: c0 e9 b2 fe ff ff 50 48 8d 3d ca 05 08 00 e8 35 e7 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
+[ 3673.989121] RSP: 002b:00007ffffb21fd08 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
+[ 3673.989127] RAX: ffffffffffffffda RBX: 000000000100eca0 RCX: 00007f8467d0b082
+[ 3673.989128] RDX: 00000000000003ff RSI: 00007ffffb21fdc0 RDI: 0000000000000003
+[ 3673.989130] RBP: 00007f8467b96028 R08: 0000000000000010 R09: 00007ffffb21ec00
+[ 3673.989132] R10: 00007ffffb27b170 R11: 0000000000000246 R12: 00000000000000f0
+[ 3673.989134] R13: 0000000000000003 R14: 00007f8467b92000 R15: 0000000000045a05
+[ 3673.989139] CPU: 30 PID: 285188 Comm: read_all Kdump: loaded Tainted: G        W  OE
+
+Fix this by collecting the statistics asynchronously from a periodic
+delayed work scheduled at default stats coalescing interval and return
+the recent copy of statisitcs from .ndo_get_stats64(), also add ability
+to configure/retrieve stats coalescing interval using below commands -
+
+ethtool -C ethx stats-block-usecs <val>
+ethtool -c ethx
+
+Fixes: 133fac0eedc3 ("qede: Add basic ethtool support")
+Cc: Sudarsana Kalluru <skalluru@marvell.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Manish Chopra <manishc@marvell.com>
+Link: https://lore.kernel.org/r/20230605112600.48238-1-manishc@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c      |  2 +-
+ drivers/net/ethernet/qlogic/qede/qede.h       |  4 +++
+ .../net/ethernet/qlogic/qede/qede_ethtool.c   | 24 +++++++++++--
+ drivers/net/ethernet/qlogic/qede/qede_main.c  | 34 ++++++++++++++++++-
+ 4 files changed, 60 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 2edd6bf64a3cc..7776d3bdd459a 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+ {
+       u32 i;
+-      if (!cdev) {
++      if (!cdev || cdev->recov_in_prog) {
+               memset(stats, 0, sizeof(*stats));
+               return;
+       }
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index f90dcfe9ee688..8a63f99d499c4 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -271,6 +271,10 @@ struct qede_dev {
+ #define QEDE_ERR_WARN                 3
+       struct qede_dump_info           dump_info;
++      struct delayed_work             periodic_task;
++      unsigned long                   stats_coal_ticks;
++      u32                             stats_coal_usecs;
++      spinlock_t                      stats_lock; /* lock for vport stats access */
+ };
+ enum QEDE_STATE {
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index 8034d812d5a00..d0a3395b2bc1f 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -430,6 +430,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+               }
+       }
++      spin_lock(&edev->stats_lock);
++
+       for (i = 0; i < QEDE_NUM_STATS; i++) {
+               if (qede_is_irrelevant_stat(edev, i))
+                       continue;
+@@ -439,6 +441,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+               buf++;
+       }
++      spin_unlock(&edev->stats_lock);
++
+       __qede_unlock(edev);
+ }
+@@ -830,6 +834,7 @@ static int qede_get_coalesce(struct net_device *dev,
+       coal->rx_coalesce_usecs = rx_coal;
+       coal->tx_coalesce_usecs = tx_coal;
++      coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
+       return rc;
+ }
+@@ -843,6 +848,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+       int i, rc = 0;
+       u16 rxc, txc;
++      if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
++              edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
++              if (edev->stats_coal_usecs) {
++                      edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
++                      schedule_delayed_work(&edev->periodic_task, 0);
++
++                      DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
++                              edev->stats_coal_ticks);
++              } else {
++                      cancel_delayed_work_sync(&edev->periodic_task);
++              }
++      }
++
+       if (!netif_running(dev)) {
+               DP_INFO(edev, "Interface is down\n");
+               return -EINVAL;
+@@ -2253,7 +2271,8 @@ static int qede_get_per_coalesce(struct net_device *dev,
+ }
+ static const struct ethtool_ops qede_ethtool_ops = {
+-      .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
++      .supported_coalesce_params      = ETHTOOL_COALESCE_USECS |
++                                        ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+       .get_link_ksettings             = qede_get_link_ksettings,
+       .set_link_ksettings             = qede_set_link_ksettings,
+       .get_drvinfo                    = qede_get_drvinfo,
+@@ -2304,7 +2323,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
+ };
+ static const struct ethtool_ops qede_vf_ethtool_ops = {
+-      .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
++      .supported_coalesce_params      = ETHTOOL_COALESCE_USECS |
++                                        ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+       .get_link_ksettings             = qede_get_link_ksettings,
+       .get_drvinfo                    = qede_get_drvinfo,
+       .get_msglevel                   = qede_get_msglevel,
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 89d64a5a4951a..e8d427c7d1cff 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -308,6 +308,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+       edev->ops->get_vport_stats(edev->cdev, &stats);
++      spin_lock(&edev->stats_lock);
++
+       p_common->no_buff_discards = stats.common.no_buff_discards;
+       p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+       p_common->ttl0_discard = stats.common.ttl0_discard;
+@@ -405,6 +407,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+               p_ah->tx_1519_to_max_byte_packets =
+                   stats.ah.tx_1519_to_max_byte_packets;
+       }
++
++      spin_unlock(&edev->stats_lock);
+ }
+ static void qede_get_stats64(struct net_device *dev,
+@@ -413,9 +417,10 @@ static void qede_get_stats64(struct net_device *dev,
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_stats_common *p_common;
+-      qede_fill_by_demand_stats(edev);
+       p_common = &edev->stats.common;
++      spin_lock(&edev->stats_lock);
++
+       stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+                           p_common->rx_bcast_pkts;
+       stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+@@ -435,6 +440,8 @@ static void qede_get_stats64(struct net_device *dev,
+               stats->collisions = edev->stats.bb.tx_total_collisions;
+       stats->rx_crc_errors = p_common->rx_crc_errors;
+       stats->rx_frame_errors = p_common->rx_align_errors;
++
++      spin_unlock(&edev->stats_lock);
+ }
+ #ifdef CONFIG_QED_SRIOV
+@@ -1061,6 +1068,23 @@ static void qede_unlock(struct qede_dev *edev)
+       rtnl_unlock();
+ }
++static void qede_periodic_task(struct work_struct *work)
++{
++      struct qede_dev *edev = container_of(work, struct qede_dev,
++                                           periodic_task.work);
++
++      qede_fill_by_demand_stats(edev);
++      schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
++}
++
++static void qede_init_periodic_task(struct qede_dev *edev)
++{
++      INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
++      spin_lock_init(&edev->stats_lock);
++      edev->stats_coal_usecs = USEC_PER_SEC;
++      edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
++}
++
+ static void qede_sp_task(struct work_struct *work)
+ {
+       struct qede_dev *edev = container_of(work, struct qede_dev,
+@@ -1080,6 +1104,7 @@ static void qede_sp_task(struct work_struct *work)
+        */
+       if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
++              cancel_delayed_work_sync(&edev->periodic_task);
+ #ifdef CONFIG_QED_SRIOV
+               /* SRIOV must be disabled outside the lock to avoid a deadlock.
+                * The recovery of the active VFs is currently not supported.
+@@ -1270,6 +1295,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+                */
+               INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+               mutex_init(&edev->qede_lock);
++              qede_init_periodic_task(edev);
+               rc = register_netdev(edev->ndev);
+               if (rc) {
+@@ -1294,6 +1320,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+       edev->rx_copybreak = QEDE_RX_HDR_SIZE;
+       qede_log_probe(edev);
++
++      /* retain user config (for example - after recovery) */
++      if (edev->stats_coal_usecs)
++              schedule_delayed_work(&edev->periodic_task, 0);
++
+       return 0;
+ err4:
+@@ -1362,6 +1393,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+               unregister_netdev(ndev);
+               cancel_delayed_work_sync(&edev->sp_task);
++              cancel_delayed_work_sync(&edev->periodic_task);
+               edev->ops->common->set_power_state(cdev, PCI_D0);
+-- 
+2.39.2
+
diff --git a/queue-6.1/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch b/queue-6.1/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch
new file mode 100644 (file)
index 0000000..5cabe7e
--- /dev/null
@@ -0,0 +1,67 @@
+From 32d045ce474daefc05466dbc8a5d7526ca29f474 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:15 +0000
+Subject: rfs: annotate lockless accesses to RFS sock flow table
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5c3b74a92aa285a3df722bf6329ba7ccf70346d6 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to the sock flow table.
+
+This also prevents a (smart ?) compiler to remove the condition in:
+
+if (table->ents[index] != newval)
+        table->ents[index] = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 7 +++++--
+ net/core/dev.c            | 6 ++++--
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index eac51e22a52a8..74e05b82f1bf7 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -757,8 +757,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+               /* We only give a hint, preemption can change CPU under us */
+               val |= raw_smp_processor_id();
+-              if (table->ents[index] != val)
+-                      table->ents[index] = val;
++              /* The following WRITE_ONCE() is paired with the READ_ONCE()
++               * here, and another one in get_rps_cpu().
++               */
++              if (READ_ONCE(table->ents[index]) != val)
++                      WRITE_ONCE(table->ents[index], val);
+       }
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 93d430693ca0f..ee00d3cfcb564 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4483,8 +4483,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+               u32 next_cpu;
+               u32 ident;
+-              /* First check into global flow table if there is a match */
+-              ident = sock_flow_table->ents[hash & sock_flow_table->mask];
++              /* First check into global flow table if there is a match.
++               * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
++               */
++              ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+               if ((ident ^ hash) & ~rps_cpu_mask)
+                       goto try_rps;
+-- 
+2.39.2
+
diff --git a/queue-6.1/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch b/queue-6.1/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch
new file mode 100644 (file)
index 0000000..c562d77
--- /dev/null
@@ -0,0 +1,73 @@
+From fe17fb0445d37bacf57f0ecba03d416cdc042cae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:14 +0000
+Subject: rfs: annotate lockless accesses to sk->sk_rxhash
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1e5c647c3f6d4f8497dedcd226204e1880e0ffb3 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to sk->sk_rxhash.
+
+This also prevents a (smart ?) compiler to remove the condition in:
+
+if (sk->sk_rxhash != newval)
+       sk->sk_rxhash = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f11b98bd0244c..2f35b82a123f8 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1148,8 +1148,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
+                * OR   an additional socket flag
+                * [1] : sk_state and sk_prot are in the same cache line.
+                */
+-              if (sk->sk_state == TCP_ESTABLISHED)
+-                      sock_rps_record_flow_hash(sk->sk_rxhash);
++              if (sk->sk_state == TCP_ESTABLISHED) {
++                      /* This READ_ONCE() is paired with the WRITE_ONCE()
++                       * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
++                       */
++                      sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
++              }
+       }
+ #endif
+ }
+@@ -1158,15 +1162,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
+                                       const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_RPS
+-      if (unlikely(sk->sk_rxhash != skb->hash))
+-              sk->sk_rxhash = skb->hash;
++      /* The following WRITE_ONCE() is paired with the READ_ONCE()
++       * here, and another one in sock_rps_record_flow().
++       */
++      if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
++              WRITE_ONCE(sk->sk_rxhash, skb->hash);
+ #endif
+ }
+ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ {
+ #ifdef CONFIG_RPS
+-      sk->sk_rxhash = 0;
++      /* Paired with READ_ONCE() in sock_rps_record_flow() */
++      WRITE_ONCE(sk->sk_rxhash, 0);
+ #endif
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.1/selftests-bpf-fix-sockopt_sk-selftest.patch b/queue-6.1/selftests-bpf-fix-sockopt_sk-selftest.patch
new file mode 100644 (file)
index 0000000..66b9ee8
--- /dev/null
@@ -0,0 +1,54 @@
+From 3381b4d2a6a48ee347796d4b7e88744146c3cf7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 10:22:02 -0700
+Subject: selftests/bpf: Fix sockopt_sk selftest
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 69844e335d8c22454746c7903776533d8b4ab8fa ]
+
+Commit f4e4534850a9 ("net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report")
+fixed NETLINK_LIST_MEMBERSHIPS length report which caused
+selftest sockopt_sk failure. The failure log looks like
+
+  test_sockopt_sk:PASS:join_cgroup /sockopt_sk 0 nsec
+  run_test:PASS:skel_load 0 nsec
+  run_test:PASS:setsockopt_link 0 nsec
+  run_test:PASS:getsockopt_link 0 nsec
+  getsetsockopt:FAIL:Unexpected NETLINK_LIST_MEMBERSHIPS value unexpected Unexpected NETLINK_LIST_MEMBERSHIPS value: actual 8 != expected 4
+  run_test:PASS:getsetsockopt 0 nsec
+  #201     sockopt_sk:FAIL
+
+In net/netlink/af_netlink.c, function netlink_getsockopt(), for NETLINK_LIST_MEMBERSHIPS,
+nlk->ngroups equals to 36. Before Commit f4e4534850a9, the optlen is calculated as
+  ALIGN(nlk->ngroups / 8, sizeof(u32)) = 4
+After that commit, the optlen is
+  ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)) = 8
+
+Fix the test by setting the expected optlen to be 8.
+
+Fixes: f4e4534850a9 ("net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report")
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20230606172202.1606249-1-yhs@fb.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/prog_tests/sockopt_sk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+index 4512dd808c335..05d0e07da3942 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+@@ -209,7 +209,7 @@ static int getsetsockopt(void)
+                       err, errno);
+               goto err;
+       }
+-      ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
++      ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
+       free(big_buf);
+       close(fd);
+-- 
+2.39.2
+
diff --git a/queue-6.1/selftests-bpf-verify-optval-null-case.patch b/queue-6.1/selftests-bpf-verify-optval-null-case.patch
new file mode 100644 (file)
index 0000000..ad5f38e
--- /dev/null
@@ -0,0 +1,100 @@
+From 87f7f6841ed41365283f51feedf2a9baeb1c5a27 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Apr 2023 15:53:39 -0700
+Subject: selftests/bpf: Verify optval=NULL case
+
+From: Stanislav Fomichev <sdf@google.com>
+
+[ Upstream commit 833d67ecdc5f35f1ebf59d0fccc1ce771434be9c ]
+
+Make sure we get optlen exported instead of getting EFAULT.
+
+Signed-off-by: Stanislav Fomichev <sdf@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20230418225343.553806-3-sdf@google.com
+Stable-dep-of: 69844e335d8c ("selftests/bpf: Fix sockopt_sk selftest")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../selftests/bpf/prog_tests/sockopt_sk.c     | 28 +++++++++++++++++++
+ .../testing/selftests/bpf/progs/sockopt_sk.c  | 12 ++++++++
+ 2 files changed, 40 insertions(+)
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+index 60d952719d275..4512dd808c335 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+@@ -3,6 +3,7 @@
+ #include "cgroup_helpers.h"
+ #include <linux/tcp.h>
++#include <linux/netlink.h>
+ #include "sockopt_sk.skel.h"
+ #ifndef SOL_TCP
+@@ -183,6 +184,33 @@ static int getsetsockopt(void)
+               goto err;
+       }
++      /* optval=NULL case is handled correctly */
++
++      close(fd);
++      fd = socket(AF_NETLINK, SOCK_RAW, 0);
++      if (fd < 0) {
++              log_err("Failed to create AF_NETLINK socket");
++              return -1;
++      }
++
++      buf.u32 = 1;
++      optlen = sizeof(__u32);
++      err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen);
++      if (err) {
++              log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d",
++                      err, errno);
++              goto err;
++      }
++
++      optlen = 0;
++      err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen);
++      if (err) {
++              log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d",
++                      err, errno);
++              goto err;
++      }
++      ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
++
+       free(big_buf);
+       close(fd);
+       return 0;
+diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
+index c8d810010a946..fe1df4cd206eb 100644
+--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
+@@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
+       __u8 *optval_end = ctx->optval_end;
+       __u8 *optval = ctx->optval;
+       struct sockopt_sk *storage;
++      struct bpf_sock *sk;
++
++      /* Bypass AF_NETLINK. */
++      sk = ctx->sk;
++      if (sk && sk->family == AF_NETLINK)
++              return 1;
+       /* Make sure bpf_get_netns_cookie is callable.
+        */
+@@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx)
+       __u8 *optval_end = ctx->optval_end;
+       __u8 *optval = ctx->optval;
+       struct sockopt_sk *storage;
++      struct bpf_sock *sk;
++
++      /* Bypass AF_NETLINK. */
++      sk = ctx->sk;
++      if (sk && sk->family == AF_NETLINK)
++              return 1;
+       /* Make sure bpf_get_netns_cookie is callable.
+        */
+-- 
+2.39.2
+
index 9a0adfbd39eadfca311f13b5d0998d7c34c570f9..446f5d7d099bfb37e62e589c8ac2cdca54258616 100644 (file)
@@ -1 +1,56 @@
 scsi-megaraid_sas-add-flexible-array-member-for-sgls.patch
+spi-mt65xx-make-sure-operations-completed-before-unl.patch
+platform-surface-aggregator-allow-completion-work-it.patch
+platform-surface-aggregator_tabletsw-add-support-for.patch
+spi-qup-request-dma-before-enabling-clocks.patch
+afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch
+wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch
+bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch
+neighbour-fix-unaligned-access-to-pneigh_entry.patch
+net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch
+net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch
+bpf-fix-uaf-in-task-local-storage.patch
+bpf-fix-elem_size-not-being-set-for-inner-maps.patch
+net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch
+net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch
+net-enetc-correct-the-statistics-of-rx-bytes.patch
+net-enetc-correct-rx_bytes-statistics-of-xdp.patch
+net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch
+drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch
+drm-i915-use-18-fast-wake-aux-sync-len.patch
+bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch
+bluetooth-fix-l2cap_disconnect_req-deadlock.patch
+bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch
+bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch
+wifi-mac80211-use-correct-iftype-he-cap.patch
+wifi-cfg80211-reject-bad-ap-mld-address.patch
+wifi-mac80211-mlme-fix-non-inheritence-element.patch
+wifi-mac80211-don-t-translate-beacon-presp-addrs.patch
+qed-qede-fix-scheduling-while-atomic.patch
+wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch
+wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch
+selftests-bpf-verify-optval-null-case.patch
+selftests-bpf-fix-sockopt_sk-selftest.patch
+netfilter-nft_bitwise-fix-register-tracking.patch
+netfilter-conntrack-fix-null-pointer-dereference-in-.patch
+netfilter-ipset-add-schedule-point-in-call_ad.patch
+netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch
+ipv6-rpl-fix-route-of-death.patch
+tcp-gso-really-support-big-tcp.patch
+rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch
+rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch
+net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch
+drm-i915-selftests-stop-using-kthread_stop.patch
+drm-i915-selftests-add-some-missing-error-propagatio.patch
+net-sched-move-rtm_tca_policy-declaration-to-include.patch
+net-sched-act_police-fix-sparse-errors-in-tcf_police.patch
+net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch
+bpf-add-extra-path-pointer-check-to-d_path-helper.patch
+drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch
+lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch
+net-bcmgenet-fix-eee-implementation.patch
+bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch
+bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch
+bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch
+bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch
+bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch
diff --git a/queue-6.1/spi-mt65xx-make-sure-operations-completed-before-unl.patch b/queue-6.1/spi-mt65xx-make-sure-operations-completed-before-unl.patch
new file mode 100644 (file)
index 0000000..eda89f1
--- /dev/null
@@ -0,0 +1,77 @@
+From fc80bbd3b072879e41ebc66f472e825cc29e0459 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 May 2023 19:33:14 +0100
+Subject: spi: mt65xx: make sure operations completed before unloading
+
+From: Daniel Golle <daniel@makrotopia.org>
+
+[ Upstream commit 4be47a5d59cbc9396a6ffd327913eb4c8d67a32f ]
+
+When unloading the spi-mt65xx kernel module during an ongoing spi-mem
+operation the kernel will Oops shortly after unloading the module.
+This is because wait_for_completion_timeout was still running and
+returning into the no longer loaded module:
+
+Internal error: Oops: 0000000096000005 [#1] SMP
+Modules linked in: [many, but spi-mt65xx is no longer there]
+CPU: 0 PID: 2578 Comm: block Tainted: G        W  O       6.3.0-next-20230428+ #0
+Hardware name: Bananapi BPI-R3 (DT)
+pstate: 804000c5 (Nzcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __lock_acquire+0x18c/0x20e8
+lr : __lock_acquire+0x9b8/0x20e8
+sp : ffffffc009ec3400
+x29: ffffffc009ec3400 x28: 0000000000000001 x27: 0000000000000004
+x26: ffffff80082888c8 x25: 0000000000000000 x24: 0000000000000000
+x23: ffffffc009609da8 x22: ffffff8008288000 x21: ffffff8008288968
+x20: 00000000000003c2 x19: ffffff8008be7990 x18: 00000000000002af
+x17: 0000000000000000 x16: 0000000000000000 x15: ffffffc008d78970
+x14: 000000000000080d x13: 00000000000002af x12: 00000000ffffffea
+x11: 00000000ffffefff x10: ffffffc008dd0970 x9 : ffffffc008d78918
+x8 : 0000000000017fe8 x7 : 0000000000000001 x6 : 0000000000000000
+x5 : ffffff807fb53910 x4 : 0000000000000000 x3 : 0000000000000027
+x2 : 0000000000000027 x1 : 0000000000000000 x0 : 00000000000c03c2
+Call trace:
+ __lock_acquire+0x18c/0x20e8
+ lock_acquire+0x100/0x2a4
+ _raw_spin_lock_irq+0x58/0x74
+ __wait_for_common+0xe0/0x1b4
+ wait_for_completion_timeout+0x1c/0x24
+ 0xffffffc000acc8a4 <--- used to be mtk_spi_transfer_wait
+ spi_mem_exec_op+0x390/0x3ec
+ spi_mem_no_dirmap_read+0x6c/0x88
+ spi_mem_dirmap_read+0xcc/0x12c
+ spinand_read_page+0xf8/0x1dc
+ spinand_mtd_read+0x1b4/0x2fc
+ mtd_read_oob_std+0x58/0x7c
+ mtd_read_oob+0x8c/0x148
+ mtd_read+0x50/0x6c
+ ...
+
+Prevent this by completing in mtk_spi_remove if needed.
+
+Fixes: 9f763fd20da7 ("spi: mediatek: add spi memory support for ipm design")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Link: https://lore.kernel.org/r/ZFAF6pJxMu1z6k4w@makrotopia.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-mt65xx.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 9eab6c20dbc56..6e95efb50acbc 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
+       struct mtk_spi *mdata = spi_master_get_devdata(master);
+       int ret;
++      if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
++              complete(&mdata->spimem_done);
++
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret < 0)
+               return ret;
+-- 
+2.39.2
+
diff --git a/queue-6.1/spi-qup-request-dma-before-enabling-clocks.patch b/queue-6.1/spi-qup-request-dma-before-enabling-clocks.patch
new file mode 100644 (file)
index 0000000..ea90115
--- /dev/null
@@ -0,0 +1,122 @@
+From 74d75789c1ddf36e4dd0e5e4f7d9ee1e7fbe7401 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 May 2023 15:04:25 +0200
+Subject: spi: qup: Request DMA before enabling clocks
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+[ Upstream commit 0c331fd1dccfba657129380ee084b95c1cedfbef ]
+
+It is usually better to request all necessary resources (clocks,
+regulators, ...) before starting to make use of them. That way they do
+not change state in case one of the resources is not available yet and
+probe deferral (-EPROBE_DEFER) is necessary. This is particularly
+important for DMA channels and IOMMUs which are not enforced by
+fw_devlink yet (unless you use fw_devlink.strict=1).
+
+spi-qup does this in the wrong order, the clocks are enabled and
+disabled again when the DMA channels are not available yet.
+
+This causes issues in some cases: On most SoCs one of the SPI QUP
+clocks is shared with the UART controller. When using earlycon UART is
+actively used during boot but might not have probed yet, usually for
+the same reason (waiting for the DMA controller). In this case, the
+brief enable/disable cycle ends up gating the clock and further UART
+console output will halt the system completely.
+
+Avoid this by requesting the DMA channels before changing the clock
+state.
+
+Fixes: 612762e82ae6 ("spi: qup: Add DMA capabilities")
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Link: https://lore.kernel.org/r/20230518-spi-qup-clk-defer-v1-1-f49fc9ca4e02@gerhold.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-qup.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 205e54f157b4a..fb6b7738b4f55 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1029,23 +1029,8 @@ static int spi_qup_probe(struct platform_device *pdev)
+               return -ENXIO;
+       }
+-      ret = clk_prepare_enable(cclk);
+-      if (ret) {
+-              dev_err(dev, "cannot enable core clock\n");
+-              return ret;
+-      }
+-
+-      ret = clk_prepare_enable(iclk);
+-      if (ret) {
+-              clk_disable_unprepare(cclk);
+-              dev_err(dev, "cannot enable iface clock\n");
+-              return ret;
+-      }
+-
+       master = spi_alloc_master(dev, sizeof(struct spi_qup));
+       if (!master) {
+-              clk_disable_unprepare(cclk);
+-              clk_disable_unprepare(iclk);
+               dev_err(dev, "cannot allocate master\n");
+               return -ENOMEM;
+       }
+@@ -1093,6 +1078,19 @@ static int spi_qup_probe(struct platform_device *pdev)
+       spin_lock_init(&controller->lock);
+       init_completion(&controller->done);
++      ret = clk_prepare_enable(cclk);
++      if (ret) {
++              dev_err(dev, "cannot enable core clock\n");
++              goto error_dma;
++      }
++
++      ret = clk_prepare_enable(iclk);
++      if (ret) {
++              clk_disable_unprepare(cclk);
++              dev_err(dev, "cannot enable iface clock\n");
++              goto error_dma;
++      }
++
+       iomode = readl_relaxed(base + QUP_IO_M_MODES);
+       size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+@@ -1122,7 +1120,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+       ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+       if (ret) {
+               dev_err(dev, "cannot set RESET state\n");
+-              goto error_dma;
++              goto error_clk;
+       }
+       writel_relaxed(0, base + QUP_OPERATIONAL);
+@@ -1146,7 +1144,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+       ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+                              IRQF_TRIGGER_HIGH, pdev->name, controller);
+       if (ret)
+-              goto error_dma;
++              goto error_clk;
+       pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+       pm_runtime_use_autosuspend(dev);
+@@ -1161,11 +1159,12 @@ static int spi_qup_probe(struct platform_device *pdev)
+ disable_pm:
+       pm_runtime_disable(&pdev->dev);
++error_clk:
++      clk_disable_unprepare(cclk);
++      clk_disable_unprepare(iclk);
+ error_dma:
+       spi_qup_release_dma(master);
+ error:
+-      clk_disable_unprepare(cclk);
+-      clk_disable_unprepare(iclk);
+       spi_master_put(master);
+       return ret;
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.1/tcp-gso-really-support-big-tcp.patch b/queue-6.1/tcp-gso-really-support-big-tcp.patch
new file mode 100644 (file)
index 0000000..bec4479
--- /dev/null
@@ -0,0 +1,100 @@
+From 5a3d817d3f3146b47ac36e93f60475d920e7b436 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 16:16:47 +0000
+Subject: tcp: gso: really support BIG TCP
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 82a01ab35bd02ba4b0b4e12bc95c5b69240eb7b0 ]
+
+We missed that tcp_gso_segment() was assuming skb->len was smaller than 65535 :
+
+oldlen = (u16)~skb->len;
+
+This part came with commit 0718bcc09b35 ("[NET]: Fix CHECKSUM_HW GSO problems.")
+
+This leads to wrong TCP checksum.
+
+Adapt the code to accept arbitrary packet length.
+
+v2:
+  - use two csum_add() instead of csum_fold() (Alexander Duyck)
+  - Change delta type to __wsum to reduce casts (Alexander Duyck)
+
+Fixes: 09f3d1a3a52c ("ipv6/gso: remove temporary HBH/jumbo header")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230605161647.3624428-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_offload.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 45dda78893870..4851211aa60d6 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -60,12 +60,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+       struct tcphdr *th;
+       unsigned int thlen;
+       unsigned int seq;
+-      __be32 delta;
+       unsigned int oldlen;
+       unsigned int mss;
+       struct sk_buff *gso_skb = skb;
+       __sum16 newcheck;
+       bool ooo_okay, copy_destructor;
++      __wsum delta;
+       th = tcp_hdr(skb);
+       thlen = th->doff * 4;
+@@ -75,7 +75,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+       if (!pskb_may_pull(skb, thlen))
+               goto out;
+-      oldlen = (u16)~skb->len;
++      oldlen = ~skb->len;
+       __skb_pull(skb, thlen);
+       mss = skb_shinfo(skb)->gso_size;
+@@ -110,7 +110,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+       if (skb_is_gso(segs))
+               mss *= skb_shinfo(segs)->gso_segs;
+-      delta = htonl(oldlen + (thlen + mss));
++      delta = (__force __wsum)htonl(oldlen + thlen + mss);
+       skb = segs;
+       th = tcp_hdr(skb);
+@@ -119,8 +119,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+       if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
+               tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
+-      newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
+-                                             (__force u32)delta));
++      newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
+       while (skb->next) {
+               th->fin = th->psh = 0;
+@@ -165,11 +164,11 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+                       WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
+       }
+-      delta = htonl(oldlen + (skb_tail_pointer(skb) -
+-                              skb_transport_header(skb)) +
+-                    skb->data_len);
+-      th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+-                              (__force u32)delta));
++      delta = (__force __wsum)htonl(oldlen +
++                                    (skb_tail_pointer(skb) -
++                                     skb_transport_header(skb)) +
++                                    skb->data_len);
++      th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               gso_reset_checksum(skb, ~th->check);
+       else
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch b/queue-6.1/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch
new file mode 100644 (file)
index 0000000..d6233e0
--- /dev/null
@@ -0,0 +1,41 @@
+From 419fd79aac2c2cc301761cfb9cf178fc20072432 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 14:34:48 +0200
+Subject: wifi: cfg80211: fix locking in regulatory disconnect
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit f7e60032c6618dfd643c7210d5cba2789e2de2e2 ]
+
+This should use wiphy_lock() now instead of requiring the
+RTNL, since __cfg80211_leave() via cfg80211_leave() is now
+requiring that lock to be held.
+
+Fixes: a05829a7222e ("cfg80211: avoid holding the RTNL when calling the driver")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/reg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 4f3f31244e8ba..50b16e643f381 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2478,11 +2478,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
+       struct wireless_dev *wdev;
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+-      ASSERT_RTNL();
+-
++      wiphy_lock(wiphy);
+       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
+               if (!reg_wdev_chan_valid(wiphy, wdev))
+                       cfg80211_leave(rdev, wdev);
++      wiphy_unlock(wiphy);
+ }
+ static void reg_check_chans_work(struct work_struct *work)
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch b/queue-6.1/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch
new file mode 100644 (file)
index 0000000..04bd644
--- /dev/null
@@ -0,0 +1,41 @@
+From dd36eca83570f8675904f386c4c5c484b5b8a4a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 14:34:47 +0200
+Subject: wifi: cfg80211: fix locking in sched scan stop work
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 3e54ed8247c94c8bdf370bd872bd9dfe72b1b12b ]
+
+This should use wiphy_lock() now instead of acquiring the
+RTNL, since cfg80211_stop_sched_scan_req() now needs that.
+
+Fixes: a05829a7222e ("cfg80211: avoid holding the RTNL when calling the driver")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 5b0c4d5b80cf5..b3ec9eaec36b3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -368,12 +368,12 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
+       rdev = container_of(work, struct cfg80211_registered_device,
+                          sched_scan_stop_wk);
+-      rtnl_lock();
++      wiphy_lock(&rdev->wiphy);
+       list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
+               if (req->nl_owner_dead)
+                       cfg80211_stop_sched_scan_req(rdev, req, false);
+       }
+-      rtnl_unlock();
++      wiphy_unlock(&rdev->wiphy);
+ }
+ static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-cfg80211-reject-bad-ap-mld-address.patch b/queue-6.1/wifi-cfg80211-reject-bad-ap-mld-address.patch
new file mode 100644 (file)
index 0000000..a1b03d5
--- /dev/null
@@ -0,0 +1,39 @@
+From d3b451cbd18809fecb1138a9e2beb744495a9684 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Jun 2023 12:11:18 +0300
+Subject: wifi: cfg80211: reject bad AP MLD address
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 727073ca5e55ab6a07df316250be8a12606e8677 ]
+
+When trying to authenticate, if the AP MLD address isn't
+a valid address, mac80211 can throw a warning. Avoid that
+by rejecting such addresses.
+
+Fixes: d648c23024bd ("wifi: nl80211: support MLO in auth/assoc")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230604120651.89188912bd1d.I8dbc6c8ee0cb766138803eec59508ef4ce477709@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 7320d676ce3a5..087c0c442e231 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10542,6 +10542,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
+               if (!info->attrs[NL80211_ATTR_MLD_ADDR])
+                       return -EINVAL;
+               req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
++              if (!is_valid_ether_addr(req.ap_mld_addr))
++                      return -EINVAL;
+       }
+       req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch b/queue-6.1/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch
new file mode 100644 (file)
index 0000000..b193120
--- /dev/null
@@ -0,0 +1,44 @@
+From 9c09a1d135cacd74726b6b7349b0137b64c9c512 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Jun 2023 12:11:15 +0300
+Subject: wifi: mac80211: don't translate beacon/presp addrs
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 47c171a426e305f2225b92ed7b5e0a990c95f6d4 ]
+
+Don't do link address translation for beacons and probe responses,
+this leads to reporting multiple scan list entries for the same AP
+(one with the MLD address) which just breaks things.
+
+We might need to extend this in the future for some other (action)
+frames that aren't MLD addressed.
+
+Fixes: 42fb9148c078 ("wifi: mac80211: do link->MLD address translation on RX")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230604120651.62adead1b43a.Ifc25eed26ebf3b269f60b1ec10060156d0e7ec0d@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/rx.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 44e407e1a14c7..0f81492da0b46 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4857,7 +4857,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+       }
+       if (unlikely(rx->sta && rx->sta->sta.mlo) &&
+-          is_unicast_ether_addr(hdr->addr1)) {
++          is_unicast_ether_addr(hdr->addr1) &&
++          !ieee80211_is_probe_resp(hdr->frame_control) &&
++          !ieee80211_is_beacon(hdr->frame_control)) {
+               /* translate to MLD addresses */
+               if (ether_addr_equal(link->conf->addr, hdr->addr1))
+                       ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-mac80211-mlme-fix-non-inheritence-element.patch b/queue-6.1/wifi-mac80211-mlme-fix-non-inheritence-element.patch
new file mode 100644 (file)
index 0000000..7fd91ac
--- /dev/null
@@ -0,0 +1,72 @@
+From ebee060550bb9a7cd532663e4277da89f6479631 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Jun 2023 12:11:16 +0300
+Subject: wifi: mac80211: mlme: fix non-inheritence element
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 68c228557d52616cf040651abefda9839de7086a ]
+
+There were two bugs when creating the non-inheritence
+element:
+ 1) 'at_extension' needs to be declared outside the loop,
+    otherwise the value resets every iteration and we
+    can never really switch properly
+ 2) 'added' never got set to true, so we always cut off
+    the extension element again at the end of the function
+
+This shows another issue that we might add a list but no
+extension list, but we need to make the extension list a
+zero-length one in that case.
+
+Fix all these issues. While at it, add a comment explaining
+the trim.
+
+Fixes: 81151ce462e5 ("wifi: mac80211: support MLO authentication/association with one link")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230604120651.3addaa5c4782.If3a78f9305997ad7ef4ba7ffc17a8234c956f613@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/mlme.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0125b3e6175b7..dc9e7eb7dd857 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1143,6 +1143,7 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+                                              const u16 *inner)
+ {
+       unsigned int skb_len = skb->len;
++      bool at_extension = false;
+       bool added = false;
+       int i, j;
+       u8 *len, *list_len = NULL;
+@@ -1154,7 +1155,6 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+       for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) {
+               u16 elem = outer[i];
+               bool have_inner = false;
+-              bool at_extension = false;
+               /* should at least be sorted in the sense of normal -> ext */
+               WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS);
+@@ -1183,8 +1183,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+               }
+               *list_len += 1;
+               skb_put_u8(skb, (u8)elem);
++              added = true;
+       }
++      /* if we added a list but no extension list, make a zero-len one */
++      if (added && (!at_extension || !list_len))
++              skb_put_u8(skb, 0);
++
++      /* if nothing added remove extension element completely */
+       if (!added)
+               skb_trim(skb, skb_len);
+       else
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-mac80211-use-correct-iftype-he-cap.patch b/queue-6.1/wifi-mac80211-use-correct-iftype-he-cap.patch
new file mode 100644 (file)
index 0000000..6fc34ed
--- /dev/null
@@ -0,0 +1,68 @@
+From 803e378b92ab068c5fa94ae224bf3641fea45665 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Jun 2023 12:11:23 +0300
+Subject: wifi: mac80211: use correct iftype HE cap
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit c37ab22bb1a43cdca8bf69cc0a22f1ccfc449e68 ]
+
+We already check that the right iftype capa exists,
+but then don't use it. Assign it to a variable so we
+can actually use it, and then do that.
+
+Fixes: bac2fd3d7534 ("mac80211: remove use of ieee80211_get_he_sta_cap()")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230604120651.0e908e5c5fdd.Iac142549a6144ac949ebd116b921a59ae5282735@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/he.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 729f261520c77..0322abae08250 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -3,7 +3,7 @@
+  * HE handling
+  *
+  * Copyright(c) 2017 Intel Deutschland GmbH
+- * Copyright(c) 2019 - 2022 Intel Corporation
++ * Copyright(c) 2019 - 2023 Intel Corporation
+  */
+ #include "ieee80211_i.h"
+@@ -114,6 +114,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+                                 struct link_sta_info *link_sta)
+ {
+       struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
++      const struct ieee80211_sta_he_cap *own_he_cap_ptr;
+       struct ieee80211_sta_he_cap own_he_cap;
+       struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+       u8 he_ppe_size;
+@@ -123,12 +124,16 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+       memset(he_cap, 0, sizeof(*he_cap));
+-      if (!he_cap_ie ||
+-          !ieee80211_get_he_iftype_cap(sband,
+-                                       ieee80211_vif_type_p2p(&sdata->vif)))
++      if (!he_cap_ie)
+               return;
+-      own_he_cap = sband->iftype_data->he_cap;
++      own_he_cap_ptr =
++              ieee80211_get_he_iftype_cap(sband,
++                                          ieee80211_vif_type_p2p(&sdata->vif));
++      if (!own_he_cap_ptr)
++              return;
++
++      own_he_cap = *own_he_cap_ptr;
+       /* Make sure size is OK */
+       mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+-- 
+2.39.2
+
diff --git a/queue-6.1/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch b/queue-6.1/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch
new file mode 100644 (file)
index 0000000..699bc3c
--- /dev/null
@@ -0,0 +1,40 @@
+From 7de6a8f8251a2a446dee88a92cd79d3deca937bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 16:39:32 +0200
+Subject: wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 30bc32c7c1f975cc3c14e1c7dc437266311282cf ]
+
+Grab sta_poll_lock spinlock in mt7615_mac_sta_poll routine in order to
+avoid possible races with mt7615_mac_add_txs() or mt7615_mac_fill_rx()
+removing msta pointer from sta_poll_list.
+
+Fixes: a621372a04ac ("mt76: mt7615: rework mt7615_mac_sta_poll for usb code")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/48b23404b759de4f1db2ef85975c72a4aeb1097c.1684938695.git.lorenzo@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index faed43b11ec93..40c80d09d108a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -921,7 +921,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+               msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
+                                       poll_list);
++
++              spin_lock_bh(&dev->sta_poll_lock);
+               list_del_init(&msta->poll_list);
++              spin_unlock_bh(&dev->sta_poll_lock);
+               addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
+-- 
+2.39.2
+