--- /dev/null
+From e33e2e35689271f1b476a7002c3e5837fcfffc20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 09:47:13 +0100
+Subject: afs: Fix setting of mtime when creating a file/dir/symlink
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a27648c742104a833a01c54becc24429898d85bf ]
+
+kafs incorrectly passes a zero mtime (ie. 1st Jan 1970) to the server when
+creating a file, dir or symlink because the mtime recorded in the
+afs_operation struct gets passed to the server by the marshalling routines,
+but the afs_mkdir(), afs_create() and afs_symlink() functions don't set it.
+
+This gets masked if a file or directory is subsequently modified.
+
+Fix this by filling in op->mtime before calling the create op.
+
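+For instance, in afs_mkdir() the timestamp is now filled in just before
+the operation is submitted; a simplified excerpt of the mkdir hunk below
+(the other two creation paths get the same one-line addition):
+
+	op->create.reason = afs_edit_dir_for_mkdir;
+	op->mtime = current_time(dir);	/* avoid sending a zero mtime */
+	op->ops = &afs_mkdir_operation;
+	return afs_do_sync_operation(op);
+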
+Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/dir.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 159795059547f..a59d6293a32b2 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1313,6 +1313,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ op->dentry = dentry;
+ op->create.mode = S_IFDIR | mode;
+ op->create.reason = afs_edit_dir_for_mkdir;
++ op->mtime = current_time(dir);
+ op->ops = &afs_mkdir_operation;
+ return afs_do_sync_operation(op);
+ }
+@@ -1616,6 +1617,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ op->dentry = dentry;
+ op->create.mode = S_IFREG | mode;
+ op->create.reason = afs_edit_dir_for_create;
++ op->mtime = current_time(dir);
+ op->ops = &afs_create_operation;
+ return afs_do_sync_operation(op);
+
+@@ -1745,6 +1747,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
+ op->ops = &afs_symlink_operation;
+ op->create.reason = afs_edit_dir_for_symlink;
+ op->create.symlink = content;
++ op->mtime = current_time(dir);
+ return afs_do_sync_operation(op);
+
+ error:
+--
+2.39.2
+
--- /dev/null
+From 1bac6b2db101330c791937f720fa7117d5627d35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 03:44:56 +0000
+Subject: Bluetooth: Fix l2cap_disconnect_req deadlock
+
+From: Ying Hsu <yinghsu@chromium.org>
+
+[ Upstream commit 02c5ea5246a44d6ffde0fddebfc1d56188052976 ]
+
+L2CAP assumes that the locks conn->chan_lock and chan->lock are
+acquired in the order conn->chan_lock, chan->lock to avoid
+potential deadlock.
+For example, l2cap_sock_shutdown() acquires these locks in the order:
+ mutex_lock(&conn->chan_lock)
+ l2cap_chan_lock(chan)
+
+However, l2cap_disconnect_req acquires chan->lock in
+l2cap_get_chan_by_scid first and then acquires conn->chan_lock
+before calling l2cap_chan_del. This means that these locks are
+acquired in unexpected order, which leads to potential deadlock:
+ l2cap_chan_lock(c)
+ mutex_lock(&conn->chan_lock)
+
+This patch releases chan->lock before acquiring conn->chan_lock
+to avoid the potential deadlock.
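+
+The disconnect paths then take the locks in the expected order; a
+simplified sketch of the hunks below (error handling elided):
+
+	l2cap_chan_unlock(chan);
+	mutex_lock(&conn->chan_lock);
+	l2cap_chan_lock(chan);
+	l2cap_chan_del(chan, ECONNRESET);
+	mutex_unlock(&conn->chan_lock);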
+
+Fixes: a2a9339e1c9d ("Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}")
+Signed-off-by: Ying Hsu <yinghsu@chromium.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b85ce276e2a3c..2f05507bb36ef 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4659,7 +4659,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+
+ chan->ops->set_shutdown(chan);
+
++ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
++ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+ mutex_unlock(&conn->chan_lock);
+
+@@ -4698,7 +4700,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
++ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, 0);
+ mutex_unlock(&conn->chan_lock);
+
+--
+2.39.2
+
--- /dev/null
+From d3220bbe404dcec44c700970e9856b09a0540c90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Jun 2023 08:28:09 -0400
+Subject: Bluetooth: L2CAP: Add missing checks for invalid DCID
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit 75767213f3d9b97f63694d02260b6a49a2271876 ]
+
+When receiving a connect response we should make sure that the DCID is
+within the valid range and that we don't already have another channel
+allocated for the same DCID.
+Missing checks may violate the specification (BLUETOOTH CORE SPECIFICATION
+Version 5.4 | Vol 3, Part A, Page 1046).
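+
+A simplified sketch of the added checks (taken from the hunks below): a
+successful connect response must carry a DCID in the dynamically
+allocated range, and that DCID must not already be bound to another
+channel on the same connection:
+
+	if (result == L2CAP_CR_SUCCESS &&
+	    (dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_DYN_END))
+		return -EPROTO;
+
+	/* on success, also reject a DCID that is already in use */
+	if (__l2cap_get_chan_by_dcid(conn, dcid))
+		err = -EBADSLT;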
+
+Fixes: 40624183c202 ("Bluetooth: L2CAP: Add missing checks for invalid LE DCID")
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 2f05507bb36ef..568f0f072b3df 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4303,6 +4303,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ result = __le16_to_cpu(rsp->result);
+ status = __le16_to_cpu(rsp->status);
+
++ if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
++ dcid > L2CAP_CID_DYN_END))
++ return -EPROTO;
++
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ dcid, scid, result, status);
+
+@@ -4334,6 +4338,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+
+ switch (result) {
+ case L2CAP_CR_SUCCESS:
++ if (__l2cap_get_chan_by_dcid(conn, dcid)) {
++ err = -EBADSLT;
++ break;
++ }
++
+ l2cap_state_change(chan, BT_CONFIG);
+ chan->ident = 0;
+ chan->dcid = dcid;
+--
+2.39.2
+
--- /dev/null
+From 8499b22bd3bfd8d81070b2bad208f132221fcd74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:05 -0700
+Subject: bnxt_en: Don't issue AP reset during ethtool's reset operation
+
+From: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+
+[ Upstream commit 1d997801c7cc6a7f542e46d5a6bf16f893ad3fe9 ]
+
+Only older NIC controller's firmware uses the PROC AP reset type.
+Firmware on 5731X/5741X and newer chips does not support this reset
+type. When bnxt_reset() issues a series of resets, this PROC AP
+reset may actually fail on these newer chips because the firmware
+is not ready to accept this unsupported command yet. Avoid this
+unnecessary error by skipping this reset type on chips that don't
+support it.
+
+Fixes: 7a13240e3718 ("bnxt_en: fix ethtool_reset_flags ABI violations")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 1e67e86fc3344..2984234df67eb 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -3440,7 +3440,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
+ }
+ }
+
+- if (req & BNXT_FW_RESET_AP) {
++ if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+--
+2.39.2
+
--- /dev/null
+From f9531054ffba884c724ab46d4ac437f7d4289c34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:09 -0700
+Subject: bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks
+
+From: Somnath Kotur <somnath.kotur@broadcom.com>
+
+[ Upstream commit 1eb4ef12591348c440ac9d6efcf7521e73cf2b10 ]
+
+As per the new udp tunnel framework, drivers which need to know the
+details of a port entry (i.e. port type) when it gets deleted should
+use the .set_port / .unset_port callbacks.
+
+Implementing the current .udp_tunnel_sync callback would mean that the
+deleted tunnel port entry would be all zeros. This used to work on
+older firmware because it would not check the input when deleting a
+tunnel port. With newer firmware, the delete will now fail and
+subsequent tunnel port allocation will fail as a result.
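+
+With split callbacks the driver sees the full udp_tunnel_info (including
+the tunnel type) on both add and delete; a simplified sketch of the
+resulting registration, taken from the hunks below:
+
+	static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+		.set_port	= bnxt_udp_tunnel_set_port,
+		.unset_port	= bnxt_udp_tunnel_unset_port,
+		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+				  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+		...
+	};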
+
+Fixes: 442a35a5a7aa ("bnxt: convert to new udp_tunnel_nic infra")
+Reviewed-by: Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 127ed119b5f71..d8366351cf14a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -12104,26 +12104,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+
+ #endif /* CONFIG_RFS_ACCEL */
+
+-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
++static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
++ unsigned int entry, struct udp_tunnel_info *ti)
+ {
+ struct bnxt *bp = netdev_priv(netdev);
+- struct udp_tunnel_info ti;
+ unsigned int cmd;
+
+- udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+- if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
++ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+
+- if (ti.port)
+- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
++ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
++}
++
++static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
++ unsigned int entry, struct udp_tunnel_info *ti)
++{
++ struct bnxt *bp = netdev_priv(netdev);
++ unsigned int cmd;
++
++ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
++ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
++ else
++ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+
+ return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
+ }
+
+ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+- .sync_table = bnxt_udp_tunnel_sync,
++ .set_port = bnxt_udp_tunnel_set_port,
++ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+--
+2.39.2
+
--- /dev/null
+From 209900de23a03609c7061e562546143cad442c84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:06 -0700
+Subject: bnxt_en: Query default VLAN before VNIC setup on a VF
+
+From: Somnath Kotur <somnath.kotur@broadcom.com>
+
+[ Upstream commit 1a9e4f501bc6ff1b6ecb60df54fbf2b54db43bfe ]
+
+We need to call bnxt_hwrm_func_qcfg() on a VF to query the default
+VLAN that may be setup by the PF. If a default VLAN is enabled,
+the VF cannot support VLAN acceleration on the receive side and
+the VNIC must be setup to strip out the default VLAN tag. If a
+default VLAN is not enabled, the VF can support VLAN acceleration
+on the receive side. The VNIC should be set up to strip or not
+strip the VLAN based on the RX VLAN acceleration setting.
+
+Without this call to determine the default VLAN before calling
+bnxt_setup_vnic(), the VNIC may not be set up correctly. For
+example, bnxt_setup_vnic() may set up to strip the VLAN tag based
+on stale default VLAN information. If RX VLAN acceleration is
+not enabled, the VLAN tag will be incorrectly stripped and the
+RX data path will not work correctly.
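+
+A minimal sketch of the change (see the hunk below): refresh the VF
+configuration right before the VNIC is set up so the default VLAN state
+is current:
+
+	if (BNXT_VF(bp))
+		bnxt_hwrm_func_qcfg(bp);
+
+	rc = bnxt_setup_vnic(bp, 0);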
+
+Fixes: cf6645f8ebc6 ("bnxt_en: Add function for VF driver to query default VLAN.")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 3a9fcf942a6de..127ed119b5f71 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8337,6 +8337,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+ goto err_out;
+ }
+
++ if (BNXT_VF(bp))
++ bnxt_hwrm_func_qcfg(bp);
++
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+--
+2.39.2
+
--- /dev/null
+From c18b82d7f4a7268924ddb9deb89f4bd8d7778dd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:17:14 -0700
+Subject: bpf: Add extra path pointer check to d_path helper
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit f46fab0e36e611a2389d3843f34658c849b6bd60 ]
+
+Anastasios reported a crash on a stable 5.15 kernel with the following
+BPF program attached to an LSM hook:
+
+ SEC("lsm.s/bprm_creds_for_exec")
+ int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
+ {
+ struct path *path = &bprm->executable->f_path;
+ char p[128] = { 0 };
+
+ bpf_d_path(path, p, 128);
+ return 0;
+ }
+
+But bprm->executable can be NULL, so bpf_d_path call will crash:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000018
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC NOPTI
+ ...
+ RIP: 0010:d_path+0x22/0x280
+ ...
+ Call Trace:
+ <TASK>
+ bpf_d_path+0x21/0x60
+ bpf_prog_db9cf176e84498d9_bprm_creds_for_exec+0x94/0x99
+ bpf_trampoline_6442506293_0+0x55/0x1000
+ bpf_lsm_bprm_creds_for_exec+0x5/0x10
+ security_bprm_creds_for_exec+0x29/0x40
+ bprm_execve+0x1c1/0x900
+ do_execveat_common.isra.0+0x1af/0x260
+ __x64_sys_execve+0x32/0x40
+
+It's a problem for all stable trees with the bpf_d_path helper, which was
+added in 5.9.
+
+This issue is fixed in current bpf code, where we identify and mark
+trusted pointers, so the above code would fail even to load.
+
+For the sake of the stable trees, and to work around a potentially broken
+verifier in the future, add code that reads the path object from the
+passed pointer and verifies that it's valid in kernel space.
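+
+The added guard is roughly the following (a sketch of the hunk below):
+copy the path object with a fault-safe kernel read and only then hand it
+to d_path():
+
+	struct path copy;
+
+	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+	if (len < 0)
+		return len;
+
+	p = d_path(&copy, buf, sz);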
+
+Fixes: 6e22ab9da793 ("bpf: Add d_path helper")
+Reported-by: Anastasios Papagiannis <tasos.papagiannnis@gmail.com>
+Suggested-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Stanislav Fomichev <sdf@google.com>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20230606181714.532998-1-jolsa@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/bpf_trace.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 94e51d36fb497..9e90d1e7af2c8 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -1128,13 +1128,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+
+ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
+ {
++ struct path copy;
+ long len;
+ char *p;
+
+ if (!sz)
+ return 0;
+
+- p = d_path(path, buf, sz);
++ /*
++ * The path pointer is verified as trusted and safe to use,
++ * but let's double check it's valid anyway to workaround
++ * potentially broken verifier.
++ */
++ len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
++ if (len < 0)
++ return len;
++
++ p = d_path(&copy, buf, sz);
+ if (IS_ERR(p)) {
+ len = PTR_ERR(p);
+ } else {
+--
+2.39.2
+
--- /dev/null
+From c486ab34c0e1c90a6b1c21bd8bac367b394eac6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 11:06:17 -0700
+Subject: ipv6: rpl: Fix Route of Death.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit a2f4c143d76b1a47c91ef9bc46907116b111da0b ]
+
+A remote DoS vulnerability of RPL Source Routing is assigned CVE-2023-2156.
+
+The Source Routing Header (SRH) has the following format:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Next Header | Hdr Ext Len | Routing Type | Segments Left |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | CmprI | CmprE | Pad | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ . .
+ . Addresses[1..n] .
+ . .
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+The originator of an SRH places the first hop's IPv6 address in the IPv6
+header's IPv6 Destination Address and the second hop's IPv6 address as
+the first address in Addresses[1..n].
+
+The CmprI and CmprE fields indicate the number of prefix octets that are
+shared with the IPv6 Destination Address. When CmprI or CmprE is not 0,
+Addresses[1..n] are compressed as follows:
+
+ 1..n-1 : (16 - CmprI) bytes
+ n : (16 - CmprE) bytes
+
+Segments Left indicates the number of route segments remaining. When the
+value is not zero, the SRH is forwarded to the next hop. Its address
+is extracted from Addresses[n - Segment Left + 1] and swapped with IPv6
+Destination Address.
+
+When Segment Left is greater than or equal to 2, the size of SRH is not
+changed because Addresses[1..n-1] are decompressed and recompressed with
+CmprI.
+
+OTOH, when Segment Left changes from 1 to 0, the new SRH could have a
+different size because Addresses[1..n-1] are decompressed with CmprI and
+recompressed with CmprE.
+
+Let's say CmprI is 15 and CmprE is 0. When we receive SRH with Segment
+Left >= 2, Addresses[1..n-1] have 1 byte each, and Addresses[n] has
+16 bytes. When Segment Left is 1, Addresses[1..n-1] are decompressed to
+16 bytes and not recompressed. Finally, the new SRH will need more room
+in the header, and the size is (16 - 1) * (n - 1) bytes.
+
+Here the max value of n is 255 as Segment Left is u8, so in the worst case,
+we have to allocate 3825 bytes in the skb headroom. However, now we only
+allocate a small fixed buffer that is IPV6_RPL_SRH_WORST_SWAP_SIZE (16 + 7
+bytes). If the decompressed size overflows the room, skb_push() hits BUG()
+below [0].
+
+Instead of allocating the fixed buffer for every packet, let's allocate
+enough headroom only when we receive SRH with Segment Left 1.
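+
+A simplified sketch of the new approach, based on the hunk below (stats
+accounting and buffer cleanup elided): expand the headroom by the exact
+size of the rebuilt header only on the final segment, instead of
+reserving a fixed worst-case buffer for every packet:
+
+	if (unlikely(!hdr->segments_left)) {
+		if (pskb_expand_head(skb,
+				     sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3),
+				     0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			return -1;
+		}
+	}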
+
+[0]:
+skbuff: skb_under_panic: text:ffffffff81c9f6e2 len:576 put:576 head:ffff8880070b5180 data:ffff8880070b4fb0 tail:0x70 end:0x140 dev:lo
+kernel BUG at net/core/skbuff.c:200!
+invalid opcode: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 154 Comm: python3 Not tainted 6.4.0-rc4-00190-gc308e9ec0047 #7
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+RIP: 0010:skb_panic (net/core/skbuff.c:200)
+Code: 4f 70 50 8b 87 bc 00 00 00 50 8b 87 b8 00 00 00 50 ff b7 c8 00 00 00 4c 8b 8f c0 00 00 00 48 c7 c7 80 6e 77 82 e8 ad 8b 60 ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90
+RSP: 0018:ffffc90000003da0 EFLAGS: 00000246
+RAX: 0000000000000085 RBX: ffff8880058a6600 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: ffff88807dc1c540 RDI: ffff88807dc1c540
+RBP: ffffc90000003e48 R08: ffffffff82b392c8 R09: 00000000ffffdfff
+R10: ffffffff82a592e0 R11: ffffffff82b092e0 R12: ffff888005b1c800
+R13: ffff8880070b51b8 R14: ffff888005b1ca18 R15: ffff8880070b5190
+FS: 00007f4539f0b740(0000) GS:ffff88807dc00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055670baf3000 CR3: 0000000005b0e000 CR4: 00000000007506f0
+PKRU: 55555554
+Call Trace:
+ <IRQ>
+ skb_push (net/core/skbuff.c:210)
+ ipv6_rthdr_rcv (./include/linux/skbuff.h:2880 net/ipv6/exthdrs.c:634 net/ipv6/exthdrs.c:718)
+ ip6_protocol_deliver_rcu (net/ipv6/ip6_input.c:437 (discriminator 5))
+ ip6_input_finish (./include/linux/rcupdate.h:805 net/ipv6/ip6_input.c:483)
+ __netif_receive_skb_one_core (net/core/dev.c:5494)
+ process_backlog (./include/linux/rcupdate.h:805 net/core/dev.c:5934)
+ __napi_poll (net/core/dev.c:6496)
+ net_rx_action (net/core/dev.c:6565 net/core/dev.c:6696)
+ __do_softirq (./arch/x86/include/asm/jump_label.h:27 ./include/linux/jump_label.h:207 ./include/trace/events/irq.h:142 kernel/softirq.c:572)
+ do_softirq (kernel/softirq.c:472 kernel/softirq.c:459)
+ </IRQ>
+ <TASK>
+ __local_bh_enable_ip (kernel/softirq.c:396)
+ __dev_queue_xmit (net/core/dev.c:4272)
+ ip6_finish_output2 (./include/net/neighbour.h:544 net/ipv6/ip6_output.c:134)
+ rawv6_sendmsg (./include/net/dst.h:458 ./include/linux/netfilter.h:303 net/ipv6/raw.c:656 net/ipv6/raw.c:914)
+ sock_sendmsg (net/socket.c:724 net/socket.c:747)
+ __sys_sendto (net/socket.c:2144)
+ __x64_sys_sendto (net/socket.c:2156 net/socket.c:2152 net/socket.c:2152)
+ do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
+RIP: 0033:0x7f453a138aea
+Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89
+RSP: 002b:00007ffcc212a1c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 00007ffcc212a288 RCX: 00007f453a138aea
+RDX: 0000000000000060 RSI: 00007f4539084c20 RDI: 0000000000000003
+RBP: 00007f4538308e80 R08: 00007ffcc212a300 R09: 000000000000001c
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: ffffffffc4653600 R14: 0000000000000001 R15: 00007f4539712d1b
+ </TASK>
+Modules linked in:
+
+Fixes: 8610c7c6e3bd ("net: ipv6: add support for rpl sr exthdr")
+Reported-by: Max VA
+Closes: https://www.interruptlabs.co.uk/articles/linux-ipv6-route-of-death
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230605180617.67284-1-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/rpl.h | 3 ---
+ net/ipv6/exthdrs.c | 29 +++++++++++------------------
+ 2 files changed, 11 insertions(+), 21 deletions(-)
+
+diff --git a/include/net/rpl.h b/include/net/rpl.h
+index 308ef0a05caef..30fe780d1e7c8 100644
+--- a/include/net/rpl.h
++++ b/include/net/rpl.h
+@@ -23,9 +23,6 @@ static inline int rpl_init(void)
+ static inline void rpl_exit(void) {}
+ #endif
+
+-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
+-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
+-
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre);
+
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 4932dea9820ba..cdad9019c77c4 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -552,24 +552,6 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
+ return -1;
+ }
+
+- if (skb_cloned(skb)) {
+- if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
+- GFP_ATOMIC)) {
+- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+- IPSTATS_MIB_OUTDISCARDS);
+- kfree_skb(skb);
+- return -1;
+- }
+- } else {
+- err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
+- if (unlikely(err)) {
+- kfree_skb(skb);
+- return -1;
+- }
+- }
+-
+- hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
+-
+ if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
+ hdr->cmpre))) {
+ kfree_skb(skb);
+@@ -615,6 +597,17 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
+ skb_pull(skb, ((hdr->hdrlen + 1) << 3));
+ skb_postpull_rcsum(skb, oldhdr,
+ sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
++ if (unlikely(!hdr->segments_left)) {
++ if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
++ GFP_ATOMIC)) {
++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
++ kfree_skb(skb);
++ kfree(buf);
++ return -1;
++ }
++
++ oldhdr = ipv6_hdr(skb);
++ }
+ skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+--
+2.39.2
+
--- /dev/null
+From 21f498c0b982c19865431c05ff3d8611bd83da5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 20:28:15 +0200
+Subject: lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+[ Upstream commit 7c5d4801ecf0564c860033d89726b99723c55146 ]
+
+irq_cpu_rmap_release() calls cpu_rmap_put(), which may free the rmap.
+So we need to clear the pointer to our glue structure in rmap before
+doing that, not after.
+
+Fixes: 4e0473f1060a ("lib: cpu_rmap: Avoid use after free on rmap->obj array entries")
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/ZHo0vwquhOy3FaXc@decadent.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/cpu_rmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index e77f12bb3c774..1833ad73de6fc 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -268,8 +268,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+
+- cpu_rmap_put(glue->rmap);
+ glue->rmap->obj[glue->index] = NULL;
++ cpu_rmap_put(glue->rmap);
+ kfree(glue);
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 486d7a52ffd37b3c5f2a0438e219e5fa255044a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 09:54:32 +0800
+Subject: neighbour: fix unaligned access to pneigh_entry
+
+From: Qingfang DENG <qingfang.deng@siflower.com.cn>
+
+[ Upstream commit ed779fe4c9b5a20b4ab4fd6f3e19807445bb78c7 ]
+
+After the blamed commit, the member key is no longer 4-byte aligned. On
+platforms that do not support unaligned access, e.g., MIPS32R2 with
+unaligned_action set to 1, this will trigger a crash when accessing
+an IPv6 pneigh_entry, as the key is cast to an in6_addr pointer.
+
+Change the type of the key to u32 to make it aligned.
+
+Fixes: 62dd93181aaa ("[IPV6] NDISC: Set per-entry is_router flag in Proxy NA.")
+Signed-off-by: Qingfang DENG <qingfang.deng@siflower.com.cn>
+Link: https://lore.kernel.org/r/20230601015432.159066-1-dqfext@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/neighbour.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index d5767e25509cc..abb22cfd4827f 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -174,7 +174,7 @@ struct pneigh_entry {
+ struct net_device *dev;
+ u8 flags;
+ u8 protocol;
+- u8 key[];
++ u32 key[];
+ };
+
+ /*
+--
+2.39.2
+
--- /dev/null
+From bc3eeef7c0d6544ac60d66269befa71e9fda511d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 16:38:26 +0200
+Subject: net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+[ Upstream commit 5a59a58ec25d44f853c26bdbfda47d73b3067435 ]
+
+LAN9303 doesn't associate FDB (ALR) entries with VLANs; it has just one
+global Address Logic Resolution table [1].
+
+Ignore the VID in the port_fdb_{add|del} methods and go on with the global
+table. These are the same semantics as hellcreek or RZ/N1 implement.
+
+Visible symptoms:
+LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to delete 00:xx:xx:xx:xx:cf vid 1 from fdb: -2
+LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to add 00:xx:xx:xx:xx:cf vid 1 to fdb: -95
+
+[1] https://ww1.microchip.com/downloads/en/DeviceDoc/00002308A.pdf
+
+Fixes: 0620427ea0d6 ("net: dsa: lan9303: Add fdb/mdb manipulation")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://lore.kernel.org/r/20230531143826.477267-1-alexander.sverdlin@siemens.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/lan9303-core.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index deeed50a42c05..f5ab0bff4ac29 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1187,8 +1187,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+- if (vid)
+- return -EOPNOTSUPP;
+
+ return lan9303_alr_add_port(chip, addr, port, false);
+ }
+@@ -1200,8 +1198,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+- if (vid)
+- return -EOPNOTSUPP;
+ lan9303_alr_del_port(chip, addr, port);
+
+ return 0;
+--
+2.39.2
+
--- /dev/null
+From 28320b975b7a9bd089f9836be1bc28ffa1d4e4cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 10:23:01 +0800
+Subject: net: sched: fix possible refcount leak in tc_chain_tmplt_add()
+
+From: Hangyu Hua <hbh25y@gmail.com>
+
+[ Upstream commit 44f8baaf230c655c249467ca415b570deca8df77 ]
+
+try_module_get will be called in tcf_proto_lookup_ops. So module_put needs
+to be called to drop the refcount if ops don't implement the required
+function.
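+
+The error path then becomes roughly (a sketch of the hunk below):
+
+	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+		module_put(ops->owner);	/* drop the reference taken by tcf_proto_lookup_ops() */
+		return -EOPNOTSUPP;
+	}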
+
+Fixes: 9f407f1768d3 ("net: sched: introduce chain templates")
+Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 16960d9663e9e..befe42aad04ba 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2772,6 +2772,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+ return PTR_ERR(ops);
+ if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+ NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
++ module_put(ops->owner);
+ return -EOPNOTSUPP;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 60cc5b6e6d027f14318f262e07c6fbedad6e1975 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 12:37:47 +0000
+Subject: net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cd2b8113c2e8b9f5a88a942e1eaca61eba401b85 ]
+
+We got multiple syzbot reports, all duplicates of the following [1]
+
+syzbot managed to install fq_pie with a zero TCA_FQ_PIE_QUANTUM,
+thus triggering infinite loops.
+
+Use limits similar to sch_fq, with commits
+3725a269815b ("pkt_sched: fq: avoid hang when quantum 0") and
+d9e15a273306 ("pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM")
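+
+With the new policy a zero or oversized quantum is rejected at netlink
+validation time; a sketch of the added range, taken from the hunk below:
+
+	static struct netlink_range_validation fq_pie_q_range = {
+		.min = 1,
+		.max = 1 << 20,
+	};
+
+	[TCA_FQ_PIE_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),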
+
+[1]
+watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [swapper/0:0]
+Modules linked in:
+irq event stamp: 172817
+hardirqs last enabled at (172816): [<ffff80001242fde4>] __el1_irq arch/arm64/kernel/entry-common.c:476 [inline]
+hardirqs last enabled at (172816): [<ffff80001242fde4>] el1_interrupt+0x58/0x68 arch/arm64/kernel/entry-common.c:486
+hardirqs last disabled at (172817): [<ffff80001242fdb0>] __el1_irq arch/arm64/kernel/entry-common.c:468 [inline]
+hardirqs last disabled at (172817): [<ffff80001242fdb0>] el1_interrupt+0x24/0x68 arch/arm64/kernel/entry-common.c:486
+softirqs last enabled at (167634): [<ffff800008020c1c>] softirq_handle_end kernel/softirq.c:414 [inline]
+softirqs last enabled at (167634): [<ffff800008020c1c>] __do_softirq+0xac0/0xd54 kernel/softirq.c:600
+softirqs last disabled at (167701): [<ffff80000802a660>] ____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.4.0-rc3-syzkaller-geb0f1697d729 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/28/2023
+pstate: 80400005 (Nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246
+lr : fq_pie_qdisc_dequeue+0xe4/0x8ac net/sched/sch_fq_pie.c:240
+sp : ffff800008007210
+x29: ffff800008007280 x28: ffff0000c86f7890 x27: ffff0000cb20c2e8
+x26: ffff0000cb20c2f0 x25: dfff800000000000 x24: ffff0000cb20c2e0
+x23: ffff0000c86f7880 x22: 0000000000000040 x21: 1fffe000190def10
+x20: ffff0000cb20c2e0 x19: ffff0000cb20c2e0 x18: ffff800008006e60
+x17: 0000000000000000 x16: ffff80000850af6c x15: 0000000000000302
+x14: 0000000000000100 x13: 0000000000000000 x12: 0000000000000001
+x11: 0000000000000302 x10: 0000000000000100 x9 : 0000000000000000
+x8 : 0000000000000000 x7 : ffff80000841c468 x6 : 0000000000000000
+x5 : 0000000000000001 x4 : 0000000000000001 x3 : 0000000000000000
+x2 : ffff0000cb20c2e0 x1 : ffff0000cb20c2e0 x0 : 0000000000000001
+Call trace:
+fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246
+dequeue_skb net/sched/sch_generic.c:292 [inline]
+qdisc_restart net/sched/sch_generic.c:397 [inline]
+__qdisc_run+0x1fc/0x231c net/sched/sch_generic.c:415
+__dev_xmit_skb net/core/dev.c:3868 [inline]
+__dev_queue_xmit+0xc80/0x3318 net/core/dev.c:4210
+dev_queue_xmit include/linux/netdevice.h:3085 [inline]
+neigh_connected_output+0x2f8/0x38c net/core/neighbour.c:1581
+neigh_output include/net/neighbour.h:544 [inline]
+ip6_finish_output2+0xd60/0x1a1c net/ipv6/ip6_output.c:134
+__ip6_finish_output net/ipv6/ip6_output.c:195 [inline]
+ip6_finish_output+0x538/0x8c8 net/ipv6/ip6_output.c:206
+NF_HOOK_COND include/linux/netfilter.h:292 [inline]
+ip6_output+0x270/0x594 net/ipv6/ip6_output.c:227
+dst_output include/net/dst.h:458 [inline]
+NF_HOOK include/linux/netfilter.h:303 [inline]
+ndisc_send_skb+0xc30/0x1790 net/ipv6/ndisc.c:508
+ndisc_send_rs+0x47c/0x5d4 net/ipv6/ndisc.c:718
+addrconf_rs_timer+0x300/0x58c net/ipv6/addrconf.c:3936
+call_timer_fn+0x19c/0x8cc kernel/time/timer.c:1700
+expire_timers kernel/time/timer.c:1751 [inline]
+__run_timers+0x55c/0x734 kernel/time/timer.c:2022
+run_timer_softirq+0x7c/0x114 kernel/time/timer.c:2035
+__do_softirq+0x2d0/0xd54 kernel/softirq.c:571
+____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80
+call_on_irq_stack+0x24/0x4c arch/arm64/kernel/entry.S:882
+do_softirq_own_stack+0x20/0x2c arch/arm64/kernel/irq.c:85
+invoke_softirq kernel/softirq.c:452 [inline]
+__irq_exit_rcu+0x28c/0x534 kernel/softirq.c:650
+irq_exit_rcu+0x14/0x84 kernel/softirq.c:662
+__el1_irq arch/arm64/kernel/entry-common.c:472 [inline]
+el1_interrupt+0x38/0x68 arch/arm64/kernel/entry-common.c:486
+el1h_64_irq_handler+0x18/0x24 arch/arm64/kernel/entry-common.c:491
+el1h_64_irq+0x64/0x68 arch/arm64/kernel/entry.S:587
+__daif_local_irq_enable arch/arm64/include/asm/irqflags.h:33 [inline]
+arch_local_irq_enable+0x8/0xc arch/arm64/include/asm/irqflags.h:55
+cpuidle_idle_call kernel/sched/idle.c:170 [inline]
+do_idle+0x1f0/0x4e8 kernel/sched/idle.c:282
+cpu_startup_entry+0x24/0x28 kernel/sched/idle.c:379
+rest_init+0x2dc/0x2f4 init/main.c:735
+start_kernel+0x0/0x55c init/main.c:834
+start_kernel+0x3f0/0x55c init/main.c:1088
+__primary_switched+0xb8/0xc0 arch/arm64/kernel/head.S:523
+
+Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_fq_pie.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index cf04f70e96bf1..4f6b5b6fba3ed 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -201,6 +201,11 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ return NET_XMIT_CN;
+ }
+
++static struct netlink_range_validation fq_pie_q_range = {
++ .min = 1,
++ .max = 1 << 20,
++};
++
+ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+ [TCA_FQ_PIE_LIMIT] = {.type = NLA_U32},
+ [TCA_FQ_PIE_FLOWS] = {.type = NLA_U32},
+@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+ [TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32},
+ [TCA_FQ_PIE_ALPHA] = {.type = NLA_U32},
+ [TCA_FQ_PIE_BETA] = {.type = NLA_U32},
+- [TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32},
++ [TCA_FQ_PIE_QUANTUM] =
++ NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
+ [TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32},
+ [TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32},
+ [TCA_FQ_PIE_ECN] = {.type = NLA_U32},
+--
+2.39.2
+
--- /dev/null
+From cbcadd0a7259eda7477e53d813d06d976001eb5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:42:33 +0000
+Subject: net: sched: move rtm_tca_policy declaration to include file
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 886bc7d6ed3357975c5f1d3c784da96000d4bbb4 ]
+
+rtm_tca_policy is used from net/sched/sch_api.c and net/sched/cls_api.c,
+thus should be declared in an include file.
+
+This fixes the following sparse warning:
+net/sched/sch_api.c:1434:25: warning: symbol 'rtm_tca_policy' was not declared. Should it be static?
+
+Fixes: e331473fee3d ("net/sched: cls_api: add missing validation of netlink attributes")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/pkt_sched.h | 2 ++
+ net/sched/cls_api.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 50d5ffbad473e..ba781e0aaf566 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -129,6 +129,8 @@ static inline void qdisc_run(struct Qdisc *q)
+ }
+ }
+
++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
++
+ /* Calculate maximal size of packet seen by hard_start_xmit
+ routine of this device.
+ */
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 53d315ed94307..16960d9663e9e 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -41,8 +41,6 @@
+ #include <net/tc_act/tc_gate.h>
+ #include <net/flow_offload.h>
+
+-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+-
+ /* The list of all installed classifier types */
+ static LIST_HEAD(tcf_proto_base);
+
+--
+2.39.2
+
--- /dev/null
+From dad3d3222f632dd334a9da435858025efeef3a57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 16:41:52 +0800
+Subject: net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT
+
+From: Wen Gu <guwen@linux.alibaba.com>
+
+[ Upstream commit c308e9ec004721a656c193243eab61a8be324657 ]
+
+SMCRv1 has a similar issue to SMCRv2 (see the link below): it may access
+invalid MRs of RMBs when constructing LLC ADD LINK CONT messages.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000014
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 5 PID: 48 Comm: kworker/5:0 Kdump: loaded Tainted: G W E 6.4.0-rc3+ #49
+ Workqueue: events smc_llc_add_link_work [smc]
+ RIP: 0010:smc_llc_add_link_cont+0x160/0x270 [smc]
+ RSP: 0018:ffffa737801d3d50 EFLAGS: 00010286
+ RAX: ffff964f82144000 RBX: ffffa737801d3dd8 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff964f81370c30
+ RBP: ffffa737801d3dd4 R08: ffff964f81370000 R09: ffffa737801d3db0
+ R10: 0000000000000001 R11: 0000000000000060 R12: ffff964f82e70000
+ R13: ffff964f81370c38 R14: ffffa737801d3dd3 R15: 0000000000000001
+ FS: 0000000000000000(0000) GS:ffff9652bfd40000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000014 CR3: 000000008fa20004 CR4: 00000000003706e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ <TASK>
+ smc_llc_srv_rkey_exchange+0xa7/0x190 [smc]
+ smc_llc_srv_add_link+0x3ae/0x5a0 [smc]
+ smc_llc_add_link_work+0xb8/0x140 [smc]
+ process_one_work+0x1e5/0x3f0
+ worker_thread+0x4d/0x2f0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xe5/0x120
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x2c/0x50
+ </TASK>
+
+When an alternate RNIC is available in the system, SMC will try to add a new
+link based on the RNIC for resilience. All the RMBs in use will be mapped
+to the new link. Then the RMBs' MRs corresponding to the new link will
+be filled into LLC messages. For SMCRv1, they are ADD LINK CONT messages.
+
+However, smc_llc_add_link_cont() may mistakenly access unused RMBs which
+haven't been mapped to the new link and have no valid MRs, thus causing a
+crash. So this patch fixes it.
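+
+The fix simply skips unused RMBs before their MRs are touched; a sketch
+of the reordered loop body (see the hunk below, remainder elided):
+
+	while (*buf_pos && !(*buf_pos)->used)
+		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+	if (!*buf_pos) {
+		...
+	}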
+
+Fixes: 87f88cda2128 ("net/smc: rkey processing for a new link as SMC client")
+Link: https://lore.kernel.org/r/1685101741-74826-3-git-send-email-guwen@linux.alibaba.com
+Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Reviewed-by: Tony Lu <tonylu@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_llc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 0ef15f8fba902..d5ee961ca72d5 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -716,6 +716,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+ addc_llc->num_rkeys = *num_rkeys_todo;
+ n = *num_rkeys_todo;
+ for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
++ while (*buf_pos && !(*buf_pos)->used)
++ *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+ if (!*buf_pos) {
+ addc_llc->num_rkeys = addc_llc->num_rkeys -
+ *num_rkeys_todo;
+@@ -731,8 +733,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+
+ (*num_rkeys_todo)--;
+ *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+- while (*buf_pos && !(*buf_pos)->used)
+- *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+ }
+ addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
+ addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
+--
+2.39.2
+
--- /dev/null
+From 0b1e6eb80fdb2f570939e760dcf780ae00366380 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 12:25:26 +0200
+Subject: netfilter: conntrack: fix NULL pointer dereference in
+ nf_confirm_cthelper
+
+From: Tijs Van Buggenhout <tijs.van.buggenhout@axsguard.com>
+
+[ Upstream commit e1f543dc660b44618a1bd72ddb4ca0828a95f7ad ]
+
+An nf_conntrack_helper from nf_conn_help may become NULL after DNAT.
+
+Observed when TCP port 1720 (Q931_PORT), associated with h323 conntrack
+helper, is DNAT'ed to another destination port (e.g. 1730), while
+nfqueue is being used for final acceptance (e.g. snort).
+
+This happened after the transition from kernel 4.14 to 5.10.161.
+
+Workarounds:
+ * keep the same port (1720) in DNAT
+ * disable nfqueue
+ * disable/unload h323 NAT helper
+
+$ linux-5.10/scripts/decode_stacktrace.sh vmlinux < /tmp/kernel.log
+BUG: kernel NULL pointer dereference, address: 0000000000000084
+[..]
+RIP: 0010:nf_conntrack_update (net/netfilter/nf_conntrack_core.c:2080 net/netfilter/nf_conntrack_core.c:2134) nf_conntrack
+[..]
+nfqnl_reinject (net/netfilter/nfnetlink_queue.c:237) nfnetlink_queue
+nfqnl_recv_verdict (net/netfilter/nfnetlink_queue.c:1230) nfnetlink_queue
+nfnetlink_rcv_msg (net/netfilter/nfnetlink.c:241) nfnetlink
+[..]
+
+Fixes: ee04805ff54a ("netfilter: conntrack: make conntrack userspace helpers work again")
+Signed-off-by: Tijs Van Buggenhout <tijs.van.buggenhout@axsguard.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 193a18bfddc0a..f82a234ac53a1 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2075,6 +2075,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+ return 0;
+
+ helper = rcu_dereference(help->helper);
++ if (!helper)
++ return 0;
++
+ if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+ return 0;
+
+--
+2.39.2
+
--- /dev/null
+From bd8f01bdf8b8fa3ad1730fddf9f4413c4a13ad61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 May 2023 10:33:00 -0700
+Subject: netfilter: ipset: Add schedule point in call_ad().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 24e227896bbf003165e006732dccb3516f87f88e ]
+
+syzkaller found a repro that causes Hung Task [0] with ipset. The repro
+first creates an ipset and then tries to delete a large number of IPs
+from the ipset concurrently:
+
+ IPSET_ATTR_IPADDR_IPV4 : 172.20.20.187
+ IPSET_ATTR_CIDR : 2
+
+The first deleting thread hogs a CPU with nfnl_lock(NFNL_SUBSYS_IPSET)
+held, and other threads wait for it to be released.
+
+Previously, the same issue existed in set->variant->uadt() that could run
+so long under ip_set_lock(set). Commit 5e29dc36bd5e ("netfilter: ipset:
+Rework long task execution when adding/deleting entries") tried to fix it,
+but the issue still exists in the caller with another mutex.
+
+While adding/deleting many IPs, we should release the CPU periodically to
+prevent someone from abusing ipset to hang the system.
+
+Note we need to increment the ipset's refcnt to prevent the ipset from
+being destroyed while rescheduling.
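+
+A sketch of the added schedule point in call_ad() (mirroring the hunk
+below):
+
+	if (retried) {
+		__ip_set_get(set);	/* keep the set alive across the resched */
+		nfnl_unlock(NFNL_SUBSYS_IPSET);
+		cond_resched();
+		nfnl_lock(NFNL_SUBSYS_IPSET);
+		__ip_set_put(set);
+	}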
+
+[0]:
+INFO: task syz-executor174:268 blocked for more than 143 seconds.
+ Not tainted 6.4.0-rc1-00145-gba79e9a73284 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+task:syz-executor174 state:D stack:0 pid:268 ppid:260 flags:0x0000000d
+Call trace:
+ __switch_to+0x308/0x714 arch/arm64/kernel/process.c:556
+ context_switch kernel/sched/core.c:5343 [inline]
+ __schedule+0xd84/0x1648 kernel/sched/core.c:6669
+ schedule+0xf0/0x214 kernel/sched/core.c:6745
+ schedule_preempt_disabled+0x58/0xf0 kernel/sched/core.c:6804
+ __mutex_lock_common kernel/locking/mutex.c:679 [inline]
+ __mutex_lock+0x6fc/0xdb0 kernel/locking/mutex.c:747
+ __mutex_lock_slowpath+0x14/0x20 kernel/locking/mutex.c:1035
+ mutex_lock+0x98/0xf0 kernel/locking/mutex.c:286
+ nfnl_lock net/netfilter/nfnetlink.c:98 [inline]
+ nfnetlink_rcv_msg+0x480/0x70c net/netfilter/nfnetlink.c:295
+ netlink_rcv_skb+0x1c0/0x350 net/netlink/af_netlink.c:2546
+ nfnetlink_rcv+0x18c/0x199c net/netfilter/nfnetlink.c:658
+ netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+ netlink_unicast+0x664/0x8cc net/netlink/af_netlink.c:1365
+ netlink_sendmsg+0x6d0/0xa4c net/netlink/af_netlink.c:1913
+ sock_sendmsg_nosec net/socket.c:724 [inline]
+ sock_sendmsg net/socket.c:747 [inline]
+ ____sys_sendmsg+0x4b8/0x810 net/socket.c:2503
+ ___sys_sendmsg net/socket.c:2557 [inline]
+ __sys_sendmsg+0x1f8/0x2a4 net/socket.c:2586
+ __do_sys_sendmsg net/socket.c:2595 [inline]
+ __se_sys_sendmsg net/socket.c:2593 [inline]
+ __arm64_sys_sendmsg+0x80/0x94 net/socket.c:2593
+ __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
+ invoke_syscall+0x84/0x270 arch/arm64/kernel/syscall.c:52
+ el0_svc_common+0x134/0x24c arch/arm64/kernel/syscall.c:142
+ do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:193
+ el0_svc+0x2c/0x7c arch/arm64/kernel/entry-common.c:637
+ el0t_64_sync_handler+0x84/0xf0 arch/arm64/kernel/entry-common.c:655
+ el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591
+
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Fixes: a7b4f989a629 ("netfilter: ipset: IP set core support")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Acked-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_core.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 1bf6ab83644b3..55ac0cc12657c 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1704,6 +1704,14 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+ bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
+
+ do {
++ if (retried) {
++ __ip_set_get(set);
++ nfnl_unlock(NFNL_SUBSYS_IPSET);
++ cond_resched();
++ nfnl_lock(NFNL_SUBSYS_IPSET);
++ __ip_set_put(set);
++ }
++
+ ip_set_lock(set);
+ ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
+ ip_set_unlock(set);
+--
+2.39.2
+
--- /dev/null
+From ac0cefdf1ac998625d6a0b7dd450a2f2cad1bef7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 16:56:00 +0530
+Subject: qed/qede: Fix scheduling while atomic
+
+From: Manish Chopra <manishc@marvell.com>
+
+[ Upstream commit 42510dffd0e2c27046905f742172ed6662af5557 ]
+
+Reading statistics through the bond interface via sysfs causes
+the bug and traces below, as it triggers the bonding module to
+collect the slave device statistics while holding the spinlock;
+beneath that, the qede->qed driver statistics flow gets scheduled
+out due to the usleep_range() used in the PTT acquire logic.
+
+[ 3673.988874] Hardware name: HPE ProLiant DL365 Gen10 Plus/ProLiant DL365 Gen10 Plus, BIOS A42 10/29/2021
+[ 3673.988878] Call Trace:
+[ 3673.988891] dump_stack_lvl+0x34/0x44
+[ 3673.988908] __schedule_bug.cold+0x47/0x53
+[ 3673.988918] __schedule+0x3fb/0x560
+[ 3673.988929] schedule+0x43/0xb0
+[ 3673.988932] schedule_hrtimeout_range_clock+0xbf/0x1b0
+[ 3673.988937] ? __hrtimer_init+0xc0/0xc0
+[ 3673.988950] usleep_range+0x5e/0x80
+[ 3673.988955] qed_ptt_acquire+0x2b/0xd0 [qed]
+[ 3673.988981] _qed_get_vport_stats+0x141/0x240 [qed]
+[ 3673.989001] qed_get_vport_stats+0x18/0x80 [qed]
+[ 3673.989016] qede_fill_by_demand_stats+0x37/0x400 [qede]
+[ 3673.989028] qede_get_stats64+0x19/0xe0 [qede]
+[ 3673.989034] dev_get_stats+0x5c/0xc0
+[ 3673.989045] netstat_show.constprop.0+0x52/0xb0
+[ 3673.989055] dev_attr_show+0x19/0x40
+[ 3673.989065] sysfs_kf_seq_show+0x9b/0xf0
+[ 3673.989076] seq_read_iter+0x120/0x4b0
+[ 3673.989087] new_sync_read+0x118/0x1a0
+[ 3673.989095] vfs_read+0xf3/0x180
+[ 3673.989099] ksys_read+0x5f/0xe0
+[ 3673.989102] do_syscall_64+0x3b/0x90
+[ 3673.989109] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 3673.989115] RIP: 0033:0x7f8467d0b082
+[ 3673.989119] Code: c0 e9 b2 fe ff ff 50 48 8d 3d ca 05 08 00 e8 35 e7 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24
+[ 3673.989121] RSP: 002b:00007ffffb21fd08 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
+[ 3673.989127] RAX: ffffffffffffffda RBX: 000000000100eca0 RCX: 00007f8467d0b082
+[ 3673.989128] RDX: 00000000000003ff RSI: 00007ffffb21fdc0 RDI: 0000000000000003
+[ 3673.989130] RBP: 00007f8467b96028 R08: 0000000000000010 R09: 00007ffffb21ec00
+[ 3673.989132] R10: 00007ffffb27b170 R11: 0000000000000246 R12: 00000000000000f0
+[ 3673.989134] R13: 0000000000000003 R14: 00007f8467b92000 R15: 0000000000045a05
+[ 3673.989139] CPU: 30 PID: 285188 Comm: read_all Kdump: loaded Tainted: G W OE
+
+Fix this by collecting the statistics asynchronously from a periodic
+delayed work scheduled at the default stats coalescing interval, and
+return the most recent copy of the statistics from .ndo_get_stats64().
+Also add the ability to configure/retrieve the stats coalescing
+interval using the commands below:
+
+ethtool -C ethx stats-block-usecs <val>
+ethtool -c ethx
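+
+For example, to refresh the cached statistics every two seconds
+("ethx" and the value are illustrative):
+
+ethtool -C ethx stats-block-usecs 2000000
+
+Setting the value to 0 cancels the periodic work; the default interval
+is one second (USEC_PER_SEC in the hunks below).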
+
+Fixes: 133fac0eedc3 ("qede: Add basic ethtool support")
+Cc: Sudarsana Kalluru <skalluru@marvell.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Manish Chopra <manishc@marvell.com>
+Link: https://lore.kernel.org/r/20230605112600.48238-1-manishc@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 +-
+ drivers/net/ethernet/qlogic/qede/qede.h | 4 +++
+ .../net/ethernet/qlogic/qede/qede_ethtool.c | 24 +++++++++++--
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 34 ++++++++++++++++++-
+ 4 files changed, 60 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 07824bf9d68d9..0157bcd2efffa 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1902,7 +1902,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+ {
+ u32 i;
+
+- if (!cdev) {
++ if (!cdev || cdev->recov_in_prog) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index f313fd7303316..3251d58a263fa 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -273,6 +273,10 @@ struct qede_dev {
+ #define QEDE_ERR_WARN 3
+
+ struct qede_dump_info dump_info;
++ struct delayed_work periodic_task;
++ unsigned long stats_coal_ticks;
++ u32 stats_coal_usecs;
++ spinlock_t stats_lock; /* lock for vport stats access */
+ };
+
+ enum QEDE_STATE {
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index bedbb85a179ae..db104e035ba18 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -426,6 +426,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+ }
+ }
+
++ spin_lock(&edev->stats_lock);
++
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
+ if (qede_is_irrelevant_stat(edev, i))
+ continue;
+@@ -435,6 +437,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+ buf++;
+ }
+
++ spin_unlock(&edev->stats_lock);
++
+ __qede_unlock(edev);
+ }
+
+@@ -815,6 +819,7 @@ static int qede_get_coalesce(struct net_device *dev,
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
++ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
+
+ return rc;
+ }
+@@ -827,6 +832,19 @@ static int qede_set_coalesce(struct net_device *dev,
+ int i, rc = 0;
+ u16 rxc, txc;
+
++ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
++ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
++ if (edev->stats_coal_usecs) {
++ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
++ schedule_delayed_work(&edev->periodic_task, 0);
++
++ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
++ edev->stats_coal_ticks);
++ } else {
++ cancel_delayed_work_sync(&edev->periodic_task);
++ }
++ }
++
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+@@ -2106,7 +2124,8 @@ static int qede_get_dump_data(struct net_device *dev,
+ }
+
+ static const struct ethtool_ops qede_ethtool_ops = {
+- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
++ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
++ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+@@ -2155,7 +2174,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
+ };
+
+ static const struct ethtool_ops qede_vf_ethtool_ops = {
+- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
++ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
++ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index e93f06e4a1729..681ec142c23de 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -313,6 +313,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+
++ spin_lock(&edev->stats_lock);
++
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+@@ -410,6 +412,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
++
++ spin_unlock(&edev->stats_lock);
+ }
+
+ static void qede_get_stats64(struct net_device *dev,
+@@ -418,9 +422,10 @@ static void qede_get_stats64(struct net_device *dev,
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
+
+- qede_fill_by_demand_stats(edev);
+ p_common = &edev->stats.common;
+
++ spin_lock(&edev->stats_lock);
++
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+@@ -440,6 +445,8 @@ static void qede_get_stats64(struct net_device *dev,
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
++
++ spin_unlock(&edev->stats_lock);
+ }
+
+ #ifdef CONFIG_QED_SRIOV
+@@ -1001,6 +1008,23 @@ static void qede_unlock(struct qede_dev *edev)
+ rtnl_unlock();
+ }
+
++static void qede_periodic_task(struct work_struct *work)
++{
++ struct qede_dev *edev = container_of(work, struct qede_dev,
++ periodic_task.work);
++
++ qede_fill_by_demand_stats(edev);
++ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
++}
++
++static void qede_init_periodic_task(struct qede_dev *edev)
++{
++ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
++ spin_lock_init(&edev->stats_lock);
++ edev->stats_coal_usecs = USEC_PER_SEC;
++ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
++}
++
+ static void qede_sp_task(struct work_struct *work)
+ {
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+@@ -1020,6 +1044,7 @@ static void qede_sp_task(struct work_struct *work)
+ */
+
+ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
++ cancel_delayed_work_sync(&edev->periodic_task);
+ #ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+@@ -1216,6 +1241,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
++ qede_init_periodic_task(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+@@ -1240,6 +1266,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
+
+ qede_log_probe(edev);
++
++ /* retain user config (for example - after recovery) */
++ if (edev->stats_coal_usecs)
++ schedule_delayed_work(&edev->periodic_task, 0);
++
+ return 0;
+
+ err4:
+@@ -1308,6 +1339,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+ unregister_netdev(ndev);
+
+ cancel_delayed_work_sync(&edev->sp_task);
++ cancel_delayed_work_sync(&edev->periodic_task);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+--
+2.39.2
+
--- /dev/null
+From 3eddd81eb69554e960e8208aac9ad967722bbd2e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:15 +0000
+Subject: rfs: annotate lockless accesses to RFS sock flow table
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5c3b74a92aa285a3df722bf6329ba7ccf70346d6 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to the sock flow table.
+
+This also prevents a (smart?) compiler from removing the condition in:
+
+if (table->ents[index] != newval)
+ table->ents[index] = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
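+Concretely, the annotated form keeps both the racy load and the store
+exactly as written (a sketch; the real change is in the diff below), so
+the shared cache line is only dirtied when the value actually changes:
+
+if (READ_ONCE(table->ents[index]) != newval)
+	WRITE_ONCE(table->ents[index], newval);
+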
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 7 +++++--
+ net/core/dev.c | 6 ++++--
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 9ef63bc14b002..24fe2cd4b0e8d 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -744,8 +744,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+- if (table->ents[index] != val)
+- table->ents[index] = val;
++ /* The following WRITE_ONCE() is paired with the READ_ONCE()
++ * here, and another one in get_rps_cpu().
++ */
++ if (READ_ONCE(table->ents[index]) != val)
++ WRITE_ONCE(table->ents[index], val);
+ }
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 29e6e11c481c6..f4aad9b00cc90 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4385,8 +4385,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ u32 next_cpu;
+ u32 ident;
+
+- /* First check into global flow table if there is a match */
+- ident = sock_flow_table->ents[hash & sock_flow_table->mask];
++ /* First check into global flow table if there is a match.
++ * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
++ */
++ ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+ if ((ident ^ hash) & ~rps_cpu_mask)
+ goto try_rps;
+
+--
+2.39.2
+
--- /dev/null
+From 83c358258a6f3300b9f6b03a8e44c715fc02afb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:14 +0000
+Subject: rfs: annotate lockless accesses to sk->sk_rxhash
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1e5c647c3f6d4f8497dedcd226204e1880e0ffb3 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to sk->sk_rxhash.
+
+This also prevents a (smart?) compiler from removing the condition in:
+
+if (sk->sk_rxhash != newval)
+ sk->sk_rxhash = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
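+In outline, the writers and the lockless reader are then paired as
+follows (a sketch of the resulting helpers; see the diff below):
+
+/* writers: sock_rps_save_rxhash() and sock_rps_reset_rxhash() */
+WRITE_ONCE(sk->sk_rxhash, skb->hash);
+WRITE_ONCE(sk->sk_rxhash, 0);
+
+/* lockless reader: sock_rps_record_flow() */
+sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+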
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3da0601b573ed..51b499d745499 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1073,8 +1073,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+- if (sk->sk_state == TCP_ESTABLISHED)
+- sock_rps_record_flow_hash(sk->sk_rxhash);
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* This READ_ONCE() is paired with the WRITE_ONCE()
++ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
++ */
++ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
++ }
+ }
+ #endif
+ }
+@@ -1083,15 +1087,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_RPS
+- if (unlikely(sk->sk_rxhash != skb->hash))
+- sk->sk_rxhash = skb->hash;
++ /* The following WRITE_ONCE() is paired with the READ_ONCE()
++ * here, and another one in sock_rps_record_flow().
++ */
++ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
++ WRITE_ONCE(sk->sk_rxhash, skb->hash);
+ #endif
+ }
+
+ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ {
+ #ifdef CONFIG_RPS
+- sk->sk_rxhash = 0;
++ /* Paired with READ_ONCE() in sock_rps_record_flow() */
++ WRITE_ONCE(sk->sk_rxhash, 0);
+ #endif
+ }
+
+--
+2.39.2
+
i40e-fix-build-warnings-in-i40e_alloc.h.patch
i40e-fix-build-warning-in-ice_fltr_add_mac_to_list.patch
staging-vchiq_core-drop-vchiq_status-from-vchiq_initialise.patch
+spi-qup-request-dma-before-enabling-clocks.patch
+afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch
+wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch
+neighbour-fix-unaligned-access-to-pneigh_entry.patch
+net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch
+net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch
+net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch
+bluetooth-fix-l2cap_disconnect_req-deadlock.patch
+bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch
+qed-qede-fix-scheduling-while-atomic.patch
+netfilter-conntrack-fix-null-pointer-dereference-in-.patch
+netfilter-ipset-add-schedule-point-in-call_ad.patch
+ipv6-rpl-fix-route-of-death.patch
+rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch
+rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch
+net-sched-move-rtm_tca_policy-declaration-to-include.patch
+net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch
+bpf-add-extra-path-pointer-check-to-d_path-helper.patch
+lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch
+bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch
+bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch
+bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch
--- /dev/null
+From 34629a97f2b89f145cabe6c882b2b5d438c65f40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 May 2023 15:04:25 +0200
+Subject: spi: qup: Request DMA before enabling clocks
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+[ Upstream commit 0c331fd1dccfba657129380ee084b95c1cedfbef ]
+
+It is usually better to request all necessary resources (clocks,
+regulators, ...) before starting to make use of them. That way they do
+not change state in case one of the resources is not available yet and
+probe deferral (-EPROBE_DEFER) is necessary. This is particularly
+important for DMA channels and IOMMUs, which are not enforced by
+fw_devlink yet (unless you use fw_devlink.strict=1).
+
+spi-qup does this in the wrong order: the clocks are enabled and then
+disabled again when the DMA channels are not yet available.
+
+This causes issues in some cases: on most SoCs one of the SPI QUP
+clocks is shared with the UART controller. When using earlycon, the
+UART is actively used during boot but might not have probed yet,
+usually for the same reason (waiting for the DMA controller). In this
+case, the brief enable/disable cycle ends up gating the clock, and any
+further UART console output halts the system completely.
+
+Avoid this by requesting the DMA channels before changing the clock
+state.
+
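+A condensed sketch of the resulting ordering (illustrative only; the
+driver uses its own DMA setup helper and error labels, see the diff):
+
+/* request deferral-prone resources first - nothing to undo yet */
+chan = dma_request_chan(dev, "tx");
+if (IS_ERR(chan))
+	return PTR_ERR(chan);	/* may be -EPROBE_DEFER */
+
+/* only then start changing hardware state */
+ret = clk_prepare_enable(cclk);
+if (ret) {
+	dma_release_channel(chan);
+	return ret;
+}
+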
+Fixes: 612762e82ae6 ("spi: qup: Add DMA capabilities")
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Link: https://lore.kernel.org/r/20230518-spi-qup-clk-defer-v1-1-f49fc9ca4e02@gerhold.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-qup.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 8bf58510cca6d..2cc9bb413c108 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1030,23 +1030,8 @@ static int spi_qup_probe(struct platform_device *pdev)
+ return -ENXIO;
+ }
+
+- ret = clk_prepare_enable(cclk);
+- if (ret) {
+- dev_err(dev, "cannot enable core clock\n");
+- return ret;
+- }
+-
+- ret = clk_prepare_enable(iclk);
+- if (ret) {
+- clk_disable_unprepare(cclk);
+- dev_err(dev, "cannot enable iface clock\n");
+- return ret;
+- }
+-
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+- clk_disable_unprepare(cclk);
+- clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+@@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
++ ret = clk_prepare_enable(cclk);
++ if (ret) {
++ dev_err(dev, "cannot enable core clock\n");
++ goto error_dma;
++ }
++
++ ret = clk_prepare_enable(iclk);
++ if (ret) {
++ clk_disable_unprepare(cclk);
++ dev_err(dev, "cannot enable iface clock\n");
++ goto error_dma;
++ }
++
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+@@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+- goto error_dma;
++ goto error_clk;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+@@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+- goto error_dma;
++ goto error_clk;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+@@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
+
+ disable_pm:
+ pm_runtime_disable(&pdev->dev);
++error_clk:
++ clk_disable_unprepare(cclk);
++ clk_disable_unprepare(iclk);
+ error_dma:
+ spi_qup_release_dma(master);
+ error:
+- clk_disable_unprepare(cclk);
+- clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+ }
+--
+2.39.2
+
--- /dev/null
+From fcc1b510ab19fd0a02a1ef01cc6eeb5c0a26759f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 16:39:32 +0200
+Subject: wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 30bc32c7c1f975cc3c14e1c7dc437266311282cf ]
+
+Grab the sta_poll_lock spinlock in the mt7615_mac_sta_poll() routine in
+order to avoid possible races with mt7615_mac_add_txs() or
+mt7615_mac_fill_rx() removing the msta pointer from sta_poll_list.
+
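+In short, unlinking the entry in the poll loop now happens under the
+same lock that the other paths use for msta->poll_list (simplified
+view of the change below):
+
+spin_lock_bh(&dev->sta_poll_lock);
+list_del_init(&msta->poll_list);
+spin_unlock_bh(&dev->sta_poll_lock);
+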
+Fixes: a621372a04ac ("mt76: mt7615: rework mt7615_mac_sta_poll for usb code")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/48b23404b759de4f1db2ef85975c72a4aeb1097c.1684938695.git.lorenzo@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index b26617026e831..4364f73b501da 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -779,7 +779,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+
+ msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
+ poll_list);
++
++ spin_lock_bh(&dev->sta_poll_lock);
+ list_del_init(&msta->poll_list);
++ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
+
+--
+2.39.2
+