--- /dev/null
+From 27a30f66df386d641b79f804350807a5a0340d74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 03:44:56 +0000
+Subject: Bluetooth: Fix l2cap_disconnect_req deadlock
+
+From: Ying Hsu <yinghsu@chromium.org>
+
+[ Upstream commit 02c5ea5246a44d6ffde0fddebfc1d56188052976 ]
+
+L2CAP assumes that the locks conn->chan_lock and chan->lock are
+acquired in the order conn->chan_lock, chan->lock to avoid
+potential deadlock.
+For example, l2cap_sock_shutdown() acquires these locks in the order:
+ mutex_lock(&conn->chan_lock)
+ l2cap_chan_lock(chan)
+
+However, l2cap_disconnect_req acquires chan->lock in
+l2cap_get_chan_by_scid first and then acquires conn->chan_lock
+before calling l2cap_chan_del. This means that these locks are
+acquired in an unexpected order, which can lead to a deadlock:
+ l2cap_chan_lock(c)
+ mutex_lock(&conn->chan_lock)
+
+This patch releases chan->lock before acquiring conn->chan_lock
+to avoid the potential deadlock.
+
+Fixes: a2a9339e1c9d ("Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}")
+Signed-off-by: Ying Hsu <yinghsu@chromium.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b0bb4cf52a7ee..281d1b375838a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4362,7 +4362,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+
+ chan->ops->set_shutdown(chan);
+
++ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
++ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+ mutex_unlock(&conn->chan_lock);
+
+@@ -4401,7 +4403,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
++ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, 0);
+ mutex_unlock(&conn->chan_lock);
+
+--
+2.39.2
+
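The rule this fix restores can be shown with a minimal sketch. The struct and
field names below are hypothetical stand-ins, not the L2CAP ones, and the
caller is assumed to hold a reference on the channel so it cannot be freed
while its lock is briefly dropped: whenever both locks are needed, the
connection-level mutex is taken before the channel-level mutex, so a path that
already holds the channel lock must release it and re-acquire both in order.

  #include <linux/mutex.h>

  struct conn { struct mutex chan_lock; };
  struct chan { struct mutex lock; struct conn *conn; };

  /* Caller holds chan->lock on entry; it still holds it on return. */
  static void chan_del_in_order(struct chan *chan)
  {
          struct conn *conn = chan->conn;

          mutex_unlock(&chan->lock);      /* drop the inner lock first   */
          mutex_lock(&conn->chan_lock);   /* outer lock, agreed order    */
          mutex_lock(&chan->lock);        /* then the inner lock again   */
          /* ... remove the channel from the connection ... */
          mutex_unlock(&conn->chan_lock);
  }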
--- /dev/null
+From 5a03f5fcca3e66d77dbc320ebb003e9ddf3d7c6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Jun 2023 08:28:09 -0400
+Subject: Bluetooth: L2CAP: Add missing checks for invalid DCID
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit 75767213f3d9b97f63694d02260b6a49a2271876 ]
+
+When receiving a connect response, we should make sure that the DCID is
+within the valid range and that we don't already have another channel
+allocated for the same DCID.
+Missing checks may violate the specification (BLUETOOTH CORE SPECIFICATION
+Version 5.4 | Vol 3, Part A, Page 1046).
+
+Fixes: 40624183c202 ("Bluetooth: L2CAP: Add missing checks for invalid LE DCID")
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 281d1b375838a..25d88b8cfae97 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4007,6 +4007,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ result = __le16_to_cpu(rsp->result);
+ status = __le16_to_cpu(rsp->status);
+
++ if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
++ dcid > L2CAP_CID_DYN_END))
++ return -EPROTO;
++
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ dcid, scid, result, status);
+
+@@ -4038,6 +4042,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+
+ switch (result) {
+ case L2CAP_CR_SUCCESS:
++ if (__l2cap_get_chan_by_dcid(conn, dcid)) {
++ err = -EBADSLT;
++ break;
++ }
++
+ l2cap_state_change(chan, BT_CONFIG);
+ chan->ident = 0;
+ chan->dcid = dcid;
+--
+2.39.2
+
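The two added checks condense to the rule sketched below. This is not a
separate helper in the tree, just the same logic pulled together, using the
constants and lookup the patch itself relies on (L2CAP_CID_DYN_START,
L2CAP_CID_DYN_END, __l2cap_get_chan_by_dcid); the lookup assumes
conn->chan_lock is held, as it is in l2cap_connect_create_rsp():

  /* A successful connect response must carry a DCID in the dynamic
   * range, and that DCID must not already belong to another channel. */
  static int check_connect_rsp_dcid(struct l2cap_conn *conn,
                                    u16 result, u16 dcid)
  {
          if (result != L2CAP_CR_SUCCESS)
                  return 0;

          if (dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_DYN_END)
                  return -EPROTO;         /* outside the valid range */

          if (__l2cap_get_chan_by_dcid(conn, dcid))
                  return -EBADSLT;        /* DCID already in use */

          return 0;
  }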
--- /dev/null
+From 00343f9a9e854ab4c3ca96b9515629a21ecda64c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 00:54:06 -0700
+Subject: bnxt_en: Query default VLAN before VNIC setup on a VF
+
+From: Somnath Kotur <somnath.kotur@broadcom.com>
+
+[ Upstream commit 1a9e4f501bc6ff1b6ecb60df54fbf2b54db43bfe ]
+
+We need to call bnxt_hwrm_func_qcfg() on a VF to query the default
+VLAN that may be setup by the PF. If a default VLAN is enabled,
+the VF cannot support VLAN acceleration on the receive side and
+the VNIC must be setup to strip out the default VLAN tag. If a
+default VLAN is not enabled, the VF can support VLAN acceleration
+on the receive side. The VNIC should be set up to strip or not
+strip the VLAN based on the RX VLAN acceleration setting.
+
+Without this call to determine the default VLAN before calling
+bnxt_setup_vnic(), the VNIC may not be set up correctly. For
+example, bnxt_setup_vnic() may configure the VNIC to strip the VLAN
+tag based on stale default VLAN information. If RX VLAN acceleration
+is not enabled, the VLAN tag will be incorrectly stripped and the
+RX data path will not work correctly.
+
+Fixes: cf6645f8ebc6 ("bnxt_en: Add function for VF driver to query default VLAN.")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 89782e8e0b913..ee438674d6cc4 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5239,6 +5239,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+ goto err_out;
+ }
+
++ if (BNXT_VF(bp))
++ bnxt_hwrm_func_qcfg(bp);
++
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+--
+2.39.2
+
--- /dev/null
+From b7e965d59cda095d61422f560efe42ef7b64b9f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 20:28:15 +0200
+Subject: lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+[ Upstream commit 7c5d4801ecf0564c860033d89726b99723c55146 ]
+
+irq_cpu_rmap_release() calls cpu_rmap_put(), which may free the rmap.
+So we need to clear the pointer to our glue structure in rmap before
+doing that, not after.
+
+Fixes: 4e0473f1060a ("lib: cpu_rmap: Avoid use after free on rmap->obj array entries")
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/ZHo0vwquhOy3FaXc@decadent.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/cpu_rmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index f52389054a24f..a0de1b2579f71 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -271,8 +271,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+
+- cpu_rmap_put(glue->rmap);
+ glue->rmap->obj[glue->index] = NULL;
++ cpu_rmap_put(glue->rmap);
+ kfree(glue);
+ }
+
+--
+2.39.2
+
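The one-line reorder follows a general rule for release callbacks: clear every
back-reference stored in the refcounted object before dropping the reference
that may free it. A minimal sketch with hypothetical types, not the cpu_rmap
API:

  #include <linux/kref.h>
  #include <linux/slab.h>

  struct owner { struct kref ref; void **slots; };
  struct glue  { struct kref ref; struct owner *owner; unsigned int index; };

  static void owner_release(struct kref *ref)
  {
          kfree(container_of(ref, struct owner, ref));
  }

  static void glue_release(struct kref *ref)
  {
          struct glue *glue = container_of(ref, struct glue, ref);

          /* Clear the slot while glue->owner is still guaranteed alive... */
          glue->owner->slots[glue->index] = NULL;
          /* ...then drop the reference, which may free glue->owner. */
          kref_put(&glue->owner->ref, owner_release);
          kfree(glue);
  }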
--- /dev/null
+From b6c1ad285247d9a589cdd54ee7fdc733bfc57760 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 11:42:33 +0000
+Subject: net: sched: move rtm_tca_policy declaration to include file
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 886bc7d6ed3357975c5f1d3c784da96000d4bbb4 ]
+
+rtm_tca_policy is used from net/sched/sch_api.c and net/sched/cls_api.c,
+so it should be declared in an include file.
+
+This fixes the following sparse warning:
+net/sched/sch_api.c:1434:25: warning: symbol 'rtm_tca_policy' was not declared. Should it be static?
+
+Fixes: e331473fee3d ("net/sched: cls_api: add missing validation of netlink attributes")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/pkt_sched.h | 2 ++
+ net/sched/cls_api.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index b3869f97d37d7..85e059d3bc233 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -126,6 +126,8 @@ static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
+ return skb->protocol;
+ }
+
++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
++
+ /* Calculate maximal size of packet seen by hard_start_xmit
+ routine of this device.
+ */
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 32819d1e20754..8808133e78a37 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -31,8 +31,6 @@
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+
+-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+-
+ /* The list of all installed classifier types */
+ static LIST_HEAD(tcf_proto_base);
+
+--
+2.39.2
+
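The change follows the standard C pattern for sharing one object across
translation units: a single extern declaration in a header, included both by
the defining file and by every user, so sparse and the compiler can check that
the declaration and the definition agree. A tiny self-contained sketch with
made-up names:

  /* shared.h */
  #ifndef SHARED_H
  #define SHARED_H
  #define SHARED_MAX 7
  extern const int shared_table[SHARED_MAX + 1];
  #endif

  /* shared.c - the one definition; including the header is what lets
   * sparse see the declaration and stop warning that the non-static
   * symbol has no visible prototype. */
  #include "shared.h"
  const int shared_table[SHARED_MAX + 1] = { 1, 2, 3, 4, 5, 6, 7, 8 };

  /* user.c - no per-file "extern" copy, just the header. */
  #include "shared.h"
  int shared_first(void) { return shared_table[0]; }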
--- /dev/null
+From a0faa2bf4015e51918f7c6bda3d96392a7470c83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:15 +0000
+Subject: rfs: annotate lockless accesses to RFS sock flow table
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5c3b74a92aa285a3df722bf6329ba7ccf70346d6 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to the sock flow table.
+
+This also prevents a (smart?) compiler from removing the condition in:
+
+if (table->ents[index] != newval)
+ table->ents[index] = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 7 +++++--
+ net/core/dev.c | 6 ++++--
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 2cd7eb2b91739..d2fc7e6ca9ccc 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -670,8 +670,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+- if (table->ents[index] != val)
+- table->ents[index] = val;
++ /* The following WRITE_ONCE() is paired with the READ_ONCE()
++ * here, and another one in get_rps_cpu().
++ */
++ if (READ_ONCE(table->ents[index]) != val)
++ WRITE_ONCE(table->ents[index], val);
+ }
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a4d68da682322..04c43c7ecaca3 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3696,8 +3696,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ u32 next_cpu;
+ u32 ident;
+
+- /* First check into global flow table if there is a match */
+- ident = sock_flow_table->ents[hash & sock_flow_table->mask];
++ /* First check into global flow table if there is a match.
++ * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
++ */
++ ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+ if ((ident ^ hash) & ~rps_cpu_mask)
+ goto try_rps;
+
+--
+2.39.2
+
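The annotation idiom is small enough to show in isolation. A sketch with a
hypothetical table (kernel context assumed): reader and writer race by design,
so both sides go through the _ONCE helpers, and the writer only stores when
the value actually changes so an unchanged entry does not dirty a cache line
shared between CPUs. The following sk->sk_rxhash patch applies the same idiom
to a single socket field.

  #include <linux/compiler.h>
  #include <linux/types.h>

  static void record_entry(u32 *ents, u32 index, u32 newval)
  {
          /* Paired with the READ_ONCE() in lookup_entry(). */
          if (READ_ONCE(ents[index]) != newval)
                  WRITE_ONCE(ents[index], newval);
  }

  static u32 lookup_entry(const u32 *ents, u32 index)
  {
          /* Paired with the WRITE_ONCE() in record_entry(). */
          return READ_ONCE(ents[index]);
  }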
--- /dev/null
+From b055cb6a8d660460a57d67e296c7b16392a0abf4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 07:41:14 +0000
+Subject: rfs: annotate lockless accesses to sk->sk_rxhash
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1e5c647c3f6d4f8497dedcd226204e1880e0ffb3 ]
+
+Add READ_ONCE()/WRITE_ONCE() on accesses to sk->sk_rxhash.
+
+This also prevents a (smart?) compiler from removing the condition in:
+
+if (sk->sk_rxhash != newval)
+ sk->sk_rxhash = newval;
+
+We need the condition to avoid dirtying a shared cache line.
+
+Fixes: fec5e652e58f ("rfs: Receive Flow Steering")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ee1a2217a98c0..eccec5df94b9c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -957,8 +957,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+- if (sk->sk_state == TCP_ESTABLISHED)
+- sock_rps_record_flow_hash(sk->sk_rxhash);
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* This READ_ONCE() is paired with the WRITE_ONCE()
++ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
++ */
++ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
++ }
+ }
+ #endif
+ }
+@@ -967,15 +971,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_RPS
+- if (unlikely(sk->sk_rxhash != skb->hash))
+- sk->sk_rxhash = skb->hash;
++ /* The following WRITE_ONCE() is paired with the READ_ONCE()
++ * here, and another one in sock_rps_record_flow().
++ */
++ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
++ WRITE_ONCE(sk->sk_rxhash, skb->hash);
+ #endif
+ }
+
+ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ {
+ #ifdef CONFIG_RPS
+- sk->sk_rxhash = 0;
++ /* Paired with READ_ONCE() in sock_rps_record_flow() */
++ WRITE_ONCE(sk->sk_rxhash, 0);
+ #endif
+ }
+
+--
+2.39.2
+
i40iw-fix-build-warning-in-i40iw_manage_apbvt.patch
i40e-fix-build-warnings-in-i40e_alloc.h.patch
+spi-qup-request-dma-before-enabling-clocks.patch
+bluetooth-fix-l2cap_disconnect_req-deadlock.patch
+bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch
+rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch
+rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch
+net-sched-move-rtm_tca_policy-declaration-to-include.patch
+lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch
+bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch
--- /dev/null
+From 498ee6554384c41fa4518e94e6f35cbe56a52ecb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 May 2023 15:04:25 +0200
+Subject: spi: qup: Request DMA before enabling clocks
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+[ Upstream commit 0c331fd1dccfba657129380ee084b95c1cedfbef ]
+
+It is usually better to request all necessary resources (clocks,
+regulators, ...) before starting to make use of them. That way they do
+not change state in case one of the resources is not available yet and
+probe deferral (-EPROBE_DEFER) is necessary. This is particularly
+important for DMA channels and IOMMUs which are not enforced by
+fw_devlink yet (unless you use fw_devlink.strict=1).
+
+spi-qup does this in the wrong order: the clocks are enabled and
+then disabled again when the DMA channels are not yet available.
+
+This causes issues in some cases: On most SoCs one of the SPI QUP
+clocks is shared with the UART controller. When using earlycon, the
+UART is actively used during boot but might not have probed yet,
+usually for the same reason (waiting for the DMA controller). In this
+case, the brief enable/disable cycle ends up gating the clock, and any
+further UART console output halts the system completely.
+
+Avoid this by requesting the DMA channels before changing the clock
+state.
+
+Fixes: 612762e82ae6 ("spi: qup: Add DMA capabilities")
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Link: https://lore.kernel.org/r/20230518-spi-qup-clk-defer-v1-1-f49fc9ca4e02@gerhold.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-qup.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 1ca678bcb5279..933f9342529a4 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1003,23 +1003,8 @@ static int spi_qup_probe(struct platform_device *pdev)
+ return -ENXIO;
+ }
+
+- ret = clk_prepare_enable(cclk);
+- if (ret) {
+- dev_err(dev, "cannot enable core clock\n");
+- return ret;
+- }
+-
+- ret = clk_prepare_enable(iclk);
+- if (ret) {
+- clk_disable_unprepare(cclk);
+- dev_err(dev, "cannot enable iface clock\n");
+- return ret;
+- }
+-
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+- clk_disable_unprepare(cclk);
+- clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+@@ -1065,6 +1050,19 @@ static int spi_qup_probe(struct platform_device *pdev)
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
++ ret = clk_prepare_enable(cclk);
++ if (ret) {
++ dev_err(dev, "cannot enable core clock\n");
++ goto error_dma;
++ }
++
++ ret = clk_prepare_enable(iclk);
++ if (ret) {
++ clk_disable_unprepare(cclk);
++ dev_err(dev, "cannot enable iface clock\n");
++ goto error_dma;
++ }
++
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+@@ -1094,7 +1092,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+- goto error_dma;
++ goto error_clk;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+@@ -1118,7 +1116,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+- goto error_dma;
++ goto error_clk;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+@@ -1133,11 +1131,12 @@ static int spi_qup_probe(struct platform_device *pdev)
+
+ disable_pm:
+ pm_runtime_disable(&pdev->dev);
++error_clk:
++ clk_disable_unprepare(cclk);
++ clk_disable_unprepare(iclk);
+ error_dma:
+ spi_qup_release_dma(master);
+ error:
+- clk_disable_unprepare(cclk);
+- clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+ }
+--
+2.39.2
+
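The same ordering rule generalizes to other drivers: request every resource
that can still return -EPROBE_DEFER before doing anything with side effects on
shared hardware state. A hedged sketch with hypothetical names, not the
spi-qup code, using only standard helpers (dma_request_chan(), devm_clk_get(),
clk_prepare_enable()):

  #include <linux/clk.h>
  #include <linux/dmaengine.h>
  #include <linux/err.h>
  #include <linux/platform_device.h>

  static int demo_probe(struct platform_device *pdev)
  {
          struct dma_chan *rx;
          struct clk *core;
          int ret;

          /* Deferral-prone requests first: nothing has been touched yet,
           * so -EPROBE_DEFER here is harmless. */
          rx = dma_request_chan(&pdev->dev, "rx");
          if (IS_ERR(rx))
                  return PTR_ERR(rx);

          core = devm_clk_get(&pdev->dev, "core");
          if (IS_ERR(core)) {
                  ret = PTR_ERR(core);
                  goto err_dma;
          }

          /* Side effects only after all deferral points have passed. */
          ret = clk_prepare_enable(core);
          if (ret)
                  goto err_dma;

          return 0;

  err_dma:
          dma_release_channel(rx);
          return ret;
  }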