--- /dev/null
+From 7c1e19351f3efdef300eac110d695b85e07ed142 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Sep 2023 14:02:11 +0800
+Subject: ASoC: imx-audmix: Fix return error with devm_clk_get()
+
+From: Shengjiu Wang <shengjiu.wang@nxp.com>
+
+[ Upstream commit b19a5733de255cabba5feecabf6e900638b582d1 ]
+
+devm_clk_get() can return an -EPROBE_DEFER error. Rewriting the
+error code to -EINVAL is not correct, because it causes the
+-EPROBE_DEFER error to be mishandled.
+
+This patch fixes the function to return the original error code.
+
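+For reference, a minimal sketch of the pattern (not the exact driver
+code): propagate whatever devm_clk_get() returned, so that -EPROBE_DEFER
+reaches the driver core and the probe is retried later:
+
+	clk = devm_clk_get(dev, "mclk");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk); /* may be -EPROBE_DEFER */
+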
+Fixes: b86ef5367761 ("ASoC: fsl: Add Audio Mixer machine driver")
+Signed-off-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Reviewed-by: Daniel Baluta <daniel.baluta@nxp.com>
+Link: https://lore.kernel.org/r/1694757731-18308-1-git-send-email-shengjiu.wang@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/imx-audmix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 77d8234c7ac49..bb2aab1d2389e 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -322,7 +322,7 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->cpu_mclk)) {
+ ret = PTR_ERR(priv->cpu_mclk);
+ dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
+- return -EINVAL;
++ return ret;
+ }
+
+ priv->audmix_pdev = audmix_pdev;
+--
+2.40.1
+
--- /dev/null
+From 4495b55f1694674d04c01776b7c892a41211de6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Sep 2023 11:05:04 +0200
+Subject: ASoC: meson: spdifin: start hw on dai probe
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit aedf323b66b2b875137422ecb7d2525179759076 ]
+
+For spdif input to report the locked rate correctly, even when no capture
+is running, the HW and reference clock must be started as soon as
+the dai is probed.
+
+Fixes: 5ce5658375e6 ("ASoC: meson: add axg spdif input")
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20230907090504.12700-1-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/axg-spdifin.c | 49 ++++++++++++-----------------------
+ 1 file changed, 17 insertions(+), 32 deletions(-)
+
+diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
+index d0d09f945b489..7aaded1fc376b 100644
+--- a/sound/soc/meson/axg-spdifin.c
++++ b/sound/soc/meson/axg-spdifin.c
+@@ -112,34 +112,6 @@ static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+- int ret;
+-
+- ret = clk_prepare_enable(priv->refclk);
+- if (ret) {
+- dev_err(dai->dev,
+- "failed to enable spdifin reference clock\n");
+- return ret;
+- }
+-
+- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+- SPDIFIN_CTRL0_EN);
+-
+- return 0;
+-}
+-
+-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+-
+- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+- clk_disable_unprepare(priv->refclk);
+-}
+-
+ static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
+ unsigned int val,
+ unsigned int num_per_reg,
+@@ -251,25 +223,38 @@ static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
+ ret = axg_spdifin_sample_mode_config(dai, priv);
+ if (ret) {
+ dev_err(dai->dev, "mode configuration failed\n");
+- clk_disable_unprepare(priv->pclk);
+- return ret;
++ goto pclk_err;
+ }
+
++ ret = clk_prepare_enable(priv->refclk);
++ if (ret) {
++ dev_err(dai->dev,
++ "failed to enable spdifin reference clock\n");
++ goto pclk_err;
++ }
++
++ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
++ SPDIFIN_CTRL0_EN);
++
+ return 0;
++
++pclk_err:
++ clk_disable_unprepare(priv->pclk);
++ return ret;
+ }
+
+ static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
+ {
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+
++ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
++ clk_disable_unprepare(priv->refclk);
+ clk_disable_unprepare(priv->pclk);
+ return 0;
+ }
+
+ static const struct snd_soc_dai_ops axg_spdifin_ops = {
+ .prepare = axg_spdifin_prepare,
+- .startup = axg_spdifin_startup,
+- .shutdown = axg_spdifin_shutdown,
+ };
+
+ static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
+--
+2.40.1
+
--- /dev/null
+From 588d76922868d783c2495c07238125cdaec288ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 17:36:10 +0200
+Subject: bnxt_en: Flush XDP for bnxt_poll_nitroa0()'s NAPI
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit edc0140cc3b7b91874ebe70eb7d2a851e8817ccc ]
+
+bnxt_poll_nitroa0() invokes bnxt_rx_pkt() which can run an XDP program
+which in turn can return XDP_REDIRECT. bnxt_rx_pkt() is also used by
+__bnxt_poll_work() which flushes (xdp_do_flush()) the packets after each
+round. bnxt_poll_nitroa0() lacks this feature.
+xdp_do_flush() should be invoked before leaving the NAPI callback.
+
+Invoke xdp_do_flush() after a redirect in bnxt_poll_nitroa0() NAPI.
+
+Cc: Michael Chan <michael.chan@broadcom.com>
+Fixes: f18c2b77b2e4e ("bnxt_en: optimized XDP_REDIRECT support")
+Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index d8366351cf14a..c67a108c2c07f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2404,6 +2404,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ struct rx_cmp_ext *rxcmp1;
+ u32 cp_cons, tmp_raw_cons;
+ u32 raw_cons = cpr->cp_raw_cons;
++ bool flush_xdp = false;
+ u32 rx_pkts = 0;
+ u8 event = 0;
+
+@@ -2438,6 +2439,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
++ if (event & BNXT_REDIRECT_EVENT)
++ flush_xdp = true;
+ } else if (unlikely(TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE)) {
+ bnxt_hwrm_handler(bp, txcmp);
+@@ -2457,6 +2460,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+
+ if (event & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
++ if (flush_xdp)
++ xdp_do_flush();
+
+ if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+ napi_complete_done(napi, rx_pkts);
+--
+2.40.1
+
--- /dev/null
+From 5e20896b0d6758a2197c3f166e0f578f1310144e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Sep 2023 15:28:14 +0200
+Subject: bpf: Avoid deadlock when using queue and stack maps from NMI
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit a34a9f1a19afe9c60ca0ea61dfeee63a1c2baac8 ]
+
+Syzbot discovered that the queue and stack maps can deadlock if they are
+being used from a BPF program that can be called from NMI context (such as
+one that is attached to a perf HW counter event). To fix this, add an
+in_nmi() check and use raw_spin_trylock() in NMI context, erroring out if
+grabbing the lock fails.
+
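+A sketch of the deadlock being avoided (raw_spin_lock_irqsave() disables
+interrupts but cannot mask NMIs):
+
+	/* task context */
+	raw_spin_lock_irqsave(&qs->lock, flags);
+	  <NMI fires: perf hardware counter overflows>
+	    /* NMI context, same CPU */
+	    bpf_prog_run() -> bpf_map_pop_elem()
+	      raw_spin_lock_irqsave(&qs->lock, ...) /* spins forever */
+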
+Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
+Reported-by: Hsin-Wei Hung <hsinweih@uci.edu>
+Tested-by: Hsin-Wei Hung <hsinweih@uci.edu>
+Co-developed-by: Hsin-Wei Hung <hsinweih@uci.edu>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Link: https://lore.kernel.org/r/20230911132815.717240-1-toke@redhat.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/queue_stack_maps.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
+index 0ee2347ba510d..a047a2053d41a 100644
+--- a/kernel/bpf/queue_stack_maps.c
++++ b/kernel/bpf/queue_stack_maps.c
+@@ -111,7 +111,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
+ int err = 0;
+ void *ptr;
+
+- raw_spin_lock_irqsave(&qs->lock, flags);
++ if (in_nmi()) {
++ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++ return -EBUSY;
++ } else {
++ raw_spin_lock_irqsave(&qs->lock, flags);
++ }
+
+ if (queue_stack_map_is_empty(qs)) {
+ memset(value, 0, qs->map.value_size);
+@@ -141,7 +146,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
+ void *ptr;
+ u32 index;
+
+- raw_spin_lock_irqsave(&qs->lock, flags);
++ if (in_nmi()) {
++ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++ return -EBUSY;
++ } else {
++ raw_spin_lock_irqsave(&qs->lock, flags);
++ }
+
+ if (queue_stack_map_is_empty(qs)) {
+ memset(value, 0, qs->map.value_size);
+@@ -206,7 +216,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
+ if (flags & BPF_NOEXIST || flags > BPF_EXIST)
+ return -EINVAL;
+
+- raw_spin_lock_irqsave(&qs->lock, irq_flags);
++ if (in_nmi()) {
++ if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
++ return -EBUSY;
++ } else {
++ raw_spin_lock_irqsave(&qs->lock, irq_flags);
++ }
+
+ if (queue_stack_map_is_full(qs)) {
+ if (!replace) {
+--
+2.40.1
+
--- /dev/null
+From 3bf95f5ec0d49f60e4d7e5a1abb36adf0a39fd85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Sep 2023 19:00:35 +0000
+Subject: dccp: fix dccp_v4_err()/dccp_v6_err() again
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6af289746a636f71f4c0535a9801774118486c7a ]
+
+dh->dccph_x is the 9th byte (offset 8) in "struct dccp_hdr",
+not in the "byte 7" as Jann claimed.
+
+We need to make sure the ICMP messages are big enough,
+using more standard ways (no more assumptions).
+
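+For reference, a sketch of the fixed part of struct dccp_hdr (field
+offsets per include/uapi/linux/dccp.h), which puts dccph_x at offset 8:
+
+	offset 0: dccph_sport    (__be16)
+	offset 2: dccph_dport    (__be16)
+	offset 4: dccph_doff     (__u8)
+	offset 5: dccph_cscov/dccph_ccval (two 4-bit fields)
+	offset 6: dccph_checksum (__sum16)
+	offset 8: dccph_x/dccph_type/dccph_reserved (bitfields)
+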
+syzbot reported:
+BUG: KMSAN: uninit-value in pskb_may_pull_reason include/linux/skbuff.h:2667 [inline]
+BUG: KMSAN: uninit-value in pskb_may_pull include/linux/skbuff.h:2681 [inline]
+BUG: KMSAN: uninit-value in dccp_v6_err+0x426/0x1aa0 net/dccp/ipv6.c:94
+pskb_may_pull_reason include/linux/skbuff.h:2667 [inline]
+pskb_may_pull include/linux/skbuff.h:2681 [inline]
+dccp_v6_err+0x426/0x1aa0 net/dccp/ipv6.c:94
+icmpv6_notify+0x4c7/0x880 net/ipv6/icmp.c:867
+icmpv6_rcv+0x19d5/0x30d0
+ip6_protocol_deliver_rcu+0xda6/0x2a60 net/ipv6/ip6_input.c:438
+ip6_input_finish net/ipv6/ip6_input.c:483 [inline]
+NF_HOOK include/linux/netfilter.h:304 [inline]
+ip6_input+0x15d/0x430 net/ipv6/ip6_input.c:492
+ip6_mc_input+0xa7e/0xc80 net/ipv6/ip6_input.c:586
+dst_input include/net/dst.h:468 [inline]
+ip6_rcv_finish+0x5db/0x870 net/ipv6/ip6_input.c:79
+NF_HOOK include/linux/netfilter.h:304 [inline]
+ipv6_rcv+0xda/0x390 net/ipv6/ip6_input.c:310
+__netif_receive_skb_one_core net/core/dev.c:5523 [inline]
+__netif_receive_skb+0x1a6/0x5a0 net/core/dev.c:5637
+netif_receive_skb_internal net/core/dev.c:5723 [inline]
+netif_receive_skb+0x58/0x660 net/core/dev.c:5782
+tun_rx_batched+0x83b/0x920
+tun_get_user+0x564c/0x6940 drivers/net/tun.c:2002
+tun_chr_write_iter+0x3af/0x5d0 drivers/net/tun.c:2048
+call_write_iter include/linux/fs.h:1985 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x8ef/0x15c0 fs/read_write.c:584
+ksys_write+0x20f/0x4c0 fs/read_write.c:637
+__do_sys_write fs/read_write.c:649 [inline]
+__se_sys_write fs/read_write.c:646 [inline]
+__x64_sys_write+0x93/0xd0 fs/read_write.c:646
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Uninit was created at:
+slab_post_alloc_hook+0x12f/0xb70 mm/slab.h:767
+slab_alloc_node mm/slub.c:3478 [inline]
+kmem_cache_alloc_node+0x577/0xa80 mm/slub.c:3523
+kmalloc_reserve+0x13d/0x4a0 net/core/skbuff.c:559
+__alloc_skb+0x318/0x740 net/core/skbuff.c:650
+alloc_skb include/linux/skbuff.h:1286 [inline]
+alloc_skb_with_frags+0xc8/0xbd0 net/core/skbuff.c:6313
+sock_alloc_send_pskb+0xa80/0xbf0 net/core/sock.c:2795
+tun_alloc_skb drivers/net/tun.c:1531 [inline]
+tun_get_user+0x23cf/0x6940 drivers/net/tun.c:1846
+tun_chr_write_iter+0x3af/0x5d0 drivers/net/tun.c:2048
+call_write_iter include/linux/fs.h:1985 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x8ef/0x15c0 fs/read_write.c:584
+ksys_write+0x20f/0x4c0 fs/read_write.c:637
+__do_sys_write fs/read_write.c:649 [inline]
+__se_sys_write fs/read_write.c:646 [inline]
+__x64_sys_write+0x93/0xd0 fs/read_write.c:646
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+CPU: 0 PID: 4995 Comm: syz-executor153 Not tainted 6.6.0-rc1-syzkaller-00014-ga747acc0b752 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/04/2023
+
+Fixes: 977ad86c2a1b ("dccp: Fix out of bounds access in DCCP error handler")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Jann Horn <jannh@google.com>
+Reviewed-by: Jann Horn <jannh@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dccp/ipv4.c | 9 ++-------
+ net/dccp/ipv6.c | 9 ++-------
+ 2 files changed, 4 insertions(+), 14 deletions(-)
+
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 398dc3e47d0c8..f2a0a4e6dd748 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -243,13 +243,8 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
+ int err;
+ struct net *net = dev_net(skb->dev);
+
+- /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+- * which is in byte 7 of the dccp header.
+- * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+- *
+- * Later on, we want to access the sequence number fields, which are
+- * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+- */
++ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++ return -EINVAL;
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index bfe11e96af7c9..6d6bbd43a1419 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -76,13 +76,8 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ __u64 seq;
+ struct net *net = dev_net(skb->dev);
+
+- /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+- * which is in byte 7 of the dccp header.
+- * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+- *
+- * Later on, we want to access the sequence number fields, which are
+- * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+- */
++ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++ return -EINVAL;
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+--
+2.40.1
+
--- /dev/null
+From a431f0780ea80b393f821bbada81ebfe70527a86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Sep 2023 17:44:57 +0200
+Subject: i40e: Fix VF VLAN offloading when port VLAN is configured
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit d0d362ffa33da4acdcf7aee2116ceef8c8fef658 ]
+
+If port VLAN is configured on a VF then any other VLANs on top of this VF
+are broken.
+
+During the i40e_ndo_set_vf_port_vlan() call the i40e driver resets the VF
+and the iavf driver asks the PF (using VIRTCHNL_OP_GET_VF_RESOURCES) for
+VF capabilities, but this reset occurs too early, prior to the setting of
+the vf->info.pvid field. Because this field can still be zero during
+i40e_vc_get_vf_resources_msg(), the VIRTCHNL_VF_OFFLOAD_VLAN capability
+is wrongly reported to the iavf driver.
+
+This is wrong because the iavf driver should not report the VLAN
+offloading capability when a port VLAN is configured, as i40e does not
+support QinQ offloading.
+
+Fix the issue by moving the VF reset after the setting of the
+vf->port_vlan_id field.
+
+Without this patch:
+$ echo 1 > /sys/class/net/enp2s0f0/device/sriov_numvfs
+$ ip link set enp2s0f0 vf 0 vlan 3
+$ ip link set enp2s0f0v0 up
+$ ip link add link enp2s0f0v0 name vlan4 type vlan id 4
+$ ip link set vlan4 up
+...
+$ ethtool -k enp2s0f0v0 | grep vlan-offload
+rx-vlan-offload: on
+tx-vlan-offload: on
+$ dmesg -l err | grep iavf
+[1292500.742914] iavf 0000:02:02.0: Failed to add VLAN filter, error IAVF_ERR_INVALID_QP_ID
+
+With this patch:
+$ echo 1 > /sys/class/net/enp2s0f0/device/sriov_numvfs
+$ ip link set enp2s0f0 vf 0 vlan 3
+$ ip link set enp2s0f0v0 up
+$ ip link add link enp2s0f0v0 name vlan4 type vlan id 4
+$ ip link set vlan4 up
+...
+$ ethtool -k enp2s0f0v0 | grep vlan-offload
+rx-vlan-offload: off [requested on]
+tx-vlan-offload: off [requested on]
+$ dmesg -l err | grep iavf
+
+Fixes: f9b4b6278d51 ("i40e: Reset the VF upon conflicting VLAN configuration")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index bb2a79b70c3ae..dfaa34f2473ab 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -4332,9 +4332,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ /* duplicate request, so just return success */
+ goto error_pvid;
+
+- i40e_vc_reset_vf(vf, true);
+- /* During reset the VF got a new VSI, so refresh a pointer. */
+- vsi = pf->vsi[vf->lan_vsi_idx];
+ /* Locked once because multiple functions below iterate list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+@@ -4420,6 +4417,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
++ i40e_vc_reset_vf(vf, true);
++ /* During reset the VF got a new VSI, so refresh a pointer. */
++ vsi = pf->vsi[vf->lan_vsi_idx];
++
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+--
+2.40.1
+
--- /dev/null
+From 878721b8d51c762db20e61f70c33d7e4a8b605b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 22:12:57 -0700
+Subject: ipv4: fix null-deref in ipv4_link_failure
+
+From: Kyle Zeng <zengyhkyle@gmail.com>
+
+[ Upstream commit 0113d9c9d1ccc07f5a3710dac4aa24b6d711278c ]
+
+Currently, we assume the skb is associated with a device before calling
+__ip_options_compile, which is not always the case if it is re-routed by
+ipvs.
+When skb->dev is NULL, dev_net(skb->dev) causes a NULL-pointer dereference.
+This patch adds a check for this edge case and switches to using the
+net_device from the rtable when skb->dev is NULL.
+
+Fixes: ed0de45a1008 ("ipv4: recompile ip options in ipv4_link_failure")
+Suggested-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Kyle Zeng <zengyhkyle@gmail.com>
+Cc: Stephen Suryaputra <ssuryaextr@gmail.com>
+Cc: Vadim Fedorenko <vfedorenko@novek.ru>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/route.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 3ddeb4fc0d08a..445b1a2966d79 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1240,6 +1240,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+
+ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ {
++ struct net_device *dev;
+ struct ip_options opt;
+ int res;
+
+@@ -1257,7 +1258,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+ rcu_read_lock();
+- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
++ dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
++ res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
+--
+2.40.1
+
--- /dev/null
+From 3b30abd64cb7ae3246c342dd8216db82b395a864 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Sep 2023 12:46:27 +0200
+Subject: locking/seqlock: Do the lockdep annotation before locking in
+ do_write_seqcount_begin_nested()
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit 41b43b6c6e30a832c790b010a06772e793bca193 ]
+
+It was brought up by Tetsuo that the following sequence:
+
+ write_seqlock_irqsave()
+ printk_deferred_enter()
+
+could lead to a deadlock if the lockdep annotation within
+write_seqlock_irqsave() triggers.
+
+The problem is that the sequence counter is incremented before the lockdep
+annotation is performed. The lockdep splat would then attempt to invoke
+printk() but the reader side, of the same seqcount, could have a
+tty_port::lock acquired waiting for the sequence number to become even again.
+
+The other lockdep annotations come before the actual locking because "we
+want to see the locking error before it happens". There is no reason why
+seqcount should be different here.
+
+Do the lockdep annotation first then perform the locking operation (the
+sequence increment).
+
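+A sketch of the reported scenario (the printk() console path needs
+tty_port::lock, which the reader already holds):
+
+	/* writer */                          /* reader, holds tty_port::lock */
+	write_seqlock_irqsave(&sl, flags);
+	  /* sequence now odd */              read_seqbegin(&sl) /* spins: odd */
+	  lockdep annotation -> splat
+	    printk() -> console/tty output
+	      -> needs tty_port::lock         /* deadlock */
+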
+Fixes: 1ca7d67cf5d5a ("seqcount: Add lockdep functionality to seqcount/seqlock structures")
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/20230920104627._DTHgPyA@linutronix.de
+
+Closes: https://lore.kernel.org/20230621130641.-5iueY1I@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 008fa88ad58e7..0928a60b8f825 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -511,8 +511,8 @@ do { \
+
+ static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+ {
+- do_raw_write_seqcount_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
++ do_raw_write_seqcount_begin(s);
+ }
+
+ /**
+--
+2.40.1
+
--- /dev/null
+From 4c840c371691b2c81ec5d897dd1d299d66a65e9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 09:13:51 +0000
+Subject: net: bridge: use DEV_STATS_INC()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 44bdb313da57322c9b3c108eb66981c6ec6509f4 ]
+
+syzbot/KCSAN reported data-races in br_handle_frame_finish() [1]
+This function can run from multiple cpus without mutual exclusion.
+
+Adopt the SMP-safe DEV_STATS_INC() to update dev->stats fields.
+
+Handle updates to dev->stats.tx_dropped while we are at it.
+
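+DEV_STATS_INC() avoids lost updates because the increment is atomic; its
+definition in include/linux/netdevice.h is essentially (sketch):
+
+	#define DEV_STATS_INC(DEV, FIELD) \
+		atomic_long_inc(&(DEV)->stats.__##FIELD)
+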
+[1]
+BUG: KCSAN: data-race in br_handle_frame_finish / br_handle_frame_finish
+
+read-write to 0xffff8881374b2178 of 8 bytes by interrupt on cpu 1:
+br_handle_frame_finish+0xd4f/0xef0 net/bridge/br_input.c:189
+br_nf_hook_thresh+0x1ed/0x220
+br_nf_pre_routing_finish_ipv6+0x50f/0x540
+NF_HOOK include/linux/netfilter.h:304 [inline]
+br_nf_pre_routing_ipv6+0x1e3/0x2a0 net/bridge/br_netfilter_ipv6.c:178
+br_nf_pre_routing+0x526/0xba0 net/bridge/br_netfilter_hooks.c:508
+nf_hook_entry_hookfn include/linux/netfilter.h:144 [inline]
+nf_hook_bridge_pre net/bridge/br_input.c:272 [inline]
+br_handle_frame+0x4c9/0x940 net/bridge/br_input.c:417
+__netif_receive_skb_core+0xa8a/0x21e0 net/core/dev.c:5417
+__netif_receive_skb_one_core net/core/dev.c:5521 [inline]
+__netif_receive_skb+0x57/0x1b0 net/core/dev.c:5637
+process_backlog+0x21f/0x380 net/core/dev.c:5965
+__napi_poll+0x60/0x3b0 net/core/dev.c:6527
+napi_poll net/core/dev.c:6594 [inline]
+net_rx_action+0x32b/0x750 net/core/dev.c:6727
+__do_softirq+0xc1/0x265 kernel/softirq.c:553
+run_ksoftirqd+0x17/0x20 kernel/softirq.c:921
+smpboot_thread_fn+0x30a/0x4a0 kernel/smpboot.c:164
+kthread+0x1d7/0x210 kernel/kthread.c:388
+ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:304
+
+read-write to 0xffff8881374b2178 of 8 bytes by interrupt on cpu 0:
+br_handle_frame_finish+0xd4f/0xef0 net/bridge/br_input.c:189
+br_nf_hook_thresh+0x1ed/0x220
+br_nf_pre_routing_finish_ipv6+0x50f/0x540
+NF_HOOK include/linux/netfilter.h:304 [inline]
+br_nf_pre_routing_ipv6+0x1e3/0x2a0 net/bridge/br_netfilter_ipv6.c:178
+br_nf_pre_routing+0x526/0xba0 net/bridge/br_netfilter_hooks.c:508
+nf_hook_entry_hookfn include/linux/netfilter.h:144 [inline]
+nf_hook_bridge_pre net/bridge/br_input.c:272 [inline]
+br_handle_frame+0x4c9/0x940 net/bridge/br_input.c:417
+__netif_receive_skb_core+0xa8a/0x21e0 net/core/dev.c:5417
+__netif_receive_skb_one_core net/core/dev.c:5521 [inline]
+__netif_receive_skb+0x57/0x1b0 net/core/dev.c:5637
+process_backlog+0x21f/0x380 net/core/dev.c:5965
+__napi_poll+0x60/0x3b0 net/core/dev.c:6527
+napi_poll net/core/dev.c:6594 [inline]
+net_rx_action+0x32b/0x750 net/core/dev.c:6727
+__do_softirq+0xc1/0x265 kernel/softirq.c:553
+do_softirq+0x5e/0x90 kernel/softirq.c:454
+__local_bh_enable_ip+0x64/0x70 kernel/softirq.c:381
+__raw_spin_unlock_bh include/linux/spinlock_api_smp.h:167 [inline]
+_raw_spin_unlock_bh+0x36/0x40 kernel/locking/spinlock.c:210
+spin_unlock_bh include/linux/spinlock.h:396 [inline]
+batadv_tt_local_purge+0x1a8/0x1f0 net/batman-adv/translation-table.c:1356
+batadv_tt_purge+0x2b/0x630 net/batman-adv/translation-table.c:3560
+process_one_work kernel/workqueue.c:2630 [inline]
+process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2703
+worker_thread+0x525/0x730 kernel/workqueue.c:2784
+kthread+0x1d7/0x210 kernel/kthread.c:388
+ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:304
+
+value changed: 0x00000000000d7190 -> 0x00000000000d7191
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 14848 Comm: kworker/u4:11 Not tainted 6.6.0-rc1-syzkaller-00236-gad8a69f361b9 #0
+
+Fixes: 1c29fc4989bc ("[BRIDGE]: keep track of received multicast packets")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Roopa Prabhu <roopa@nvidia.com>
+Cc: Nikolay Aleksandrov <razor@blackwall.org>
+Cc: bridge@lists.linux-foundation.org
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://lore.kernel.org/r/20230918091351.1356153-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_forward.c | 4 ++--
+ net/bridge/br_input.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 4610f3a13966f..f2ef75c7ccc68 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -118,7 +118,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb) {
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ return -ENOMEM;
+ }
+
+@@ -255,7 +255,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ return;
+ }
+
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index bf5bf148091f9..52dd0708fd143 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -145,12 +145,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ if ((mdst && mdst->host_joined) ||
+ br_multicast_is_router(br)) {
+ local_rcv = true;
+- br->dev->stats.multicast++;
++ DEV_STATS_INC(br->dev, multicast);
+ }
+ mcast_hit = true;
+ } else {
+ local_rcv = true;
+- br->dev->stats.multicast++;
++ DEV_STATS_INC(br->dev, multicast);
+ }
+ break;
+ case BR_PKT_UNICAST:
+--
+2.40.1
+
--- /dev/null
+From ee099048cb6f0622c2bb4a569462d600e5964255 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 15:48:40 +0800
+Subject: net: hns3: add 5ms delay before clear firmware reset irq source
+
+From: Jie Wang <wangjie125@huawei.com>
+
+[ Upstream commit 0770063096d5da4a8e467b6e73c1646a75589628 ]
+
+Currently the reset process in hns3 and the firmware watchdog init
+process are asynchronous. We assume that firmware watchdog initialization
+has completed before hns3 clears the firmware interrupt source. However,
+firmware initialization may not complete that early.
+
+So add a delay before hns3 clears the firmware interrupt source; a 5 ms
+delay is enough to avoid a second firmware reset interrupt.
+
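+A sketch of the assumed race:
+
+	driver                              firmware
+	------                              --------
+	IMP reset interrupt fires
+	clears the reset irq source         watchdog init still running
+	                                    watchdog init completes and
+	                                    asserts the source again
+	-> spurious second reset interrupt
+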
+Fixes: c1a81619d73a ("net: hns3: Add mailbox interrupt handling to PF driver")
+Signed-off-by: Jie Wang <wangjie125@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 49eeeb0c9a1f8..deba485ced1bd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3125,8 +3125,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+ {
++#define HCLGE_IMP_RESET_DELAY 5
++
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
++ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
++ mdelay(HCLGE_IMP_RESET_DELAY);
++
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+--
+2.40.1
+
--- /dev/null
+From db7890e460d50ff1ff625b5b3a7906b8aa4f21e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 15:48:38 +0800
+Subject: net: hns3: only enable unicast promisc when mac table full
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit f2ed304922a55690529bcca59678dd92d7466ce8 ]
+
+Currently, the driver enables unicast promiscuous mode for the function
+once configuring a MAC address fails. That is unreasonable when the
+failure is caused by using the same MAC address as another function. So
+only enable unicast promiscuous mode when the MAC table is full.
+
+Fixes: c631c696823c ("net: hns3: refactor the promisc mode setting")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 47f8f66cf7ecd..49eeeb0c9a1f8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -7850,7 +7850,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+- else
++ else if (hclge_is_umv_space_full(vport, true))
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+--
+2.40.1
+
--- /dev/null
+From 5f7252886eb6776cce95452873a4537926944b23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 16:56:23 +0300
+Subject: net: rds: Fix possible NULL-pointer dereference
+
+From: Artem Chernyshev <artem.chernyshev@red-soft.ru>
+
+[ Upstream commit f1d95df0f31048f1c59092648997686e3f7d9478 ]
+
+In rds_rdma_cm_event_handler_cmn(), check that the conn pointer exists
+before dereferencing it as the rdma_set_service_type() argument.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: fd261ce6a30e ("rds: rdma: update rdma transport for tos")
+Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rds/rdma_transport.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
+index 5f741e51b4baa..bb38124a5d3db 100644
+--- a/net/rds/rdma_transport.c
++++ b/net/rds/rdma_transport.c
+@@ -86,10 +86,12 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ break;
+
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+- rdma_set_service_type(cm_id, conn->c_tos);
+- /* XXX do we need to clean up if this fails? */
+- ret = rdma_resolve_route(cm_id,
++ if (conn) {
++ rdma_set_service_type(cm_id, conn->c_tos);
++ /* XXX do we need to clean up if this fails? */
++ ret = rdma_resolve_route(cm_id,
+ RDS_RDMA_RESOLVE_TIMEOUT_MS);
++ }
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+--
+2.40.1
+
--- /dev/null
+From 0da842b2502efec3d15d34c3188e5cdc2d9ebaf2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Sep 2023 20:04:45 +0200
+Subject: netfilter: ipset: Fix race between IPSET_CMD_ADD and
+ IPSET_CMD_SWAP
+
+From: Jozsef Kadlecsik <kadlec@netfilter.org>
+
+[ Upstream commit 7433b6d2afd512d04398c73aa984d1e285be125b ]
+
+Kyle Zeng reported that there is a race between IPSET_CMD_ADD and IPSET_CMD_SWAP
+in netfilter/ip_set, which can lead to the invocation of `__ip_set_put` on a
+wrong `set`, triggering the `BUG_ON(set->ref == 0);` check in it.
+
+The race is caused by using the wrong reference counter, i.e. the ref counter instead
+of ref_netlink.
+
+Fixes: 24e227896bbf ("netfilter: ipset: Add schedule point in call_ad().")
+Reported-by: Kyle Zeng <zengyhkyle@gmail.com>
+Closes: https://lore.kernel.org/netfilter-devel/ZPZqetxOmH+w%2Fmyc@westworld/#r
+Tested-by: Kyle Zeng <zengyhkyle@gmail.com>
+Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_core.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 55ac0cc12657c..26613e3731d02 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -682,6 +682,14 @@ __ip_set_put(struct ip_set *set)
+ /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
+ * a separate reference counter
+ */
++static void
++__ip_set_get_netlink(struct ip_set *set)
++{
++ write_lock_bh(&ip_set_ref_lock);
++ set->ref_netlink++;
++ write_unlock_bh(&ip_set_ref_lock);
++}
++
+ static void
+ __ip_set_put_netlink(struct ip_set *set)
+ {
+@@ -1705,11 +1713,11 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+
+ do {
+ if (retried) {
+- __ip_set_get(set);
++ __ip_set_get_netlink(set);
+ nfnl_unlock(NFNL_SUBSYS_IPSET);
+ cond_resched();
+ nfnl_lock(NFNL_SUBSYS_IPSET);
+- __ip_set_put(set);
++ __ip_set_put_netlink(set);
+ }
+
+ ip_set_lock(set);
+--
+2.40.1
+
--- /dev/null
+From 62a873bb8b917af7af2cbb84f32c30de11b10710 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 Sep 2023 19:04:45 +0200
+Subject: netfilter: nf_tables: disallow element removal on anonymous sets
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 23a3bfd4ba7acd36abf52b78605f61b21bdac216 ]
+
+Anonymous sets need to be populated once at creation and then they are
+bound to a rule since 938154b93be8 ("netfilter: nf_tables: reject unbound
+anonymous set before commit phase"), otherwise the transaction reports
+EINVAL.
+
+Userspace does not need to delete elements of anonymous sets that are
+not yet bound, reject this with EOPNOTSUPP.
+
+From flush command path, skip anonymous sets, they are expected to be
+bound already. Otherwise, EINVAL is hit at the end of this transaction
+for unbound sets.
+
+Fixes: 96518518cc41 ("netfilter: add nftables")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 32c97cc87ddc2..52c776b5967ef 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1267,8 +1267,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ if (!nft_is_active_next(ctx->net, set))
+ continue;
+
+- if (nft_set_is_anonymous(set) &&
+- !list_empty(&set->bindings))
++ if (nft_set_is_anonymous(set))
+ continue;
+
+ err = nft_delset(ctx, set);
+@@ -5922,8 +5921,10 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+- if (!list_empty(&set->bindings) &&
+- (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
++ if (nft_set_is_anonymous(set))
++ return -EOPNOTSUPP;
++
++ if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
+ return -EBUSY;
+
+ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {
+--
+2.40.1
+
--- /dev/null
+From 66526c836210e5dc7644fd19e326747032ff7187 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 14:27:19 -0700
+Subject: platform/x86: intel_scu_ipc: Check status after timeout in
+ busy_loop()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit e0b4ab3bb92bda8d12f55842614362989d5b2cb3 ]
+
+It's possible for the polling loop in busy_loop() to get scheduled away
+for a long time.
+
+ status = ipc_read_status(scu); // status = IPC_STATUS_BUSY
+ <long time scheduled away>
+ if (!(status & IPC_STATUS_BUSY))
+
+If this happens, then the status bit could change while the task is
+scheduled away and this function would never read the status again after
+timing out. Instead, the function will return -ETIMEDOUT when it's
+possible that scheduling didn't work out and the status bit was cleared.
+Bit polling code should always check the bit being polled one more time
+after the timeout in case this happens.
+
+Fix this by reading the status once more after the while loop breaks.
+The readl_poll_timeout() macro implements all of this, and it is
+shorter, so use that macro here to consolidate code and fix this.
+
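+The pattern the macro implements is roughly this (a sketch, not the
+literal iopoll.h source):
+
+	unsigned long timeout = jiffies + IPC_TIMEOUT;
+
+	for (;;) {
+		status = ipc_read_status(scu);
+		if (!(status & IPC_STATUS_BUSY))
+			break;
+		if (time_after(jiffies, timeout)) {
+			status = ipc_read_status(scu); /* re-check after timeout */
+			break;
+		}
+		usleep_range(50, 100);
+	}
+	/* caller inspects "status" even on the timeout path */
+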
+There were some concerns with using readl_poll_timeout() because it uses
+timekeeping, and timekeeping isn't running early on or during the late
+stages of system suspend or early stages of system resume, but an audit
+of the code concluded that this code isn't called during those times so
+it is safe to use the macro.
+
+Cc: Prashant Malani <pmalani@chromium.org>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Fixes: e7b7ab3847c9 ("platform/x86: intel_scu_ipc: Sleeping is fine when polling")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20230913212723.3055315-2-swboyd@chromium.org
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/intel_scu_ipc.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index bdeb888c0fea4..0b5029bca4a45 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+
+@@ -232,19 +233,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
+ /* Wait till scu status is busy */
+ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
+ {
+- unsigned long end = jiffies + IPC_TIMEOUT;
+-
+- do {
+- u32 status;
+-
+- status = ipc_read_status(scu);
+- if (!(status & IPC_STATUS_BUSY))
+- return (status & IPC_STATUS_ERR) ? -EIO : 0;
++ u8 status;
++ int err;
+
+- usleep_range(50, 100);
+- } while (time_before(jiffies, end));
++ err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
++ 100, jiffies_to_usecs(IPC_TIMEOUT));
++ if (err)
++ return err;
+
+- return -ETIMEDOUT;
++ return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ }
+
+ /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
+--
+2.40.1
+
--- /dev/null
+From 35e843cb46aac0c3064e6c6d50c6c64729c38454 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 14:27:20 -0700
+Subject: platform/x86: intel_scu_ipc: Check status upon timeout in
+ ipc_wait_for_interrupt()
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit 427fada620733e6474d783ae6037a66eae42bf8c ]
+
+It's possible for the completion in ipc_wait_for_interrupt() to time out,
+simply because the interrupt was delayed in being processed. A timeout
+in itself is not an error. This driver should check the status register
+upon a timeout to ensure that scheduling or interrupt processing delays
+don't affect the outcome of the IPC return value.
+
+ CPU0 SCU
+ ---- ---
+ ipc_wait_for_interrupt()
+ wait_for_completion_timeout(&scu->cmd_complete)
+ [TIMEOUT] status[IPC_STATUS_BUSY]=0
+
+Fix this problem by reading the status bit in all cases, regardless of
+the timeout. If the completion times out, we'll assume the problem was
+that the IPC_STATUS_BUSY bit was still set, but if the status bit is
+cleared in the meantime we know that we hit some scheduling delay and we
+should just check the error bit.
+
+Cc: Prashant Malani <pmalani@chromium.org>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Fixes: ed12f295bfd5 ("ipc: Added support for IPC interrupt mode")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20230913212723.3055315-3-swboyd@chromium.org
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/intel_scu_ipc.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 0b5029bca4a45..4c053c715cde0 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -249,10 +249,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
+ {
+ int status;
+
+- if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
+- return -ETIMEDOUT;
++ wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
+
+ status = ipc_read_status(scu);
++ if (status & IPC_STATUS_BUSY)
++ return -ETIMEDOUT;
++
+ if (status & IPC_STATUS_ERR)
+ return -EIO;
+
+--
+2.40.1
+
--- /dev/null
+From 475b36ae1fa27911b59e833e81c92b27c5cb0514 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 14:27:21 -0700
+Subject: platform/x86: intel_scu_ipc: Don't override scu in
+ intel_scu_ipc_dev_simple_command()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit efce78584e583226e9a1f6cb2fb555d6ff47c3e7 ]
+
+Andy discovered this bug during patch review. The 'scu' argument to this
+function shouldn't be overridden by the function itself. It doesn't make
+any sense. Looking at the commit history, we see that commit
+f57fa18583f5 ("platform/x86: intel_scu_ipc: Introduce new SCU IPC API")
+removed the setting of the scu to ipcdev in other functions, but not
+this one. That was an oversight. Remove this line so that we stop
+overriding the scu instance that is used by this function.
+
+Reported-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Closes: https://lore.kernel.org/r/ZPjdZ3xNmBEBvNiS@smile.fi.intel.com
+Cc: Prashant Malani <pmalani@chromium.org>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Fixes: f57fa18583f5 ("platform/x86: intel_scu_ipc: Introduce new SCU IPC API")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20230913212723.3055315-4-swboyd@chromium.org
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/intel_scu_ipc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 4c053c715cde0..60e7f95bc5554 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -444,7 +444,6 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+- scu = ipcdev;
+ cmdval = sub << 12 | cmd;
+ ipc_command(scu, cmdval);
+ err = intel_scu_ipc_check_status(scu);
+--
+2.40.1
+
--- /dev/null
+From 0d6f8ad32bc11ae4c0fd0a8edea6ba91a6f4737f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 14:27:22 -0700
+Subject: platform/x86: intel_scu_ipc: Fail IPC send if still busy
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit 85e654c9f722853a595fa941dca60c157b707b86 ]
+
+It's possible for interrupts to get significantly delayed to the point
+that callers of intel_scu_ipc_dev_command() and friends can call the
+function once, hit a timeout, and call it again while the interrupt
+still hasn't been processed. This driver will get seriously confused if
+the interrupt is finally processed after the second IPC has been sent
+with ipc_command(). It won't know which IPC has been completed. This
+could be quite disastrous if calling code assumes something has happened
+upon return from intel_scu_ipc_dev_simple_command() when it actually
+hasn't.
+
+Let's avoid this scenario by simply returning -EBUSY in this case.
+Hopefully higher layers will know to back off or fail gracefully when
+this happens. It's all highly unlikely anyway, but it's better to be
+correct here as we have no way to know which IPC the status register is
+telling us about if we send a second IPC while the previous IPC is still
+processing.
+
+Cc: Prashant Malani <pmalani@chromium.org>
+Cc: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Fixes: ed12f295bfd5 ("ipc: Added support for IPC interrupt mode")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20230913212723.3055315-5-swboyd@chromium.org
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/intel_scu_ipc.c | 40 +++++++++++++++++++---------
+ 1 file changed, 28 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 60e7f95bc5554..84ed828694630 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -266,6 +266,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
+ return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
+ }
+
++static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
++{
++ u8 status;
++
++ if (!scu)
++ scu = ipcdev;
++ if (!scu)
++ return ERR_PTR(-ENODEV);
++
++ status = ipc_read_status(scu);
++ if (status & IPC_STATUS_BUSY) {
++ dev_dbg(&scu->dev, "device is busy\n");
++ return ERR_PTR(-EBUSY);
++ }
++
++ return scu;
++}
++
+ /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
+ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ u32 count, u32 op, u32 id)
+@@ -279,11 +297,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ memset(cbuf, 0, sizeof(cbuf));
+
+ mutex_lock(&ipclock);
+- if (!scu)
+- scu = ipcdev;
+- if (!scu) {
++ scu = intel_scu_ipc_get(scu);
++ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+- return -ENODEV;
++ return PTR_ERR(scu);
+ }
+
+ for (nc = 0; nc < count; nc++, offset += 2) {
+@@ -438,12 +455,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int err;
+
+ mutex_lock(&ipclock);
+- if (!scu)
+- scu = ipcdev;
+- if (!scu) {
++ scu = intel_scu_ipc_get(scu);
++ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+- return -ENODEV;
++ return PTR_ERR(scu);
+ }
++
+ cmdval = sub << 12 | cmd;
+ ipc_command(scu, cmdval);
+ err = intel_scu_ipc_check_status(scu);
+@@ -483,11 +500,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ return -EINVAL;
+
+ mutex_lock(&ipclock);
+- if (!scu)
+- scu = ipcdev;
+- if (!scu) {
++ scu = intel_scu_ipc_get(scu);
++ if (IS_ERR(scu)) {
+ mutex_unlock(&ipclock);
+- return -ENODEV;
++ return PTR_ERR(scu);
+ }
+
+ memcpy(inbuf, in, inlen);
+--
+2.40.1
+
--- /dev/null
+From 521207bc3ee0367d843ec58c19190397c9332efd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 11:26:01 +0530
+Subject: powerpc/perf/hv-24x7: Update domain value check
+
+From: Kajol Jain <kjain@linux.ibm.com>
+
+[ Upstream commit 4ff3ba4db5943cac1045e3e4a3c0463ea10f6930 ]
+
+A valid domain value is in the range 1 to HV_PERF_DOMAIN_MAX. The current
+code checks for domain values greater than or equal to HV_PERF_DOMAIN_MAX,
+but the check for domain value 0 is missing.
+
+Fix this issue by adding a check for domain value 0.
+
+Before:
+ # ./perf stat -v -e hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ sleep 1
+ Using CPUID 00800200
+ Control descriptor is not initialized
+ Error:
+ The sys_perf_event_open() syscall returned with 5 (Input/output error) for
+ event (hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/).
+ /bin/dmesg | grep -i perf may provide additional information.
+
+ Result from dmesg:
+ [ 37.819387] hv-24x7: hcall failed: [0 0x60040000 0x100 0] => ret
+ 0xfffffffffffffffc (-4) detail=0x2000000 failing ix=0
+
+After:
+ # ./perf stat -v -e hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ sleep 1
+ Using CPUID 00800200
+ Control descriptor is not initialized
+ Warning:
+ hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ event is not supported by the kernel.
+ failed to read counter hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/
+
+Fixes: ebd4a5a3ebd9 ("powerpc/perf/hv-24x7: Minor improvements")
+Reported-by: Krishan Gopal Sarawast <krishang@linux.vnet.ibm.com>
+Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
+Tested-by: Disha Goel <disgoel@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230825055601.360083-1-kjain@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/perf/hv-24x7.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index 1cd2351d241e8..61a08747b1641 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -1410,7 +1410,7 @@ static int h_24x7_event_init(struct perf_event *event)
+ }
+
+ domain = event_get_domain(event);
+- if (domain >= HV_PERF_DOMAIN_MAX) {
++ if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
+ pr_devel("invalid domain %d\n", domain);
+ return -EINVAL;
+ }
+--
+2.40.1
+
--- /dev/null
+From 9272a928c9e22faf5bd45481a744d32ec9b21ee0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 May 2021 20:27:19 -0700
+Subject: selftests/tls: Add {} to avoid static checker warning
+
+From: Kees Cook <keescook@chromium.org>
+
+[ Upstream commit f50688b47c5858d2ff315d020332bf4cb6710837 ]
+
+This silences a static checker warning due to the unusual macro
+construction of EXPECT_*() by adding explicit {}s around the enclosing
+while loop.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 7f657d5bf507 ("selftests: tls: add selftests for TLS sockets")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: c326ca98446e ("selftests: tls: swap the TX and RX sockets in some tests")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/tls.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index b599f1fa99b55..44984741bd41d 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -387,8 +387,9 @@ TEST_F(tls, sendmsg_large)
+ EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+ }
+
+- while (recvs++ < sends)
++ while (recvs++ < sends) {
+ EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++ }
+
+ free(mem);
+ }
+--
+2.40.1
+
--- /dev/null
+From cd03567e68c6db98f8d3ed042f88a0556d269ecc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Sep 2023 16:16:25 +0200
+Subject: selftests: tls: swap the TX and RX sockets in some tests
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit c326ca98446e0ae4fee43a40acf79412b74cfedb ]
+
+tls.sendmsg_large and tls.sendmsg_multiple are trying to send through
+the self->cfd socket (only configured with TLS_RX) and to receive through
+the self->fd socket (only configured with TLS_TX), so they're not using
+kTLS at all. Swap the sockets.
+
+Fixes: 7f657d5bf507 ("selftests: tls: add selftests for TLS sockets")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/tls.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 44984741bd41d..44a25a9f1f722 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -384,11 +384,11 @@ TEST_F(tls, sendmsg_large)
+
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
++ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ }
+
+ while (recvs++ < sends) {
+- EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++ EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+ }
+
+ free(mem);
+@@ -417,9 +417,9 @@ TEST_F(tls, sendmsg_multiple)
+ msg.msg_iov = vec;
+ msg.msg_iovlen = iov_len;
+
+- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
++ EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+ buf = malloc(total_len);
+- EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
++ EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+ for (i = 0; i < iov_len; i++) {
+ EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+ strlen(test_strs[i])),
+--
+2.40.1
+
--- /dev/null
+From f41785672e809527d8ecca5d0fe2c55ad49f2aa9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 17:50:38 +0100
+Subject: seqlock: avoid -Wshadow warnings
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit a07c45312f06e288417049208c344ad76074627d ]
+
+When building with W=2, there is a flood of warnings about the seqlock
+macros shadowing local variables:
+
+ 19806 linux/seqlock.h:331:11: warning: declaration of 'seq' shadows a previous local [-Wshadow]
+ 48 linux/seqlock.h:348:11: warning: declaration of 'seq' shadows a previous local [-Wshadow]
+ 8 linux/seqlock.h:379:11: warning: declaration of 'seq' shadows a previous local [-Wshadow]
+
+Prefix the local variables to make the warning useful elsewhere again.
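+
+A minimal reproduction of the pattern (illustrative, outside the kernel
+tree; statement expressions are a GNU C extension):
+
+  #define READ_BEGIN(p) ({ unsigned seq = *(p); seq; })
+
+  unsigned reader(unsigned *p)
+  {
+          unsigned seq;           /* a very common caller-side name... */
+
+          seq = READ_BEGIN(p);    /* ...shadowed by the macro's own 'seq' */
+          return seq;
+  }
+
+Prefixing the macro-internal locals leaves -Wshadow free to flag
+genuine shadowing at the call sites.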
+
+Fixes: 52ac39e5db51 ("seqlock: seqcount_t: Implement all read APIs as statement expressions")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201026165044.3722931-1-arnd@kernel.org
+Stable-dep-of: 41b43b6c6e30 ("locking/seqlock: Do the lockdep annotation before locking in do_write_seqcount_begin_nested()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 1ac20d75b0618..fb89b05066f43 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -328,13 +328,13 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ */
+ #define __read_seqcount_begin(s) \
+ ({ \
+- unsigned seq; \
++ unsigned __seq; \
+ \
+- while ((seq = __seqcount_sequence(s)) & 1) \
++ while ((__seq = __seqcount_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+- seq; \
++ __seq; \
+ })
+
+ /**
+@@ -345,10 +345,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ */
+ #define raw_read_seqcount_begin(s) \
+ ({ \
+- unsigned seq = __read_seqcount_begin(s); \
++ unsigned _seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+- seq; \
++ _seq; \
+ })
+
+ /**
+@@ -376,11 +376,11 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ */
+ #define raw_read_seqcount(s) \
+ ({ \
+- unsigned seq = __seqcount_sequence(s); \
++ unsigned __seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+- seq; \
++ __seq; \
+ })
+
+ /**
+--
+2.40.1
+
--- /dev/null
+From 05897e55ea33eedf40dc3a98995c8d017c17231d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Dec 2020 17:21:42 +0100
+Subject: seqlock: Prefix internal seqcount_t-only macros with a "do_"
+
+From: Ahmed S. Darwish <a.darwish@linutronix.de>
+
+[ Upstream commit 66bcfcdf89d00f2409f4b5da0f8c20c08318dc72 ]
+
+When the seqcount_LOCKNAME_t group of data types was introduced, two
+classes of seqlock.h sequence counter macros were added:
+
+ - An external public API which can either take a plain seqcount_t or
+ any of the seqcount_LOCKNAME_t variants.
+
+ - An internal API which takes only a plain seqcount_t.
+
+To distinguish between the two groups, the "*_seqcount_t_*" pattern was
+used for the latter. This confused a number of mm/ call-site developers,
+and Linus also commented that it was not a standard practice for marking
+seqlock.h internal APIs.
+
+Distinguish the latter group of macros by prefixing a "do_".
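+
+The resulting convention, condensed from the hunks below:
+
+  /* public API: accepts a plain seqcount_t or any seqcount_LOCKNAME_t */
+  #define read_seqcount_retry(s, start) \
+          do_read_seqcount_retry(seqprop_ptr(s), start)
+
+  /* internal helper: takes only a plain seqcount_t */
+  static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start);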
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/CAHk-=wikhGExmprXgaW+MVXG1zsGpztBbVwOb23vetk41EtTBQ@mail.gmail.com
+Stable-dep-of: 41b43b6c6e30 ("locking/seqlock: Do the lockdep annotation before locking in do_write_seqcount_begin_nested()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 66 ++++++++++++++++++++---------------------
+ 1 file changed, 33 insertions(+), 33 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 66993e9ef90d9..008fa88ad58e7 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ * Return: true if a read section retry is required, else false
+ */
+ #define __read_seqcount_retry(s, start) \
+- __read_seqcount_t_retry(seqprop_ptr(s), start)
++ do___read_seqcount_retry(seqprop_ptr(s), start)
+
+-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
++static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
+ {
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
+@@ -445,12 +445,12 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+ * Return: true if a read section retry is required, else false
+ */
+ #define read_seqcount_retry(s, start) \
+- read_seqcount_t_retry(seqprop_ptr(s), start)
++ do_read_seqcount_retry(seqprop_ptr(s), start)
+
+-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
++static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
+ {
+ smp_rmb();
+- return __read_seqcount_t_retry(s, start);
++ return do___read_seqcount_retry(s, start);
+ }
+
+ /**
+@@ -462,10 +462,10 @@ do { \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- raw_write_seqcount_t_begin(seqprop_ptr(s)); \
++ do_raw_write_seqcount_begin(seqprop_ptr(s)); \
+ } while (0)
+
+-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
++static inline void do_raw_write_seqcount_begin(seqcount_t *s)
+ {
+ kcsan_nestable_atomic_begin();
+ s->sequence++;
+@@ -478,13 +478,13 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+ */
+ #define raw_write_seqcount_end(s) \
+ do { \
+- raw_write_seqcount_t_end(seqprop_ptr(s)); \
++ do_raw_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+ } while (0)
+
+-static inline void raw_write_seqcount_t_end(seqcount_t *s)
++static inline void do_raw_write_seqcount_end(seqcount_t *s)
+ {
+ smp_wmb();
+ s->sequence++;
+@@ -506,12 +506,12 @@ do { \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- write_seqcount_t_begin_nested(seqprop_ptr(s), subclass); \
++ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
+ } while (0)
+
+-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
++static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+ {
+- raw_write_seqcount_t_begin(s);
++ do_raw_write_seqcount_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ }
+
+@@ -533,12 +533,12 @@ do { \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- write_seqcount_t_begin(seqprop_ptr(s)); \
++ do_write_seqcount_begin(seqprop_ptr(s)); \
+ } while (0)
+
+-static inline void write_seqcount_t_begin(seqcount_t *s)
++static inline void do_write_seqcount_begin(seqcount_t *s)
+ {
+- write_seqcount_t_begin_nested(s, 0);
++ do_write_seqcount_begin_nested(s, 0);
+ }
+
+ /**
+@@ -549,16 +549,16 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
+ */
+ #define write_seqcount_end(s) \
+ do { \
+- write_seqcount_t_end(seqprop_ptr(s)); \
++ do_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+ } while (0)
+
+-static inline void write_seqcount_t_end(seqcount_t *s)
++static inline void do_write_seqcount_end(seqcount_t *s)
+ {
+ seqcount_release(&s->dep_map, _RET_IP_);
+- raw_write_seqcount_t_end(s);
++ do_raw_write_seqcount_end(s);
+ }
+
+ /**
+@@ -603,9 +603,9 @@ static inline void write_seqcount_t_end(seqcount_t *s)
+ * }
+ */
+ #define raw_write_seqcount_barrier(s) \
+- raw_write_seqcount_t_barrier(seqprop_ptr(s))
++ do_raw_write_seqcount_barrier(seqprop_ptr(s))
+
+-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
++static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
+ {
+ kcsan_nestable_atomic_begin();
+ s->sequence++;
+@@ -623,9 +623,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+ * will complete successfully and see data older than this.
+ */
+ #define write_seqcount_invalidate(s) \
+- write_seqcount_t_invalidate(seqprop_ptr(s))
++ do_write_seqcount_invalidate(seqprop_ptr(s))
+
+-static inline void write_seqcount_t_invalidate(seqcount_t *s)
++static inline void do_write_seqcount_invalidate(seqcount_t *s)
+ {
+ smp_wmb();
+ kcsan_nestable_atomic_begin();
+@@ -862,9 +862,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ }
+
+ /*
+- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
+- * instead of the generic write_seqcount_begin(). This way, no redundant
+- * lockdep_assert_held() checks are added.
++ * For all seqlock_t write side functions, use the internal
++ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
++ * This way, no redundant lockdep_assert_held() checks are added.
+ */
+
+ /**
+@@ -883,7 +883,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ spin_lock(&sl->lock);
+- write_seqcount_t_begin(&sl->seqcount.seqcount);
++ do_write_seqcount_begin(&sl->seqcount.seqcount);
+ }
+
+ /**
+@@ -895,7 +895,7 @@ static inline void write_seqlock(seqlock_t *sl)
+ */
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+- write_seqcount_t_end(&sl->seqcount.seqcount);
++ do_write_seqcount_end(&sl->seqcount.seqcount);
+ spin_unlock(&sl->lock);
+ }
+
+@@ -909,7 +909,7 @@ static inline void write_sequnlock(seqlock_t *sl)
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ spin_lock_bh(&sl->lock);
+- write_seqcount_t_begin(&sl->seqcount.seqcount);
++ do_write_seqcount_begin(&sl->seqcount.seqcount);
+ }
+
+ /**
+@@ -922,7 +922,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
+ */
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+- write_seqcount_t_end(&sl->seqcount.seqcount);
++ do_write_seqcount_end(&sl->seqcount.seqcount);
+ spin_unlock_bh(&sl->lock);
+ }
+
+@@ -936,7 +936,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ spin_lock_irq(&sl->lock);
+- write_seqcount_t_begin(&sl->seqcount.seqcount);
++ do_write_seqcount_begin(&sl->seqcount.seqcount);
+ }
+
+ /**
+@@ -948,7 +948,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
+ */
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+- write_seqcount_t_end(&sl->seqcount.seqcount);
++ do_write_seqcount_end(&sl->seqcount.seqcount);
+ spin_unlock_irq(&sl->lock);
+ }
+
+@@ -957,7 +957,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+- write_seqcount_t_begin(&sl->seqcount.seqcount);
++ do_write_seqcount_begin(&sl->seqcount.seqcount);
+ return flags;
+ }
+
+@@ -986,7 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+- write_seqcount_t_end(&sl->seqcount.seqcount);
++ do_write_seqcount_end(&sl->seqcount.seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
+--
+2.40.1
+
--- /dev/null
+From 58889042e5216c8623afffce92f1698872789cd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Nov 2020 13:44:17 +0100
+Subject: seqlock: Rename __seqprop() users
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ab440b2c604b60fe90885270fcfeb5c3dd5d6fae ]
+
+More consistent naming should make it easier to untangle the _Generic
+token pasting maze called __seqprop().
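+
+A self-contained miniature of that dispatch pattern (an illustration of
+_Generic plus token pasting, not the kernel's actual table):
+
+  typedef struct { unsigned sequence; } seqcount_t;
+  typedef struct { seqcount_t seqcount; } seqcount_spinlock_t;
+
+  static inline unsigned __seqprop_sequence(const seqcount_t *s)
+  {
+          return s->sequence;
+  }
+
+  static inline unsigned
+  __seqprop_spinlock_sequence(const seqcount_spinlock_t *s)
+  {
+          return s->seqcount.sequence;
+  }
+
+  #define __seqprop_case(s, lockname, prop) \
+          seqcount_##lockname##_t: \
+          __seqprop_##lockname##_##prop((const void *)(s))
+
+  #define __seqprop(s, prop) _Generic(*(s), \
+          seqcount_t: __seqprop_##prop((const void *)(s)), \
+          __seqprop_case((s), spinlock, prop))
+
+  #define seqprop_sequence(s) __seqprop(s, sequence)
+
+Renaming only the user-facing wrappers (seqprop_*) leaves the _Generic
+table itself untouched.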
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201110115358.GE2594@hirez.programming.kicks-ass.net
+Stable-dep-of: 41b43b6c6e30 ("locking/seqlock: Do the lockdep annotation before locking in do_write_seqcount_begin_nested()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 46 ++++++++++++++++++++---------------------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index fb89b05066f43..66993e9ef90d9 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ __seqprop_case((s), mutex, prop), \
+ __seqprop_case((s), ww_mutex, prop))
+
+-#define __seqcount_ptr(s) __seqprop(s, ptr)
+-#define __seqcount_sequence(s) __seqprop(s, sequence)
+-#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+-#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
++#define seqprop_ptr(s) __seqprop(s, ptr)
++#define seqprop_sequence(s) __seqprop(s, sequence)
++#define seqprop_preemptible(s) __seqprop(s, preemptible)
++#define seqprop_assert(s) __seqprop(s, assert)
+
+ /**
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+@@ -330,7 +330,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ ({ \
+ unsigned __seq; \
+ \
+- while ((__seq = __seqcount_sequence(s)) & 1) \
++ while ((__seq = seqprop_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+@@ -359,7 +359,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ */
+ #define read_seqcount_begin(s) \
+ ({ \
+- seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
++ seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+ })
+
+@@ -376,7 +376,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ */
+ #define raw_read_seqcount(s) \
+ ({ \
+- unsigned __seq = __seqcount_sequence(s); \
++ unsigned __seq = seqprop_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+@@ -425,7 +425,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
+ * Return: true if a read section retry is required, else false
+ */
+ #define __read_seqcount_retry(s, start) \
+- __read_seqcount_t_retry(__seqcount_ptr(s), start)
++ __read_seqcount_t_retry(seqprop_ptr(s), start)
+
+ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+ {
+@@ -445,7 +445,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+ * Return: true if a read section retry is required, else false
+ */
+ #define read_seqcount_retry(s, start) \
+- read_seqcount_t_retry(__seqcount_ptr(s), start)
++ read_seqcount_t_retry(seqprop_ptr(s), start)
+
+ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+ {
+@@ -459,10 +459,10 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+ */
+ #define raw_write_seqcount_begin(s) \
+ do { \
+- if (__seqcount_lock_preemptible(s)) \
++ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
++ raw_write_seqcount_t_begin(seqprop_ptr(s)); \
+ } while (0)
+
+ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+@@ -478,9 +478,9 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+ */
+ #define raw_write_seqcount_end(s) \
+ do { \
+- raw_write_seqcount_t_end(__seqcount_ptr(s)); \
++ raw_write_seqcount_t_end(seqprop_ptr(s)); \
+ \
+- if (__seqcount_lock_preemptible(s)) \
++ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+ } while (0)
+
+@@ -501,12 +501,12 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
+ */
+ #define write_seqcount_begin_nested(s, subclass) \
+ do { \
+- __seqcount_assert_lock_held(s); \
++ seqprop_assert(s); \
+ \
+- if (__seqcount_lock_preemptible(s)) \
++ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
++ write_seqcount_t_begin_nested(seqprop_ptr(s), subclass); \
+ } while (0)
+
+ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+@@ -528,12 +528,12 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+ */
+ #define write_seqcount_begin(s) \
+ do { \
+- __seqcount_assert_lock_held(s); \
++ seqprop_assert(s); \
+ \
+- if (__seqcount_lock_preemptible(s)) \
++ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+- write_seqcount_t_begin(__seqcount_ptr(s)); \
++ write_seqcount_t_begin(seqprop_ptr(s)); \
+ } while (0)
+
+ static inline void write_seqcount_t_begin(seqcount_t *s)
+@@ -549,9 +549,9 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
+ */
+ #define write_seqcount_end(s) \
+ do { \
+- write_seqcount_t_end(__seqcount_ptr(s)); \
++ write_seqcount_t_end(seqprop_ptr(s)); \
+ \
+- if (__seqcount_lock_preemptible(s)) \
++ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+ } while (0)
+
+@@ -603,7 +603,7 @@ static inline void write_seqcount_t_end(seqcount_t *s)
+ * }
+ */
+ #define raw_write_seqcount_barrier(s) \
+- raw_write_seqcount_t_barrier(__seqcount_ptr(s))
++ raw_write_seqcount_t_barrier(seqprop_ptr(s))
+
+ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+ {
+@@ -623,7 +623,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+ * will complete successfully and see data older than this.
+ */
+ #define write_seqcount_invalidate(s) \
+- write_seqcount_t_invalidate(__seqcount_ptr(s))
++ write_seqcount_t_invalidate(seqprop_ptr(s))
+
+ static inline void write_seqcount_t_invalidate(seqcount_t *s)
+ {
+--
+2.40.1
+
netfilter-nft_set_pipapo-stop-gc-iteration-if-gc-tra.patch
netfilter-nft_set_hash-try-later-when-gc-hits-eagain.patch
netfilter-nf_tables-fix-memleak-when-more-than-255-e.patch
+asoc-meson-spdifin-start-hw-on-dai-probe.patch
+netfilter-nf_tables-disallow-element-removal-on-anon.patch
+bpf-avoid-deadlock-when-using-queue-and-stack-maps-f.patch
+selftests-tls-add-to-avoid-static-checker-warning.patch
+selftests-tls-swap-the-tx-and-rx-sockets-in-some-tes.patch
+asoc-imx-audmix-fix-return-error-with-devm_clk_get.patch
+i40e-fix-vf-vlan-offloading-when-port-vlan-is-config.patch
+ipv4-fix-null-deref-in-ipv4_link_failure.patch
+powerpc-perf-hv-24x7-update-domain-value-check.patch
+dccp-fix-dccp_v4_err-dccp_v6_err-again.patch
+platform-x86-intel_scu_ipc-check-status-after-timeou.patch
+platform-x86-intel_scu_ipc-check-status-upon-timeout.patch
+platform-x86-intel_scu_ipc-don-t-override-scu-in-int.patch
+platform-x86-intel_scu_ipc-fail-ipc-send-if-still-bu.patch
+x86-srso-fix-srso_show_state-side-effect.patch
+x86-srso-fix-sbpb-enablement-for-spec_rstack_overflo.patch
+net-hns3-only-enable-unicast-promisc-when-mac-table-.patch
+net-hns3-add-5ms-delay-before-clear-firmware-reset-i.patch
+net-bridge-use-dev_stats_inc.patch
+team-fix-null-ptr-deref-when-team-device-type-is-cha.patch
+netfilter-ipset-fix-race-between-ipset_cmd_create-an.patch
+seqlock-avoid-wshadow-warnings.patch
+seqlock-rename-__seqprop-users.patch
+seqlock-prefix-internal-seqcount_t-only-macros-with-.patch
+locking-seqlock-do-the-lockdep-annotation-before-loc.patch
+bnxt_en-flush-xdp-for-bnxt_poll_nitroa0-s-napi.patch
+net-rds-fix-possible-null-pointer-dereference.patch
--- /dev/null
+From b0486735a3e2f7a7c7a8facb80ff5729b7130ed1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 20:30:11 +0800
+Subject: team: fix null-ptr-deref when team device type is changed
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit 492032760127251e5540a5716a70996bacf2a3fd ]
+
+A null-ptr-deref bug is triggered as follows by reproducer [1].
+
+BUG: kernel NULL pointer dereference, address: 0000000000000228
+...
+RIP: 0010:vlan_dev_hard_header+0x35/0x140 [8021q]
+...
+Call Trace:
+ <TASK>
+ ? __die+0x24/0x70
+ ? page_fault_oops+0x82/0x150
+ ? exc_page_fault+0x69/0x150
+ ? asm_exc_page_fault+0x26/0x30
+ ? vlan_dev_hard_header+0x35/0x140 [8021q]
+ ? vlan_dev_hard_header+0x8e/0x140 [8021q]
+ neigh_connected_output+0xb2/0x100
+ ip6_finish_output2+0x1cb/0x520
+ ? nf_hook_slow+0x43/0xc0
+ ? ip6_mtu+0x46/0x80
+ ip6_finish_output+0x2a/0xb0
+ mld_sendpack+0x18f/0x250
+ mld_ifc_work+0x39/0x160
+ process_one_work+0x1e6/0x3f0
+ worker_thread+0x4d/0x2f0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xe5/0x120
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x34/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1b/0x30
+
+[1]
+$ teamd -t team0 -d -c '{"runner": {"name": "loadbalance"}}'
+$ ip link add name t-dummy type dummy
+$ ip link add link t-dummy name t-dummy.100 type vlan id 100
+$ ip link add name t-nlmon type nlmon
+$ ip link set t-nlmon master team0
+$ ip link set t-nlmon nomaster
+$ ip link set t-dummy up
+$ ip link set team0 up
+$ ip link set t-dummy.100 down
+$ ip link set t-dummy.100 master team0
+
+When a vlan device is enslaved to a team device and the team device's
+type is changed from non-ether to ether, the team device's header_ops
+is set to vlan_header_ops. That is incorrect and triggers a
+null-ptr-deref on vlan->real_dev in vlan_dev_hard_header(), because the
+team device is not a vlan device.
+
+Fix the bug by caching eth_header_ops in team_setup() and assigning the
+cached header_ops to the team net device whenever its type changes from
+non-ether to ether.
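+
+Condensed from the hunks below, the fix is:
+
+  static void team_setup(struct net_device *dev)
+  {
+          struct team *team = netdev_priv(dev);
+
+          ether_setup(dev);
+          /* remember eth_header_ops before any port can override it */
+          team->header_ops_cache = dev->header_ops;
+          /* ... */
+  }
+
+  static void team_setup_by_port(struct net_device *dev,
+                                 struct net_device *port_dev)
+  {
+          struct team *team = netdev_priv(dev);
+
+          if (port_dev->type == ARPHRD_ETHER)
+                  dev->header_ops = team->header_ops_cache;
+          else
+                  dev->header_ops = port_dev->header_ops;
+          /* ... */
+  }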
+
+Fixes: 1d76efe1577b ("team: add support for non-ethernet devices")
+Suggested-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230918123011.1884401-1-william.xuanziyang@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/team/team.c | 10 +++++++++-
+ include/linux/if_team.h | 2 ++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 721b536ce8861..97a77dabed64c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2122,7 +2122,12 @@ static const struct ethtool_ops team_ethtool_ops = {
+ static void team_setup_by_port(struct net_device *dev,
+ struct net_device *port_dev)
+ {
+- dev->header_ops = port_dev->header_ops;
++ struct team *team = netdev_priv(dev);
++
++ if (port_dev->type == ARPHRD_ETHER)
++ dev->header_ops = team->header_ops_cache;
++ else
++ dev->header_ops = port_dev->header_ops;
+ dev->type = port_dev->type;
+ dev->hard_header_len = port_dev->hard_header_len;
+ dev->needed_headroom = port_dev->needed_headroom;
+@@ -2169,8 +2174,11 @@ static int team_dev_type_check_change(struct net_device *dev,
+
+ static void team_setup(struct net_device *dev)
+ {
++ struct team *team = netdev_priv(dev);
++
+ ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
++ team->header_ops_cache = dev->header_ops;
+
+ dev->netdev_ops = &team_netdev_ops;
+ dev->ethtool_ops = &team_ethtool_ops;
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index 5dd1657947b75..762c77d13e7dd 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -189,6 +189,8 @@ struct team {
+ struct net_device *dev; /* associated netdevice */
+ struct team_pcpu_stats __percpu *pcpu_stats;
+
++ const struct header_ops *header_ops_cache;
++
+ struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+ /*
+--
+2.40.1
+
--- /dev/null
+From 4377ccdb3f80b3eaf0d190305e1b1d9daa79f4c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 22:04:48 -0700
+Subject: x86/srso: Fix SBPB enablement for spec_rstack_overflow=off
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+[ Upstream commit 01b057b2f4cc2d905a0bd92195657dbd9a7005ab ]
+
+If the user has requested no SRSO mitigation, other mitigations can use
+the lighter-weight SBPB instead of IBPB.
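+
+Sketch of the resulting control flow (the pred_cmd label sits outside
+the hunk below; its body here is an assumption based on the upstream
+commit, not a quote of the stable tree):
+
+  switch (srso_cmd) {
+  case SRSO_CMD_OFF:
+          goto pred_cmd;  /* was: return, which skipped SBPB selection */
+  /* ... other mitigation modes ... */
+  }
+  /* ... */
+  pred_cmd:
+          /* assumed shape: with SRSO off, let IBPB users issue the
+           * cheaper SBPB variant instead */
+          if (boot_cpu_has(X86_FEATURE_SBPB))
+                  x86_pred_cmd = PRED_CMD_SBPB;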
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/b20820c3cfd1003171135ec8d762a0b957348497.1693889988.git.jpoimboe@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4719089029f02..ec3ddb9a456ba 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2344,7 +2344,7 @@ static void __init srso_select_mitigation(void)
+
+ switch (srso_cmd) {
+ case SRSO_CMD_OFF:
+- return;
++ goto pred_cmd;
+
+ case SRSO_CMD_MICROCODE:
+ if (has_microcode) {
+--
+2.40.1
+
--- /dev/null
+From 6468e8878ef38da6d299f5b2c3c4695a3ae0ba25 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Sep 2023 22:04:45 -0700
+Subject: x86/srso: Fix srso_show_state() side effect
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+[ Upstream commit a8cf700c17d9ca6cb8ee7dc5c9330dbac3948237 ]
+
+Reading the 'spec_rstack_overflow' sysfs file can trigger an unnecessary
+MSR write, and possibly even a (handled) exception if the microcode
+hasn't been updated.
+
+Avoid all that by just checking X86_FEATURE_IBPB_BRTYPE instead, which
+gets set by srso_select_mitigation() if the updated microcode exists.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/27d128899cb8aee9eb2b57ddc996742b0c1d776b.1693889988.git.jpoimboe@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4d11a50089b27..4719089029f02 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2622,7 +2622,7 @@ static ssize_t srso_show_state(char *buf)
+
+ return sysfs_emit(buf, "%s%s\n",
+ srso_strings[srso_mitigation],
+- (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
++ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ }
+
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+--
+2.40.1
+