--- /dev/null
+From 1a30332f9569e207cc466eb07ca222ee36e74ad5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jan 2023 17:37:29 +0100
+Subject: drm/i915/selftest: fix intel_selftest_modify_policy argument types
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 2255bbcdc39d5b0311968f86614ae4f25fdd465d ]
+
+The definition of intel_selftest_modify_policy() does not match the
+declaration, as gcc-13 points out:
+
+drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c:29:5: error: conflicting types for 'intel_selftest_modify_policy' due to enum/integer mismatch; have 'int(struct intel_engine_cs *, struct intel_selftest_saved_policy *, u32)' {aka 'int(struct intel_engine_cs *, struct intel_selftest_saved_policy *, unsigned int)'} [-Werror=enum-int-mismatch]
+ 29 | int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In file included from drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c:11:
+drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h:28:5: note: previous declaration of 'intel_selftest_modify_policy' with type 'int(struct intel_engine_cs *, struct intel_selftest_saved_policy *, enum selftest_scheduler_modify)'
+ 28 | int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Change the type in the definition to match.
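+
+For reference, a minimal standalone translation unit (not i915 code, the
+names are made up) triggers the same gcc-13 diagnostic:
+
+  /* enum-mismatch.c: compile with gcc-13 -c -Werror=enum-int-mismatch */
+  enum modify_kind { MODIFY_A, MODIFY_B };
+
+  /* declaration, as a header would provide it */
+  int modify_policy(int engine, enum modify_kind modify_type);
+
+  /* definition using a plain integer type for the last parameter:
+   * gcc-13 reports "conflicting types ... due to enum/integer mismatch"
+   */
+  int modify_policy(int engine, unsigned int modify_type)
+  {
+          return engine + (int)modify_type;
+  }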
+
+Fixes: 617e87c05c72 ("drm/i915/selftest: Fix hangcheck self test for GuC submission")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230117163743.1003219-1-arnd@kernel.org
+(cherry picked from commit 8d7eb8ed3f83f248e01a4f548d9c500a950a2c2d)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+index 4b328346b48a..83ffd175ca89 100644
+--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
++++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+@@ -16,8 +16,7 @@
+
+ int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved,
+- u32 modify_type)
+-
++ enum selftest_scheduler_modify modify_type)
+ {
+ int err;
+
+--
+2.39.0
+
--- /dev/null
+From b86b7e673ea960db16d0bf97cb4747a4b77f6090 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 18:20:26 +0300
+Subject: gpio: mxc: Unlock on error path in mxc_flip_edge()
+
+From: Dan Carpenter <error27@gmail.com>
+
+[ Upstream commit 37870358616ca7fdb1e90ad1cdd791655ec54414 ]
+
+We recently added locking to this function, but one error path was
+overlooked. Drop the lock before returning.
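+
+The fix is the usual single-unlock-label pattern: every exit from the
+locked region goes through the same label. As a rough userspace sketch
+(pthreads, not the mxc driver; names are illustrative):
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void flip_edge(int gpio, int edge)
+  {
+          pthread_mutex_lock(&lock);
+
+          if (edge != 0 && edge != 1) {
+                  fprintf(stderr, "invalid configuration for GPIO %d: %d\n",
+                          gpio, edge);
+                  goto unlock;    /* a bare return here would leak the lock */
+          }
+
+          printf("GPIO %d set to edge %d\n", gpio, edge);
+
+  unlock:
+          pthread_mutex_unlock(&lock);
+  }
+
+  int main(void)
+  {
+          flip_edge(3, 1);
+          flip_edge(4, 7);        /* error path still releases the lock */
+          return 0;
+  }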
+
+Fixes: e5464277625c ("gpio: mxc: Protect GPIO irqchip RMW with bgpio spinlock")
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Acked-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-mxc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index dd91908c72f1..853d9aa6b3b1 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -236,10 +236,11 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+ } else {
+ pr_err("mxc: invalid configuration for GPIO %d: %x\n",
+ gpio, edge);
+- return;
++ goto unlock;
+ }
+ writel(val | (edge << (bit << 1)), reg);
+
++unlock:
+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+ }
+
+--
+2.39.0
+
--- /dev/null
+From f366a5bf2b27cfaff28ca3c91ce2ee032377a679 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Dec 2022 19:59:00 +0000
+Subject: i2c: designware: use casting of u64 in clock multiplication to avoid
+ overflow
+
+From: Lareine Khawaly <lareine@amazon.com>
+
+[ Upstream commit c8c37bc514514999e62a17e95160ed9ebf75ca8d ]
+
+The functions i2c_dw_scl_lcnt() and i2c_dw_scl_hcnt() may overflow,
+depending on the values of the given parameters, including ic_clk.
+For example, in our use case where ic_clk is larger than one million,
+the multiplication ic_clk * 4700 results in a 32-bit overflow.
+
+Add a u64 cast to the calculation to avoid the multiplication overflow,
+and use the corresponding define for the division.
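+
+A small standalone program (values taken from the description above,
+with MICRO being 1000000 as in the driver) shows the wrap and the effect
+of the cast:
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <inttypes.h>
+
+  #define MICRO 1000000UL
+
+  int main(void)
+  {
+          uint32_t ic_clk = 1000000;      /* one million: enough to overflow */
+          uint32_t t = 4700;              /* the 4700 factor from above */
+
+          uint32_t wrapped = ic_clk * t;          /* 32-bit product wraps */
+          uint64_t exact = (uint64_t)ic_clk * t;  /* cast first: no wrap */
+
+          printf("32-bit product: %" PRIu32 " -> count %" PRIu32 "\n",
+                 wrapped, (uint32_t)((wrapped + MICRO / 2) / MICRO));
+          printf("64-bit product: %" PRIu64 " -> count %" PRIu64 "\n",
+                 exact, (exact + MICRO / 2) / MICRO);
+          return 0;
+  }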
+
+Fixes: 2373f6b9744d ("i2c-designware: split of i2c-designware.c into core and bus specific parts")
+Signed-off-by: Lareine Khawaly <lareine@amazon.com>
+Signed-off-by: Hanna Hawa <hhhawa@amazon.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-designware-common.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index a1100e37626e..4af65f101dac 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -351,7 +351,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ *
+ * If your hardware is free from tHD;STA issue, try this one.
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
++ 8 + offset;
+ else
+ /*
+ * Conditional expression:
+@@ -367,7 +368,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ * The reason why we need to take into account "tf" here,
+ * is the same as described in i2c_dw_scl_lcnt().
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
++ 3 + offset;
+ }
+
+ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+@@ -383,7 +385,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
++ 1 + offset;
+ }
+
+ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
+--
+2.39.0
+
--- /dev/null
+From eb9623d10bb0af4023329a84b454f7f63a34ad65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 13:31:40 +0000
+Subject: ipv4: prevent potential spectre v1 gadget in fib_metrics_match()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5e9398a26a92fc402d82ce1f97cc67d832527da0 ]
+
+if (!type)
+ continue;
+ if (type > RTAX_MAX)
+ return false;
+ ...
+ fi_val = fi->fib_metrics->metrics[type - 1];
+
+Since @type is used as an array index, we need to prevent
+CPU speculation or risk leaking kernel memory content.
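+
+array_index_nospec() clamps the index to the valid range without a
+conditional branch the CPU could speculate past. A rough userspace
+sketch of the idea, modelled on the generic fallback in
+include/linux/nospec.h (the array length below is illustrative, not the
+real RTAX_MAX + 1):
+
+  #include <stdio.h>
+
+  /* all-ones when index < size, zero otherwise, computed without a
+   * branch; relies on an arithmetic right shift of the sign bit */
+  static unsigned long index_mask(unsigned long index, unsigned long size)
+  {
+          return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
+  }
+
+  int main(void)
+  {
+          unsigned long array_len = 16;   /* illustrative bound only */
+
+          for (unsigned long type = 14; type <= 18; type++)
+                  printf("type %2lu -> clamped index %2lu\n",
+                         type, type & index_mask(type, array_len));
+          return 0;
+  }
+
+An in-range index passes through unchanged; an out-of-range one (only
+reachable under misspeculation after the type > RTAX_MAX check) is
+forced to slot 0.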
+
+Fixes: 5f9ae3d9e7e4 ("ipv4: do metrics match when looking up and deleting a route")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230120133140.3624204-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/fib_semantics.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 250af6e5a892..607a4f816155 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -30,6 +30,7 @@
+ #include <linux/slab.h>
+ #include <linux/netlink.h>
+ #include <linux/hash.h>
++#include <linux/nospec.h>
+
+ #include <net/arp.h>
+ #include <net/ip.h>
+@@ -1020,6 +1021,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ if (type > RTAX_MAX)
+ return false;
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+ bool ecn_ca = false;
+--
+2.39.0
+
--- /dev/null
+From 2defde978133a1a8c31bb431bfaa5439fb69f662 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 13:30:40 +0000
+Subject: ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1d1d63b612801b3f0a39b7d4467cad0abd60e5c8 ]
+
+if (!type)
+ continue;
+ if (type > RTAX_MAX)
+ return -EINVAL;
+ ...
+ metrics[type - 1] = val;
+
+Since @type is used as an array index, we need to prevent
+CPU speculation or risk leaking kernel memory content.
+
+Fixes: 6cf9dfd3bd62 ("net: fib: move metrics parsing to a helper")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230120133040.3623463-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/metrics.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 25ea6ac44db9..6a1427916c7d 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/types.h>
+ #include <net/ip.h>
+@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ return -EINVAL;
+ }
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+--
+2.39.0
+
--- /dev/null
+From 9e1201104a905b30f41099483e9cb41c61ada136 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 19:55:45 +0100
+Subject: net: fix UaF in netns ops registration error path
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 71ab9c3e2253619136c31c89dbb2c69305cc89b1 ]
+
+If net_assign_generic() fails, the current error path in ops_init() tries
+to clear the gen pointer slot. However, in that error path the gen pointer
+itself has not been modified yet, and the existing, smaller array is
+dereferenced with the too-large index, causing an out-of-bounds access:
+
+ BUG: KASAN: slab-out-of-bounds in ops_init+0x2de/0x320
+ Write of size 8 at addr ffff888109124978 by task modprobe/1018
+
+ CPU: 2 PID: 1018 Comm: modprobe Not tainted 6.2.0-rc2.mptcp_ae5ac65fbed5+ #1641
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.1-2.fc37 04/01/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x6a/0x9f
+ print_address_description.constprop.0+0x86/0x2b5
+ print_report+0x11b/0x1fb
+ kasan_report+0x87/0xc0
+ ops_init+0x2de/0x320
+ register_pernet_operations+0x2e4/0x750
+ register_pernet_subsys+0x24/0x40
+ tcf_register_action+0x9f/0x560
+ do_one_initcall+0xf9/0x570
+ do_init_module+0x190/0x650
+ load_module+0x1fa5/0x23c0
+ __do_sys_finit_module+0x10d/0x1b0
+ do_syscall_64+0x58/0x80
+ entry_SYSCALL_64_after_hwframe+0x72/0xdc
+ RIP: 0033:0x7f42518f778d
+ Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48
+ 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
+ ff 73 01 c3 48 8b 0d cb 56 2c 00 f7 d8 64 89 01 48
+ RSP: 002b:00007fff96869688 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+ RAX: ffffffffffffffda RBX: 00005568ef7f7c90 RCX: 00007f42518f778d
+ RDX: 0000000000000000 RSI: 00005568ef41d796 RDI: 0000000000000003
+ RBP: 00005568ef41d796 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000000
+ R13: 00005568ef7f7d30 R14: 0000000000040000 R15: 0000000000000000
+ </TASK>
+
+This change addresses the issue by skipping the gen pointer
+dereference in the mentioned error path.
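+
+The shape of the bug can be shown with a small userspace sketch (names
+and sizes are made up); the point is that the error path must not touch
+a slot that the old, smaller array never had:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct gen_stub {
+          size_t len;
+          void *ptr[];
+  };
+
+  static int assign_generic(struct gen_stub *gen, size_t id, void *data)
+  {
+          if (id < gen->len) {
+                  gen->ptr[id] = data;
+                  return 0;
+          }
+          return -1;      /* pretend growing the array failed (-ENOMEM) */
+  }
+
+  static int ops_init_stub(struct gen_stub *gen, size_t id)
+  {
+          void *data = malloc(16);
+
+          if (!data)
+                  return -1;
+          if (assign_generic(gen, id, data) == 0)
+                  return 0;
+
+          /* error path: do NOT clear gen->ptr[id] here -- the array was
+           * never grown, so that slot does not exist; just free and bail */
+          free(data);
+          return -1;
+  }
+
+  int main(void)
+  {
+          struct gen_stub *gen = malloc(sizeof(*gen) + 2 * sizeof(void *));
+
+          if (!gen)
+                  return 1;
+          gen->len = 2;
+          gen->ptr[0] = gen->ptr[1] = NULL;
+          printf("id 1: %d\n", ops_init_stub(gen, 1));    /* fits: 0 */
+          printf("id 5: %d\n", ops_init_stub(gen, 5));    /* too big: -1 */
+          free(gen->ptr[1]);
+          free(gen);
+          return 0;
+  }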
+
+Found by code inspection and verified with explicit error injection
+on a kasan-enabled kernel.
+
+Fixes: d266935ac43d ("net: fix UAF issue in nfqnl_nf_hook_drop() when ops_init() failed")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/cec4e0f3bb2c77ac03a6154a8508d3930beb5f0f.1674154348.git.pabeni@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/net_namespace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 982d06332007..dcddc54d0840 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ return 0;
+
+ if (ops->id && ops->size) {
+-cleanup:
+ ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&pernet_ops_rwsem));
+ ng->ptr[*ops->id] = NULL;
+ }
+
++cleanup:
+ kfree(data);
+
+ out:
+--
+2.39.0
+
--- /dev/null
+From 6a947a12afde52a1f19be239d2f8a82308b781ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 12:59:10 -0800
+Subject: net: mana: Fix IRQ name - add PCI and queue number
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+[ Upstream commit 20e3028c39a5bf882e91e717da96d14f1acec40e ]
+
+The PCI and queue number info is missing in IRQ names.
+
+Add PCI and queue number to IRQ names, to allow CPU affinity
+tuning scripts to work.
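+
+As a quick illustration of the resulting names (the format strings come
+from the hunk below; the PCI address is made up):
+
+  #include <stdio.h>
+
+  #define MANA_IRQ_NAME_SZ 32
+
+  int main(void)
+  {
+          const char *pci = "0000:3b:00.0";       /* example PCI address */
+          char name[MANA_IRQ_NAME_SZ];
+
+          for (int i = 0; i < 3; i++) {
+                  if (!i)         /* vector 0: hardware channel interrupt */
+                          snprintf(name, sizeof(name), "mana_hwc@pci:%s", pci);
+                  else            /* remaining vectors map to queue i - 1 */
+                          snprintf(name, sizeof(name), "mana_q%d@pci:%s",
+                                   i - 1, pci);
+                  printf("vector %d -> %s\n", i, name);
+          }
+          return 0;
+  }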
+
+Cc: stable@vger.kernel.org
+Fixes: ca9c54d2d6a5 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Link: https://lore.kernel.org/r/1674161950-19708-1-git-send-email-haiyangz@microsoft.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microsoft/mana/gdma.h | 3 +++
+ drivers/net/ethernet/microsoft/mana/gdma_main.c | 9 ++++++++-
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
+index 1038bdf28ec0..f74f416a296f 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma.h
++++ b/drivers/net/ethernet/microsoft/mana/gdma.h
+@@ -324,9 +324,12 @@ struct gdma_queue_spec {
+ };
+ };
+
++#define MANA_IRQ_NAME_SZ 32
++
+ struct gdma_irq_context {
+ void (*handler)(void *arg);
+ void *arg;
++ char name[MANA_IRQ_NAME_SZ];
+ };
+
+ struct gdma_context {
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index f577507f522b..0fb42193643d 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1195,13 +1195,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ gic->handler = NULL;
+ gic->arg = NULL;
+
++ if (!i)
++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
++ pci_name(pdev));
++ else
++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
++ i - 1, pci_name(pdev));
++
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ err = irq;
+ goto free_irq;
+ }
+
+- err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
++ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
+--
+2.39.0
+
--- /dev/null
+From 95c9f805b93e2f64610a3f1b8c8212018fc12bd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 10:01:06 +0800
+Subject: net: mctp: mark socks as dead on unhash, prevent re-add
+
+From: Jeremy Kerr <jk@codeconstruct.com.au>
+
+[ Upstream commit b98e1a04e27fddfdc808bf46fe78eca30db89ab3 ]
+
+Once a socket has been unhashed, we want to prevent it from being
+re-used in a sk_key entry as part of a routing operation.
+
+This change marks the sk as SOCK_DEAD on unhash, which prevents addition
+into the net's key list.
+
+We need to do this during the key add path, rather than key lookup, as
+we release the net keys_lock between those operations.
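+
+The same pattern in a self-contained userspace sketch (pthreads; the
+struct and names are illustrative, not the mctp code):
+
+  #include <errno.h>
+  #include <pthread.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct sock_stub {
+          pthread_mutex_t keys_lock;
+          bool dead;
+          int nkeys;
+  };
+
+  static void sk_unhash(struct sock_stub *sk)
+  {
+          pthread_mutex_lock(&sk->keys_lock);
+          sk->nkeys = 0;
+          sk->dead = true;        /* no new keys may reference this sock */
+          pthread_mutex_unlock(&sk->keys_lock);
+  }
+
+  static int key_add(struct sock_stub *sk)
+  {
+          int rc = 0;
+
+          pthread_mutex_lock(&sk->keys_lock);
+          if (sk->dead) {         /* raced with sk_unhash(): refuse */
+                  rc = -EINVAL;
+                  goto out_unlock;
+          }
+          sk->nkeys++;
+  out_unlock:
+          pthread_mutex_unlock(&sk->keys_lock);
+          return rc;
+  }
+
+  int main(void)
+  {
+          struct sock_stub sk = { PTHREAD_MUTEX_INITIALIZER, false, 0 };
+
+          printf("add before unhash: %d\n", key_add(&sk));        /* 0 */
+          sk_unhash(&sk);
+          printf("add after unhash:  %d\n", key_add(&sk));        /* -EINVAL */
+          return 0;
+  }
+
+Both the flag update and the check happen under the same lock, so there
+is no window between marking the socket dead and a concurrent add.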
+
+Fixes: 4a992bbd3650 ("mctp: Implement message fragmentation & reassembly")
+Signed-off-by: Jeremy Kerr <jk@codeconstruct.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mctp/af_mctp.c | 1 +
+ net/mctp/route.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index cbbde0f73a08..a77fafbc31cf 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -288,6 +288,7 @@ static void mctp_sk_unhash(struct sock *sk)
+
+ kfree_rcu(key, rcu);
+ }
++ sock_set_flag(sk, SOCK_DEAD);
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+ synchronize_rcu();
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 6aebb4a3eded..89e67399249b 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -135,6 +135,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+
+ spin_lock_irqsave(&net->mctp.keys_lock, flags);
+
++ if (sock_flag(&msk->sk, SOCK_DEAD)) {
++ rc = -EINVAL;
++ goto out_unlock;
++ }
++
+ hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
+ if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
+ key->tag)) {
+@@ -148,6 +153,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+ hlist_add_head(&key->sklist, &msk->keys);
+ }
+
++out_unlock:
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+ return rc;
+--
+2.39.0
+
--- /dev/null
+From 462b445b0f03b8244890677603e81a8adc33009b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 11:11:57 +0100
+Subject: net: mdio-mux-meson-g12a: force internal PHY off on mux switch
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit 7083df59abbc2b7500db312cac706493be0273ff ]
+
+Force the internal PHY off then on when switching to the internal path.
+This fixes problems where the PHY ID is not properly set.
+
+Fixes: 7090425104db ("net: phy: add amlogic g12a mdio mux support")
+Suggested-by: Qi Duan <qi.duan@amlogic.com>
+Co-developed-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20230124101157.232234-1-jbrunet@baylibre.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/mdio-mux-meson-g12a.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
+index b8866bc3f2e8..917c8a10eea0 100644
+--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
++++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
+
+ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+ {
++ u32 value;
+ int ret;
+
+ /* Enable the phy clock */
+@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+
+ /* Initialize ephy control */
+ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+- PHY_CNTL1_CLK_EN |
+- PHY_CNTL1_CLKFREQ |
+- PHY_CNTL1_PHY_ENB,
+- priv->regs + ETH_PHY_CNTL1);
++
++ /* Make sure we get a 0 -> 1 transition on the enable bit */
++ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
++ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
++ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
++ PHY_CNTL1_CLK_EN |
++ PHY_CNTL1_CLKFREQ;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
+ writel(PHY_CNTL2_USE_INTERNAL |
+ PHY_CNTL2_SMI_SRC_MAC |
+ PHY_CNTL2_RX_CLK_EPHY,
+ priv->regs + ETH_PHY_CNTL2);
+
++ value |= PHY_CNTL1_PHY_ENB;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
++
++ /* The phy needs a bit of time to power up */
++ mdelay(10);
++
+ return 0;
+ }
+
+--
+2.39.0
+
--- /dev/null
+From 6802cec8a8376a4cee9b8691171f3a1037d13dda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 09:02:10 +0900
+Subject: net: ravb: Fix lack of register setting after system resumed for Gen3
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit c2b6cdee1d13ffbb24baca3c9b8a572d6b541e4e ]
+
+After the system enters Suspend to RAM, the register settings of this
+hardware are reset because the SoC is turned off. On R-Car Gen3
+(info->ccc_gac), ravb_ptp_init() is called from ravb_probe() only, so
+after the system resumes, the initial PTP settings are missing. Fix
+this by adding ravb_ptp_{init,stop}() to ravb_{resume,suspend}().
+
+Fixes: f5d7837f96e5 ("ravb: ptp: Add CONFIG mode support")
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index dcb18f1e6db0..046189507ec1 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2446,6 +2446,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
+ else
+ ret = ravb_close(ndev);
+
++ if (priv->info->ccc_gac)
++ ravb_ptp_stop(ndev);
++
+ return ret;
+ }
+
+@@ -2482,6 +2485,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ /* Restore descriptor base address table */
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
++ if (priv->info->ccc_gac)
++ ravb_ptp_init(ndev, priv->pdev);
++
+ if (netif_running(ndev)) {
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+--
+2.39.0
+
--- /dev/null
+From 5d8034c5e7b4837b07202a6cb331df6be4d89921 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 09:02:11 +0900
+Subject: net: ravb: Fix possible hang if RIS2_QFF1 happen
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit f3c07758c9007a6bfff5290d9e19d3c41930c897 ]
+
+Since this driver enables the interrupt via RIC2_QFE1, it should also
+clear the corresponding interrupt flag when it is raised. Otherwise,
+the interrupt causes the system to hang.
+
+Note that this also fixes a minor coding style issue (a comment
+indentation) around the fixed code.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 046189507ec1..c6fe1cda7b88 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -792,14 +792,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
++ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
+
+ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+- /* Receive Descriptor Empty int */
++ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+--
+2.39.0
+
--- /dev/null
+From e44c5a4884d56ac30b5abe58ab5f7eb9cc4f1f87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jan 2023 08:45:52 +0000
+Subject: net/sched: sch_taprio: do not schedule in taprio_reset()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ea4fdbaa2f7798cb25adbe4fd52ffc6356f097bb ]
+
+As reported by syzbot and hinted by Vinicius, I should not have added
+a qdisc_synchronize() call in taprio_reset().
+
+taprio_reset() can be called with the qdisc spinlock held (and BH
+disabled), as shown in the included syzbot report [1].
+
+Only taprio_destroy() needed this synchronization, as explained
+in the blamed commit changelog.
+
+[1]
+
+BUG: scheduling while atomic: syz-executor150/5091/0x00000202
+2 locks held by syz-executor150/5091:
+Modules linked in:
+Preemption disabled at:
+[<0000000000000000>] 0x0
+Kernel panic - not syncing: scheduling while atomic: panic_on_warn set ...
+CPU: 1 PID: 5091 Comm: syz-executor150 Not tainted 6.2.0-rc3-syzkaller-00219-g010a74f52203 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/12/2023
+Call Trace:
+<TASK>
+__dump_stack lib/dump_stack.c:88 [inline]
+dump_stack_lvl+0xd1/0x138 lib/dump_stack.c:106
+panic+0x2cc/0x626 kernel/panic.c:318
+check_panic_on_warn.cold+0x19/0x35 kernel/panic.c:238
+__schedule_bug.cold+0xd5/0xfe kernel/sched/core.c:5836
+schedule_debug kernel/sched/core.c:5865 [inline]
+__schedule+0x34e4/0x5450 kernel/sched/core.c:6500
+schedule+0xde/0x1b0 kernel/sched/core.c:6682
+schedule_timeout+0x14e/0x2a0 kernel/time/timer.c:2167
+schedule_timeout_uninterruptible kernel/time/timer.c:2201 [inline]
+msleep+0xb6/0x100 kernel/time/timer.c:2322
+qdisc_synchronize include/net/sch_generic.h:1295 [inline]
+taprio_reset+0x93/0x270 net/sched/sch_taprio.c:1703
+qdisc_reset+0x10c/0x770 net/sched/sch_generic.c:1022
+dev_reset_queue+0x92/0x130 net/sched/sch_generic.c:1285
+netdev_for_each_tx_queue include/linux/netdevice.h:2464 [inline]
+dev_deactivate_many+0x36d/0x9f0 net/sched/sch_generic.c:1351
+dev_deactivate+0xed/0x1b0 net/sched/sch_generic.c:1374
+qdisc_graft+0xe4a/0x1380 net/sched/sch_api.c:1080
+tc_modify_qdisc+0xb6b/0x19a0 net/sched/sch_api.c:1689
+rtnetlink_rcv_msg+0x43e/0xca0 net/core/rtnetlink.c:6141
+netlink_rcv_skb+0x165/0x440 net/netlink/af_netlink.c:2564
+netlink_unicast_kernel net/netlink/af_netlink.c:1330 [inline]
+netlink_unicast+0x547/0x7f0 net/netlink/af_netlink.c:1356
+netlink_sendmsg+0x91b/0xe10 net/netlink/af_netlink.c:1932
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg+0xd3/0x120 net/socket.c:734
+____sys_sendmsg+0x712/0x8c0 net/socket.c:2476
+___sys_sendmsg+0x110/0x1b0 net/socket.c:2530
+__sys_sendmsg+0xf7/0x1c0 net/socket.c:2559
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+
+Fixes: 3a415d59c1db ("net/sched: sch_taprio: fix possible use-after-free")
+Link: https://lore.kernel.org/netdev/167387581653.2747.13878941339893288655.git-patchwork-notify@kernel.org/T/
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://lore.kernel.org/r/20230123084552.574396-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index a76a2afe9585..135ea8b3816f 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1632,7 +1632,6 @@ static void taprio_reset(struct Qdisc *sch)
+ int i;
+
+ hrtimer_cancel(&q->advance_timer);
+- qdisc_synchronize(sch);
+
+ if (q->qdiscs) {
+ for (i = 0; i < dev->num_tx_queues; i++)
+--
+2.39.0
+
--- /dev/null
+From 4e35cd5e3c6f7be8c238b1258160c07fab6f4385 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 13:53:39 -0500
+Subject: net/tg3: resolve deadlock in tg3_reset_task() during EEH
+
+From: David Christensen <drc@linux.vnet.ibm.com>
+
+[ Upstream commit 6c4ca03bd890566d873e3593b32d034bf2f5a087 ]
+
+During EEH error injection testing, a deadlock was encountered in the tg3
+driver when tg3_io_error_detected() was attempting to cancel outstanding
+reset tasks:
+
+crash> foreach UN bt
+...
+PID: 159 TASK: c0000000067c6000 CPU: 8 COMMAND: "eehd"
+...
+ #5 [c00000000681f990] __cancel_work_timer at c00000000019fd18
+ #6 [c00000000681fa30] tg3_io_error_detected at c00800000295f098 [tg3]
+ #7 [c00000000681faf0] eeh_report_error at c00000000004e25c
+...
+
+PID: 290 TASK: c000000036e5f800 CPU: 6 COMMAND: "kworker/6:1"
+...
+ #4 [c00000003721fbc0] rtnl_lock at c000000000c940d8
+ #5 [c00000003721fbe0] tg3_reset_task at c008000002969358 [tg3]
+ #6 [c00000003721fc60] process_one_work at c00000000019e5c4
+...
+
+PID: 296 TASK: c000000037a65800 CPU: 21 COMMAND: "kworker/21:1"
+...
+ #4 [c000000037247bc0] rtnl_lock at c000000000c940d8
+ #5 [c000000037247be0] tg3_reset_task at c008000002969358 [tg3]
+ #6 [c000000037247c60] process_one_work at c00000000019e5c4
+...
+
+PID: 655 TASK: c000000036f49000 CPU: 16 COMMAND: "kworker/16:2"
+...:1
+
+ #4 [c0000000373ebbc0] rtnl_lock at c000000000c940d8
+ #5 [c0000000373ebbe0] tg3_reset_task at c008000002969358 [tg3]
+ #6 [c0000000373ebc60] process_one_work at c00000000019e5c4
+...
+
+Code inspection shows that both tg3_io_error_detected() and
+tg3_reset_task() attempt to acquire the RTNL lock at the beginning of
+their code blocks. If tg3_reset_task() should happen to execute between
+the times when tg3_io_error_detected() acquires the RTNL lock and
+tg3_reset_task_cancel() is called, a deadlock will occur.
+
+Moving the tg3_reset_task_cancel() call earlier within the code block, prior
+to acquiring RTNL, prevents this from happening, but also exposes another
+deadlock issue where tg3_reset_task() may execute AFTER
+tg3_io_error_detected() has executed:
+
+crash> foreach UN bt
+PID: 159 TASK: c0000000067d2000 CPU: 9 COMMAND: "eehd"
+...
+ #4 [c000000006867a60] rtnl_lock at c000000000c940d8
+ #5 [c000000006867a80] tg3_io_slot_reset at c0080000026c2ea8 [tg3]
+ #6 [c000000006867b00] eeh_report_reset at c00000000004de88
+...
+PID: 363 TASK: c000000037564000 CPU: 6 COMMAND: "kworker/6:1"
+...
+ #3 [c000000036c1bb70] msleep at c000000000259e6c
+ #4 [c000000036c1bba0] napi_disable at c000000000c6b848
+ #5 [c000000036c1bbe0] tg3_reset_task at c0080000026d942c [tg3]
+ #6 [c000000036c1bc60] process_one_work at c00000000019e5c4
+...
+
+This issue can be avoided by aborting tg3_reset_task() if EEH error
+recovery is already in progress.
+
+Fixes: db84bf43ef23 ("tg3: tg3_reset_task() needs to use rtnl_lock to synchronize")
+Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Link: https://lore.kernel.org/r/20230124185339.225806-1-drc@linux.vnet.ibm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 8aab07419263..50f86bebbc19 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -11176,7 +11176,7 @@ static void tg3_reset_task(struct work_struct *work)
+ rtnl_lock();
+ tg3_full_lock(tp, 0);
+
+- if (!netif_running(tp->dev)) {
++ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ rtnl_unlock();
+@@ -18111,6 +18111,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
++ /* Want to make sure that the reset task doesn't run */
++ tg3_reset_task_cancel(tp);
++
+ rtnl_lock();
+
+ /* Could be second call or maybe we don't have netdev yet */
+@@ -18127,9 +18130,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ tg3_timer_stop(tp);
+
+- /* Want to make sure that the reset task doesn't run */
+- tg3_reset_task_cancel(tp);
+-
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+--
+2.39.0
+
--- /dev/null
+From 95321a183d8e02fdb31af7bdbb6a006a9b9c98b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 02:47:19 +0100
+Subject: netfilter: conntrack: fix bug in for_each_sctp_chunk
+
+From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+
+[ Upstream commit 98ee0077452527f971567db01386de3c3d97ce13 ]
+
+skb_header_pointer() will return NULL if offset + sizeof(_sch) exceeds
+skb->len, so this offset < skb->len test is redundant.
+
+If sch->length == 0, the walk never advances and ends up in an
+infinite loop, so add a check for sch->length > 0.
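+
+A self-contained sketch of the fixed walk (plain userspace C, not the
+conntrack macro): a zero-length chunk would leave the offset unchanged
+and spin forever, so the loop stops both when the 4-byte header no
+longer fits and when the length is zero:
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <stddef.h>
+
+  /* read a big-endian 16-bit chunk length, bounds-checked the way
+   * skb_header_pointer() is; returns -1 if the header does not fit */
+  static int chunk_length(const uint8_t *buf, size_t buflen, size_t offset)
+  {
+          if (offset + 4 > buflen)
+                  return -1;
+          return (buf[offset + 2] << 8) | buf[offset + 3];
+  }
+
+  int main(void)
+  {
+          /* two well-formed chunks (lengths 8 and 4) followed by a
+           * malformed zero-length chunk */
+          const uint8_t pkt[] = { 1, 0, 0, 8,  0, 0, 0, 0,
+                                  2, 0, 0, 4,
+                                  3, 0, 0, 0 };
+          size_t offset = 0;
+          int len;
+
+          while ((len = chunk_length(pkt, sizeof(pkt), offset)) > 0) {
+                  printf("chunk type %d, length %d at offset %zu\n",
+                         pkt[offset], len, offset);
+                  offset += (len + 3) & ~3;   /* 4-byte padding, as in SCTP */
+          }
+          return 0;
+  }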
+
+Fixes: 9fb9cbb1082d ("[NETFILTER]: Add nf_conntrack subsystem.")
+Suggested-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_proto_sctp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 3704d1c7d3c2..ee317f9a22e5 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -155,8 +155,8 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
+
+ #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
+ for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
+- (offset) < (skb)->len && \
+- ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
++ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
++ (sch)->length; \
+ (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
+
+ /* Some validity checks to make sure the chunks are fine */
+--
+2.39.0
+
--- /dev/null
+From c42845fae2bb5efe153f12313bb1d987a7a6fec8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 02:47:18 +0100
+Subject: netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
+
+From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+
+[ Upstream commit a9993591fa94246b16b444eea55d84c54608282a ]
+
+RFC 9260, Sec 8.5.1 states that for ABORT/SHUTDOWN_COMPLETE, the chunk
+MUST be accepted if the vtag of the packet matches its own tag and the
+T bit is not set OR if it is set to its peer's vtag and the T bit is set
+in chunk flags. Otherwise the packet MUST be silently dropped.
+
+Update vtag verification for ABORT/SHUTDOWN_COMPLETE based on the above
+description.
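+
+A minimal sketch of that rule (the 0x01 value mirrors the T bit being
+the low-order chunk-flag bit; everything else is illustrative):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define CHUNK_FLAG_T 0x01
+
+  static bool vtag_acceptable(uint32_t vtag, uint8_t flags,
+                              uint32_t own_vtag, uint32_t peer_vtag)
+  {
+          if (flags & CHUNK_FLAG_T)
+                  return vtag == peer_vtag;  /* T set: must match peer's vtag */
+          return vtag == own_vtag;           /* T clear: must match own vtag */
+  }
+
+  int main(void)
+  {
+          uint32_t own = 0x1111, peer = 0x2222;
+
+          printf("%d\n", vtag_acceptable(0x1111, 0, own, peer));            /* 1 */
+          printf("%d\n", vtag_acceptable(0x2222, 0, own, peer));            /* 0 */
+          printf("%d\n", vtag_acceptable(0x2222, CHUNK_FLAG_T, own, peer)); /* 1 */
+          printf("%d\n", vtag_acceptable(0x1111, CHUNK_FLAG_T, own, peer)); /* 0 */
+          return 0;
+  }
+
+The second case (peer's vtag with the T bit clear) is the one the
+pre-patch check would also have accepted.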
+
+Fixes: 9fb9cbb1082d ("[NETFILTER]: Add nf_conntrack subsystem.")
+Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_proto_sctp.c | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 5a936334b517..3704d1c7d3c2 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -412,22 +412,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ /* Special cases of Verification tag check (Sec 8.5.1) */
+ if (sch->type == SCTP_CID_INIT) {
+- /* Sec 8.5.1 (A) */
++ /* (A) vtag MUST be zero */
+ if (sh->vtag != 0)
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_ABORT) {
+- /* Sec 8.5.1 (B) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir])
++ /* (B) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+- /* Sec 8.5.1 (C) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir] &&
+- sch->flags & SCTP_CHUNK_FLAG_T)
++ /* (C) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+- /* Sec 8.5.1 (D) */
++ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ if (sh->vtag != ct->proto.sctp.vtag[dir])
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+--
+2.39.0
+
--- /dev/null
+From 115c51f5f8c9ba7ed70aa8fcdc11df189c6b8bab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Jan 2023 23:49:46 +0100
+Subject: netfilter: nft_set_rbtree: skip elements in transaction from garbage
+ collection
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 5d235d6ce75c12a7fdee375eb211e4116f7ab01b ]
+
+To avoid interfering with an ongoing transaction, do not perform
+garbage collection on inactive elements. Reset the annotated previous
+end interval if the expired element is marked as busy (the control
+plane removed the element right before expiration).
+
+Fixes: 8d8540c4f5e0 ("netfilter: nft_set_rbtree: add timeout support")
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_rbtree.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 217225e13faf..19ea4d3c3553 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -563,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
+ struct nft_rbtree *priv;
+ struct rb_node *node;
+ struct nft_set *set;
++ struct net *net;
++ u8 genmask;
+
+ priv = container_of(work, struct nft_rbtree, gc_work.work);
+ set = nft_set_container_of(priv);
++ net = read_pnet(&set->net);
++ genmask = nft_genmask_cur(net);
+
+ write_lock_bh(&priv->lock);
+ write_seqcount_begin(&priv->count);
+ for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
++ if (!nft_set_elem_active(&rbe->ext, genmask))
++ continue;
++
++ /* elements are reversed in the rbtree for historical reasons,
++ * from highest to lowest value, that is why end element is
++ * always visited before the start element.
++ */
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe_end = rbe;
+ continue;
+ }
+ if (!nft_set_elem_expired(&rbe->ext))
+ continue;
+- if (nft_set_elem_mark_busy(&rbe->ext))
++
++ if (nft_set_elem_mark_busy(&rbe->ext)) {
++ rbe_end = NULL;
+ continue;
++ }
+
+ if (rbe_prev) {
+ rb_erase(&rbe_prev->node, &priv->root);
+--
+2.39.0
+
--- /dev/null
+From 7682c0f3ac48b7b63ee8b45feb268ceebe36d7eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Jan 2023 23:38:32 +0100
+Subject: netfilter: nft_set_rbtree: Switch to node list walk for overlap
+ detection
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit c9e6978e2725a7d4b6cd23b2facd3f11422c0643 ]
+
+...instead of a tree descent, which became overly complicated in an
+attempt to cover cases where expired or inactive elements would affect
+comparisons with the new element being inserted.
+
+Further, it turned out that it's probably impossible to cover all those
+cases, as inactive nodes might entirely hide subtrees consisting of a
+complete interval plus a node that makes the current insertion not
+overlap.
+
+To speed up the overlap check, descend the tree to find a greater
+element that is closer to the key value to insert, then walk down the
+node list for overlap detection. Starting the overlap check from
+rb_first() unconditionally is slow: it takes 10 times longer due to the
+full linear traversal of the list.
+
+Moreover, perform garbage collection of expired elements when walking
+down the node list to avoid bogus overlap reports.
+
+For the insertion operation itself, this essentially reverts to the
+implementation before commit 7c84d41416d8 ("netfilter: nft_set_rbtree:
+Detect partial overlaps on insertion"), except that cases of complete
+overlap are already handled in the overlap detection phase itself, which
+slightly simplifies the loop to find the insertion point.
+
+Based on initial patch from Stefano Brivio, including text from the
+original patch description too.
+
+Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion")
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_rbtree.c | 316 ++++++++++++++++++++-------------
+ 1 file changed, 189 insertions(+), 127 deletions(-)
+
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 7325bee7d144..217225e13faf 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+ return !nft_rbtree_interval_end(rbe);
+ }
+
+-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
+- const struct nft_rbtree_elem *interval)
++static int nft_rbtree_cmp(const struct nft_set *set,
++ const struct nft_rbtree_elem *e1,
++ const struct nft_rbtree_elem *e2)
+ {
+- return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
++ return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
++ set->klen);
+ }
+
+ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ const struct nft_rbtree_elem *rbe, *interval = NULL;
+ u8 genmask = nft_genmask_cur(net);
+ const struct rb_node *parent;
+- const void *this;
+ int d;
+
+ parent = rcu_dereference_raw(priv->root.rb_node);
+@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+- this = nft_set_ext_key(&rbe->ext);
+- d = memcmp(this, key, set->klen);
++ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
+ if (d < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ if (interval &&
+- nft_rbtree_equal(set, this, interval) &&
++ !nft_rbtree_cmp(set, rbe, interval) &&
+ nft_rbtree_interval_end(rbe) &&
+ nft_rbtree_interval_start(interval))
+ continue;
+@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ return rbe;
+ }
+
++static int nft_rbtree_gc_elem(const struct nft_set *__set,
++ struct nft_rbtree *priv,
++ struct nft_rbtree_elem *rbe)
++{
++ struct nft_set *set = (struct nft_set *)__set;
++ struct rb_node *prev = rb_prev(&rbe->node);
++ struct nft_rbtree_elem *rbe_prev;
++ struct nft_set_gc_batch *gcb;
++
++ gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
++ if (!gcb)
++ return -ENOMEM;
++
++ /* search for expired end interval coming before this element. */
++ do {
++ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++ if (nft_rbtree_interval_end(rbe_prev))
++ break;
++
++ prev = rb_prev(prev);
++ } while (prev != NULL);
++
++ rb_erase(&rbe_prev->node, &priv->root);
++ rb_erase(&rbe->node, &priv->root);
++ atomic_sub(2, &set->nelems);
++
++ nft_set_gc_batch_add(gcb, rbe);
++ nft_set_gc_batch_complete(gcb);
++
++ return 0;
++}
++
++static bool nft_rbtree_update_first(const struct nft_set *set,
++ struct nft_rbtree_elem *rbe,
++ struct rb_node *first)
++{
++ struct nft_rbtree_elem *first_elem;
++
++ first_elem = rb_entry(first, struct nft_rbtree_elem, node);
++ /* this element is closest to where the new element is to be inserted:
++ * update the first element for the node list path.
++ */
++ if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
++ return true;
++
++ return false;
++}
++
+ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ struct nft_rbtree_elem *new,
+ struct nft_set_ext **ext)
+ {
+- bool overlap = false, dup_end_left = false, dup_end_right = false;
++ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
++ struct rb_node *node, *parent, **p, *first = NULL;
+ struct nft_rbtree *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_next(net);
+- struct nft_rbtree_elem *rbe;
+- struct rb_node *parent, **p;
+- int d;
++ int d, err;
+
+- /* Detect overlaps as we descend the tree. Set the flag in these cases:
+- *
+- * a1. _ _ __>| ?_ _ __| (insert end before existing end)
+- * a2. _ _ ___| ?_ _ _>| (insert end after existing end)
+- * a3. _ _ ___? >|_ _ __| (insert start before existing end)
+- *
+- * and clear it later on, as we eventually reach the points indicated by
+- * '?' above, in the cases described below. We'll always meet these
+- * later, locally, due to tree ordering, and overlaps for the intervals
+- * that are the closest together are always evaluated last.
+- *
+- * b1. _ _ __>| !_ _ __| (insert end before existing start)
+- * b2. _ _ ___| !_ _ _>| (insert end after existing start)
+- * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+- * '--' no nodes falling in this range
+- * b4. >|_ _ ! (insert start before existing start)
+- *
+- * Case a3. resolves to b3.:
+- * - if the inserted start element is the leftmost, because the '0'
+- * element in the tree serves as end element
+- * - otherwise, if an existing end is found immediately to the left. If
+- * there are existing nodes in between, we need to further descend the
+- * tree before we can conclude the new start isn't causing an overlap
+- *
+- * or to b4., which, preceded by a3., means we already traversed one or
+- * more existing intervals entirely, from the right.
+- *
+- * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
+- * in that order.
+- *
+- * The flag is also cleared in two special cases:
+- *
+- * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+- * b6. |__ _ >|!__ _ _ (insert end right after existing start)
+- *
+- * which always happen as last step and imply that no further
+- * overlapping is possible.
+- *
+- * Another special case comes from the fact that start elements matching
+- * an already existing start element are allowed: insertion is not
+- * performed but we return -EEXIST in that case, and the error will be
+- * cleared by the caller if NLM_F_EXCL is not present in the request.
+- * This way, request for insertion of an exact overlap isn't reported as
+- * error to userspace if not desired.
+- *
+- * However, if the existing start matches a pre-existing start, but the
+- * end element doesn't match the corresponding pre-existing end element,
+- * we need to report a partial overlap. This is a local condition that
+- * can be noticed without need for a tracking flag, by checking for a
+- * local duplicated end for a corresponding start, from left and right,
+- * separately.
++ /* Descend the tree to search for an existing element greater than the
++ * key value to insert that is greater than the new element. This is the
++ * first element to walk the ordered elements to find possible overlap.
+ */
+-
+ parent = NULL;
+ p = &priv->root.rb_node;
+ while (*p != NULL) {
+ parent = *p;
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+- d = memcmp(nft_set_ext_key(&rbe->ext),
+- nft_set_ext_key(&new->ext),
+- set->klen);
++ d = nft_rbtree_cmp(set, rbe, new);
++
+ if (d < 0) {
+ p = &parent->rb_left;
+-
+- if (nft_rbtree_interval_start(new)) {
+- if (nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext) && !*p)
+- overlap = false;
+- } else {
+- if (dup_end_left && !*p)
+- return -ENOTEMPTY;
+-
+- overlap = nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext,
+- genmask) &&
+- !nft_set_elem_expired(&rbe->ext);
+-
+- if (overlap) {
+- dup_end_right = true;
+- continue;
+- }
+- }
+ } else if (d > 0) {
+- p = &parent->rb_right;
++ if (!first ||
++ nft_rbtree_update_first(set, rbe, first))
++ first = &rbe->node;
+
+- if (nft_rbtree_interval_end(new)) {
+- if (dup_end_right && !*p)
+- return -ENOTEMPTY;
+-
+- overlap = nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext,
+- genmask) &&
+- !nft_set_elem_expired(&rbe->ext);
+-
+- if (overlap) {
+- dup_end_left = true;
+- continue;
+- }
+- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext)) {
+- overlap = nft_rbtree_interval_end(rbe);
+- }
++ p = &parent->rb_right;
+ } else {
+- if (nft_rbtree_interval_end(rbe) &&
+- nft_rbtree_interval_start(new)) {
++ if (nft_rbtree_interval_end(rbe))
+ p = &parent->rb_left;
+-
+- if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext))
+- overlap = false;
+- } else if (nft_rbtree_interval_start(rbe) &&
+- nft_rbtree_interval_end(new)) {
++ else
+ p = &parent->rb_right;
++ }
++ }
++
++ if (!first)
++ first = rb_first(&priv->root);
++
++ /* Detect overlap by going through the list of valid tree nodes.
++ * Values stored in the tree are in reversed order, starting from
++ * highest to lowest value.
++ */
++ for (node = first; node != NULL; node = rb_next(node)) {
++ rbe = rb_entry(node, struct nft_rbtree_elem, node);
++
++ if (!nft_set_elem_active(&rbe->ext, genmask))
++ continue;
+
+- if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext))
+- overlap = false;
+- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext)) {
+- *ext = &rbe->ext;
+- return -EEXIST;
+- } else {
+- overlap = false;
+- if (nft_rbtree_interval_end(rbe))
+- p = &parent->rb_left;
+- else
+- p = &parent->rb_right;
++ /* perform garbage collection to avoid bogus overlap reports. */
++ if (nft_set_elem_expired(&rbe->ext)) {
++ err = nft_rbtree_gc_elem(set, priv, rbe);
++ if (err < 0)
++ return err;
++
++ continue;
++ }
++
++ d = nft_rbtree_cmp(set, rbe, new);
++ if (d == 0) {
++ /* Matching end element: no need to look for an
++ * overlapping greater or equal element.
++ */
++ if (nft_rbtree_interval_end(rbe)) {
++ rbe_le = rbe;
++ break;
++ }
++
++ /* first element that is greater or equal to key value. */
++ if (!rbe_ge) {
++ rbe_ge = rbe;
++ continue;
++ }
++
++ /* this is a closer more or equal element, update it. */
++ if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
++ rbe_ge = rbe;
++ continue;
+ }
++
++ /* element is equal to key value, make sure flags are
++ * the same, an existing more or equal start element
++ * must not be replaced by more or equal end element.
++ */
++ if ((nft_rbtree_interval_start(new) &&
++ nft_rbtree_interval_start(rbe_ge)) ||
++ (nft_rbtree_interval_end(new) &&
++ nft_rbtree_interval_end(rbe_ge))) {
++ rbe_ge = rbe;
++ continue;
++ }
++ } else if (d > 0) {
++ /* annotate element greater than the new element. */
++ rbe_ge = rbe;
++ continue;
++ } else if (d < 0) {
++ /* annotate element less than the new element. */
++ rbe_le = rbe;
++ break;
+ }
++ }
+
+- dup_end_left = dup_end_right = false;
++ /* - new start element matching existing start element: full overlap
++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++ */
++ if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
++ nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
++ *ext = &rbe_ge->ext;
++ return -EEXIST;
++ }
++
++ /* - new end element matching existing end element: full overlap
++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++ */
++ if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
++ nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
++ *ext = &rbe_le->ext;
++ return -EEXIST;
+ }
+
+- if (overlap)
++ /* - new start element with existing closest, less or equal key value
++ * being a start element: partial overlap, reported as -ENOTEMPTY.
++ * Anonymous sets allow for two consecutive start element since they
++ * are constant, skip them to avoid bogus overlap reports.
++ */
++ if (!nft_set_is_anonymous(set) && rbe_le &&
++ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
++ return -ENOTEMPTY;
++
++ /* - new end element with existing closest, less or equal key value
++ * being a end element: partial overlap, reported as -ENOTEMPTY.
++ */
++ if (rbe_le &&
++ nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
++ /* - new end element with existing closest, greater or equal key value
++ * being an end element: partial overlap, reported as -ENOTEMPTY
++ */
++ if (rbe_ge &&
++ nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
++ return -ENOTEMPTY;
++
++ /* Accepted element: pick insertion point depending on key value */
++ parent = NULL;
++ p = &priv->root.rb_node;
++ while (*p != NULL) {
++ parent = *p;
++ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
++ d = nft_rbtree_cmp(set, rbe, new);
++
++ if (d < 0)
++ p = &parent->rb_left;
++ else if (d > 0)
++ p = &parent->rb_right;
++ else if (nft_rbtree_interval_end(rbe))
++ p = &parent->rb_left;
++ else
++ p = &parent->rb_right;
++ }
++
+ rb_link_node_rcu(&new->node, parent, p);
+ rb_insert_color(&new->node, &priv->root);
+ return 0;
+--
+2.39.0
+
--- /dev/null
+From 41714c47c09ead6064a9656253a030df00dddcfb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 12:59:54 +0000
+Subject: netlink: annotate data races around dst_portid and dst_group
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 004db64d185a5f23dfb891d7701e23713b2420ee ]
+
+netlink_getname(), netlink_sendmsg() and netlink_getsockbyportid()
+can read nlk->dst_portid and nlk->dst_group while another
+thread is changing them.
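+
+A userspace analogue of the annotation (C11 relaxed atomics standing in
+for the kernel's WRITE_ONCE()/READ_ONCE(); names are stand-ins):
+
+  #include <stdatomic.h>
+  #include <stdint.h>
+  #include <inttypes.h>
+  #include <stdio.h>
+
+  struct nlk_stub {
+          _Atomic uint32_t dst_portid;
+  };
+
+  /* writer side, analogue of WRITE_ONCE(nlk->dst_portid, portid) */
+  static void connect_peer(struct nlk_stub *nlk, uint32_t portid)
+  {
+          atomic_store_explicit(&nlk->dst_portid, portid,
+                                memory_order_relaxed);
+  }
+
+  /* lockless reader side, analogue of READ_ONCE(nlk->dst_portid) */
+  static uint32_t getname_peer(struct nlk_stub *nlk)
+  {
+          return atomic_load_explicit(&nlk->dst_portid,
+                                      memory_order_relaxed);
+  }
+
+  int main(void)
+  {
+          struct nlk_stub nlk = { 0 };
+
+          connect_peer(&nlk, 4242);
+          printf("dst_portid = %" PRIu32 "\n", getname_peer(&nlk));
+          return 0;
+  }
+
+The marked accesses cannot be torn or duplicated by the compiler, which
+is essentially what the annotation documents for the concurrent readers
+listed above.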
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netlink/af_netlink.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1eab80af5112..e041d2df9280 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1090,8 +1090,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+
+ if (addr->sa_family == AF_UNSPEC) {
+ sk->sk_state = NETLINK_UNCONNECTED;
+- nlk->dst_portid = 0;
+- nlk->dst_group = 0;
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, 0);
++ WRITE_ONCE(nlk->dst_group, 0);
+ return 0;
+ }
+ if (addr->sa_family != AF_NETLINK)
+@@ -1113,8 +1114,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+
+ if (err == 0) {
+ sk->sk_state = NETLINK_CONNECTED;
+- nlk->dst_portid = nladdr->nl_pid;
+- nlk->dst_group = ffs(nladdr->nl_groups);
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
++ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+ }
+
+ return err;
+@@ -1131,8 +1133,9 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ nladdr->nl_pad = 0;
+
+ if (peer) {
+- nladdr->nl_pid = nlk->dst_portid;
+- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
++ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
+ } else {
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+ nladdr->nl_pid = READ_ONCE(nlk->portid);
+@@ -1162,8 +1165,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+
+ /* Don't bother queuing skb if kernel socket has no input function */
+ nlk = nlk_sk(sock);
++ /* dst_portid can be changed in netlink_connect() */
+ if (sock->sk_state == NETLINK_CONNECTED &&
+- nlk->dst_portid != nlk_sk(ssk)->portid) {
++ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+ sock_put(sock);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+@@ -1899,8 +1903,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out;
+ netlink_skb_flags |= NETLINK_SKB_DST;
+ } else {
+- dst_portid = nlk->dst_portid;
+- dst_group = nlk->dst_group;
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ dst_portid = READ_ONCE(nlk->dst_portid);
++ dst_group = READ_ONCE(nlk->dst_group);
+ }
+
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+--
+2.39.0
+
--- /dev/null
+From e176809a9657486fbb2c6979086d9719874d6557 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 12:59:53 +0000
+Subject: netlink: annotate data races around nlk->portid
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit c1bb9484e3b05166880da8574504156ccbd0549e ]
+
+syzbot reminds us netlink_getname() runs locklessly [1]
+
+This first patch annotates the race against nlk->portid.
+
+Following patches take care of the remaining races.
+
+[1]
+BUG: KCSAN: data-race in netlink_getname / netlink_insert
+
+write to 0xffff88814176d310 of 4 bytes by task 2315 on cpu 1:
+netlink_insert+0xf1/0x9a0 net/netlink/af_netlink.c:583
+netlink_autobind+0xae/0x180 net/netlink/af_netlink.c:856
+netlink_sendmsg+0x444/0x760 net/netlink/af_netlink.c:1895
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg net/socket.c:734 [inline]
+____sys_sendmsg+0x38f/0x500 net/socket.c:2476
+___sys_sendmsg net/socket.c:2530 [inline]
+__sys_sendmsg+0x19a/0x230 net/socket.c:2559
+__do_sys_sendmsg net/socket.c:2568 [inline]
+__se_sys_sendmsg net/socket.c:2566 [inline]
+__x64_sys_sendmsg+0x42/0x50 net/socket.c:2566
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+read to 0xffff88814176d310 of 4 bytes by task 2316 on cpu 0:
+netlink_getname+0xcd/0x1a0 net/netlink/af_netlink.c:1144
+__sys_getsockname+0x11d/0x1b0 net/socket.c:2026
+__do_sys_getsockname net/socket.c:2041 [inline]
+__se_sys_getsockname net/socket.c:2038 [inline]
+__x64_sys_getsockname+0x3e/0x50 net/socket.c:2038
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+value changed: 0x00000000 -> 0xc9a49780
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 2316 Comm: syz-executor.2 Not tainted 6.2.0-rc3-syzkaller-00030-ge8f60cd7db24-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netlink/af_netlink.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 974d32632ef4..1eab80af5112 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -578,7 +578,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ if (nlk_sk(sk)->bound)
+ goto err;
+
+- nlk_sk(sk)->portid = portid;
++ /* portid can be read locklessly from netlink_getname(). */
++ WRITE_ONCE(nlk_sk(sk)->portid, portid);
++
+ sock_hold(sk);
+
+ err = __netlink_insert(table, sk);
+@@ -1132,7 +1134,8 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ nladdr->nl_pid = nlk->dst_portid;
+ nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+ } else {
+- nladdr->nl_pid = nlk->portid;
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ nladdr->nl_pid = READ_ONCE(nlk->portid);
+ netlink_lock_table();
+ nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+ netlink_unlock_table();
+--
+2.39.0
+
--- /dev/null
+From bbf82718c9181d91e2254ec5873d6dbe5a1a7912 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 12:59:55 +0000
+Subject: netlink: annotate data races around sk_state
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9b663b5cbb15b494ef132a3c937641c90646eb73 ]
+
+netlink_getsockbyportid() reads sk_state while a concurrent
+netlink_connect() can change its value.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netlink/af_netlink.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e041d2df9280..011ec7d9a719 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1089,7 +1089,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ return -EINVAL;
+
+ if (addr->sa_family == AF_UNSPEC) {
+- sk->sk_state = NETLINK_UNCONNECTED;
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, 0);
+ WRITE_ONCE(nlk->dst_group, 0);
+@@ -1113,7 +1114,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ err = netlink_autobind(sock);
+
+ if (err == 0) {
+- sk->sk_state = NETLINK_CONNECTED;
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+@@ -1165,8 +1167,8 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+
+ /* Don't bother queuing skb if kernel socket has no input function */
+ nlk = nlk_sk(sock);
+- /* dst_portid can be changed in netlink_connect() */
+- if (sock->sk_state == NETLINK_CONNECTED &&
++ /* dst_portid and sk_state can be changed in netlink_connect() */
++ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+ sock_put(sock);
+ return ERR_PTR(-ECONNREFUSED);
+--
+2.39.0
+
--- /dev/null
+From 77060cb0dee43b10a779dcbc830554634fb97566 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 11:01:50 +0000
+Subject: netlink: prevent potential spectre v1 gadgets
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f0950402e8c76e7dcb08563f1b4e8000fbc62455 ]
+
+Most netlink attributes are parsed and validated from
+__nla_validate_parse() or validate_nla()
+
+ u16 type = nla_type(nla);
+
+ if (type == 0 || type > maxtype) {
+ /* error or continue */
+ }
+
+@type is then used as an array index and can be used
+as a Spectre v1 gadget.
+
+array_index_nospec() can be used to prevent leaking
+content of kernel memory to malicious users.
+
+This should take care of the vast majority of netlink uses,
+but an audit is needed to take care of others where
+validation is not yet centralized in core netlink functions.
+
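+A user-space sketch of where the clamp sits relative to the bounds
+check (a plain conditional stands in for array_index_nospec(), whose
+real, branchless definition lives in include/linux/nospec.h; the
+policy table and names here are made up):
+
+  #include <stdio.h>
+
+  #define MAXTYPE 4
+
+  /* made-up policy table, indexed by attribute type */
+  static const char *const policy[MAXTYPE + 1] = {
+          "unspec", "u8", "u16", "u32", "string",
+  };
+
+  /* simplified stand-in: after the bounds check has passed, force the
+   * index into range so that even a mispredicted (speculative) path
+   * cannot index outside the array
+   */
+  static unsigned int index_nospec(unsigned int index, unsigned int size)
+  {
+          return index < size ? index : 0;
+  }
+
+  static const char *lookup_policy(unsigned int type)
+  {
+          if (type == 0 || type > MAXTYPE)
+                  return NULL;            /* reject, as validate_nla() does */
+
+          type = index_nospec(type, MAXTYPE + 1);  /* clamp before indexing */
+          return policy[type];
+  }
+
+  int main(void)
+  {
+          const char *ok = lookup_policy(3);
+          const char *bad = lookup_policy(99);
+
+          printf("type 3  -> %s\n", ok ? ok : "rejected");
+          printf("type 99 -> %s\n", bad ? bad : "rejected");
+          return 0;
+  }
+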
+Fixes: bfa83a9e03cf ("[NETLINK]: Type-safe netlink messages/attributes interface")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230119110150.2678537-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/nlattr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 86029ad5ead4..73635bdb0062 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include <linux/skbuff.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -369,6 +370,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
+ if (type <= 0 || type > maxtype)
+ return 0;
+
++ type = array_index_nospec(type, maxtype + 1);
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+@@ -584,6 +586,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ }
+ continue;
+ }
++ type = array_index_nospec(type, maxtype + 1);
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy,
+ validate, extack, depth);
+--
+2.39.0
+
--- /dev/null
+From 157d00a161303402ed4491999ee9be059560675d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 15:19:27 -0800
+Subject: netrom: Fix use-after-free of a listening socket.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 409db27e3a2eb5e8ef7226ca33be33361b3ed1c9 ]
+
+syzbot reported a use-after-free in do_accept(), precisely nr_accept()
+as sk_prot_alloc() allocated the memory and sock_put() frees it. [0]
+
+The issue could happen if the heartbeat timer is fired and
+nr_heartbeat_expiry() calls nr_destroy_socket(), where a socket
+has SOCK_DESTROY or a listening socket has SOCK_DEAD.
+
+In this case, the first condition cannot be true. SOCK_DESTROY is
+flagged in nr_release() only when the file descriptor is close()d,
+but accept() is being called for the listening socket, so the second
+condition must be true.
+
+Usually, the AF_NETROM listener neither starts timers nor sets
+SOCK_DEAD. However, the condition is met if connect() fails before
+listen(). connect() starts the t1 timer and heartbeat timer, and
+t1timer calls nr_disconnect() when timeout happens. Then, SOCK_DEAD
+is set, and if we call listen(), the heartbeat timer calls
+nr_destroy_socket().
+
+ nr_connect
+ nr_establish_data_link(sk)
+ nr_start_t1timer(sk)
+ nr_start_heartbeat(sk)
+ nr_t1timer_expiry
+ nr_disconnect(sk, ETIMEDOUT)
+ nr_sk(sk)->state = NR_STATE_0
+ sk->sk_state = TCP_CLOSE
+ sock_set_flag(sk, SOCK_DEAD)
+nr_listen
+ if (sk->sk_state != TCP_LISTEN)
+ sk->sk_state = TCP_LISTEN
+ nr_heartbeat_expiry
+ switch (nr->state)
+ case NR_STATE_0
+ if (sk->sk_state == TCP_LISTEN &&
+ sock_flag(sk, SOCK_DEAD))
+ nr_destroy_socket(sk)
+
+This path seems expected, and nr_destroy_socket() is called to clean
+up resources. Initially, there was sock_hold() before nr_destroy_socket()
+so that the socket would not be freed, but the commit 517a16b1a88b
+("netrom: Decrease sock refcount when sock timers expire") accidentally
+removed it.
+
+To fix use-after-free, let's add sock_hold().
+
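+The hold-before-destroy pattern the fix restores, as a user-space
+sketch (a C11 atomic counter and made-up helpers stand in for the real
+sock refcounting):
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct obj {
+          atomic_int refcnt;
+  };
+
+  static void put(struct obj *o)
+  {
+          if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
+                  printf("last reference dropped, freeing\n");
+                  free(o);
+          }
+  }
+
+  static void hold(struct obj *o)
+  {
+          atomic_fetch_add(&o->refcnt, 1);
+  }
+
+  /* analog of nr_destroy_socket(): consumes one reference */
+  static void destroy(struct obj *o)
+  {
+          put(o);
+  }
+
+  /* analog of the heartbeat timer: it drops its own reference at the
+   * end, so it must take an extra one before calling destroy(), or the
+   * final put() operates on freed memory
+   */
+  static void heartbeat_expiry(struct obj *o)
+  {
+          hold(o);        /* the line the fix adds back */
+          destroy(o);
+          put(o);         /* drop the timer's reference */
+  }
+
+  int main(void)
+  {
+          struct obj *o = calloc(1, sizeof(*o));
+
+          if (!o)
+                  return 1;
+          atomic_store(&o->refcnt, 1);    /* reference held by the timer */
+          heartbeat_expiry(o);
+          return 0;
+  }
+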
+[0]:
+BUG: KASAN: use-after-free in do_accept+0x483/0x510 net/socket.c:1848
+Read of size 8 at addr ffff88807978d398 by task syz-executor.3/5315
+
+CPU: 0 PID: 5315 Comm: syz-executor.3 Not tainted 6.2.0-rc3-syzkaller-00165-gd9fc1511728c #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xd1/0x138 lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:306 [inline]
+ print_report+0x15e/0x461 mm/kasan/report.c:417
+ kasan_report+0xbf/0x1f0 mm/kasan/report.c:517
+ do_accept+0x483/0x510 net/socket.c:1848
+ __sys_accept4_file net/socket.c:1897 [inline]
+ __sys_accept4+0x9a/0x120 net/socket.c:1927
+ __do_sys_accept net/socket.c:1944 [inline]
+ __se_sys_accept net/socket.c:1941 [inline]
+ __x64_sys_accept+0x75/0xb0 net/socket.c:1941
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+RIP: 0033:0x7fa436a8c0c9
+Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 f1 19 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fa437784168 EFLAGS: 00000246 ORIG_RAX: 000000000000002b
+RAX: ffffffffffffffda RBX: 00007fa436bac050 RCX: 00007fa436a8c0c9
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000005
+RBP: 00007fa436ae7ae9 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007ffebc6700df R14: 00007fa437784300 R15: 0000000000022000
+ </TASK>
+
+Allocated by task 5294:
+ kasan_save_stack+0x22/0x40 mm/kasan/common.c:45
+ kasan_set_track+0x25/0x30 mm/kasan/common.c:52
+ ____kasan_kmalloc mm/kasan/common.c:371 [inline]
+ ____kasan_kmalloc mm/kasan/common.c:330 [inline]
+ __kasan_kmalloc+0xa3/0xb0 mm/kasan/common.c:380
+ kasan_kmalloc include/linux/kasan.h:211 [inline]
+ __do_kmalloc_node mm/slab_common.c:968 [inline]
+ __kmalloc+0x5a/0xd0 mm/slab_common.c:981
+ kmalloc include/linux/slab.h:584 [inline]
+ sk_prot_alloc+0x140/0x290 net/core/sock.c:2038
+ sk_alloc+0x3a/0x7a0 net/core/sock.c:2091
+ nr_create+0xb6/0x5f0 net/netrom/af_netrom.c:433
+ __sock_create+0x359/0x790 net/socket.c:1515
+ sock_create net/socket.c:1566 [inline]
+ __sys_socket_create net/socket.c:1603 [inline]
+ __sys_socket_create net/socket.c:1588 [inline]
+ __sys_socket+0x133/0x250 net/socket.c:1636
+ __do_sys_socket net/socket.c:1649 [inline]
+ __se_sys_socket net/socket.c:1647 [inline]
+ __x64_sys_socket+0x73/0xb0 net/socket.c:1647
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Freed by task 14:
+ kasan_save_stack+0x22/0x40 mm/kasan/common.c:45
+ kasan_set_track+0x25/0x30 mm/kasan/common.c:52
+ kasan_save_free_info+0x2b/0x40 mm/kasan/generic.c:518
+ ____kasan_slab_free mm/kasan/common.c:236 [inline]
+ ____kasan_slab_free+0x13b/0x1a0 mm/kasan/common.c:200
+ kasan_slab_free include/linux/kasan.h:177 [inline]
+ __cache_free mm/slab.c:3394 [inline]
+ __do_kmem_cache_free mm/slab.c:3580 [inline]
+ __kmem_cache_free+0xcd/0x3b0 mm/slab.c:3587
+ sk_prot_free net/core/sock.c:2074 [inline]
+ __sk_destruct+0x5df/0x750 net/core/sock.c:2166
+ sk_destruct net/core/sock.c:2181 [inline]
+ __sk_free+0x175/0x460 net/core/sock.c:2192
+ sk_free+0x7c/0xa0 net/core/sock.c:2203
+ sock_put include/net/sock.h:1991 [inline]
+ nr_heartbeat_expiry+0x1d7/0x460 net/netrom/nr_timer.c:148
+ call_timer_fn+0x1da/0x7c0 kernel/time/timer.c:1700
+ expire_timers+0x2c6/0x5c0 kernel/time/timer.c:1751
+ __run_timers kernel/time/timer.c:2022 [inline]
+ __run_timers kernel/time/timer.c:1995 [inline]
+ run_timer_softirq+0x326/0x910 kernel/time/timer.c:2035
+ __do_softirq+0x1fb/0xadc kernel/softirq.c:571
+
+Fixes: 517a16b1a88b ("netrom: Decrease sock refcount when sock timers expire")
+Reported-by: syzbot+5fafd5cfe1fc91f6b352@syzkaller.appspotmail.com
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20230120231927.51711-1-kuniyu@amazon.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netrom/nr_timer.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index a8da88db7893..4e7c968cde2d 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ is accepted() it isn't 'dead' so doesn't get removed. */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
++ sock_hold(sk);
+ bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
+ goto out;
+--
+2.39.0
+
--- /dev/null
+From 92fc52767ce4fa1fcbc79ae9ec35e09496bc24da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 13:29:14 -0800
+Subject: nvme: fix passthrough csi check
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit 85eee6341abb81ac6a35062ffd5c3029eb53be6b ]
+
+The namespace head saves the Command Set Indicator enum, so use that
+instead of the Command Set Selected. The two values are not the same.
+
+Fixes: 831ed60c2aca2d ("nvme: also return I/O command effects from nvme_command_effects")
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 672f53d5651a..06750f3d5274 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1132,7 +1132,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+ if (ns) {
+ if (ns->head->effects)
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+- if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
++ if (ns->head->ids.csi == NVME_CSI_NVM)
+ effects |= nvme_known_nvm_effects(opcode);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn_once(ctrl->device,
+--
+2.39.0
+
--- /dev/null
+From 8868722a7fc966ecc9a481b644dd8f57863a7f6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Oct 2021 16:06:28 +0100
+Subject: ravb: Rename "no_ptp_cfg_active" and "ptp_cfg_active" variables
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 2b061b545cd0d393585da2909044b15db1ac426f ]
+
+Rename the variable "no_ptp_cfg_active" to "gptp" and
+"ptp_cfg_active" to "ccc_gac" to match the HW features.
+
+There is no functional change.
+
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Suggested-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: c2b6cdee1d13 ("net: ravb: Fix lack of register setting after system resumed for Gen3")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb.h | 4 ++--
+ drivers/net/ethernet/renesas/ravb_main.c | 26 ++++++++++++------------
+ 2 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
+index 47c5377e4f42..a475f54a6b63 100644
+--- a/drivers/net/ethernet/renesas/ravb.h
++++ b/drivers/net/ethernet/renesas/ravb.h
+@@ -1000,8 +1000,8 @@ struct ravb_hw_info {
+ unsigned internal_delay:1; /* AVB-DMAC has internal delays */
+ unsigned tx_counters:1; /* E-MAC has TX counters */
+ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
+- unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */
+- unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */
++ unsigned gptp:1; /* AVB-DMAC has gPTP support */
++ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ };
+
+ struct ravb_private {
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index c89bcdd15f16..dcb18f1e6db0 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1275,7 +1275,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ /* Stop PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_stop(ndev);
+ /* Wait for DMA stopping */
+ error = ravb_stop_dma(ndev);
+@@ -1307,7 +1307,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_device_attach(ndev);
+@@ -1447,7 +1447,7 @@ static int ravb_open(struct net_device *ndev)
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+@@ -1461,7 +1461,7 @@ static int ravb_open(struct net_device *ndev)
+
+ out_ptp_stop:
+ /* Stop PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_stop(ndev);
+ out_free_irq_nc_tx:
+ if (!info->multi_irqs)
+@@ -1509,7 +1509,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_stop(ndev);
+
+ /* Wait for DMA stopping */
+@@ -1544,7 +1544,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+
+ out:
+ /* Initialise PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+@@ -1753,7 +1753,7 @@ static int ravb_close(struct net_device *ndev)
+ ravb_write(ndev, 0, TIC);
+
+ /* Stop PTP Clock driver */
+- if (info->no_ptp_cfg_active)
++ if (info->gptp)
+ ravb_ptp_stop(ndev);
+
+ /* Set the config mode to stop the AVB-DMAC's processes */
+@@ -2019,7 +2019,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+- .ptp_cfg_active = 1,
++ .ccc_gac = 1,
+ };
+
+ static const struct ravb_hw_info ravb_gen2_hw_info = {
+@@ -2038,7 +2038,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .aligned_tx = 1,
+- .no_ptp_cfg_active = 1,
++ .gptp = 1,
+ };
+
+ static const struct of_device_id ravb_match_table[] = {
+@@ -2080,7 +2080,7 @@ static void ravb_set_config_mode(struct net_device *ndev)
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+- if (info->no_ptp_cfg_active) {
++ if (info->gptp) {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+@@ -2301,7 +2301,7 @@ static int ravb_probe(struct platform_device *pdev)
+ INIT_LIST_HEAD(&priv->ts_skb_list);
+
+ /* Initialise PTP Clock driver */
+- if (info->ptp_cfg_active)
++ if (info->ccc_gac)
+ ravb_ptp_init(ndev, pdev);
+
+ /* Debug message level */
+@@ -2349,7 +2349,7 @@ static int ravb_probe(struct platform_device *pdev)
+ priv->desc_bat_dma);
+
+ /* Stop PTP Clock driver */
+- if (info->ptp_cfg_active)
++ if (info->ccc_gac)
+ ravb_ptp_stop(ndev);
+ out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
+@@ -2369,7 +2369,7 @@ static int ravb_remove(struct platform_device *pdev)
+ const struct ravb_hw_info *info = priv->info;
+
+ /* Stop PTP Clock driver */
+- if (info->ptp_cfg_active)
++ if (info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
+ clk_disable_unprepare(priv->refclk);
+--
+2.39.0
+
--- /dev/null
+From 97dd24b56ffc87e83851a6013932020ce6dddc6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jan 2023 14:43:42 +0800
+Subject: riscv/kprobe: Fix instruction simulation of JALR
+
+From: Liao Chang <liaochang1@huawei.com>
+
+[ Upstream commit ca0254998be4d74cf6add70ccfab0d2dbd362a10 ]
+
+Setting a kprobe at 'jalr 1140(ra)' of vfs_write results in the
+following crash:
+
+[ 32.092235] Unable to handle kernel access to user memory without uaccess routines at virtual address 00aaaaaad77b1170
+[ 32.093115] Oops [#1]
+[ 32.093251] Modules linked in:
+[ 32.093626] CPU: 0 PID: 135 Comm: ftracetest Not tainted 6.2.0-rc2-00013-gb0aa5e5df0cb-dirty #16
+[ 32.093985] Hardware name: riscv-virtio,qemu (DT)
+[ 32.094280] epc : ksys_read+0x88/0xd6
+[ 32.094855] ra : ksys_read+0xc0/0xd6
+[ 32.095016] epc : ffffffff801cda80 ra : ffffffff801cdab8 sp : ff20000000d7bdc0
+[ 32.095227] gp : ffffffff80f14000 tp : ff60000080f9cb40 t0 : ffffffff80f13e80
+[ 32.095500] t1 : ffffffff8000c29c t2 : ffffffff800dbc54 s0 : ff20000000d7be60
+[ 32.095716] s1 : 0000000000000000 a0 : ffffffff805a64ae a1 : ffffffff80a83708
+[ 32.095921] a2 : ffffffff80f160a0 a3 : 0000000000000000 a4 : f229b0afdb165300
+[ 32.096171] a5 : f229b0afdb165300 a6 : ffffffff80eeebd0 a7 : 00000000000003ff
+[ 32.096411] s2 : ff6000007ff76800 s3 : fffffffffffffff7 s4 : 00aaaaaad77b1170
+[ 32.096638] s5 : ffffffff80f160a0 s6 : ff6000007ff76800 s7 : 0000000000000030
+[ 32.096865] s8 : 00ffffffc3d97be0 s9 : 0000000000000007 s10: 00aaaaaad77c9410
+[ 32.097092] s11: 0000000000000000 t3 : ffffffff80f13e48 t4 : ffffffff8000c29c
+[ 32.097317] t5 : ffffffff8000c29c t6 : ffffffff800dbc54
+[ 32.097505] status: 0000000200000120 badaddr: 00aaaaaad77b1170 cause: 000000000000000d
+[ 32.098011] [<ffffffff801cdb72>] ksys_write+0x6c/0xd6
+[ 32.098222] [<ffffffff801cdc06>] sys_write+0x2a/0x38
+[ 32.098405] [<ffffffff80003c76>] ret_from_syscall+0x0/0x2
+
+Since rs1 and rd might be the same register, as in 'jalr 1140(ra)',
+the target address must be obtained from rs1 before rd is updated.
+
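+The ordering problem can be reproduced with a trivial register-file
+model (the real code goes through rv_insn_reg_get_val() and
+rv_insn_reg_set_val() on struct pt_regs; the addresses below are made
+up):
+
+  #include <stdio.h>
+
+  #define RA 1    /* x1/ra: both rs1 and rd in 'jalr 1140(ra)' */
+
+  static unsigned long regs[32];
+
+  /* buggy order: rd is written before rs1 is read, so when rd == rs1
+   * the base register is clobbered and the computed target is wrong
+   */
+  static unsigned long jalr_buggy(unsigned long pc, int rd, int rs1, long imm)
+  {
+          regs[rd] = pc + 4;                  /* update link register first */
+          return (regs[rs1] + imm) & ~1UL;    /* ...then read the clobbered base */
+  }
+
+  /* fixed order, as in the patch: read rs1 first, then update rd */
+  static unsigned long jalr_fixed(unsigned long pc, int rd, int rs1, long imm)
+  {
+          unsigned long base = regs[rs1];
+
+          regs[rd] = pc + 4;
+          return (base + imm) & ~1UL;
+  }
+
+  int main(void)
+  {
+          unsigned long pc = 0x80001000UL;
+
+          regs[RA] = 0x80200000UL;
+          printf("buggy target: 0x%lx\n", jalr_buggy(pc, RA, RA, 1140));
+
+          regs[RA] = 0x80200000UL;
+          printf("fixed target: 0x%lx\n", jalr_fixed(pc, RA, RA, 1140));
+          return 0;
+  }
+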
+Fixes: c22b0bcb1dd0 ("riscv: Add kprobes supported")
+Signed-off-by: Liao Chang <liaochang1@huawei.com>
+Reviewed-by: Guo Ren <guoren@kernel.org>
+Link: https://lore.kernel.org/r/20230116064342.2092136-1-liaochang1@huawei.com
+[Palmer: Pick Guo's cleanup]
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/probes/simulate-insn.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index d73e96f6ed7c..a20568bd1f1a 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
+ u32 rd_index = (opcode >> 7) & 0x1f;
+ u32 rs1_index = (opcode >> 15) & 0x1f;
+
+- ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
++ ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
+ if (!ret)
+ return ret;
+
+- ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
++ ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
+ if (!ret)
+ return ret;
+
+--
+2.39.0
+
--- /dev/null
+From f90533938563343b53eca6629f1cf99332a5e1d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jan 2023 17:12:01 +0100
+Subject: scsi: ufs: core: Fix devfreq deadlocks
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit ba81043753fffbc2ad6e0c5ff2659f12ac2f46b4 ]
+
+There is a lock inversion and rwsem read-lock recursion in the devfreq
+target callback which can lead to deadlocks.
+
+Specifically, ufshcd_devfreq_scale() already holds a clk_scaling_lock
+read lock when toggling the write booster, which involves taking the
+dev_cmd mutex before taking another clk_scaling_lock read lock.
+
+This can lead to a deadlock if another thread:
+
+ 1) tries to acquire the dev_cmd and clk_scaling locks in the correct
+ order, or
+
+ 2) takes a clk_scaling write lock before the attempt to take the
+ clk_scaling read lock a second time.
+
+Fix this by dropping the clk_scaling_lock before toggling the write booster
+as was done before commit 0e9d4ca43ba8 ("scsi: ufs: Protect some contexts
+from unexpected clock scaling").
+
+While the devfreq callbacks are already serialised, add a second
+serialising mutex to handle the unlikely case where a callback triggered
+through the devfreq sysfs interface is racing with a request to disable
+clock scaling through the UFS controller 'clkscale_enable' sysfs
+attribute. This could otherwise lead to the write booster being left
+disabled after having disabled clock scaling.
+
+Also take the new mutex in ufshcd_clk_scaling_allow() to make sure that any
+pending write booster update has completed on return.
+
+Note that this currently only affects Qualcomm platforms since commit
+87bd05016a64 ("scsi: ufs: core: Allow host driver to disable wb toggling
+during clock scaling").
+
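+The resulting lock ordering, sketched with pthread primitives standing
+in for wb_mutex and clk_scaling_lock (the scaling step is left empty;
+this is only a model of the ordering, not the driver code):
+
+  #include <pthread.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* stand-ins for hba->wb_mutex and hba->clk_scaling_lock */
+  static pthread_mutex_t wb_mutex = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_rwlock_t clk_scaling_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+  static void wb_toggle(bool scale_up)
+  {
+          /* in the driver this issues a device command, which itself
+           * takes clk_scaling_lock for reading, so it must run after
+           * the write lock has been dropped
+           */
+          printf("write booster %s\n", scale_up ? "on" : "off");
+  }
+
+  static int devfreq_scale(bool scale_up)
+  {
+          pthread_mutex_lock(&wb_mutex);            /* serialise callers */
+          pthread_rwlock_wrlock(&clk_scaling_lock); /* quiesce requests  */
+
+          /* ... scale gears and clocks here ... */
+
+          pthread_rwlock_unlock(&clk_scaling_lock); /* drop before toggle */
+          wb_toggle(scale_up);                      /* no rwsem held now  */
+          pthread_mutex_unlock(&wb_mutex);
+          return 0;
+  }
+
+  int main(void)
+  {
+          devfreq_scale(true);
+          devfreq_scale(false);
+          return 0;
+  }
+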
+The lock inversion (i.e. 1 above) was reported by lockdep as:
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 6.1.0-next-20221216 #211 Not tainted
+ ------------------------------------------------------
+ kworker/u16:2/71 is trying to acquire lock:
+ ffff076280ba98a0 (&hba->dev_cmd.lock){+.+.}-{3:3}, at: ufshcd_query_flag+0x50/0x1c0
+
+ but task is already holding lock:
+ ffff076280ba9cf0 (&hba->clk_scaling_lock){++++}-{3:3}, at: ufshcd_devfreq_scale+0x2b8/0x380
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #1 (&hba->clk_scaling_lock){++++}-{3:3}:
+ lock_acquire+0x68/0x90
+ down_read+0x58/0x80
+ ufshcd_exec_dev_cmd+0x70/0x2c0
+ ufshcd_verify_dev_init+0x68/0x170
+ ufshcd_probe_hba+0x398/0x1180
+ ufshcd_async_scan+0x30/0x320
+ async_run_entry_fn+0x34/0x150
+ process_one_work+0x288/0x6c0
+ worker_thread+0x74/0x450
+ kthread+0x118/0x120
+ ret_from_fork+0x10/0x20
+
+ -> #0 (&hba->dev_cmd.lock){+.+.}-{3:3}:
+ __lock_acquire+0x12a0/0x2240
+ lock_acquire.part.0+0xcc/0x220
+ lock_acquire+0x68/0x90
+ __mutex_lock+0x98/0x430
+ mutex_lock_nested+0x2c/0x40
+ ufshcd_query_flag+0x50/0x1c0
+ ufshcd_query_flag_retry+0x64/0x100
+ ufshcd_wb_toggle+0x5c/0x120
+ ufshcd_devfreq_scale+0x2c4/0x380
+ ufshcd_devfreq_target+0xf4/0x230
+ devfreq_set_target+0x84/0x2f0
+ devfreq_update_target+0xc4/0xf0
+ devfreq_monitor+0x38/0x1f0
+ process_one_work+0x288/0x6c0
+ worker_thread+0x74/0x450
+ kthread+0x118/0x120
+ ret_from_fork+0x10/0x20
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+ CPU0 CPU1
+ ---- ----
+ lock(&hba->clk_scaling_lock);
+ lock(&hba->dev_cmd.lock);
+ lock(&hba->clk_scaling_lock);
+ lock(&hba->dev_cmd.lock);
+
+ *** DEADLOCK ***
+
+Fixes: 0e9d4ca43ba8 ("scsi: ufs: Protect some contexts from unexpected clock scaling")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Can Guo <quic_cang@quicinc.com>
+Tested-by: Andrew Halaney <ahalaney@redhat.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20230116161201.16923-1-johan+linaro@kernel.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 26 ++++++++++++++------------
+ drivers/scsi/ufs/ufshcd.h | 2 ++
+ 2 files changed, 16 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 0b06223f5714..120831428ec6 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1185,12 +1185,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ ufshcd_scsi_unblock_requests(hba);
+ goto out;
+ }
+@@ -1202,12 +1204,15 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+ return ret;
+ }
+
+-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+ {
+- if (writelock)
+- up_write(&hba->clk_scaling_lock);
+- else
+- up_read(&hba->clk_scaling_lock);
++ up_write(&hba->clk_scaling_lock);
++
++ /* Enable Write Booster if we have scaled up else disable it */
++ ufshcd_wb_toggle(hba, scale_up);
++
++ mutex_unlock(&hba->wb_mutex);
++
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ }
+@@ -1224,7 +1229,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ {
+ int ret = 0;
+- bool is_writelock = true;
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+@@ -1253,13 +1257,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ }
+ }
+
+- /* Enable Write Booster if we have scaled up else disable it */
+- downgrade_write(&hba->clk_scaling_lock);
+- is_writelock = false;
+- ufshcd_wb_toggle(hba, scale_up);
+-
+ out_unprepare:
+- ufshcd_clock_scaling_unprepare(hba, is_writelock);
++ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ return ret;
+ }
+
+@@ -5919,9 +5918,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+
+ static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+ {
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ }
+
+ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+@@ -9480,6 +9481,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ /* Initialize mutex for exception event control */
+ mutex_init(&hba->ee_ctrl_mutex);
+
++ mutex_init(&hba->wb_mutex);
+ init_rwsem(&hba->clk_scaling_lock);
+
+ ufshcd_init_clk_gating(hba);
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index d470a52ff24c..c8513cc6c2bd 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -763,6 +763,7 @@ struct ufs_hba_monitor {
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
++ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @crypto_capabilities: Content of crypto capabilities register (0x100)
+ * @crypto_cap_array: Array of crypto capabilities
+@@ -892,6 +893,7 @@ struct ufs_hba {
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
++ struct mutex wb_mutex;
+ struct rw_semaphore clk_scaling_lock;
+ unsigned char desc_size[QUERY_DESC_IDN_MAX];
+ atomic_t scsi_block_reqs_cnt;
+--
+2.39.0
+
--- /dev/null
+From be7a8689d27870a8d5038c64ed9f2b9009d9671c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jan 2023 14:59:33 -0300
+Subject: sctp: fail if no bound addresses can be used for a given scope
+
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+
+[ Upstream commit 458e279f861d3f61796894cd158b780765a1569f ]
+
+Currently, if you bind the socket to something like:
+ servaddr.sin6_family = AF_INET6;
+ servaddr.sin6_port = htons(0);
+ servaddr.sin6_scope_id = 0;
+ inet_pton(AF_INET6, "::1", &servaddr.sin6_addr);
+
+And then request a connect to:
+ connaddr.sin6_family = AF_INET6;
+ connaddr.sin6_port = htons(20000);
+ connaddr.sin6_scope_id = if_nametoindex("lo");
+ inet_pton(AF_INET6, "fe88::1", &connaddr.sin6_addr);
+
+What the stack does is:
+ - bind the socket
+ - create a new asoc
+ - to handle the connect
+   - copy the addresses that can be used for the given scope
+   - try to connect
+
+But the copy returns 0 addresses, and the effect is that it ends up
+trying to connect as if the socket wasn't bound, which is not the
+desired behavior. This unexpected behavior also allows KASLR leaks
+through SCTP diag interface.
+
+The fix here, then, is to bail out if copying the addresses that can
+be used for the scope used in connect() returns 0 addresses. This is
+what TCP does with a similar reproducer.
+
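+A minimal reproducer sketch along the lines above (error handling
+trimmed, needs SCTP support in the kernel; after the fix the connect()
+should fail instead of proceeding as if the socket were unbound):
+
+  #include <arpa/inet.h>
+  #include <net/if.h>
+  #include <netinet/in.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/socket.h>
+
+  int main(void)
+  {
+          struct sockaddr_in6 servaddr, connaddr;
+          int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
+
+          if (fd < 0) {
+                  perror("socket");
+                  return 1;
+          }
+
+          memset(&servaddr, 0, sizeof(servaddr));
+          servaddr.sin6_family = AF_INET6;
+          servaddr.sin6_port = htons(0);
+          servaddr.sin6_scope_id = 0;
+          inet_pton(AF_INET6, "::1", &servaddr.sin6_addr);
+          if (bind(fd, (struct sockaddr *)&servaddr, sizeof(servaddr)))
+                  perror("bind");
+
+          memset(&connaddr, 0, sizeof(connaddr));
+          connaddr.sin6_family = AF_INET6;
+          connaddr.sin6_port = htons(20000);
+          connaddr.sin6_scope_id = if_nametoindex("lo");
+          inet_pton(AF_INET6, "fe88::1", &connaddr.sin6_addr);
+          if (connect(fd, (struct sockaddr *)&connaddr, sizeof(connaddr)))
+                  perror("connect");      /* expected to fail after the fix */
+
+          return 0;
+  }
+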
+Reported-by: Pietro Borrello <borrello@diag.uniroma1.it>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Link: https://lore.kernel.org/r/9fcd182f1099f86c6661f3717f63712ddd1c676c.1674496737.git.marcelo.leitner@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/bind_addr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index 59e653b528b1..6b95d3ba8fe1 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ }
+ }
+
++ /* If somehow no addresses were found that can be used with this
++ * scope, it's an error.
++ */
++ if (list_empty(&dest->address_list))
++ error = -ENETUNREACH;
++
+ out:
+ if (error)
+ sctp_bind_addr_clean(dest);
+--
+2.39.0
+
arm-9280-1-mm-fix-warning-on-phys_addr_t-to-void-pointer-assignment.patch
edac-device-respect-any-driver-supplied-workqueue-polling-value.patch
edac-qcom-do-not-pass-llcc_driv_data-as-edac_device_ctl_info-s-pvt_info.patch
+net-mana-fix-irq-name-add-pci-and-queue-number.patch
+scsi-ufs-core-fix-devfreq-deadlocks.patch
+i2c-designware-use-casting-of-u64-in-clock-multiplic.patch
+netlink-prevent-potential-spectre-v1-gadgets.patch
+net-fix-uaf-in-netns-ops-registration-error-path.patch
+drm-i915-selftest-fix-intel_selftest_modify_policy-a.patch
+netfilter-nft_set_rbtree-switch-to-node-list-walk-fo.patch
+netfilter-nft_set_rbtree-skip-elements-in-transactio.patch
+netlink-annotate-data-races-around-nlk-portid.patch
+netlink-annotate-data-races-around-dst_portid-and-ds.patch
+netlink-annotate-data-races-around-sk_state.patch
+ipv4-prevent-potential-spectre-v1-gadget-in-ip_metri.patch
+ipv4-prevent-potential-spectre-v1-gadget-in-fib_metr.patch
+netfilter-conntrack-fix-vtag-checks-for-abort-shutdo.patch
+netfilter-conntrack-fix-bug-in-for_each_sctp_chunk.patch
+netrom-fix-use-after-free-of-a-listening-socket.patch
+net-sched-sch_taprio-do-not-schedule-in-taprio_reset.patch
+sctp-fail-if-no-bound-addresses-can-be-used-for-a-gi.patch
+riscv-kprobe-fix-instruction-simulation-of-jalr.patch
+nvme-fix-passthrough-csi-check.patch
+gpio-mxc-unlock-on-error-path-in-mxc_flip_edge.patch
+ravb-rename-no_ptp_cfg_active-and-ptp_cfg_active-var.patch
+net-ravb-fix-lack-of-register-setting-after-system-r.patch
+net-ravb-fix-possible-hang-if-ris2_qff1-happen.patch
+net-mctp-mark-socks-as-dead-on-unhash-prevent-re-add.patch
+thermal-intel-int340x-add-locking-to-int340x_thermal.patch
+net-tg3-resolve-deadlock-in-tg3_reset_task-during-ee.patch
+net-mdio-mux-meson-g12a-force-internal-phy-off-on-mu.patch
+treewide-fix-up-files-incorrectly-marked-executable.patch
+tools-gpio-fix-c-option-of-gpio-event-mon.patch
--- /dev/null
+From d56449a9d0464549de4a19c1d50247cd5c420d22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 13:17:42 +0100
+Subject: thermal: intel: int340x: Add locking to
+ int340x_thermal_get_trip_type()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit acd7e9ee57c880b99671dd99680cb707b7b5b0ee ]
+
+In order to prevent int340x_thermal_get_trip_type() from possibly
+racing with int340x_thermal_read_trips() invoked by int3403_notify()
+add locking to it in analogy with int340x_thermal_get_trip_temp().
+
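+The shape of the change, in user-space terms (a pthread mutex stands
+in for d->trip_mutex and the trip table is made up; the point is that
+every exit path now funnels through a single unlock):
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  enum trip_type { TRIP_PASSIVE, TRIP_HOT, TRIP_CRITICAL };
+
+  static pthread_mutex_t trip_mutex = PTHREAD_MUTEX_INITIALIZER;
+  static enum trip_type trip_table[3] = {
+          TRIP_PASSIVE, TRIP_HOT, TRIP_CRITICAL,
+  };
+
+  /* every lookup happens under the lock, and the invalid-trip case
+   * that used to 'return -EINVAL' directly now sets ret and falls
+   * through, so taking the lock cannot leave it held on error
+   */
+  static int get_trip_type(int trip, enum trip_type *type)
+  {
+          int ret = 0;
+
+          pthread_mutex_lock(&trip_mutex);
+
+          if (trip < 0 || trip >= 3)
+                  ret = -1;               /* analog of -EINVAL */
+          else
+                  *type = trip_table[trip];
+
+          pthread_mutex_unlock(&trip_mutex);
+
+          return ret;
+  }
+
+  int main(void)
+  {
+          enum trip_type type;
+
+          if (!get_trip_type(1, &type))
+                  printf("trip 1: type %d\n", type);
+          if (get_trip_type(7, &type))
+                  printf("trip 7: invalid\n");
+          return 0;
+  }
+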
+Fixes: 6757a7abe47b ("thermal: intel: int340x: Protect trip temperature from concurrent updates")
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../intel/int340x_thermal/int340x_thermal_zone.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+index 852f6c579af5..0a4eaa307156 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -81,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ enum thermal_trip_type *type)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_type)
+ return d->override_ops->get_trip_type(zone, trip, type);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *type = THERMAL_TRIP_PASSIVE;
+ else if (trip == d->crt_trip_id)
+@@ -103,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+--
+2.39.0
+
--- /dev/null
+From aa8867b21f96a6af61cbf85a097d17202122e5c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 15:10:33 +0200
+Subject: tools: gpio: fix -c option of gpio-event-mon
+
+From: Ivo Borisov Shopov <ivoshopov@gmail.com>
+
+[ Upstream commit 677d85e1a1ee69fa05ccea83847309484be3781c ]
+
+Following line should listen for a rising edge and exit after the first
+one since '-c 1' is provided.
+
+ # gpio-event-mon -n gpiochip1 -o 0 -r -c 1
+
+It works with kernel 4.19 but it doesn't work with 5.10. In 5.10 the
+above command doesn't exit after the first rising edge; it keeps
+listening for events forever. The '-c 1' is not taken into account.
+The problem is in commit 62757c32d5db ("tools: gpio: add multi-line
+monitoring to gpio-event-mon").
+Before this commit the iterator 'i' in monitor_device() is used for
+counting events (loops). In the case of the above command (-c 1) it
+should start from 0, be incremented only once, hit the 'break'
+statement and exit the process. But after the above commit the
+counting doesn't start from 0; it starts from 1 when we listen on one
+line. This is because 'i' is used for one more purpose, counting the
+lines (num_lines), and it isn't restored to 0 after the following
+code:
+
+ for (i = 0; i < num_lines; i++)
+ gpiotools_set_bit(&values.mask, i);
+
+Restore the initial value of the iterator to 0 so that the counting of
+loops works in all cases.
+
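+The iterator reuse, boiled down to a small sketch (the loop structure
+mirrors monitor_device(); the values are made up):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int num_lines = 1;   /* one GPIO line requested  */
+          unsigned int loops = 1;       /* -c 1: stop after 1 event */
+          unsigned int i;
+
+          /* first use of 'i': per-line setup, leaves i == num_lines */
+          for (i = 0; i < num_lines; i++)
+                  printf("set up line %u\n", i);
+
+          i = 0;  /* the fix: start event counting from 0 again */
+
+          while (1) {
+                  printf("got event %u\n", i);
+                  i++;
+                  if (i == loops)
+                          break;  /* without the reset above, i starts
+                                   * at num_lines, so with -c 1 this
+                                   * never matches and the loop never
+                                   * exits */
+          }
+
+          printf("exiting after %u event(s)\n", i);
+          return 0;
+  }
+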
+Fixes: 62757c32d5db ("tools: gpio: add multi-line monitoring to gpio-event-mon")
+Signed-off-by: Ivo Borisov Shopov <ivoshopov@gmail.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+[Bartosz: tweak the commit message]
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/gpio/gpio-event-mon.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index a2b233fdb572..667019990982 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -86,6 +86,7 @@ int monitor_device(const char *device_name,
+ gpiotools_test_bit(values.bits, i));
+ }
+
++ i = 0;
+ while (1) {
+ struct gpio_v2_line_event event;
+
+--
+2.39.0
+
--- /dev/null
+From 7cd366fb4fb17edc68a278117028cd12ca7e62de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 10:05:39 -0800
+Subject: treewide: fix up files incorrectly marked executable
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 262b42e02d1e0b5ad1b33e9b9842e178c16231de ]
+
+I'm not exactly clear on what strange workflow causes people to do it,
+but clearly occasionally some files end up being committed as executable
+even though they clearly aren't.
+
+This is a reprise of commit 90fda63fa115 ("treewide: fix up files
+incorrectly marked executable"), just with a different set of files (but
+with the same trivial shell scripting).
+
+So apparently we need to re-do this every five years or so, and Joe
+needs to just keep reminding me to do so ;)
+
+Reported-by: Joe Perches <joe@perches.com>
+Fixes: 523375c943e5 ("drm/vmwgfx: Port vmwgfx to arm64")
+Fixes: 5c439937775d ("ASoC: codecs: add support for ES8326")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+ mode change 100755 => 100644 drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+old mode 100755
+new mode 100644
+--
+2.39.0
+