--- /dev/null
+From 20dd29548c919784ed74aeb1edf3215933528bda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 11:39:26 -0400
+Subject: arch/arm64: Fix topology initialization for core scheduling
+
+From: Phil Auld <pauld@redhat.com>
+
+[ Upstream commit 5524cbb1bfcdff0cad0aaa9f94e6092002a07259 ]
+
+Arm64 systems rely on store_cpu_topology() to call update_siblings_masks()
+to transfer the topology to the various cpu masks. This needs to be done
+before the call to notify_cpu_starting() which tells the scheduler about
+each cpu found, otherwise the core scheduling data structures are setup
+in a way that does not match the actual topology.
+
+With smt_mask not setup correctly we bail on `cpumask_weight(smt_mask) == 1`
+for !leaders in:
+
+ notify_cpu_starting()
+ cpuhp_invoke_callback_range()
+ sched_cpu_starting()
+ sched_core_cpu_starting()
+
+which leads to rq->core not being correctly set for !leader-rq's.
+
+Without this change stress-ng (which enables core scheduling in its prctl
+tests in newer versions -- i.e. with PR_SCHED_CORE support) causes a warning
+and then a crash (trimmed for legibility):
+
+[ 1853.805168] ------------[ cut here ]------------
+[ 1853.809784] task_rq(b)->core != rq->core
+[ 1853.809792] WARNING: CPU: 117 PID: 0 at kernel/sched/fair.c:11102 cfs_prio_less+0x1b4/0x1c4
+...
+[ 1854.015210] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000010
+...
+[ 1854.231256] Call trace:
+[ 1854.233689] pick_next_task+0x3dc/0x81c
+[ 1854.237512] __schedule+0x10c/0x4cc
+[ 1854.240988] schedule_idle+0x34/0x54
+
+Fixes: 9edeaea1bc45 ("sched: Core-wide rq->lock")
+Signed-off-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Link: https://lore.kernel.org/r/20220331153926.25742-1-pauld@redhat.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 27df5c1e6baa..3b46041f2b97 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void)
+ * Log the CPU info before it is marked online and might get read.
+ */
+ cpuinfo_store_cpu();
++ store_cpu_topology(cpu);
+
+ /*
+ * Enable GIC and timers.
+@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void)
+
+ ipi_setup(cpu);
+
+- store_cpu_topology(cpu);
+ numa_add_cpu(cpu);
+
+ /*
+--
+2.35.1
+
--- /dev/null
+From d5144ddc76470bd0d2f60e20e2ae073d8509a28d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 20:21:12 -0400
+Subject: bnxt_en: Prevent XDP redirect from running when stopping TX queue
+
+From: Ray Jui <ray.jui@broadcom.com>
+
+[ Upstream commit 27d4073f8d9af0340362554414f4961643a4f4de ]
+
+Add checks in the XDP redirect callback to prevent XDP from running when
+the TX ring is undergoing shutdown.
+
+Also remove redundant checks in the XDP redirect callback to validate the
+txr and the flag that indicates the ring supports XDP. The modulo
+arithmetic on 'tx_nr_rings_xdp' already guarantees the derived TX
+ring is an XDP ring. txr is also guaranteed to be valid after checking
+BNXT_STATE_OPEN and within RCU grace period.
+
+Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support")
+Reviewed-by: Vladimir Olovyannikov <vladimir.olovyannikov@broadcom.com>
+Signed-off-by: Ray Jui <ray.jui@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index c0541ff00ac8..03b1d6c04504 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -229,14 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ ring = smp_processor_id() % bp->tx_nr_rings_xdp;
+ txr = &bp->tx_ring[ring];
+
++ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
++ return -EINVAL;
++
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdp = frames[i];
+
+- if (!txr || !bnxt_tx_avail(bp, txr) ||
+- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
++ if (!bnxt_tx_avail(bp, txr))
+ break;
+
+ mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
+--
+2.35.1
+
--- /dev/null
+From a86f5d6a2ad53fc0431610378b879b2561342238 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 20:21:11 -0400
+Subject: bnxt_en: reserve space inside receive page for skb_shared_info
+
+From: Andy Gospodarek <gospo@broadcom.com>
+
+[ Upstream commit facc173cf700e55b2ad249ecbd3a7537f7315691 ]
+
+Insufficient space was being reserved in the page used for packet
+reception, so the interface MTU could be set too large to still have
+room for the contents of the packet when doing XDP redirect. This
+resulted in the following message when redirecting a packet between
+3520 and 3822 bytes with an MTU of 3822:
+
+[311815.561880] XDP_WARN: xdp_update_frame_from_buff(line:200): Driver BUG: missing reserved tailroom
+
+Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support")
+Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index caf66a35d923..d57bff46b587 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -593,7 +593,8 @@ struct nqe_cn {
+ #define BNXT_MAX_MTU 9500
+ #define BNXT_MAX_PAGE_MODE_MTU \
+ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
+- XDP_PACKET_HEADROOM)
++ XDP_PACKET_HEADROOM - \
++ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+
+ #define BNXT_MIN_PKT_SIZE 52
+
+--
+2.35.1
+
--- /dev/null
+From 9e629688f3d3719b148539c0aad5686dab28274a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 20:21:10 -0400
+Subject: bnxt_en: Synchronize tx when xdp redirects happen on same ring
+
+From: Pavan Chebbi <pavan.chebbi@broadcom.com>
+
+[ Upstream commit 4f81def272de17dc4bbd89ac38f49b2676c9b3d2 ]
+
+If there are more CPUs than the number of TX XDP rings, multiple XDP
+redirects can select the same TX ring based on the CPU on which
+XDP redirect is called. Add locking when needed and use static
+key to decide whether to take the lock.
+
+Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support")
+Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +++++++
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 8 ++++++++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2 ++
+ 4 files changed, 19 insertions(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index b1c98d1408b8..6af0ae1d0c46 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3224,6 +3224,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
+ }
+ qidx = bp->tc_to_qidx[j];
+ ring->queue_id = bp->q_info[qidx].queue_id;
++ spin_lock_init(&txr->xdp_tx_lock);
+ if (i < bp->tx_nr_rings_xdp)
+ continue;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+@@ -10294,6 +10295,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ if (irq_re_init)
+ udp_tunnel_nic_reset_ntf(bp->dev);
+
++ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
++ if (!static_key_enabled(&bnxt_xdp_locking_key))
++ static_branch_enable(&bnxt_xdp_locking_key);
++ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
++ static_branch_disable(&bnxt_xdp_locking_key);
++ }
+ set_bit(BNXT_STATE_OPEN, &bp->state);
+ bnxt_enable_int(bp);
+ /* Enable TX queues */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 666fc1e7a7d2..caf66a35d923 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -800,6 +800,8 @@ struct bnxt_tx_ring_info {
+ u32 dev_state;
+
+ struct bnxt_ring_struct tx_ring_struct;
++ /* Synchronize simultaneous xdp_xmit on same ring */
++ spinlock_t xdp_tx_lock;
+ };
+
+ #define BNXT_LEGACY_COAL_CMPL_PARAMS \
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index 52fad0fdeacf..c0541ff00ac8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -20,6 +20,8 @@
+ #include "bnxt.h"
+ #include "bnxt_xdp.h"
+
++DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
++
+ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len)
+@@ -227,6 +229,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ ring = smp_processor_id() % bp->tx_nr_rings_xdp;
+ txr = &bp->tx_ring[ring];
+
++ if (static_branch_unlikely(&bnxt_xdp_locking_key))
++ spin_lock(&txr->xdp_tx_lock);
++
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdp = frames[i];
+
+@@ -250,6 +255,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ }
+
++ if (static_branch_unlikely(&bnxt_xdp_locking_key))
++ spin_unlock(&txr->xdp_tx_lock);
++
+ return nxmit;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+index 0df40c3beb05..067bb5e821f5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+@@ -10,6 +10,8 @@
+ #ifndef BNXT_XDP_H
+ #define BNXT_XDP_H
+
++DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
++
+ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len);
+--
+2.35.1
+
--- /dev/null
+From ccfe27b673b8bf146cbc6cf2bf1f8f6b7a656ae6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 15:41:12 +0300
+Subject: bpf: Support dual-stack sockets in bpf_tcp_check_syncookie
+
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+
+[ Upstream commit 2e8702cc0cfa1080f29fd64003c00a3e24ac38de ]
+
+bpf_tcp_gen_syncookie looks at the IP version in the IP header and
+validates the address family of the socket. It supports IPv4 packets in
+AF_INET6 dual-stack sockets.
+
+On the other hand, bpf_tcp_check_syncookie looks only at the address
+family of the socket, ignoring the real IP version in headers, and
+validates only the packet size. This implementation has some drawbacks:
+
+1. Packets are not validated properly, allowing a BPF program to trick
+ bpf_tcp_check_syncookie into handling an IPv6 packet on an IPv4
+ socket.
+
+2. Dual-stack sockets fail the checks on IPv4 packets. IPv4 clients end
+ up receiving a SYNACK with the cookie, but the following ACK gets
+ dropped.
+
+This patch fixes these issues by changing the checks in
+bpf_tcp_check_syncookie to match the ones in bpf_tcp_gen_syncookie. IP
+version from the header is taken into account, and it is validated
+properly with address family.
+
+Fixes: 399040847084 ("bpf: add helper to check for a valid SYN cookie")
+Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Acked-by: Arthur Fabre <afabre@cloudflare.com>
+Link: https://lore.kernel.org/bpf/20220406124113.2795730-1-maximmi@nvidia.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 82fcb7533663..48fc95626597 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6777,24 +6777,33 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
+ if (!th->ack || th->rst || th->syn)
+ return -ENOENT;
+
++ if (unlikely(iph_len < sizeof(struct iphdr)))
++ return -EINVAL;
++
+ if (tcp_synq_no_recent_overflow(sk))
+ return -ENOENT;
+
+ cookie = ntohl(th->ack_seq) - 1;
+
+- switch (sk->sk_family) {
+- case AF_INET:
+- if (unlikely(iph_len < sizeof(struct iphdr)))
++ /* Both struct iphdr and struct ipv6hdr have the version field at the
++ * same offset so we can cast to the shorter header (struct iphdr).
++ */
++ switch (((struct iphdr *)iph)->version) {
++ case 4:
++ if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
+ return -EINVAL;
+
+ ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
+ break;
+
+ #if IS_BUILTIN(CONFIG_IPV6)
+- case AF_INET6:
++ case 6:
+ if (unlikely(iph_len < sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
++ if (sk->sk_family != AF_INET6)
++ return -EINVAL;
++
+ ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
+ break;
+ #endif /* CONFIG_IPV6 */
+--
+2.35.1
+
--- /dev/null
+From 13669d6f18517e178be3e9635f24207dd196ceda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 15:01:50 -0300
+Subject: cifs: fix potential race with cifsd thread
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+[ Upstream commit 687127c81ad32c8900a3fedbc7ed8f686ca95855 ]
+
+To avoid racing with demultiplex thread while it is handling data on
+socket, use cifs_signal_cifsd_for_reconnect() helper for marking
+current server to reconnect and let the demultiplex thread handle the
+rest.
+
+Fixes: dca65818c80c ("cifs: use a different reconnect helper for non-cifsd threads")
+Reviewed-by: Enzo Matsumiya <ematsumiya@suse.de>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/connect.c | 2 +-
+ fs/cifs/netmisc.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index d6f8ccc7bfe2..0270b412f801 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4465,7 +4465,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
+ */
+ if (rc && server->current_fullpath != server->origin_fullpath) {
+ server->current_fullpath = server->origin_fullpath;
+- cifs_reconnect(tcon->ses->server, true);
++ cifs_signal_cifsd_for_reconnect(server, true);
+ }
+
+ dfs_cache_free_tgts(tl);
+diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
+index ebe236b9d9f5..235aa1b395eb 100644
+--- a/fs/cifs/netmisc.c
++++ b/fs/cifs/netmisc.c
+@@ -896,7 +896,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
+ if (class == ERRSRV && code == ERRbaduid) {
+ cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
+ code);
+- cifs_reconnect(mid->server, false);
++ cifs_signal_cifsd_for_reconnect(mid->server, false);
+ }
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 178c8d0aa4e3bd2665cd811d008a451e43a90cd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 12:53:36 +0000
+Subject: dpaa2-ptp: Fix refcount leak in dpaa2_ptp_probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 2b04bd4f03bba021959ca339314f6739710f0954 ]
+
+This node pointer is returned by of_find_compatible_node() with
+refcount incremented. Calling of_node_put() to avoid the refcount leak.
+
+Fixes: d346c9e86d86 ("dpaa2-ptp: reuse ptp_qoriq driver")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Link: https://lore.kernel.org/r/20220404125336.13427-1-linmq006@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+index 5f5f8c53c4a0..c8cb541572ff 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+@@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+ base = of_iomap(node, 0);
+ if (!base) {
+ err = -ENOMEM;
+- goto err_close;
++ goto err_put;
+ }
+
+ err = fsl_mc_allocate_irqs(mc_dev);
+@@ -210,6 +210,8 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+ fsl_mc_free_irqs(mc_dev);
+ err_unmap:
+ iounmap(base);
++err_put:
++ of_node_put(node);
+ err_close:
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ err_free_mcp:
+--
+2.35.1
+
--- /dev/null
+From f4c881fb3fe6b9a5f1b05214d63690e844329b75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 21:04:43 +0200
+Subject: drbd: Fix five use after free bugs in get_initial_state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lv Yunlong <lyl2019@mail.ustc.edu.cn>
+
+[ Upstream commit aadb22ba2f656581b2f733deb3a467c48cc618f6 ]
+
+In get_initial_state, it calls notify_initial_state_done(skb,..) if
+cb->args[5]==1. If genlmsg_put() failed in notify_initial_state_done(),
+the skb will be freed by nlmsg_free(skb).
+Then get_initial_state will goto out and the freed skb will be used by
+return value skb->len, which is a uaf bug.
+
+What's worse, the same problem goes even further: skb can also be
+freed in the notify_*_state_change -> notify_*_state calls below.
+Thus 4 additional uaf bugs happened.
+
+My patch lets the problem callee functions: notify_initial_state_done
+and notify_*_state_change return an error code if errors happen.
+So that the error codes could be propagated and the uaf bugs can be avoid.
+
+v2 reports a compilation warning. This v3 fixed this warning and built
+successfully in my local environment with no additional warnings.
+v2: https://lore.kernel.org/patchwork/patch/1435218/
+
+Fixes: a29728463b254 ("drbd: Backport the "events2" command")
+Signed-off-by: Lv Yunlong <lyl2019@mail.ustc.edu.cn>
+Reviewed-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/drbd/drbd_int.h | 8 ++---
+ drivers/block/drbd/drbd_nl.c | 41 ++++++++++++++++----------
+ drivers/block/drbd/drbd_state.c | 18 +++++------
+ drivers/block/drbd/drbd_state_change.h | 8 ++---
+ 4 files changed, 42 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
+index f27d5b0f9a0b..a98bfcf4a5f0 100644
+--- a/drivers/block/drbd/drbd_int.h
++++ b/drivers/block/drbd/drbd_int.h
+@@ -1642,22 +1642,22 @@ struct sib_info {
+ };
+ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
+
+-extern void notify_resource_state(struct sk_buff *,
++extern int notify_resource_state(struct sk_buff *,
+ unsigned int,
+ struct drbd_resource *,
+ struct resource_info *,
+ enum drbd_notification_type);
+-extern void notify_device_state(struct sk_buff *,
++extern int notify_device_state(struct sk_buff *,
+ unsigned int,
+ struct drbd_device *,
+ struct device_info *,
+ enum drbd_notification_type);
+-extern void notify_connection_state(struct sk_buff *,
++extern int notify_connection_state(struct sk_buff *,
+ unsigned int,
+ struct drbd_connection *,
+ struct connection_info *,
+ enum drbd_notification_type);
+-extern void notify_peer_device_state(struct sk_buff *,
++extern int notify_peer_device_state(struct sk_buff *,
+ unsigned int,
+ struct drbd_peer_device *,
+ struct peer_device_info *,
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 44ccf8b4f4b2..69184cf17b6a 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -4617,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
+ return drbd_notification_header_to_skb(msg, &nh, true);
+ }
+
+-void notify_resource_state(struct sk_buff *skb,
++int notify_resource_state(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_resource *resource,
+ struct resource_info *resource_info,
+@@ -4659,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb,
+ if (err && err != -ESRCH)
+ goto failed;
+ }
+- return;
++ return 0;
+
+ nla_put_failure:
+ nlmsg_free(skb);
+ failed:
+ drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
+ err, seq);
++ return err;
+ }
+
+-void notify_device_state(struct sk_buff *skb,
++int notify_device_state(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_device *device,
+ struct device_info *device_info,
+@@ -4708,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb,
+ if (err && err != -ESRCH)
+ goto failed;
+ }
+- return;
++ return 0;
+
+ nla_put_failure:
+ nlmsg_free(skb);
+ failed:
+ drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
+ err, seq);
++ return err;
+ }
+
+-void notify_connection_state(struct sk_buff *skb,
++int notify_connection_state(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_connection *connection,
+ struct connection_info *connection_info,
+@@ -4757,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb,
+ if (err && err != -ESRCH)
+ goto failed;
+ }
+- return;
++ return 0;
+
+ nla_put_failure:
+ nlmsg_free(skb);
+ failed:
+ drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
+ err, seq);
++ return err;
+ }
+
+-void notify_peer_device_state(struct sk_buff *skb,
++int notify_peer_device_state(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_peer_device *peer_device,
+ struct peer_device_info *peer_device_info,
+@@ -4807,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb,
+ if (err && err != -ESRCH)
+ goto failed;
+ }
+- return;
++ return 0;
+
+ nla_put_failure:
+ nlmsg_free(skb);
+ failed:
+ drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
+ err, seq);
++ return err;
+ }
+
+ void notify_helper(enum drbd_notification_type type,
+@@ -4864,7 +4868,7 @@ void notify_helper(enum drbd_notification_type type,
+ err, seq);
+ }
+
+-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
++static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+ {
+ struct drbd_genlmsghdr *dh;
+ int err;
+@@ -4878,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+ if (nla_put_notification_header(skb, NOTIFY_EXISTS))
+ goto nla_put_failure;
+ genlmsg_end(skb, dh);
+- return;
++ return 0;
+
+ nla_put_failure:
+ nlmsg_free(skb);
+ pr_err("Error %d sending event. Event seq:%u\n", err, seq);
++ return err;
+ }
+
+ static void free_state_changes(struct list_head *list)
+@@ -4909,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+ unsigned int seq = cb->args[2];
+ unsigned int n;
+ enum drbd_notification_type flags = 0;
++ int err = 0;
+
+ /* There is no need for taking notification_mutex here: it doesn't
+ matter if the initial state events mix with later state chage
+@@ -4917,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+
+ cb->args[5]--;
+ if (cb->args[5] == 1) {
+- notify_initial_state_done(skb, seq);
++ err = notify_initial_state_done(skb, seq);
+ goto out;
+ }
+ n = cb->args[4]++;
+ if (cb->args[4] < cb->args[3])
+ flags |= NOTIFY_CONTINUES;
+ if (n < 1) {
+- notify_resource_state_change(skb, seq, state_change->resource,
++ err = notify_resource_state_change(skb, seq, state_change->resource,
+ NOTIFY_EXISTS | flags);
+ goto next;
+ }
+ n--;
+ if (n < state_change->n_connections) {
+- notify_connection_state_change(skb, seq, &state_change->connections[n],
++ err = notify_connection_state_change(skb, seq, &state_change->connections[n],
+ NOTIFY_EXISTS | flags);
+ goto next;
+ }
+ n -= state_change->n_connections;
+ if (n < state_change->n_devices) {
+- notify_device_state_change(skb, seq, &state_change->devices[n],
++ err = notify_device_state_change(skb, seq, &state_change->devices[n],
+ NOTIFY_EXISTS | flags);
+ goto next;
+ }
+ n -= state_change->n_devices;
+ if (n < state_change->n_devices * state_change->n_connections) {
+- notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
++ err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+ NOTIFY_EXISTS | flags);
+ goto next;
+ }
+@@ -4957,7 +4963,10 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+ cb->args[4] = 0;
+ }
+ out:
+- return skb->len;
++ if (err)
++ return err;
++ else
++ return skb->len;
+ }
+
+ int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
+index b8a27818ab3f..4ee11aef6672 100644
+--- a/drivers/block/drbd/drbd_state.c
++++ b/drivers/block/drbd/drbd_state.c
+@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ return rv;
+ }
+
+-void notify_resource_state_change(struct sk_buff *skb,
++int notify_resource_state_change(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_resource_state_change *resource_state_change,
+ enum drbd_notification_type type)
+@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
+ .res_susp_fen = resource_state_change->susp_fen[NEW],
+ };
+
+- notify_resource_state(skb, seq, resource, &resource_info, type);
++ return notify_resource_state(skb, seq, resource, &resource_info, type);
+ }
+
+-void notify_connection_state_change(struct sk_buff *skb,
++int notify_connection_state_change(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_connection_state_change *connection_state_change,
+ enum drbd_notification_type type)
+@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
+ .conn_role = connection_state_change->peer_role[NEW],
+ };
+
+- notify_connection_state(skb, seq, connection, &connection_info, type);
++ return notify_connection_state(skb, seq, connection, &connection_info, type);
+ }
+
+-void notify_device_state_change(struct sk_buff *skb,
++int notify_device_state_change(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_device_state_change *device_state_change,
+ enum drbd_notification_type type)
+@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
+ .dev_disk_state = device_state_change->disk_state[NEW],
+ };
+
+- notify_device_state(skb, seq, device, &device_info, type);
++ return notify_device_state(skb, seq, device, &device_info, type);
+ }
+
+-void notify_peer_device_state_change(struct sk_buff *skb,
++int notify_peer_device_state_change(struct sk_buff *skb,
+ unsigned int seq,
+ struct drbd_peer_device_state_change *p,
+ enum drbd_notification_type type)
+@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
+ .peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
+ };
+
+- notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
++ return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+ }
+
+ static void broadcast_state_change(struct drbd_state_change *state_change)
+@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
+ struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
+ bool resource_state_has_changed;
+ unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
+- void (*last_func)(struct sk_buff *, unsigned int, void *,
++ int (*last_func)(struct sk_buff *, unsigned int, void *,
+ enum drbd_notification_type) = NULL;
+ void *last_arg = NULL;
+
+diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
+index ba80f612d6ab..d5b0479bc9a6 100644
+--- a/drivers/block/drbd/drbd_state_change.h
++++ b/drivers/block/drbd/drbd_state_change.h
+@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
+ extern void copy_old_to_new_state_change(struct drbd_state_change *);
+ extern void forget_state_change(struct drbd_state_change *);
+
+-extern void notify_resource_state_change(struct sk_buff *,
++extern int notify_resource_state_change(struct sk_buff *,
+ unsigned int,
+ struct drbd_resource_state_change *,
+ enum drbd_notification_type type);
+-extern void notify_connection_state_change(struct sk_buff *,
++extern int notify_connection_state_change(struct sk_buff *,
+ unsigned int,
+ struct drbd_connection_state_change *,
+ enum drbd_notification_type type);
+-extern void notify_device_state_change(struct sk_buff *,
++extern int notify_device_state_change(struct sk_buff *,
+ unsigned int,
+ struct drbd_device_state_change *,
+ enum drbd_notification_type type);
+-extern void notify_peer_device_state_change(struct sk_buff *,
++extern int notify_peer_device_state_change(struct sk_buff *,
+ unsigned int,
+ struct drbd_peer_device_state_change *,
+ enum drbd_notification_type type);
+--
+2.35.1
+
--- /dev/null
+From cf15a8c78a80a02487368e7d7590609bb6835321 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Mar 2022 15:10:53 +0100
+Subject: Drivers: hv: vmbus: Fix initialization of device object in
+ vmbus_device_register()
+
+From: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
+
+[ Upstream commit 3a5469582c241abca22500f36a9cb8e9331969cf ]
+
+Initialize the device's dma_{mask,parms} pointers and the device's
+dma_mask value before invoking device_register(). Address the
+following trace with 5.17-rc7:
+
+[ 49.646839] WARNING: CPU: 0 PID: 189 at include/linux/dma-mapping.h:543
+ netvsc_probe+0x37a/0x3a0 [hv_netvsc]
+[ 49.646928] Call Trace:
+[ 49.646930] <TASK>
+[ 49.646935] vmbus_probe+0x40/0x60 [hv_vmbus]
+[ 49.646942] really_probe+0x1ce/0x3b0
+[ 49.646948] __driver_probe_device+0x109/0x180
+[ 49.646952] driver_probe_device+0x23/0xa0
+[ 49.646955] __device_attach_driver+0x76/0xe0
+[ 49.646958] ? driver_allows_async_probing+0x50/0x50
+[ 49.646961] bus_for_each_drv+0x84/0xd0
+[ 49.646964] __device_attach+0xed/0x170
+[ 49.646967] device_initial_probe+0x13/0x20
+[ 49.646970] bus_probe_device+0x8f/0xa0
+[ 49.646973] device_add+0x41a/0x8e0
+[ 49.646975] ? hrtimer_init+0x28/0x80
+[ 49.646981] device_register+0x1b/0x20
+[ 49.646983] vmbus_device_register+0x5e/0xf0 [hv_vmbus]
+[ 49.646991] vmbus_add_channel_work+0x12d/0x190 [hv_vmbus]
+[ 49.646999] process_one_work+0x21d/0x3f0
+[ 49.647002] worker_thread+0x4a/0x3b0
+[ 49.647005] ? process_one_work+0x3f0/0x3f0
+[ 49.647007] kthread+0xff/0x130
+[ 49.647011] ? kthread_complete_and_exit+0x20/0x20
+[ 49.647015] ret_from_fork+0x22/0x30
+[ 49.647020] </TASK>
+[ 49.647021] ---[ end trace 0000000000000000 ]---
+
+Fixes: 743b237c3a7b0 ("scsi: storvsc: Add Isolation VM support for storvsc driver")
+Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Link: https://lore.kernel.org/r/20220315141053.3223-1-parri.andrea@gmail.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hv/vmbus_drv.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 12a2b37e87f3..0a05e10ab36c 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2097,6 +2097,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
+ child_device_obj->device.release = vmbus_device_release;
+
++ child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
++ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
++ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
++
+ /*
+ * Register with the LDM. This will kick off the driver/device
+ * binding...which will eventually call vmbus_match() and vmbus_probe()
+@@ -2122,9 +2126,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ }
+ hv_debug_add_dev_dir(child_device_obj);
+
+- child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+- child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+- dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+ return 0;
+
+ err_kset_unregister:
+--
+2.35.1
+
--- /dev/null
+From c088ff43888049258d982cd33e9fd5368c3ca7f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Mar 2022 17:35:35 -0300
+Subject: Drivers: hv: vmbus: Fix potential crash on module unload
+
+From: Guilherme G. Piccoli <gpiccoli@igalia.com>
+
+[ Upstream commit 792f232d57ff28bbd5f9c4abe0466b23d5879dc8 ]
+
+The vmbus driver relies on the panic notifier infrastructure to perform
+some operations when a panic event is detected. Since vmbus can be built
+as module, it is required that the driver handles both registering and
+unregistering such panic notifier callback.
+
+After commit 74347a99e73a ("x86/Hyper-V: Unload vmbus channel in hv panic callback")
+though, the panic notifier registration is done unconditionally in the module
+initialization routine whereas the unregistering procedure is conditionally
+guarded and executes only if HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE capability
+is set.
+
+This patch fixes that by unconditionally unregistering the panic notifier
+in the module's exit routine as well.
+
+Fixes: 74347a99e73a ("x86/Hyper-V: Unload vmbus channel in hv panic callback")
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@igalia.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Link: https://lore.kernel.org/r/20220315203535.682306-1-gpiccoli@igalia.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hv/vmbus_drv.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 0a05e10ab36c..4bea1dfa41cd 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2781,10 +2781,15 @@ static void __exit vmbus_exit(void)
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ kmsg_dump_unregister(&hv_kmsg_dumper);
+ unregister_die_notifier(&hyperv_die_block);
+- atomic_notifier_chain_unregister(&panic_notifier_list,
+- &hyperv_panic_block);
+ }
+
++ /*
++ * The panic notifier is always registered, hence we should
++ * also unconditionally unregister it here as well.
++ */
++ atomic_notifier_chain_unregister(&panic_notifier_list,
++ &hyperv_panic_block);
++
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
+--
+2.35.1
+
--- /dev/null
+From 5706044580efa9adf06b9fc2d2510f8240d053bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jan 2022 19:58:04 -0500
+Subject: drm/amd/display: Fix for dmub outbox notification enable
+
+From: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
+
+[ Upstream commit ed7208706448953c6f15009cf139135776c15713 ]
+
+[Why]
+Currently driver enables dmub outbox notification before outbox ISR is
+registered. During boot scenario, sometimes dmub issues hpd outbox
+message before driver registers ISR and those messages are missed.
+
+[How]
+Enable dmub outbox notification after outbox ISR is registered. Also,
+restructured outbox enable code to call from dm layer and renamed APIs.
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Jasdeep Dhillon <jdhillon@amd.com>
+Signed-off-by: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 66 +++++++++++++++++--
+ drivers/gpu/drm/amd/display/dc/dc.h | 3 +
+ .../gpu/drm/amd/display/dc/dce/dmub_outbox.c | 25 +++----
+ .../gpu/drm/amd/display/dc/dce/dmub_outbox.h | 4 +-
+ .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 --
+ .../drm/amd/display/dc/dcn31/dcn31_hwseq.c | 2 +-
+ 6 files changed, 80 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index ba1aa994db4b..62bc6ce88753 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -76,6 +76,8 @@
+
+ #include "dc_trace.h"
+
++#include "dce/dmub_outbox.h"
++
+ #define CTX \
+ dc->ctx
+
+@@ -3707,13 +3709,23 @@ void dc_hardware_release(struct dc *dc)
+ }
+ #endif
+
+-/**
+- * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
+- * @dc: dc structure
++/*
++ *****************************************************************************
++ * Function: dc_is_dmub_outbox_supported -
++ *
++ * @brief
++ * Checks whether DMUB FW supports outbox notifications, if supported
++ * DM should register outbox interrupt prior to actually enabling interrupts
++ * via dc_enable_dmub_outbox
+ *
+- * Returns: True to enable dmub notifications, False otherwise
++ * @param
++ * [in] dc: dc structure
++ *
++ * @return
++ * True if DMUB FW supports outbox notifications, False otherwise
++ *****************************************************************************
+ */
+-bool dc_enable_dmub_notifications(struct dc *dc)
++bool dc_is_dmub_outbox_supported(struct dc *dc)
+ {
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+@@ -3728,6 +3740,48 @@ bool dc_enable_dmub_notifications(struct dc *dc)
+
+ /**
+ * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
++ * Function: dc_enable_dmub_notifications
++ *
++ * @brief
++ * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
++ * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
++ * This API shall be removed after switching.
++ *
++ * @param
++ * [in] dc: dc structure
++ *
++ * @return
++ * True if DMUB FW supports outbox notifications, False otherwise
++ *****************************************************************************
++ */
++bool dc_enable_dmub_notifications(struct dc *dc)
++{
++ return dc_is_dmub_outbox_supported(dc);
++}
++
++/**
++ *****************************************************************************
++ * Function: dc_enable_dmub_outbox
++ *
++ * @brief
++ * Enables DMUB unsolicited notifications to x86 via outbox
++ *
++ * @param
++ * [in] dc: dc structure
++ *
++ * @return
++ * None
++ *****************************************************************************
++ */
++void dc_enable_dmub_outbox(struct dc *dc)
++{
++ struct dc_context *dc_ctx = dc->ctx;
++
++ dmub_enable_outbox_notification(dc_ctx->dmub_srv);
++}
++
++/**
++ *****************************************************************************
+ * Sets port index appropriately for legacy DDC
+ * @dc: dc structure
+ * @link_index: link index
+@@ -3829,7 +3883,7 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ * [in] payload: aux payload
+ * [out] notify: set_config immediate reply
+ *
+- * @return
++ * @return
+ * True if successful, False if failure
+ *****************************************************************************
+ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index b51864890621..6a8c100a3688 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1448,8 +1448,11 @@ void dc_z10_restore(const struct dc *dc);
+ void dc_z10_save_init(struct dc *dc);
+ #endif
+
++bool dc_is_dmub_outbox_supported(struct dc *dc);
+ bool dc_enable_dmub_notifications(struct dc *dc);
+
++void dc_enable_dmub_outbox(struct dc *dc);
++
+ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
+ uint32_t link_index,
+ struct aux_payload *payload);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+index faad8555ddbb..fff1d07d865d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+@@ -22,20 +22,23 @@
+ * Authors: AMD
+ */
+
+-#include "dmub_outbox.h"
++#include "dc.h"
+ #include "dc_dmub_srv.h"
++#include "dmub_outbox.h"
+ #include "dmub/inc/dmub_cmd.h"
+
+-/**
+- * dmub_enable_outbox_notification - Sends inbox cmd to dmub to enable outbox1
+- * messages with interrupt. Dmub sends outbox1
+- * message and triggers outbox1 interrupt.
+- * @dc: dc structure
++/*
++ * Function: dmub_enable_outbox_notification
++ *
++ * @brief
++ * Sends inbox cmd to dmub for enabling outbox notifications to x86.
++ *
++ * @param
++ * [in] dmub_srv: dmub_srv structure
+ */
+-void dmub_enable_outbox_notification(struct dc *dc)
++void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
+ {
+ union dmub_rb_cmd cmd;
+- struct dc_context *dc_ctx = dc->ctx;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.outbox1_enable.header.type = DMUB_CMD__OUTBOX1_ENABLE;
+@@ -45,7 +48,7 @@ void dmub_enable_outbox_notification(struct dc *dc)
+ sizeof(cmd.outbox1_enable.header);
+ cmd.outbox1_enable.enable = true;
+
+- dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
+- dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
+- dc_dmub_srv_wait_idle(dc_ctx->dmub_srv);
++ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
++ dc_dmub_srv_cmd_execute(dmub_srv);
++ dc_dmub_srv_wait_idle(dmub_srv);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
+index 4e0aa0d1a2d5..58ceabb9d497 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
+@@ -26,8 +26,8 @@
+ #ifndef _DMUB_OUTBOX_H_
+ #define _DMUB_OUTBOX_H_
+
+-#include "dc.h"
++struct dc_dmub_srv;
+
+-void dmub_enable_outbox_notification(struct dc *dc);
++void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv);
+
+ #endif /* _DMUB_OUTBOX_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 530a72e3eefe..636e2d90ff93 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1500,10 +1500,6 @@ void dcn10_init_hw(struct dc *dc)
+ hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ }
+
+- /* Enable outbox notification feature of dmub */
+- if (dc->debug.enable_dmub_aux_for_legacy_ddc)
+- dmub_enable_outbox_notification(dc);
+-
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index 4206ce5bf9a9..1e156f398065 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -194,7 +194,7 @@ void dcn31_init_hw(struct dc *dc)
+
+ /* Enables outbox notifications for usb4 dpia */
+ if (dc->res_pool->usb4_dpia_count)
+- dmub_enable_outbox_notification(dc);
++ dmub_enable_outbox_notification(dc->ctx->dmub_srv);
+
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot)
+--
+2.35.1
+
--- /dev/null
+From 2c783a8e8e76d31b0907da9e019842e59bdff2e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Mar 2022 14:57:34 -0400
+Subject: drm/amd/display: Remove redundant dsc power gating from init_hw
+
+From: Roman Li <Roman.Li@amd.com>
+
+[ Upstream commit 95707203407c4cf0b7e520a99d6f46d8aed4b57f ]
+
+[Why]
+DSC Power down code has been moved from dcn31_init_hw into init_pipes()
+Need to remove it from dcn10_init_hw() as well to avoid duplicated action
+on dcn1.x/2.x
+
+[How]
+Remove DSC power down code from dcn10_init_hw()
+
+Fixes: 8fa6f4c5715c ("drm/amd/display: fixed the DSC power off sequence during Driver PnP")
+
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Eric Yang <Eric.Yang2@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 636e2d90ff93..2cefdd96d0cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1493,13 +1493,6 @@ void dcn10_init_hw(struct dc *dc)
+ link->link_status.link_active = true;
+ }
+
+- /* Power gate DSCs */
+- if (!is_optimized_init_done) {
+- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
+- if (hws->funcs.dsc_pg_control != NULL)
+- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+- }
+-
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
+--
+2.35.1
+
--- /dev/null
+From c2ee3b8c63db64e2cf5bb5417eb5816b75b2bcd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Mar 2022 11:41:48 +0300
+Subject: drm/amdgpu: fix off by one in amdgpu_gfx_kiq_acquire()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 1647b54ed55d4d48c7199d439f8834626576cbe9 ]
+
+This post-op should be a pre-op so that we do not pass -1 as the bit
+number to test_bit(). The current code will loop downwards from 63 to
+-1. After changing to a pre-op, it loops from 63 to 0.
+
+Fixes: 71c37505e7ea ("drm/amdgpu/gfx: move more common KIQ code to amdgpu_gfx.c")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 1916ec84dd71..e7845df6cad2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+ * adev->gfx.mec.num_pipe_per_mec
+ * adev->gfx.mec.num_queue_per_pipe;
+
+- while (queue_bit-- >= 0) {
++ while (--queue_bit >= 0) {
+ if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+ continue;
+
+--
+2.35.1
+
--- /dev/null
+From 0d79b32fdb50d6e86585de16eb90f15fc4e62aec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jan 2022 17:19:44 +0800
+Subject: drm/imx: dw_hdmi-imx: Fix bailout in error cases of probe
+
+From: Liu Ying <victor.liu@nxp.com>
+
+[ Upstream commit e8083acc3f8cc2097917018e947fd4c857f60454 ]
+
+In dw_hdmi_imx_probe(), if error happens after dw_hdmi_probe() returns
+successfully, dw_hdmi_remove() should be called where necessary as
+bailout.
+
+Fixes: c805ec7eb210 ("drm/imx: dw_hdmi-imx: move initialization into probe")
+Cc: Philipp Zabel <p.zabel@pengutronix.de>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Cc: Sascha Hauer <s.hauer@pengutronix.de>
+Cc: Pengutronix Kernel Team <kernel@pengutronix.de>
+Cc: Fabio Estevam <festevam@gmail.com>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Signed-off-by: Liu Ying <victor.liu@nxp.com>
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Link: https://lore.kernel.org/r/20220128091944.3831256-1-victor.liu@nxp.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/dw_hdmi-imx.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+index 87428fb23d9f..a2277a0d6d06 100644
+--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
++++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
+ struct imx_hdmi *hdmi;
++ int ret;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
+ hdmi->bridge = of_drm_find_bridge(np);
+ if (!hdmi->bridge) {
+ dev_err(hdmi->dev, "Unable to find bridge\n");
++ dw_hdmi_remove(hdmi->hdmi);
+ return -ENODEV;
+ }
+
+- return component_add(&pdev->dev, &dw_hdmi_imx_ops);
++ ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
++ if (ret)
++ dw_hdmi_remove(hdmi->hdmi);
++
++ return ret;
+ }
+
+ static int dw_hdmi_imx_remove(struct platform_device *pdev)
+--
+2.35.1
+
--- /dev/null
+From e5cfa984d6a510588eb54316bfa71c2b24e88adf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Jan 2022 17:52:30 +0100
+Subject: drm/imx: Fix memory leak in imx_pd_connector_get_modes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit bce81feb03a20fca7bbdd1c4af16b4e9d5c0e1d3 ]
+
+Avoid leaking the display mode variable if of_get_drm_display_mode
+fails.
+
+Fixes: 76ecd9c9fb24 ("drm/imx: parallel-display: check return code from of_get_drm_display_mode()")
+Addresses-Coverity-ID: 1443943 ("Resource leak")
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Link: https://lore.kernel.org/r/20220108165230.44610-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/parallel-display.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
+index 06cb1a59b9bc..63ba2ad84679 100644
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ &imxpd->bus_flags,
+ OF_USE_NATIVE_MODE);
+- if (ret)
++ if (ret) {
++ drm_mode_destroy(connector->dev, mode);
+ return ret;
++ }
+
+ drm_mode_copy(mode, &imxpd->mode);
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+--
+2.35.1
+
--- /dev/null
+From 5ef7ebcc2d7236b0b40ae6784f2cf5948057440f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jan 2022 15:47:29 +0800
+Subject: drm/imx: imx-ldb: Check for null pointer after calling kmemdup
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 8027a9ad9b3568c5eb49c968ad6c97f279d76730 ]
+
+As the possible failure of the allocation, kmemdup() may return NULL
+pointer.
+Therefore, it should be better to check the return value of kmemdup()
+and return error if fails.
+
+Fixes: dc80d7038883 ("drm/imx-ldb: Add support to drm-bridge")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Link: https://lore.kernel.org/r/20220105074729.2363657-1-jiasheng@iscas.ac.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/imx-ldb.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index e5078d03020d..fb0e951248f6 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
+ edidp = of_get_property(child, "edid", &edid_len);
+ if (edidp) {
+ channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
++ if (!channel->edid)
++ return -ENOMEM;
+ } else if (!channel->panel) {
+ /* fallback to display-timings node */
+ ret = of_get_drm_display_mode(child,
+--
+2.35.1
+
--- /dev/null
+From 3592693532b40d83f3dc3f6eef57bca7a2884e91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:58:05 +0300
+Subject: IB/cm: Cancel mad on the DREQ event when the state is MRA_REP_RCVD
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+[ Upstream commit 107dd7beba403a363adfeb3ffe3734fe38a05cce ]
+
+On the passive side when the disconnectReq event comes, if the current
+state is MRA_REP_RCVD, it needs to cancel the MAD before entering the
+DREQ_RCVD and TIMEWAIT states, otherwise the destroy_id may block until
+this mad will reach timeout.
+
+Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation")
+Link: https://lore.kernel.org/r/75261c00c1d82128b1d981af9ff46e994186e621.1649062436.git.leonro@nvidia.com
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 35f0d5e7533d..1c107d6d03b9 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
+ switch (cm_id_priv->id.state) {
+ case IB_CM_REP_SENT:
+ case IB_CM_DREQ_SENT:
++ case IB_CM_MRA_REP_RCVD:
+ ib_cancel_mad(cm_id_priv->msg);
+ break;
+ case IB_CM_ESTABLISHED:
+@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+ ib_cancel_mad(cm_id_priv->msg);
+ break;
+- case IB_CM_MRA_REP_RCVD:
+- break;
+ case IB_CM_TIMEWAIT:
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
+--
+2.35.1
+
--- /dev/null
+From 6200d07a7463538c6e1b463d2ffa31043198cc81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Feb 2022 17:53:30 +0100
+Subject: IB/rdmavt: add lock to call to rvt_error_qp to prevent a race
+ condition
+
+From: Niels Dossche <dossche.niels@gmail.com>
+
+[ Upstream commit 4d809f69695d4e7d1378b3a072fa9aef23123018 ]
+
+The documentation of the function rvt_error_qp says both r_lock and s_lock
+need to be held when calling that function. It also asserts using lockdep
+that both of those locks are held. However, the commit I referenced in
+Fixes accidentally makes the call to rvt_error_qp in rvt_ruc_loopback no
+longer covered by r_lock. This results in the lockdep assertion failing
+and also possibly in a race condition.
+
+Fixes: d757c60eca9b ("IB/rdmavt: Fix concurrency panics in QP post_send and modify to error")
+Link: https://lore.kernel.org/r/20220228165330.41546-1-dossche.niels@gmail.com
+Signed-off-by: Niels Dossche <dossche.niels@gmail.com>
+Acked-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index ae50b56e8913..8ef112f883a7 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -3190,7 +3190,11 @@ void rvt_ruc_loopback(struct rvt_qp *sqp)
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvt_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
++ int lastwqe;
++
++ spin_lock(&sqp->r_lock);
++ lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
++ spin_unlock(&sqp->r_lock);
+
+ sqp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+--
+2.35.1
+
--- /dev/null
+From b04b555aa4681a376bbd75ed4a259f74e9df78f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Mar 2022 19:36:29 +0100
+Subject: ice: clear cmd_type_offset_bsz for TX rings
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e19778e6c911691856447c3bf9617f00b3e1347f ]
+
+Currently when XDP rings are created, each descriptor gets its DD bit
+set, which turns out to be the wrong approach as it can lead to a
+situation where more descriptors get cleaned than it was supposed to,
+e.g. when AF_XDP busy poll is run with a large batch size. In this
+situation, the driver would request for more buffers than it is able to
+handle.
+
+Fix this by not setting the DD bits in ice_xdp_alloc_setup_rings(). They
+should be initialized to zero instead.
+
+Fixes: 9610bd988df9 ("ice: optimize XDP_TX workloads")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Shwetha Nagaraju <shwetha.nagaraju@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 5229bce1a4ab..db2e02e673a7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2546,7 +2546,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+- tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
++ tx_desc->cmd_type_offset_bsz = 0;
+ }
+ }
+
+--
+2.35.1
+
--- /dev/null
+From aba9a5d8b670c0a286d31b928a80dda92e287d9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 09:20:06 -0700
+Subject: ice: Clear default forwarding VSI during VSI release
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit bd8c624c0cd59de0032752ba3001c107bba97f7b ]
+
+VSI is set as default forwarding one when promisc mode is set for
+PF interface, when PF is switched to switchdev mode or when VF
+driver asks to enable allmulticast or promisc mode for the VF
+interface (when vf-true-promisc-support priv flag is off).
+The third case is buggy because in that case VSI associated with
+VF remains as default one after VF removal.
+
+Reproducer:
+1. Create VF
+ echo 1 > sys/class/net/ens7f0/device/sriov_numvfs
+2. Enable allmulticast or promisc mode on VF
+ ip link set ens7f0v0 allmulticast on
+ ip link set ens7f0v0 promisc on
+3. Delete VF
+ echo 0 > sys/class/net/ens7f0/device/sriov_numvfs
+4. Try to enable promisc mode on PF
+ ip link set ens7f0 promisc on
+
+Although it looks that promisc mode on PF is enabled the opposite
+is true because ice_vsi_sync_fltr() responsible for IFF_PROMISC
+handling first checks if any other VSI is set as default forwarding
+one and if so the function does not do anything. At this point
+it is not possible to enable promisc mode on PF without re-probe
+device.
+
+To resolve the issue this patch clear default forwarding VSI
+during ice_vsi_release() when the VSI to be released is the default
+one.
+
+Fixes: 01b5e89aab49 ("ice: Add VF promiscuous support")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Alice Michael <alice.michael@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_lib.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 53256aca27c7..20d755822d43 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3147,6 +3147,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ }
+ }
+
++ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
++ ice_clear_dflt_vsi(pf->first_sw);
+ ice_fltr_remove_all(vsi);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+--
+2.35.1
+
--- /dev/null
+From be43470940891a60571e9af2a9dc9b166b2306ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:35:48 -0700
+Subject: ice: Do not skip not enabled queues in ice_vc_dis_qs_msg
+
+From: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+
+[ Upstream commit 05ef6813b234db3196f083b91db3963f040b65bb ]
+
+Disable check for queue being enabled in ice_vc_dis_qs_msg, because
+there could be a case when queues were created, but were not enabled.
+We still need to delete those queues.
+
+Normal workflow for VF looks like:
+Enable path:
+VIRTCHNL_OP_ADD_ETH_ADDR (opcode 10)
+VIRTCHNL_OP_CONFIG_VSI_QUEUES (opcode 6)
+VIRTCHNL_OP_ENABLE_QUEUES (opcode 8)
+
+Disable path:
+VIRTCHNL_OP_DISABLE_QUEUES (opcode 9)
+VIRTCHNL_OP_DEL_ETH_ADDR (opcode 11)
+
+The issue appears only in stress conditions when VF is enabled and
+disabled very fast.
+Eventually there will be a case, when queues are created by
+VIRTCHNL_OP_CONFIG_VSI_QUEUES, but are not enabled by
+VIRTCHNL_OP_ENABLE_QUEUES.
+In turn, these queues are not deleted by VIRTCHNL_OP_DISABLE_QUEUES,
+because there is a check whether queues are enabled in
+ice_vc_dis_qs_msg.
+
+When we bring up the VF again, we will see the "Failed to set LAN Tx queue
+context" error during VIRTCHNL_OP_CONFIG_VSI_QUEUES step. This
+happens because old 16 queues were not deleted and VF requests to create
+16 more, but ice_sched_get_free_qparent in ice_ena_vsi_txq would fail to
+find a parent node for first newly requested queue (because all nodes
+are allocated to 16 old queues).
+
+Testing Hints:
+
+Just enable and disable VF fast enough, so it would be disabled before
+reaching VIRTCHNL_OP_ENABLE_QUEUES.
+
+while true; do
+ ip link set dev ens785f0v0 up
+ sleep 0.065 # adjust delay value for your machine
+ ip link set dev ens785f0v0 down
+done
+
+Fixes: 77ca27c41705 ("ice: add support for virtchnl_queue_select.[tx|rx]_queues bitmap")
+Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Alice Michael <alice.michael@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 1be3cd4b2bef..2bee8f10ad89 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -3351,9 +3351,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ goto error_param;
+ }
+
+- /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->txq_ena))
+- continue;
++ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
++ vf_q_id, vsi->vsi_num);
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+--
+2.35.1
+
--- /dev/null
+From e984d0a602f1501f84411e261a1430a3ea81242f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 09:20:07 -0700
+Subject: ice: Fix MAC address setting
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit 2c0069f3f91f125b1b2ce66cc6bea8eb134723c3 ]
+
+Commit 2ccc1c1ccc671b ("ice: Remove excess error variables") merged
+the usage of 'status' and 'err' variables into single one in
+function ice_set_mac_address(). Unfortunately this causes
+a regression when call of ice_fltr_add_mac() returns -EEXIST because
+this return value does not indicate an error in this case but
+value of 'err' remains to be -EEXIST till the end of the function
+and is returned to caller.
+
+Prior mentioned commit this does not happen because return value of
+ice_fltr_add_mac() was stored to 'status' variable first and
+if it was -EEXIST then 'err' remains to be zero.
+
+Fix the problem by resetting 'err' to zero when ice_fltr_add_mac()
+returns -EEXIST.
+
+Fixes: 2ccc1c1ccc671b ("ice: Remove excess error variables")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Acked-by: Alexander Lobakin <alexandr.lobakin@intel.com>
+Signed-off-by: Alice Michael <alice.michael@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 296f9d5f7408..92e0fe9316b9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5432,16 +5432,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
+
+ /* Add filter for new MAC. If filter exists, return success */
+ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+- if (err == -EEXIST)
++ if (err == -EEXIST) {
+ /* Although this MAC filter is already present in hardware it's
+ * possible in some cases (e.g. bonding) that dev_addr was
+ * modified outside of the driver and needs to be restored back
+ * to this value.
+ */
+ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+- else if (err)
++
++ return 0;
++ } else if (err) {
+ /* error if the new filter addition failed */
+ err = -EADDRNOTAVAIL;
++ }
+
+ err_update_filters:
+ if (err) {
+--
+2.35.1
+
--- /dev/null
+From f28fbc2e59ede1dfc21890018db84aa2bce9d47b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:35:47 -0700
+Subject: ice: Set txq_teid to ICE_INVAL_TEID on ring creation
+
+From: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+
+[ Upstream commit ccfee1822042b87e5135d33cad8ea353e64612d2 ]
+
+When VF is freshly created, but not brought up, ring->txq_teid
+value is by default set to 0.
+But 0 is a valid TEID. On some platforms the Root Node of
+Tx scheduler has a TEID = 0. This can cause issues as shown below.
+
+The proper way is to set ring->txq_teid to ICE_INVAL_TEID (0xFFFFFFFF).
+
+Testing Hints:
+echo 1 > /sys/class/net/ens785f0/device/sriov_numvfs
+ip link set dev ens785f0v0 up
+ip link set dev ens785f0v0 down
+
+If we have freshly created VF and quickly turn it on and off, so there
+would be no time to reach VIRTCHNL_OP_CONFIG_VSI_QUEUES stage, then
+VIRTCHNL_OP_DISABLE_QUEUES stage will fail with error:
+[ 639.531454] disable queue 89 failed 14
+[ 639.532233] Failed to disable LAN Tx queues, error: ICE_ERR_AQ_ERROR
+[ 639.533107] ice 0000:02:00.0: Failed to stop Tx ring 0 on VSI 5
+
+The reason for the fail is that we are trying to send AQ command to
+delete queue 89, which has never been created and receive an "invalid
+argument" error from firmware.
+
+As this queue has never been created, its teid and ring->txq_teid
+have default value 0.
+ice_dis_vsi_txq has a check against non-existent queues:
+
+node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
+if (!node)
+ continue;
+
+But on some platforms the Root Node of Tx scheduler has a teid = 0.
+Hence, ice_sched_find_node_by_teid finds a node with teid = 0 (it is
+pi->root), and we go further to submit an erroneous request to firmware.
+
+Fixes: 37bb83901286 ("ice: Move common functions out of ice_main.c part 7/7")
+Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Alice Michael <alice.michael@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_lib.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 20d755822d43..5fd2bbeab2d1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1452,6 +1452,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+ ring->tx_tstamps = &pf->ptp.port.tx;
+ ring->dev = dev;
+ ring->count = vsi->num_tx_desc;
++ ring->txq_teid = ICE_INVAL_TEID;
+ WRITE_ONCE(vsi->tx_rings[i], ring);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From a3b204beb61e7f93716237fc62f76d417c11166b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Mar 2022 19:36:27 +0100
+Subject: ice: synchronize_rcu() when terminating rings
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit f9124c68f05ffdb87a47e3ea6d5fae9dad7cb6eb ]
+
+Unfortunately, the ice driver doesn't respect the RCU critical section that
+XSK wakeup is surrounded with. To fix this, add synchronize_rcu() calls to
+paths that destroy resources that might be in use.
+
+This was addressed in other AF_XDP ZC enabled drivers, for reference see
+for example commit b3873a5be757 ("net/i40e: Fix concurrency issues
+between config flow and XSK")
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Shwetha Nagaraju <shwetha.nagaraju@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 2 +-
+ drivers/net/ethernet/intel/ice/ice_main.c | 4 +++-
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 4 +++-
+ 3 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 2f60230d332a..9c04a71a9fca 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -674,7 +674,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
+
+ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+ {
+- return !!vsi->xdp_prog;
++ return !!READ_ONCE(vsi->xdp_prog);
+ }
+
+ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 92e0fe9316b9..5229bce1a4ab 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2742,8 +2742,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+
+ ice_for_each_xdp_txq(vsi, i)
+ if (vsi->xdp_rings[i]) {
+- if (vsi->xdp_rings[i]->desc)
++ if (vsi->xdp_rings[i]->desc) {
++ synchronize_rcu();
+ ice_free_tx_ring(vsi->xdp_rings[i]);
++ }
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index feb874bde171..f95560c7387e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+ {
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+- if (ice_is_xdp_ena_vsi(vsi))
++ if (ice_is_xdp_ena_vsi(vsi)) {
++ synchronize_rcu();
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
++ }
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From e46d5bb7f72b0c78c58520f932df910fe8dea601 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Mar 2022 19:36:28 +0100
+Subject: ice: xsk: fix VSI state check in ice_xsk_wakeup()
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 72b915a2b444e9247c9d424a840e94263db07c27 ]
+
+ICE_DOWN is dedicated for pf->state. Check for ICE_VSI_DOWN being set on
+vsi->state in ice_xsk_wakeup().
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Shwetha Nagaraju <shwetha.nagaraju@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index f95560c7387e..30620b942fa0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -765,7 +765,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *ring;
+
+- if (test_bit(ICE_DOWN, vsi->state))
++ if (test_bit(ICE_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+--
+2.35.1
+
--- /dev/null
+From cb00112a6fd990e498ace80e296976e60f6acef3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 12:43:58 +0100
+Subject: io_uring: don't touch scm_fp_list after queueing skb
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit a07211e3001435fe8591b992464cd8d5e3c98c5a ]
+
+It's safer to not touch scm_fp_list after we queued an skb to which it
+was assigned, there might be races lurking if we screw subtle sync
+guarantees on the io_uring side.
+
+Fixes: 6b06314c47e14 ("io_uring: add file set registration")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index a3e82aececd9..0ee1d8903ffe 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8237,8 +8237,12 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ skb_queue_head(&sk->sk_receive_queue, skb);
+
+- for (i = 0; i < nr_files; i++)
+- fput(fpl->fp[i]);
++ for (i = 0; i < nr; i++) {
++ struct file *file = io_file_from_index(ctx, i + offset);
++
++ if (file)
++ fput(file);
++ }
+ } else {
+ kfree_skb(skb);
+ free_uid(fpl->user);
+--
+2.35.1
+
--- /dev/null
+From 76ac6529d58a1d943cf5007b9149884a5c80375a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 12:43:57 +0100
+Subject: io_uring: nospec index for tags on files update
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 34bb77184123ae401100a4d156584f12fa630e5c ]
+
+Don't forget to array_index_nospec() for indexes before updating rsrc
+tags in __io_sqe_files_update(), just use already safe and precalculated
+index @i.
+
+Fixes: c3bdad0271834 ("io_uring: add generic rsrc update with tags")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 5e6788ab188f..a3e82aececd9 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8700,7 +8700,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ err = -EBADF;
+ break;
+ }
+- *io_get_tag_slot(data, up->offset + done) = tag;
++ *io_get_tag_slot(data, i) = tag;
+ io_fixed_file_set(file_slot, file);
+ err = io_sqe_file_register(ctx, file, i);
+ if (err) {
+--
+2.35.1
+
--- /dev/null
+From fbd70e6fd57ecb89db6d9ff968c0a4c0911f4970 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 09:23:01 +0300
+Subject: iommu/omap: Fix regression in probe for NULL pointer dereference
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 71ff461c3f41f6465434b9e980c01782763e7ad8 ]
+
+Commit 3f6634d997db ("iommu: Use right way to retrieve iommu_ops") started
+triggering a NULL pointer dereference for some omap variants:
+
+__iommu_probe_device from probe_iommu_group+0x2c/0x38
+probe_iommu_group from bus_for_each_dev+0x74/0xbc
+bus_for_each_dev from bus_iommu_probe+0x34/0x2e8
+bus_iommu_probe from bus_set_iommu+0x80/0xc8
+bus_set_iommu from omap_iommu_init+0x88/0xcc
+omap_iommu_init from do_one_initcall+0x44/0x24
+
+This is caused by omap iommu probe returning 0 instead of ERR_PTR(-ENODEV)
+as noted by Jason Gunthorpe <jgg@ziepe.ca>.
+
+Looks like the regression already happened with an earlier commit
+6785eb9105e3 ("iommu/omap: Convert to probe/release_device() call-backs")
+that changed the function return type and missed converting one place.
+
+Cc: Drew Fustini <dfustini@baylibre.com>
+Cc: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: Suman Anna <s-anna@ti.com>
+Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
+Fixes: 6785eb9105e3 ("iommu/omap: Convert to probe/release_device() call-backs")
+Fixes: 3f6634d997db ("iommu: Use right way to retrieve iommu_ops")
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Tested-by: Drew Fustini <dfustini@baylibre.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/20220331062301.24269-1-tony@atomide.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/omap-iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index 980e4af3f06b..d2e82a1b56d8 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
+ num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
+ sizeof(phandle));
+ if (num_iommus < 0)
+- return 0;
++ return ERR_PTR(-ENODEV);
+
+ arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
+ if (!arch_data)
+--
+2.35.1
+
--- /dev/null
+From 871b869e173f2730459c53e708394680ad4aad61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 09:09:08 -0600
+Subject: ipv6: Fix stats accounting in ip6_pkt_drop
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit 1158f79f82d437093aeed87d57df0548bdd68146 ]
+
+VRF devices are the loopbacks for VRFs, and a loopback can not be
+assigned to a VRF. Accordingly, the condition in ip6_pkt_drop should
+be '||' not '&&'.
+
+Fixes: 1d3fd8a10bed ("vrf: Use orig netdev to count Ip6InNoRoutes and a fresh route lookup when sending dest unreach")
+Reported-by: Pudak, Filip <Filip.Pudak@windriver.com>
+Reported-by: Xiao, Jiguang <Jiguang.Xiao@windriver.com>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20220404150908.2937-1-dsahern@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ea1cf414a92e..da1bf48e7937 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4495,7 +4495,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
+ struct inet6_dev *idev;
+ int type;
+
+- if (netif_is_l3_master(skb->dev) &&
++ if (netif_is_l3_master(skb->dev) ||
+ dst->dev == net->loopback_dev)
+ idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+ else
+--
+2.35.1
+
--- /dev/null
+From 7c998c8c5b8cee34bbbbfb86eae803b5cf8c008d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 10:48:42 +0800
+Subject: mctp: Fix check for dev_hard_header() result
+
+From: Matt Johnston <matt@codeconstruct.com.au>
+
+[ Upstream commit 60be976ac45137657b7b505d7e0d44d0e51accb7 ]
+
+dev_hard_header() returns the length of the header, so
+we need to test for negative errors rather than non-zero.
+
+Fixes: 889b7da23abf ("mctp: Add initial routing framework")
+Signed-off-by: Matt Johnston <matt@codeconstruct.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mctp/route.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 05fbd318eb98..d47438f5233d 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -507,7 +507,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
+
+ rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
+ daddr, skb->dev->dev_addr, skb->len);
+- if (rc) {
++ if (rc < 0) {
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+ }
+--
+2.35.1
+
--- /dev/null
+From 1e720116259a7fc19e598ec2e7933b8b997b15df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 10:48:44 +0800
+Subject: mctp: Use output netdev to allocate skb headroom
+
+From: Matt Johnston <matt@codeconstruct.com.au>
+
+[ Upstream commit 4a9dda1c1da65beee994f0977a56a9a21c5db2a7 ]
+
+Previously the skb was allocated with headroom MCTP_HEADER_MAXLEN,
+but that isn't sufficient if we are using devs that are not MCTP
+specific.
+
+This also adds a check that the smctp_halen provided to sendmsg for
+extended addressing is the correct size for the netdev.
+
+Fixes: 833ef3b91de6 ("mctp: Populate socket implementation")
+Reported-by: Matthew Rinaldi <mjrinal@g.clemson.edu>
+Signed-off-by: Matt Johnston <matt@codeconstruct.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/mctp.h | 2 --
+ net/mctp/af_mctp.c | 46 +++++++++++++++++++++++++++++++++-------------
+ net/mctp/route.c | 14 +++++++++++---
+ 3 files changed, 44 insertions(+), 18 deletions(-)
+
+diff --git a/include/net/mctp.h b/include/net/mctp.h
+index 7e35ec79b909..204ae3aebc0d 100644
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -36,8 +36,6 @@ struct mctp_hdr {
+ #define MCTP_HDR_TAG_SHIFT 0
+ #define MCTP_HDR_TAG_MASK GENMASK(2, 0)
+
+-#define MCTP_HEADER_MAXLEN 4
+-
+ #define MCTP_INITIAL_DEFAULT_NET 1
+
+ static inline bool mctp_address_ok(mctp_eid_t eid)
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index c921de63b494..fc05351d3a82 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -90,13 +90,13 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
+ DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
+- const int hlen = MCTP_HEADER_MAXLEN + sizeof(struct mctp_hdr);
+ int rc, addrlen = msg->msg_namelen;
+ struct sock *sk = sock->sk;
+ struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ struct mctp_skb_cb *cb;
+ struct mctp_route *rt;
+- struct sk_buff *skb;
++ struct sk_buff *skb = NULL;
++ int hlen;
+
+ if (addr) {
+ if (addrlen < sizeof(struct sockaddr_mctp))
+@@ -119,6 +119,34 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ if (addr->smctp_network == MCTP_NET_ANY)
+ addr->smctp_network = mctp_default_net(sock_net(sk));
+
++ /* direct addressing */
++ if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
++ DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
++ extaddr, msg->msg_name);
++ struct net_device *dev;
++
++ rc = -EINVAL;
++ rcu_read_lock();
++ dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
++ /* check for correct halen */
++ if (dev && extaddr->smctp_halen == dev->addr_len) {
++ hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
++ rc = 0;
++ }
++ rcu_read_unlock();
++ if (rc)
++ goto err_free;
++ rt = NULL;
++ } else {
++ rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
++ addr->smctp_addr.s_addr);
++ if (!rt) {
++ rc = -EHOSTUNREACH;
++ goto err_free;
++ }
++ hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
++ }
++
+ skb = sock_alloc_send_skb(sk, hlen + 1 + len,
+ msg->msg_flags & MSG_DONTWAIT, &rc);
+ if (!skb)
+@@ -137,8 +165,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ cb = __mctp_cb(skb);
+ cb->net = addr->smctp_network;
+
+- /* direct addressing */
+- if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
++ if (!rt) {
++ /* fill extended address in cb */
+ DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
+ extaddr, msg->msg_name);
+
+@@ -149,17 +177,9 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ }
+
+ cb->ifindex = extaddr->smctp_ifindex;
++ /* smctp_halen is checked above */
+ cb->halen = extaddr->smctp_halen;
+ memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
+-
+- rt = NULL;
+- } else {
+- rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
+- addr->smctp_addr.s_addr);
+- if (!rt) {
+- rc = -EHOSTUNREACH;
+- goto err_free;
+- }
+ }
+
+ rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index d47438f5233d..1a296e211a50 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -498,6 +498,11 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
+
+ if (cb->ifindex) {
+ /* direct route; use the hwaddr we stashed in sendmsg */
++ if (cb->halen != skb->dev->addr_len) {
++ /* sanity check, sendmsg should have already caught this */
++ kfree_skb(skb);
++ return -EMSGSIZE;
++ }
+ daddr = cb->haddr;
+ } else {
+ /* If lookup fails let the device handle daddr==NULL */
+@@ -707,7 +712,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ {
+ const unsigned int hlen = sizeof(struct mctp_hdr);
+ struct mctp_hdr *hdr, *hdr2;
+- unsigned int pos, size;
++ unsigned int pos, size, headroom;
+ struct sk_buff *skb2;
+ int rc;
+ u8 seq;
+@@ -721,6 +726,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ return -EMSGSIZE;
+ }
+
++ /* keep same headroom as the original skb */
++ headroom = skb_headroom(skb);
++
+ /* we've got the header */
+ skb_pull(skb, hlen);
+
+@@ -728,7 +736,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ /* size of message payload */
+ size = min(mtu - hlen, skb->len - pos);
+
+- skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
++ skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
+ if (!skb2) {
+ rc = -ENOMEM;
+ break;
+@@ -744,7 +752,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ skb_set_owner_w(skb2, skb->sk);
+
+ /* establish packet */
+- skb_reserve(skb2, MCTP_HEADER_MAXLEN);
++ skb_reserve(skb2, headroom);
+ skb_reset_network_header(skb2);
+ skb_put(skb2, hlen + size);
+ skb2->transport_header = skb2->network_header + hlen;
+--
+2.35.1
+
--- /dev/null
+From 68298f1eeef50d229f0efd2798d60b1c8d74b049 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 02:04:04 +0200
+Subject: net: ethernet: mv643xx: Fix over zealous checking
+ of_get_mac_address()
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit 11f8e7c122ce013fa745029fa8c94c6db69c2e54 ]
+
+There is often not a MAC address available in an EEPROM accessible by
+Linux with Marvell devices. Instead the bootload has the MAC address
+and directly programs it into the hardware. So don't consider an error
+from of_get_mac_address() has fatal. However, the check was added for
+the case where there is a MAC address in an the EEPROM, but the EEPROM
+has not probed yet, and -EPROBE_DEFER is returned. In that case the
+error should be returned. So make the check specific to this error
+code.
+
+Cc: Mauri Sandberg <maukka@ext.kapsi.fi>
+Reported-by: Thomas Walther <walther-it@gmx.de>
+Fixes: 42404d8f1c01 ("net: mv643xx_eth: process retval from of_get_mac_address")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220405000404.3374734-1-andrew@lunn.ch
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 143ca8be5eb5..4008596963be 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ }
+
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+- if (ret)
++ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
+--
+2.35.1
+
--- /dev/null
+From 67c86aaea1bc5069ec43ceb5b964bdc39a286447 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 10:33:42 +0300
+Subject: net: ipv4: fix route with nexthop object delete warning
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+[ Upstream commit 6bf92d70e690b7ff12b24f4bfff5e5434d019b82 ]
+
+FRR folks have hit a kernel warning[1] while deleting routes[2] which is
+caused by trying to delete a route pointing to a nexthop id without
+specifying nhid but matching on an interface. That is, a route is found
+but we hit a warning while matching it. The warning is from
+fib_info_nh() in include/net/nexthop.h because we run it on a fib_info
+with nexthop object. The call chain is:
+ inet_rtm_delroute -> fib_table_delete -> fib_nh_match (called with a
+nexthop fib_info and also with fc_oif set thus calling fib_info_nh on
+the fib_info and triggering the warning). The fix is to not do any
+matching in that branch if the fi has a nexthop object because those are
+managed separately. I.e. we should match when deleting without nh spec and
+should fail when deleting a nexthop route with old-style nh spec because
+nexthop objects are managed separately, e.g.:
+ $ ip r show 1.2.3.4/32
+ 1.2.3.4 nhid 12 via 192.168.11.2 dev dummy0
+
+ $ ip r del 1.2.3.4/32
+ $ ip r del 1.2.3.4/32 nhid 12
+ <both should work>
+
+ $ ip r del 1.2.3.4/32 dev dummy0
+ <should fail with ESRCH>
+
+[1]
+ [ 523.462226] ------------[ cut here ]------------
+ [ 523.462230] WARNING: CPU: 14 PID: 22893 at include/net/nexthop.h:468 fib_nh_match+0x210/0x460
+ [ 523.462236] Modules linked in: dummy rpcsec_gss_krb5 xt_socket nf_socket_ipv4 nf_socket_ipv6 ip6table_raw iptable_raw bpf_preload xt_statistic ip_set ip_vs_sh ip_vs_wrr ip_vs_rr ip_vs xt_mark nf_tables xt_nat veth nf_conntrack_netlink nfnetlink xt_addrtype br_netfilter overlay dm_crypt nfsv3 nfs fscache netfs vhost_net vhost vhost_iotlb tap tun xt_CHECKSUM xt_MASQUERADE xt_conntrack 8021q garp mrp ipt_REJECT nf_reject_ipv4 ip6table_mangle ip6table_nat iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bridge stp llc rfcomm snd_seq_dummy snd_hrtimer rpcrdma rdma_cm iw_cm ib_cm ib_core ip6table_filter xt_comment ip6_tables vboxnetadp(OE) vboxnetflt(OE) vboxdrv(OE) qrtr bnep binfmt_misc xfs vfat fat squashfs loop nvidia_drm(POE) nvidia_modeset(POE) nvidia_uvm(POE) nvidia(POE) intel_rapl_msr intel_rapl_common snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_codec_hdmi btusb btrtl iwlmvm uvcvideo btbcm snd_hda_intel edac_mce_amd
+ [ 523.462274] videobuf2_vmalloc videobuf2_memops btintel snd_intel_dspcfg videobuf2_v4l2 snd_intel_sdw_acpi bluetooth snd_usb_audio snd_hda_codec mac80211 snd_usbmidi_lib joydev snd_hda_core videobuf2_common kvm_amd snd_rawmidi snd_hwdep snd_seq videodev ccp snd_seq_device libarc4 ecdh_generic mc snd_pcm kvm iwlwifi snd_timer drm_kms_helper snd cfg80211 cec soundcore irqbypass rapl wmi_bmof i2c_piix4 rfkill k10temp pcspkr acpi_cpufreq nfsd auth_rpcgss nfs_acl lockd grace sunrpc drm zram ip_tables crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel nvme sp5100_tco r8169 nvme_core wmi ipmi_devintf ipmi_msghandler fuse
+ [ 523.462300] CPU: 14 PID: 22893 Comm: ip Tainted: P OE 5.16.18-200.fc35.x86_64 #1
+ [ 523.462302] Hardware name: Micro-Star International Co., Ltd. MS-7C37/MPG X570 GAMING EDGE WIFI (MS-7C37), BIOS 1.C0 10/29/2020
+ [ 523.462303] RIP: 0010:fib_nh_match+0x210/0x460
+ [ 523.462304] Code: 7c 24 20 48 8b b5 90 00 00 00 e8 bb ee f4 ff 48 8b 7c 24 20 41 89 c4 e8 ee eb f4 ff 45 85 e4 0f 85 2e fe ff ff e9 4c ff ff ff <0f> 0b e9 17 ff ff ff 3c 0a 0f 85 61 fe ff ff 48 8b b5 98 00 00 00
+ [ 523.462306] RSP: 0018:ffffaa53d4d87928 EFLAGS: 00010286
+ [ 523.462307] RAX: 0000000000000000 RBX: ffffaa53d4d87a90 RCX: ffffaa53d4d87bb0
+ [ 523.462308] RDX: ffff9e3d2ee6be80 RSI: ffffaa53d4d87a90 RDI: ffffffff920ed380
+ [ 523.462309] RBP: ffff9e3d2ee6be80 R08: 0000000000000064 R09: 0000000000000000
+ [ 523.462310] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000031
+ [ 523.462310] R13: 0000000000000020 R14: 0000000000000000 R15: ffff9e3d331054e0
+ [ 523.462311] FS: 00007f245517c1c0(0000) GS:ffff9e492ed80000(0000) knlGS:0000000000000000
+ [ 523.462313] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 523.462313] CR2: 000055e5dfdd8268 CR3: 00000003ef488000 CR4: 0000000000350ee0
+ [ 523.462315] Call Trace:
+ [ 523.462316] <TASK>
+ [ 523.462320] fib_table_delete+0x1a9/0x310
+ [ 523.462323] inet_rtm_delroute+0x93/0x110
+ [ 523.462325] rtnetlink_rcv_msg+0x133/0x370
+ [ 523.462327] ? _copy_to_iter+0xb5/0x6f0
+ [ 523.462330] ? rtnl_calcit.isra.0+0x110/0x110
+ [ 523.462331] netlink_rcv_skb+0x50/0xf0
+ [ 523.462334] netlink_unicast+0x211/0x330
+ [ 523.462336] netlink_sendmsg+0x23f/0x480
+ [ 523.462338] sock_sendmsg+0x5e/0x60
+ [ 523.462340] ____sys_sendmsg+0x22c/0x270
+ [ 523.462341] ? import_iovec+0x17/0x20
+ [ 523.462343] ? sendmsg_copy_msghdr+0x59/0x90
+ [ 523.462344] ? __mod_lruvec_page_state+0x85/0x110
+ [ 523.462348] ___sys_sendmsg+0x81/0xc0
+ [ 523.462350] ? netlink_seq_start+0x70/0x70
+ [ 523.462352] ? __dentry_kill+0x13a/0x180
+ [ 523.462354] ? __fput+0xff/0x250
+ [ 523.462356] __sys_sendmsg+0x49/0x80
+ [ 523.462358] do_syscall_64+0x3b/0x90
+ [ 523.462361] entry_SYSCALL_64_after_hwframe+0x44/0xae
+ [ 523.462364] RIP: 0033:0x7f24552aa337
+ [ 523.462365] Code: 0e 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+ [ 523.462366] RSP: 002b:00007fff7f05a838 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ [ 523.462368] RAX: ffffffffffffffda RBX: 000000006245bf91 RCX: 00007f24552aa337
+ [ 523.462368] RDX: 0000000000000000 RSI: 00007fff7f05a8a0 RDI: 0000000000000003
+ [ 523.462369] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
+ [ 523.462370] R10: 0000000000000008 R11: 0000000000000246 R12: 0000000000000001
+ [ 523.462370] R13: 00007fff7f05ce08 R14: 0000000000000000 R15: 000055e5dfdd1040
+ [ 523.462373] </TASK>
+ [ 523.462374] ---[ end trace ba537bc16f6bf4ed ]---
+
+[2] https://github.com/FRRouting/frr/issues/6412
+
+Fixes: 4c7e8084fd46 ("ipv4: Plumb support for nexthop object in a fib_info")
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/fib_semantics.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 2dd375f7407b..0a0f49770345 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -888,8 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
+ }
+
+ if (cfg->fc_oif || cfg->fc_gw_family) {
+- struct fib_nh *nh = fib_info_nh(fi, 0);
++ struct fib_nh *nh;
++
++ /* cannot match on nexthop object attributes */
++ if (fi->nh)
++ return 1;
+
++ nh = fib_info_nh(fi, 0);
+ if (cfg->fc_encap) {
+ if (fib_encap_match(net, cfg->fc_encap_type,
+ cfg->fc_encap, nh, cfg, extack))
+--
+2.35.1
+
--- /dev/null
+From 5399b25d639ea8936c5c02de2dc9a71debea53b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 12:41:50 +0200
+Subject: net: openvswitch: don't send internal clone attribute to the
+ userspace.
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+[ Upstream commit 3f2a3050b4a3e7f32fc0ea3c9b0183090ae00522 ]
+
+'OVS_CLONE_ATTR_EXEC' is an internal attribute that is used for
+performance optimization inside the kernel. It's added by the kernel
+while parsing user-provided actions and should not be sent during the
+flow dump as it's not part of the uAPI.
+
+The issue doesn't cause any significant problems to the ovs-vswitchd
+process, because reported actions are not really used in the
+application lifecycle and only supposed to be shown to a human via
+ovs-dpctl flow dump. However, the action list is still incorrect
+and causes the following error if the user wants to look at the
+datapath flows:
+
+ # ovs-dpctl add-dp system@ovs-system
+ # ovs-dpctl add-flow "<flow match>" "clone(ct(commit),0)"
+ # ovs-dpctl dump-flows
+ <flow match>, packets:0, bytes:0, used:never,
+ actions:clone(bad length 4, expected -1 for: action0(01 00 00 00),
+ ct(commit),0)
+
+With the fix:
+
+ # ovs-dpctl dump-flows
+ <flow match>, packets:0, bytes:0, used:never,
+ actions:clone(ct(commit),0)
+
+Additionally fixed an incorrect attribute name in the comment.
+
+Fixes: b233504033db ("openvswitch: kernel datapath clone action")
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Acked-by: Aaron Conole <aconole@redhat.com>
+Link: https://lore.kernel.org/r/20220404104150.2865736-1-i.maximets@ovn.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/actions.c | 2 +-
+ net/openvswitch/flow_netlink.c | 4 +++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 780d9e2246f3..8955f31fa47e 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -1051,7 +1051,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb,
+ int rem = nla_len(attr);
+ bool dont_clone_flow_key;
+
+- /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
++ /* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
+ clone_arg = nla_data(attr);
+ dont_clone_flow_key = nla_get_u32(clone_arg);
+ actions = nla_next(clone_arg, &rem);
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 0d677c9c2c80..2679007f8aeb 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -3429,7 +3429,9 @@ static int clone_action_to_attr(const struct nlattr *attr,
+ if (!start)
+ return -EMSGSIZE;
+
+- err = ovs_nla_put_actions(nla_data(attr), rem, skb);
++ /* Skipping the OVS_CLONE_ATTR_EXEC that is always the first attribute. */
++ attr = nla_next(nla_data(attr), &rem);
++ err = ovs_nla_put_actions(attr, rem, skb);
+
+ if (err)
+ nla_nest_cancel(skb, start);
+--
+2.35.1
+
--- /dev/null
+From c2f210a8c8528efaba7ea6dfcddc8ca2416a2054 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 17:43:45 +0200
+Subject: net: openvswitch: fix leak of nested actions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+[ Upstream commit 1f30fb9166d4f15a1aa19449b9da871fe0ed4796 ]
+
+While parsing user-provided actions, openvswitch module may dynamically
+allocate memory and store pointers in the internal copy of the actions.
+So this memory has to be freed while destroying the actions.
+
+Currently there are only two such actions: ct() and set(). However,
+there are many actions that can hold nested lists of actions and
+ovs_nla_free_flow_actions() just jumps over them leaking the memory.
+
+For example, removal of the flow with the following actions will lead
+to a leak of the memory allocated by nf_ct_tmpl_alloc():
+
+ actions:clone(ct(commit),0)
+
+Non-freed set() action may also leak the 'dst' structure for the
+tunnel info including device references.
+
+Under certain conditions with a high rate of flow rotation that may
+cause significant memory leak problem (2MB per second in reporter's
+case). The problem is also hard to mitigate, because the user doesn't
+have direct control over the datapath flows generated by OVS.
+
+Fix that by iterating over all the nested actions and freeing
+everything that needs to be freed recursively.
+
+New build time assertion should protect us from this problem if new
+actions will be added in the future.
+
+Unfortunately, openvswitch module doesn't use NLA_F_NESTED, so all
+attributes have to be explicitly checked. sample() and clone() actions
+are mixing extra attributes into the user-provided action list. That
+prevents some code generalization too.
+
+Fixes: 34ae932a4036 ("openvswitch: Make tunnel set action attach a metadata dst")
+Link: https://mail.openvswitch.org/pipermail/ovs-dev/2022-March/392922.html
+Reported-by: Stéphane Graber <stgraber@ubuntu.com>
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Acked-by: Aaron Conole <aconole@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/flow_netlink.c | 95 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 90 insertions(+), 5 deletions(-)
+
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 2679007f8aeb..c591b923016a 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2288,6 +2288,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ return sfa;
+ }
+
++static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
++
++static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action)
++{
++ const struct nlattr *a;
++ int rem;
++
++ nla_for_each_nested(a, action, rem) {
++ switch (nla_type(a)) {
++ case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL:
++ case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER:
++ ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
++ break;
++ }
++ }
++}
++
++static void ovs_nla_free_clone_action(const struct nlattr *action)
++{
++ const struct nlattr *a = nla_data(action);
++ int rem = nla_len(action);
++
++ switch (nla_type(a)) {
++ case OVS_CLONE_ATTR_EXEC:
++ /* The real list of actions follows this attribute. */
++ a = nla_next(a, &rem);
++ ovs_nla_free_nested_actions(a, rem);
++ break;
++ }
++}
++
++static void ovs_nla_free_dec_ttl_action(const struct nlattr *action)
++{
++ const struct nlattr *a = nla_data(action);
++
++ switch (nla_type(a)) {
++ case OVS_DEC_TTL_ATTR_ACTION:
++ ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
++ break;
++ }
++}
++
++static void ovs_nla_free_sample_action(const struct nlattr *action)
++{
++ const struct nlattr *a = nla_data(action);
++ int rem = nla_len(action);
++
++ switch (nla_type(a)) {
++ case OVS_SAMPLE_ATTR_ARG:
++ /* The real list of actions follows this attribute. */
++ a = nla_next(a, &rem);
++ ovs_nla_free_nested_actions(a, rem);
++ break;
++ }
++}
++
+ static void ovs_nla_free_set_action(const struct nlattr *a)
+ {
+ const struct nlattr *ovs_key = nla_data(a);
+@@ -2301,25 +2357,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a)
+ }
+ }
+
+-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
++static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
+ {
+ const struct nlattr *a;
+ int rem;
+
+- if (!sf_acts)
++ /* Whenever new actions are added, the need to update this
++ * function should be considered.
++ */
++ BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23);
++
++ if (!actions)
+ return;
+
+- nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
++ nla_for_each_attr(a, actions, len, rem) {
+ switch (nla_type(a)) {
+- case OVS_ACTION_ATTR_SET:
+- ovs_nla_free_set_action(a);
++ case OVS_ACTION_ATTR_CHECK_PKT_LEN:
++ ovs_nla_free_check_pkt_len_action(a);
++ break;
++
++ case OVS_ACTION_ATTR_CLONE:
++ ovs_nla_free_clone_action(a);
+ break;
++
+ case OVS_ACTION_ATTR_CT:
+ ovs_ct_free_action(a);
+ break;
++
++ case OVS_ACTION_ATTR_DEC_TTL:
++ ovs_nla_free_dec_ttl_action(a);
++ break;
++
++ case OVS_ACTION_ATTR_SAMPLE:
++ ovs_nla_free_sample_action(a);
++ break;
++
++ case OVS_ACTION_ATTR_SET:
++ ovs_nla_free_set_action(a);
++ break;
+ }
+ }
++}
++
++void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
++{
++ if (!sf_acts)
++ return;
+
++ ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
+ kfree(sf_acts);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 990b29e01adb2954265e7d5fcd1212079679d8e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 14:02:33 +0200
+Subject: net: phy: mscc-miim: reject clause 45 register accesses
+
+From: Michael Walle <michael@walle.cc>
+
+[ Upstream commit 8d90991e5bf7fdb9f264f5f579d18969913054b7 ]
+
+The driver doesn't support clause 45 register access yet, but doesn't
+check if the access is a c45 one either. This leads to spurious register
+reads and writes. Add the check.
+
+Fixes: 542671fe4d86 ("net: phy: mscc-miim: Add MDIO driver")
+Signed-off-by: Michael Walle <michael@walle.cc>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/mdio-mscc-miim.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
+index 64fb76c1e395..08381038810d 100644
+--- a/drivers/net/mdio/mdio-mscc-miim.c
++++ b/drivers/net/mdio/mdio-mscc-miim.c
+@@ -93,6 +93,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
+ u32 val;
+ int ret;
+
++ if (regnum & MII_ADDR_C45)
++ return -EOPNOTSUPP;
++
+ ret = mscc_miim_wait_pending(bus);
+ if (ret)
+ goto out;
+@@ -136,6 +139,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
+ struct mscc_miim_dev *miim = bus->priv;
+ int ret;
+
++ if (regnum & MII_ADDR_C45)
++ return -EOPNOTSUPP;
++
+ ret = mscc_miim_wait_pending(bus);
+ if (ret < 0)
+ goto out;
+--
+2.35.1
+
--- /dev/null
+From ea9fdc935ef3459b4daa4a15504b069d8a18d017 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Mar 2022 16:37:03 +0000
+Subject: net: sfc: add missing xdp queue reinitialization
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 059a47f1da93811d37533556d67e72f2261b1127 ]
+
+After rx/tx ring buffer size is changed, kernel panic occurs when
+it acts XDP_TX or XDP_REDIRECT.
+
+When tx/rx ring buffer size is changed (ethtool -G), sfc driver
+reallocates and reinitializes rx and tx queues and their buffer
+(tx_queue->buffer).
+But it misses reinitializing xdp queues (efx->xdp_tx_queues).
+So, while it is acting XDP_TX or XDP_REDIRECT, it uses the uninitialized
+tx_queue->buffer.
+
+A new function efx_set_xdp_channels() is separated from efx_set_channels()
+to handle only xdp queues.
+
+Splat looks like:
+ BUG: kernel NULL pointer dereference, address: 000000000000002a
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0002) - not-present page
+ PGD 0 P4D 0
+ Oops: 0002 [#4] PREEMPT SMP NOPTI
+ RIP: 0010:efx_tx_map_chunk+0x54/0x90 [sfc]
+ CPU: 2 PID: 0 Comm: swapper/2 Tainted: G D 5.17.0+ #55 e8beeee8289528f11357029357cf
+ Code: 48 8b 8d a8 01 00 00 48 8d 14 52 4c 8d 2c d0 44 89 e0 48 85 c9 74 0e 44 89 e2 4c 89 f6 48 80
+ RSP: 0018:ffff92f121e45c60 EFLAGS: 00010297
+ RIP: 0010:efx_tx_map_chunk+0x54/0x90 [sfc]
+ RAX: 0000000000000040 RBX: ffff92ea506895c0 RCX: ffffffffc0330870
+ RDX: 0000000000000001 RSI: 00000001139b10ce RDI: ffff92ea506895c0
+ RBP: ffffffffc0358a80 R08: 00000001139b110d R09: 0000000000000000
+ R10: 0000000000000001 R11: ffff92ea414c0088 R12: 0000000000000040
+ R13: 0000000000000018 R14: 00000001139b10ce R15: ffff92ea506895c0
+ FS: 0000000000000000(0000) GS:ffff92f121ec0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ Code: 48 8b 8d a8 01 00 00 48 8d 14 52 4c 8d 2c d0 44 89 e0 48 85 c9 74 0e 44 89 e2 4c 89 f6 48 80
+ CR2: 000000000000002a CR3: 00000003e6810004 CR4: 00000000007706e0
+ RSP: 0018:ffff92f121e85c60 EFLAGS: 00010297
+ PKRU: 55555554
+ RAX: 0000000000000040 RBX: ffff92ea50689700 RCX: ffffffffc0330870
+ RDX: 0000000000000001 RSI: 00000001145a90ce RDI: ffff92ea50689700
+ RBP: ffffffffc0358a80 R08: 00000001145a910d R09: 0000000000000000
+ R10: 0000000000000001 R11: ffff92ea414c0088 R12: 0000000000000040
+ R13: 0000000000000018 R14: 00000001145a90ce R15: ffff92ea50689700
+ FS: 0000000000000000(0000) GS:ffff92f121e80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000000000000002a CR3: 00000003e6810005 CR4: 00000000007706e0
+ PKRU: 55555554
+ Call Trace:
+ <IRQ>
+ efx_xdp_tx_buffers+0x12b/0x3d0 [sfc 84c94b8e32d44d296c17e10a634d3ad454de4ba5]
+ __efx_rx_packet+0x5c3/0x930 [sfc 84c94b8e32d44d296c17e10a634d3ad454de4ba5]
+ efx_rx_packet+0x28c/0x2e0 [sfc 84c94b8e32d44d296c17e10a634d3ad454de4ba5]
+ efx_ef10_ev_process+0x5f8/0xf40 [sfc 84c94b8e32d44d296c17e10a634d3ad454de4ba5]
+ ? enqueue_task_fair+0x95/0x550
+ efx_poll+0xc4/0x360 [sfc 84c94b8e32d44d296c17e10a634d3ad454de4ba5]
+
+Fixes: 3990a8fffbda ("sfc: allocate channels for XDP tx queues")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/efx_channels.c | 146 +++++++++++++-----------
+ 1 file changed, 81 insertions(+), 65 deletions(-)
+
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index ead550ae2709..5e587cb853b9 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -764,6 +764,85 @@ void efx_remove_channels(struct efx_nic *efx)
+ kfree(efx->xdp_tx_queues);
+ }
+
++static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
++ struct efx_tx_queue *tx_queue)
++{
++ if (xdp_queue_number >= efx->xdp_tx_queue_count)
++ return -EINVAL;
++
++ netif_dbg(efx, drv, efx->net_dev,
++ "Channel %u TXQ %u is XDP %u, HW %u\n",
++ tx_queue->channel->channel, tx_queue->label,
++ xdp_queue_number, tx_queue->queue);
++ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
++ return 0;
++}
++
++static void efx_set_xdp_channels(struct efx_nic *efx)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_channel *channel;
++ unsigned int next_queue = 0;
++ int xdp_queue_number = 0;
++ int rc;
++
++ /* We need to mark which channels really have RX and TX
++ * queues, and adjust the TX queue numbers if we have separate
++ * RX-only and TX-only channels.
++ */
++ efx_for_each_channel(channel, efx) {
++ if (channel->channel < efx->tx_channel_offset)
++ continue;
++
++ if (efx_channel_is_xdp_tx(channel)) {
++ efx_for_each_channel_tx_queue(tx_queue, channel) {
++ tx_queue->queue = next_queue++;
++ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
++ tx_queue);
++ if (rc == 0)
++ xdp_queue_number++;
++ }
++ } else {
++ efx_for_each_channel_tx_queue(tx_queue, channel) {
++ tx_queue->queue = next_queue++;
++ netif_dbg(efx, drv, efx->net_dev,
++ "Channel %u TXQ %u is HW %u\n",
++ channel->channel, tx_queue->label,
++ tx_queue->queue);
++ }
++
++ /* If XDP is borrowing queues from net stack, it must
++ * use the queue with no csum offload, which is the
++ * first one of the channel
++ * (note: tx_queue_by_type is not initialized yet)
++ */
++ if (efx->xdp_txq_queues_mode ==
++ EFX_XDP_TX_QUEUES_BORROWED) {
++ tx_queue = &channel->tx_queue[0];
++ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
++ tx_queue);
++ if (rc == 0)
++ xdp_queue_number++;
++ }
++ }
++ }
++ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
++ xdp_queue_number != efx->xdp_tx_queue_count);
++ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
++ xdp_queue_number > efx->xdp_tx_queue_count);
++
++ /* If we have more CPUs than assigned XDP TX queues, assign the already
++ * existing queues to the exceeding CPUs
++ */
++ next_queue = 0;
++ while (xdp_queue_number < efx->xdp_tx_queue_count) {
++ tx_queue = efx->xdp_tx_queues[next_queue++];
++ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
++ if (rc == 0)
++ xdp_queue_number++;
++ }
++}
++
+ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ {
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+@@ -835,6 +914,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
++ efx_set_xdp_channels(efx);
+ out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+@@ -867,26 +947,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ goto out;
+ }
+
+-static inline int
+-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+- struct efx_tx_queue *tx_queue)
+-{
+- if (xdp_queue_number >= efx->xdp_tx_queue_count)
+- return -EINVAL;
+-
+- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+- tx_queue->channel->channel, tx_queue->label,
+- xdp_queue_number, tx_queue->queue);
+- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+- return 0;
+-}
+-
+ int efx_set_channels(struct efx_nic *efx)
+ {
+- struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+- unsigned int next_queue = 0;
+- int xdp_queue_number;
+ int rc;
+
+ efx->tx_channel_offset =
+@@ -904,61 +967,14 @@ int efx_set_channels(struct efx_nic *efx)
+ return -ENOMEM;
+ }
+
+- /* We need to mark which channels really have RX and TX
+- * queues, and adjust the TX queue numbers if we have separate
+- * RX-only and TX-only channels.
+- */
+- xdp_queue_number = 0;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+-
+- if (channel->channel >= efx->tx_channel_offset) {
+- if (efx_channel_is_xdp_tx(channel)) {
+- efx_for_each_channel_tx_queue(tx_queue, channel) {
+- tx_queue->queue = next_queue++;
+- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+- if (rc == 0)
+- xdp_queue_number++;
+- }
+- } else {
+- efx_for_each_channel_tx_queue(tx_queue, channel) {
+- tx_queue->queue = next_queue++;
+- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
+- channel->channel, tx_queue->label,
+- tx_queue->queue);
+- }
+-
+- /* If XDP is borrowing queues from net stack, it must use the queue
+- * with no csum offload, which is the first one of the channel
+- * (note: channel->tx_queue_by_type is not initialized yet)
+- */
+- if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+- tx_queue = &channel->tx_queue[0];
+- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+- if (rc == 0)
+- xdp_queue_number++;
+- }
+- }
+- }
+ }
+- WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+- xdp_queue_number != efx->xdp_tx_queue_count);
+- WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+- xdp_queue_number > efx->xdp_tx_queue_count);
+
+- /* If we have more CPUs than assigned XDP TX queues, assign the already
+- * existing queues to the exceeding CPUs
+- */
+- next_queue = 0;
+- while (xdp_queue_number < efx->xdp_tx_queue_count) {
+- tx_queue = efx->xdp_tx_queues[next_queue++];
+- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+- if (rc == 0)
+- xdp_queue_number++;
+- }
++ efx_set_xdp_channels(efx);
+
+ rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ if (rc)
+--
+2.35.1
+
--- /dev/null
+From d6f412a01b38b94cfb15e1773bd70279ddcf283d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 08:45:44 +0000
+Subject: net: sfc: fix using uninitialized xdp tx_queue
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit fb5833d81e4333294add35d3ac7f7f52a7bf107f ]
+
+In some cases, xdp tx_queue can get used before initialization.
+1. interface up/down
+2. ring buffer size change
+
+When the number of CPU cores is lower than the maximum number of channels
+of the sfc driver, it creates new channels only for XDP.
+
+When an interface is up or ring buffer size is changed, all channels
+are initialized.
+But xdp channels are always initialized later.
+So, the below scenario is possible.
+Packets are received to rx queue of normal channels and it is acted
+XDP_TX and tx_queue of xdp channels get used.
+But these tx_queues are not initialized yet.
+If so, TX DMA or queue error occurs.
+
+In order to avoid this problem:
+1. initializes xdp tx_queues earlier than other rx_queue in
+efx_start_channels().
+2. checks whether tx_queue is initialized or not in efx_xdp_tx_buffers().
+
+Splat looks like:
+ sfc 0000:08:00.1 enp8s0f1np1: TX queue 10 spurious TX completion id 250
+ sfc 0000:08:00.1 enp8s0f1np1: resetting (RECOVER_OR_ALL)
+ sfc 0000:08:00.1 enp8s0f1np1: MC command 0x80 inlen 100 failed rc=-22
+ (raw=22) arg=789
+ sfc 0000:08:00.1 enp8s0f1np1: has been disabled
+
+Fixes: f28100cb9c96 ("sfc: fix lack of XDP TX queues - error XDP TX failed (-22)")
+Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/efx_channels.c | 2 +-
+ drivers/net/ethernet/sfc/tx.c | 3 +++
+ drivers/net/ethernet/sfc/tx_common.c | 2 ++
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index 5e587cb853b9..40bfd0ad7d05 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -1118,7 +1118,7 @@ void efx_start_channels(struct efx_nic *efx)
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+- efx_for_each_channel(channel, efx) {
++ efx_for_each_channel_rev(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index d16e031e95f4..6983799e1c05 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ if (unlikely(!tx_queue))
+ return -EINVAL;
+
++ if (!tx_queue->initialised)
++ return -EINVAL;
++
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
+
+diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
+index d530cde2b864..9bc8281b7f5b 100644
+--- a/drivers/net/ethernet/sfc/tx_common.c
++++ b/drivers/net/ethernet/sfc/tx_common.c
+@@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
++ tx_queue->initialised = false;
++
+ if (!tx_queue->buffer)
+ return;
+
+--
+2.35.1
+
--- /dev/null
+From 0d65ad0ee0efbbfeafd5a091e3d64a855869e686 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 02:48:32 +0800
+Subject: net: stmmac: Fix unset max_speed difference between DT and non-DT
+ platforms
+
+From: Chen-Yu Tsai <wens@csie.org>
+
+[ Upstream commit c21cabb0fd0b54b8b54235fc1ecfe1195a23bcb2 ]
+
+In commit 9cbadf094d9d ("net: stmmac: support max-speed device tree
+property"), when DT platforms don't set "max-speed", max_speed is set to
+-1; for non-DT platforms, it stays the default 0.
+
+Prior to commit eeef2f6b9f6e ("net: stmmac: Start adding phylink support"),
+the check for a valid max_speed setting was to check if it was greater
+than zero. This commit got it right, but subsequent patches just checked
+for non-zero, which is incorrect for DT platforms.
+
+In commit 92c3807b9ac3 ("net: stmmac: convert to phylink_get_linkmodes()")
+the conversion switched completely to checking for non-zero value as a
+valid value, which caused 1000base-T to stop getting advertised by
+default.
+
+Instead of trying to fix all the checks, simply leave max_speed alone if
+DT property parsing fails.
+
+Fixes: 9cbadf094d9d ("net: stmmac: support max-speed device tree property")
+Fixes: 92c3807b9ac3 ("net: stmmac: convert to phylink_get_linkmodes()")
+Signed-off-by: Chen-Yu Tsai <wens@csie.org>
+Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20220331184832.16316-1-wens@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 5d29f336315b..11e1055e8260 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ plat->phylink_node = np;
+
+ /* Get max speed of operation from device tree */
+- if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+- plat->max_speed = -1;
++ of_property_read_u32(np, "max-speed", &plat->max_speed);
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+--
+2.35.1
+
--- /dev/null
+From 28743bb9f711f1c5541bcc6894bc3fe4176afeda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 15:04:28 +0800
+Subject: net/tls: fix slab-out-of-bounds bug in decrypt_internal
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit 9381fe8c849cfbe50245ac01fc077554f6eaa0e2 ]
+
+The memory size of tls_ctx->rx.iv for AES128-CCM is 12 setting in
+tls_set_sw_offload(). The return value of crypto_aead_ivsize()
+for "ccm(aes)" is 16. So memcpy() require 16 bytes from 12 bytes
+memory space will trigger slab-out-of-bounds bug as following:
+
+==================================================================
+BUG: KASAN: slab-out-of-bounds in decrypt_internal+0x385/0xc40 [tls]
+Read of size 16 at addr ffff888114e84e60 by task tls/10911
+
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x34/0x44
+ print_report.cold+0x5e/0x5db
+ ? decrypt_internal+0x385/0xc40 [tls]
+ kasan_report+0xab/0x120
+ ? decrypt_internal+0x385/0xc40 [tls]
+ kasan_check_range+0xf9/0x1e0
+ memcpy+0x20/0x60
+ decrypt_internal+0x385/0xc40 [tls]
+ ? tls_get_rec+0x2e0/0x2e0 [tls]
+ ? process_rx_list+0x1a5/0x420 [tls]
+ ? tls_setup_from_iter.constprop.0+0x2e0/0x2e0 [tls]
+ decrypt_skb_update+0x9d/0x400 [tls]
+ tls_sw_recvmsg+0x3c8/0xb50 [tls]
+
+Allocated by task 10911:
+ kasan_save_stack+0x1e/0x40
+ __kasan_kmalloc+0x81/0xa0
+ tls_set_sw_offload+0x2eb/0xa20 [tls]
+ tls_setsockopt+0x68c/0x700 [tls]
+ __sys_setsockopt+0xfe/0x1b0
+
+Replace the crypto_aead_ivsize() with prot->iv_size + prot->salt_size
+when memcpy() iv value in TLS_1_3_VERSION scenario.
+
+Fixes: f295b3ae9f59 ("net/tls: Add support of AES128-CCM based ciphers")
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index efc84845bb6b..75a699591383 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1495,7 +1495,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
+ if (prot->version == TLS_1_3_VERSION ||
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
+ memcpy(iv + iv_offset, tls_ctx->rx.iv,
+- crypto_aead_ivsize(ctx->aead_recv));
++ prot->iv_size + prot->salt_size);
+ else
+ memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
+
+--
+2.35.1
+
--- /dev/null
+From 6c6be0a57bdb507d023f399682c3f9c363f195cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Mar 2022 23:36:25 +0100
+Subject: netfilter: bitwise: fix reduce comparisons
+
+From: Jeremy Sowden <jeremy@azazel.net>
+
+[ Upstream commit 31818213170caa51d116eb5dc1167b88523b4fe1 ]
+
+The `nft_bitwise_reduce` and `nft_bitwise_fast_reduce` functions should
+compare the bitwise operation in `expr` with the tracked operation
+associated with the destination register of `expr`. However, instead of
+being called on `expr` and `track->regs[priv->dreg].selector`,
+`nft_expr_priv` is called on `expr` twice, so both reduce functions
+return true even when the operations differ.
+
+Fixes: be5650f8f47e ("netfilter: nft_bitwise: track register operations")
+Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_bitwise.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index 7b727d3ebf9d..04bd2f89afe8 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -287,7 +287,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
+ if (!track->regs[priv->sreg].selector)
+ return false;
+
+- bitwise = nft_expr_priv(expr);
++ bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
+ if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ track->regs[priv->dreg].bitwise &&
+ track->regs[priv->dreg].bitwise->ops == expr->ops &&
+@@ -434,7 +434,7 @@ static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
+ if (!track->regs[priv->sreg].selector)
+ return false;
+
+- bitwise = nft_expr_priv(expr);
++ bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
+ if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ track->regs[priv->dreg].bitwise &&
+ track->regs[priv->dreg].bitwise->ops == expr->ops &&
+--
+2.35.1
+
--- /dev/null
+From 4516b12a0256208515ec1b28a47365d5b6d904eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 19:32:08 +0800
+Subject: NFSv4: fix open failure with O_ACCMODE flag
+
+From: ChenXiaoSong <chenxiaosong2@huawei.com>
+
+[ Upstream commit b243874f6f9568b2daf1a00e9222cacdc15e159c ]
+
+open() with O_ACCMODE|O_DIRECT flags will fail the second time.
+
+Reproducer:
+ 1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
+ 2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
+ 3. close(fd)
+ 4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
+
+Server nfsd4_decode_share_access() will fail with error nfserr_bad_xdr when
+client use incorrect share access mode of 0.
+
+Fix this by using NFS4_SHARE_ACCESS_BOTH share access mode in client,
+just like firstly opening.
+
+Fixes: ce4ef7c0a8a05 ("NFS: Split out NFS v4 file operations")
+Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 10 ----------
+ fs/nfs/internal.h | 10 ++++++++++
+ fs/nfs/nfs4file.c | 6 ++++--
+ 3 files changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 75cb1cbe4cde..911bdb35eb08 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1853,16 +1853,6 @@ const struct dentry_operations nfs4_dentry_operations = {
+ };
+ EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
+
+-static fmode_t flags_to_mode(int flags)
+-{
+- fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+- if ((flags & O_ACCMODE) != O_WRONLY)
+- res |= FMODE_READ;
+- if ((flags & O_ACCMODE) != O_RDONLY)
+- res |= FMODE_WRITE;
+- return res;
+-}
+-
+ static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
+ {
+ return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index db9f611e8efd..465e39ff018d 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
+ return true;
+ }
+
++static inline fmode_t flags_to_mode(int flags)
++{
++ fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
++ if ((flags & O_ACCMODE) != O_WRONLY)
++ res |= FMODE_READ;
++ if ((flags & O_ACCMODE) != O_RDONLY)
++ res |= FMODE_WRITE;
++ return res;
++}
++
+ /*
+ * Note: RFC 1813 doesn't limit the number of auth flavors that
+ * a server can return, so make something up.
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index c178db86a6e8..e34af48fb4f4 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ struct dentry *parent = NULL;
+ struct inode *dir;
+ unsigned openflags = filp->f_flags;
++ fmode_t f_mode;
+ struct iattr attr;
+ int err;
+
+@@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ if (err)
+ return err;
+
++ f_mode = filp->f_mode;
+ if ((openflags & O_ACCMODE) == 3)
+- openflags--;
++ f_mode |= flags_to_mode(openflags);
+
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
+@@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+
+- ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
++ ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp);
+ err = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+--
+2.35.1
+
--- /dev/null
+From f613ad70ec28fc9b5506e2f75b4dcf8acd694d9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Apr 2022 15:40:56 +0100
+Subject: perf: arm-spe: Fix perf report --mem-mode
+
+From: James Clark <james.clark@arm.com>
+
+[ Upstream commit ffab487052054162b3b6c9c6005777ec6cfcea05 ]
+
+Since commit bb30acae4c4dacfa ("perf report: Bail out --mem-mode if mem
+info is not available") "perf mem report" and "perf report --mem-mode"
+don't allow opening the file unless one of the events has
+PERF_SAMPLE_DATA_SRC set.
+
+SPE doesn't have this set even though synthetic memory data is generated
+after it is decoded. Fix this issue by setting DATA_SRC on SPE events.
+This has no effect on the data collected because the SPE driver doesn't
+do anything with that flag and doesn't generate samples.
+
+Fixes: bb30acae4c4dacfa ("perf report: Bail out --mem-mode if mem info is not available")
+Signed-off-by: James Clark <james.clark@arm.com>
+Tested-by: Leo Yan <leo.yan@linaro.org>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: German Gomez <german.gomez@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220408144056.1955535-1-james.clark@arm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/arch/arm64/util/arm-spe.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
+index 2100d46ccf5e..bb4ab99afa7f 100644
+--- a/tools/perf/arch/arm64/util/arm-spe.c
++++ b/tools/perf/arch/arm64/util/arm-spe.c
+@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
+ arm_spe_set_timestamp(itr, arm_spe_evsel);
+ }
+
++ /*
++ * Set this only so that perf report knows that SPE generates memory info. It has no effect
++ * on the opening of the event or the SPE data produced.
++ */
++ evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
++
+ /* Add dummy event to keep tracking */
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+--
+2.35.1
+
--- /dev/null
+From 20f424735ae40b204016999a7e2b2bb52e26c217 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 20:11:30 -0700
+Subject: perf session: Remap buf if there is no space for event
+
+From: Denis Nikitin <denik@chromium.org>
+
+[ Upstream commit bc21e74d4775f883ae1f542c1f1dc7205b15d925 ]
+
+If a perf event doesn't fit into remaining buffer space return NULL to
+remap buf and fetch the event again.
+
+Keep the logic to error out on inadequate input from fuzzing.
+
+This fixes perf failing on ChromeOS (with 32b userspace):
+
+ $ perf report -v -i perf.data
+ ...
+ prefetch_event: head=0x1fffff8 event->header_size=0x30, mmap_size=0x2000000: fuzzed or compressed perf.data?
+ Error:
+ failed to process sample
+
+Fixes: 57fc032ad643ffd0 ("perf session: Avoid infinite loop when seeing invalid header.size")
+Reviewed-by: James Clark <james.clark@arm.com>
+Signed-off-by: Denis Nikitin <denik@chromium.org>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: https://lore.kernel.org/r/20220330031130.2152327-1-denik@chromium.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/session.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 498b05708db5..245dc70d1882 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -2084,6 +2084,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
+ bool needs_swap, union perf_event *error)
+ {
+ union perf_event *event;
++ u16 event_size;
+
+ /*
+ * Ensure we have enough space remaining to read
+@@ -2096,15 +2097,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
+ if (needs_swap)
+ perf_event_header__bswap(&event->header);
+
+- if (head + event->header.size <= mmap_size)
++ event_size = event->header.size;
++ if (head + event_size <= mmap_size)
+ return event;
+
+ /* We're not fetching the event so swap back again */
+ if (needs_swap)
+ perf_event_header__bswap(&event->header);
+
+- pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
+- " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
++ /* Check if the event fits into the next mmapped buf. */
++ if (event_size <= mmap_size - head % page_size) {
++ /* Remap buf and fetch again. */
++ return NULL;
++ }
++
++ /* Invalid input. Event size should never exceed mmap_size. */
++ pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
++ " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
+
+ return error;
+ }
+--
+2.35.1
+
--- /dev/null
+From d2e78ba9e678c05d6aec5cc33286611f0556f87f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Apr 2022 16:26:25 +0300
+Subject: perf tools: Fix perf's libperf_print callback
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+[ Upstream commit aeee9dc53ce405d2161f9915f553114e94e5b677 ]
+
+eprintf() does not expect va_list as the type of the 4th parameter.
+
+Use veprintf() because it does.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Fixes: 428dab813a56ce94 ("libperf: Merge libperf_set_print() into libperf_init()")
+Cc: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20220408132625.2451452-1-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/perf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/perf.c b/tools/perf/perf.c
+index 2f6b67189b42..6aae7b6c376b 100644
+--- a/tools/perf/perf.c
++++ b/tools/perf/perf.c
+@@ -434,7 +434,7 @@ void pthread__unblock_sigwinch(void)
+ static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+ {
+- return eprintf(level, verbose, fmt, ap);
++ return veprintf(level, verbose, fmt, ap);
+ }
+
+ int main(int argc, const char **argv)
+--
+2.35.1
+
--- /dev/null
+From 205d210df3a884c5f986a4aa77742b65bffd6ddf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 15:56:51 +0100
+Subject: perf unwind: Don't show unwind error messages when augmenting frame
+ pointer stack
+
+From: James Clark <james.clark@arm.com>
+
+[ Upstream commit fa7095c5c3240bb2ecbc77f8b69be9b1d9e2cf60 ]
+
+Commit Fixes: b9f6fbb3b2c29736 ("perf arm64: Inject missing frames when
+using 'perf record --call-graph=fp'") intended to add a 'best effort'
+DWARF unwind that improved the frame pointer stack in most scenarios.
+
+It's expected that the unwind will fail sometimes, but this shouldn't be
+reported as an error. It only works when the return address can be
+determined from the contents of the link register alone.
+
+Fix the error shown when the unwinder requires extra registers by adding
+a new flag that suppresses error messages. This flag is not set in the
+normal --call-graph=dwarf unwind mode so that behavior is not changed.
+
+Fixes: b9f6fbb3b2c29736 ("perf arm64: Inject missing frames when using 'perf record --call-graph=fp'")
+Reported-by: John Garry <john.garry@huawei.com>
+Signed-off-by: James Clark <james.clark@arm.com>
+Tested-by: John Garry <john.garry@huawei.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexandre Truong <alexandre.truong@arm.com>
+Cc: German Gomez <german.gomez@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: https://lore.kernel.org/r/20220406145651.1392529-1-james.clark@arm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/dwarf-unwind.c | 2 +-
+ .../perf/util/arm64-frame-pointer-unwind-support.c | 2 +-
+ tools/perf/util/machine.c | 2 +-
+ tools/perf/util/unwind-libdw.c | 10 +++++++---
+ tools/perf/util/unwind-libdw.h | 1 +
+ tools/perf/util/unwind-libunwind-local.c | 10 +++++++---
+ tools/perf/util/unwind-libunwind.c | 6 ++++--
+ tools/perf/util/unwind.h | 13 ++++++++++---
+ 8 files changed, 32 insertions(+), 14 deletions(-)
+
+diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
+index 2dab2d262060..afdca7f2959f 100644
+--- a/tools/perf/tests/dwarf-unwind.c
++++ b/tools/perf/tests/dwarf-unwind.c
+@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
+ }
+
+ err = unwind__get_entries(unwind_entry, &cnt, thread,
+- &sample, MAX_STACK);
++ &sample, MAX_STACK, false);
+ if (err)
+ pr_debug("unwind failed\n");
+ else if (cnt != MAX_STACK) {
+diff --git a/tools/perf/util/arm64-frame-pointer-unwind-support.c b/tools/perf/util/arm64-frame-pointer-unwind-support.c
+index 2242a885fbd7..4940be4a0569 100644
+--- a/tools/perf/util/arm64-frame-pointer-unwind-support.c
++++ b/tools/perf/util/arm64-frame-pointer-unwind-support.c
+@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
+ sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
+ }
+
+- ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
++ ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
+ sample->user_regs = old_regs;
+
+ if (ret || entries.length != 2)
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 394550003693..564abe17a0bd 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2983,7 +2983,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
+ return 0;
+
+ return unwind__get_entries(unwind_entry, cursor,
+- thread, sample, max_stack);
++ thread, sample, max_stack, false);
+ }
+
+ int thread__resolve_callchain(struct thread *thread,
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index a74b517f7497..94aa40f6e348 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -200,7 +200,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ bool isactivation;
+
+ if (!dwfl_frame_pc(state, &pc, NULL)) {
+- pr_err("%s", dwfl_errmsg(-1));
++ if (!ui->best_effort)
++ pr_err("%s", dwfl_errmsg(-1));
+ return DWARF_CB_ABORT;
+ }
+
+@@ -208,7 +209,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ report_module(pc, ui);
+
+ if (!dwfl_frame_pc(state, &pc, &isactivation)) {
+- pr_err("%s", dwfl_errmsg(-1));
++ if (!ui->best_effort)
++ pr_err("%s", dwfl_errmsg(-1));
+ return DWARF_CB_ABORT;
+ }
+
+@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+ struct perf_sample *data,
+- int max_stack)
++ int max_stack,
++ bool best_effort)
+ {
+ struct unwind_info *ui, ui_buf = {
+ .sample = data,
+@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ .cb = cb,
+ .arg = arg,
+ .max_stack = max_stack,
++ .best_effort = best_effort
+ };
+ Dwarf_Word ip;
+ int err = -EINVAL, i;
+diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
+index 0cbd2650e280..8c88bc4f2304 100644
+--- a/tools/perf/util/unwind-libdw.h
++++ b/tools/perf/util/unwind-libdw.h
+@@ -20,6 +20,7 @@ struct unwind_info {
+ void *arg;
+ int max_stack;
+ int idx;
++ bool best_effort;
+ struct unwind_entry entries[];
+ };
+
+diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
+index 71a353349181..41e29fc7648a 100644
+--- a/tools/perf/util/unwind-libunwind-local.c
++++ b/tools/perf/util/unwind-libunwind-local.c
+@@ -96,6 +96,7 @@ struct unwind_info {
+ struct perf_sample *sample;
+ struct machine *machine;
+ struct thread *thread;
++ bool best_effort;
+ };
+
+ #define dw_read(ptr, type, end) ({ \
+@@ -553,7 +554,8 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
+
+ ret = perf_reg_value(&val, &ui->sample->user_regs, id);
+ if (ret) {
+- pr_err("unwind: can't read reg %d\n", regnum);
++ if (!ui->best_effort)
++ pr_err("unwind: can't read reg %d\n", regnum);
+ return ret;
+ }
+
+@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ return -1;
+
+ ret = unw_init_remote(&c, addr_space, ui);
+- if (ret)
++ if (ret && !ui->best_effort)
+ display_error(ret);
+
+ while (!ret && (unw_step(&c) > 0) && i < max_stack) {
+@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+
+ static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+- struct perf_sample *data, int max_stack)
++ struct perf_sample *data, int max_stack,
++ bool best_effort)
+ {
+ struct unwind_info ui = {
+ .sample = data,
+ .thread = thread,
+ .machine = thread->maps->machine,
++ .best_effort = best_effort
+ };
+
+ if (!data->user_regs.regs)
+diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
+index e89a5479b361..509c287ee762 100644
+--- a/tools/perf/util/unwind-libunwind.c
++++ b/tools/perf/util/unwind-libunwind.c
+@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
+
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+- struct perf_sample *data, int max_stack)
++ struct perf_sample *data, int max_stack,
++ bool best_effort)
+ {
+ if (thread->maps->unwind_libunwind_ops)
+- return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
++ return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
++ max_stack, best_effort);
+ return 0;
+ }
+diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
+index ab8ad469c8de..b2a03fa5289b 100644
+--- a/tools/perf/util/unwind.h
++++ b/tools/perf/util/unwind.h
+@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
+ void (*finish_access)(struct maps *maps);
+ int (*get_entries)(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+- struct perf_sample *data, int max_stack);
++ struct perf_sample *data, int max_stack, bool best_effort);
+ };
+
+ #ifdef HAVE_DWARF_UNWIND_SUPPORT
++/*
++ * When best_effort is set, don't report errors and fail silently. This could
++ * be expanded in the future to be more permissive about things other than
++ * error messages.
++ */
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+- struct perf_sample *data, int max_stack);
++ struct perf_sample *data, int max_stack,
++ bool best_effort);
+ /* libunwind specific */
+ #ifdef HAVE_LIBUNWIND_SUPPORT
+ #ifndef LIBUNWIND__ARCH_REG_ID
+@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ void *arg __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct perf_sample *data __maybe_unused,
+- int max_stack __maybe_unused)
++ int max_stack __maybe_unused,
++ bool best_effort __maybe_unused)
+ {
+ return 0;
+ }
+--
+2.35.1
+
--- /dev/null
+From c33fb9b5a9e8534eb2c6edccfbf364516dd5b2d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 21:19:19 +1000
+Subject: qede: confirm skb is allocated before using
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 4e910dbe36508654a896d5735b318c0b88172570 ]
+
+qede_build_skb() assumes build_skb() always works and goes straight
+to skb_reserve(). However, build_skb() can fail under memory pressure.
+This results in a kernel panic because the skb to reserve is NULL.
+
+Add a check in case build_skb() failed to allocate and return NULL.
+
+The NULL return is handled correctly in callers to qede_build_skb().
+
+Fixes: 8a8633978b842 ("qede: Add build_skb() support.")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_fp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index b242000a77fd..b7cc36589f59 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
+ buf = page_address(bd->data) + bd->page_offset;
+ skb = build_skb(buf, rxq->rx_buf_seg_size);
+
++ if (unlikely(!skb))
++ return NULL;
++
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+--
+2.35.1
+
--- /dev/null
+From 0937caed555dc80803bc621c308d70e63538ceb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:58:04 +0300
+Subject: RDMA/mlx5: Add a missing update of cache->last_add
+
+From: Aharon Landau <aharonl@nvidia.com>
+
+[ Upstream commit 1d735eeee63a0beb65180ca0224f239cc0c9f804 ]
+
+Update cache->last_add when returning an MR to the cache so that the cache
+work won't remove it.
+
+Fixes: b9358bdbc713 ("RDMA/mlx5: Fix locking in MR cache work queue")
+Link: https://lore.kernel.org/r/c99f076fce4b44829d434936bbcd3b5fc4c95020.1649062436.git.leonro@nvidia.com
+Signed-off-by: Aharon Landau <aharonl@nvidia.com>
+Reviewed-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/mr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index d3b2d02a4872..d40a1460ef97 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -632,6 +632,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ {
+ struct mlx5_cache_ent *ent = mr->cache_ent;
+
++ WRITE_ONCE(dev->cache.last_add, jiffies);
+ spin_lock_irq(&ent->lock);
+ list_add_tail(&mr->list, &ent->head);
+ ent->available_mrs++;
+--
+2.35.1
+
--- /dev/null
+From f68d4299578a1b2efa62c9f686eb901190ac1e3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:58:03 +0300
+Subject: RDMA/mlx5: Don't remove cache MRs when a delay is needed
+
+From: Aharon Landau <aharonl@nvidia.com>
+
+[ Upstream commit 84c2362fb65d69c721fec0974556378cbb36a62b ]
+
+Don't remove MRs from the cache if need to delay the removal.
+
+Fixes: b9358bdbc713 ("RDMA/mlx5: Fix locking in MR cache work queue")
+Link: https://lore.kernel.org/r/c3087a90ff362c8796c7eaa2715128743ce36722.1649062436.git.leonro@nvidia.com
+Signed-off-by: Aharon Landau <aharonl@nvidia.com>
+Reviewed-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/mr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 2910d7833313..d3b2d02a4872 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -541,8 +541,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
+- if (need_delay)
++ if (need_delay) {
+ queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
++ goto out;
++ }
+ remove_cache_mr_locked(ent);
+ queue_adjust_cache_locked(ent);
+ }
+--
+2.35.1
+
--- /dev/null
+From 39eea35439a17a6bf3c6a7b22521f7042683a42f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 Apr 2022 21:22:35 +0800
+Subject: regulator: atc260x: Fix missing active_discharge_on setting
+
+From: Axel Lin <axel.lin@ingics.com>
+
+[ Upstream commit 2316f0fc0ad2aa87a568ceaf3d76be983ee555c3 ]
+
+Without active_discharge_on setting, the SWITCH1 discharge enable control
+is always disabled. Fix it.
+
+Fixes: 3b15ccac161a ("regulator: Add regulator driver for ATC260x PMICs")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Link: https://lore.kernel.org/r/20220403132235.123727-1-axel.lin@ingics.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/atc260x-regulator.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
+index 05147d2c3842..485e58b264c0 100644
+--- a/drivers/regulator/atc260x-regulator.c
++++ b/drivers/regulator/atc260x-regulator.c
+@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
+ .bypass_mask = BIT(5), \
+ .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
+ .active_discharge_mask = BIT(1), \
++ .active_discharge_on = BIT(1), \
+ .owner = THIS_MODULE, \
+ }
+
+--
+2.35.1
+
--- /dev/null
+From b00bf1d5671a7cce8a13a1bd689c29e8249a7aae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 10:25:14 +0800
+Subject: regulator: rtq2134: Fix missing active_discharge_on setting
+
+From: Axel Lin <axel.lin@ingics.com>
+
+[ Upstream commit 17049bf9de55a42ee96fd34520aff8a484677675 ]
+
+The active_discharge_on setting was missed, so output discharge resistor
+is always disabled. Fix it.
+
+Fixes: 0555d41497de ("regulator: rtq2134: Add support for Richtek RTQ2134 SubPMIC")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Link: https://lore.kernel.org/r/20220404022514.449231-1-axel.lin@ingics.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/rtq2134-regulator.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
+index f21e3f8b21f2..8e13dea354a2 100644
+--- a/drivers/regulator/rtq2134-regulator.c
++++ b/drivers/regulator/rtq2134-regulator.c
+@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
+ .enable_mask = RTQ2134_VOUTEN_MASK, \
+ .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
+ .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
++ .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
+ .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
+ .ramp_mask = RTQ2134_RSPUP_MASK, \
+ .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
+--
+2.35.1
+
--- /dev/null
+From d22fe8286de3e07294a38f64364ec9f087f2dd00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 16:28:54 +0300
+Subject: Revert "net: dsa: stop updating master MTU from master.c"
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 066dfc4290406b1b0b014ae3267d4266a344efd1 ]
+
+This reverts commit a1ff94c2973c43bc1e2677ac63ebb15b1d1ff846.
+
+Switch drivers that don't implement ->port_change_mtu() will cause the
+DSA master to remain with an MTU of 1500, since we've deleted the other
+code path. In turn, this causes a regression for those systems, where
+MTU-sized traffic can no longer be terminated.
+
+Revert the change taking into account the fact that rtnl_lock() is now
+taken top-level from the callers of dsa_master_setup() and
+dsa_master_teardown(). Also add a comment in order for it to be
+absolutely clear why it is still needed.
+
+Fixes: a1ff94c2973c ("net: dsa: stop updating master MTU from master.c")
+Reported-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dsa/master.c | 25 ++++++++++++++++++++++++-
+ 1 file changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/net/dsa/master.c b/net/dsa/master.c
+index 880f910b23a9..10b51ffbb6f4 100644
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -337,11 +337,24 @@ static const struct attribute_group dsa_group = {
+
+ static struct lock_class_key dsa_master_addr_list_lock_key;
+
++static void dsa_master_reset_mtu(struct net_device *dev)
++{
++ int err;
++
++ err = dev_set_mtu(dev, ETH_DATA_LEN);
++ if (err)
++ netdev_dbg(dev,
++ "Unable to reset MTU to exclude DSA overheads\n");
++}
++
+ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ {
++ const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct device_link *consumer_link;
+- int ret;
++ int mtu, ret;
++
++ mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
+
+ /* The DSA master must use SET_NETDEV_DEV for this to work. */
+ consumer_link = device_link_add(ds->dev, dev->dev.parent,
+@@ -351,6 +364,15 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ "Failed to create a device link to DSA switch %s\n",
+ dev_name(ds->dev));
+
++ /* The switch driver may not implement ->port_change_mtu(), case in
++ * which dsa_slave_change_mtu() will not update the master MTU either,
++ * so we need to do that here.
++ */
++ ret = dev_set_mtu(dev, mtu);
++ if (ret)
++ netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
++ ret, mtu);
++
+ /* If we use a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point on get
+ * sent to the tag format's receive function.
+@@ -388,6 +410,7 @@ void dsa_master_teardown(struct net_device *dev)
+ sysfs_remove_group(&dev->dev.kobj, &dsa_group);
+ dsa_netdev_ops_set(dev, NULL);
+ dsa_master_ethtool_teardown(dev);
++ dsa_master_reset_mtu(dev);
+ dsa_master_set_promiscuity(dev, -1);
+
+ dev->dsa_ptr = NULL;
+--
+2.35.1
+
--- /dev/null
+From 99172cd7ce935037e1c7be0e9072d8f1fd56a4dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 19:32:07 +0800
+Subject: Revert "NFSv4: Handle the special Linux file open access mode"
+
+From: ChenXiaoSong <chenxiaosong2@huawei.com>
+
+[ Upstream commit ab0fc21bc7105b54bafd85bd8b82742f9e68898a ]
+
+This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
+
+After secondly opening a file with O_ACCMODE|O_DIRECT flags,
+nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
+
+Reproducer:
+ 1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
+ 2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
+ 3. close(fd)
+ 4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
+ 5. lseek(fd)
+
+Reported-by: Lyu Tao <tao.lyu@epfl.ch>
+Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/inode.c | 1 -
+ fs/nfs/nfs4file.c | 2 +-
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index d96baa4450e3..e4fb939a2904 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1180,7 +1180,6 @@ int nfs_open(struct inode *inode, struct file *filp)
+ nfs_fscache_open_file(inode, filp);
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(nfs_open);
+
+ /*
+ * This function is called whenever some part of NFS notices that
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index e79ae4cbc395..c178db86a6e8 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -51,7 +51,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ return err;
+
+ if ((openflags & O_ACCMODE) == 3)
+- return nfs_open(inode, filp);
++ openflags--;
+
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
+--
+2.35.1
+
--- /dev/null
+From 6cbf896f75e2cb043629fc1ad97c27bd2606617b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:34:39 -0700
+Subject: rxrpc: fix a race in rxrpc_exit_net()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1946014ca3b19be9e485e780e862c375c6f98bad ]
+
+Current code can lead to the following race:
+
+CPU0 CPU1
+
+rxrpc_exit_net()
+ rxrpc_peer_keepalive_worker()
+ if (rxnet->live)
+
+ rxnet->live = false;
+ del_timer_sync(&rxnet->peer_keepalive_timer);
+
+ timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
+ cancel_work_sync(&rxnet->peer_keepalive_work);
+
+rxrpc_exit_net() exits while peer_keepalive_timer is still armed,
+leading to use-after-free.
+
+syzbot report was:
+
+ODEBUG: free active (active state 0) object type: timer_list hint: rxrpc_peer_keepalive_timeout+0x0/0xb0
+WARNING: CPU: 0 PID: 3660 at lib/debugobjects.c:505 debug_print_object+0x16e/0x250 lib/debugobjects.c:505
+Modules linked in:
+CPU: 0 PID: 3660 Comm: kworker/u4:6 Not tainted 5.17.0-syzkaller-13993-g88e6c0207623 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: netns cleanup_net
+RIP: 0010:debug_print_object+0x16e/0x250 lib/debugobjects.c:505
+Code: ff df 48 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 af 00 00 00 48 8b 14 dd 00 1c 26 8a 4c 89 ee 48 c7 c7 00 10 26 8a e8 b1 e7 28 05 <0f> 0b 83 05 15 eb c5 09 01 48 83 c4 18 5b 5d 41 5c 41 5d 41 5e c3
+RSP: 0018:ffffc9000353fb00 EFLAGS: 00010082
+RAX: 0000000000000000 RBX: 0000000000000003 RCX: 0000000000000000
+RDX: ffff888029196140 RSI: ffffffff815efad8 RDI: fffff520006a7f52
+RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000
+R10: ffffffff815ea4ae R11: 0000000000000000 R12: ffffffff89ce23e0
+R13: ffffffff8a2614e0 R14: ffffffff816628c0 R15: dffffc0000000000
+FS: 0000000000000000(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fe1f2908924 CR3: 0000000043720000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ __debug_check_no_obj_freed lib/debugobjects.c:992 [inline]
+ debug_check_no_obj_freed+0x301/0x420 lib/debugobjects.c:1023
+ kfree+0xd6/0x310 mm/slab.c:3809
+ ops_free_list.part.0+0x119/0x370 net/core/net_namespace.c:176
+ ops_free_list net/core/net_namespace.c:174 [inline]
+ cleanup_net+0x591/0xb00 net/core/net_namespace.c:598
+ process_one_work+0x996/0x1610 kernel/workqueue.c:2289
+ worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+ kthread+0x2e9/0x3a0 kernel/kthread.c:376
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
+ </TASK>
+
+Fixes: ace45bec6d77 ("rxrpc: Fix firewall route keepalive")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Marc Dionne <marc.dionne@auristor.com>
+Cc: linux-afs@lists.infradead.org
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/net_ns.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
+index 25bbc4cc8b13..f15d6942da45 100644
+--- a/net/rxrpc/net_ns.c
++++ b/net/rxrpc/net_ns.c
+@@ -113,8 +113,8 @@ static __net_exit void rxrpc_exit_net(struct net *net)
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+
+ rxnet->live = false;
+- del_timer_sync(&rxnet->peer_keepalive_timer);
+ cancel_work_sync(&rxnet->peer_keepalive_work);
++ del_timer_sync(&rxnet->peer_keepalive_timer);
+ rxrpc_destroy_all_calls(rxnet);
+ rxrpc_destroy_all_connections(rxnet);
+ rxrpc_destroy_all_peers(rxnet);
+--
+2.35.1
+
--- /dev/null
+From 37131d93d950cc772a70d340c6248ab2a1e51809 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Mar 2022 17:44:30 +0800
+Subject: scsi: core: Fix sbitmap depth in scsi_realloc_sdev_budget_map()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit eaba83b5b8506bbc9ee7ca2f10aeab3fff3719e7 ]
+
+In commit edb854a3680b ("scsi: core: Reallocate device's budget map on
+queue depth change"), the sbitmap for the device budget map may be
+reallocated after the slave device depth is configured.
+
+When the sbitmap is reallocated we use the result from
+scsi_device_max_queue_depth() for the sbitmap size, but don't resize to
+match the actual device queue depth.
+
+Fix by resizing the sbitmap after reallocating the budget sbitmap. We do
+this instead of init'ing the sbitmap to the device queue depth as the user
+may want to change the queue depth later via sysfs or other.
+
+Link: https://lore.kernel.org/r/1647423870-143867-1-git-send-email-john.garry@huawei.com
+Fixes: edb854a3680b ("scsi: core: Reallocate device's budget map on queue depth change")
+Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_scan.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index f4e6c68ac99e..2ef78083f1ef 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ int ret;
+ struct sbitmap sb_backup;
+
++ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
++
+ /*
+ * realloc if new shift is calculated, which is caused by setting
+ * up one new default queue depth after calling ->slave_configure
+@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
++ if (!ret)
++ sbitmap_resize(&sdev->budget_map, depth);
++
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+--
+2.35.1
+
--- /dev/null
+From c75e85373a4702f7e404f6f86f46c12bb0f8f796 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Mar 2022 14:46:03 +0100
+Subject: scsi: core: scsi_logging: Fix a BUG
+
+From: Tomas Henzl <thenzl@redhat.com>
+
+[ Upstream commit f06aa52cb2723ec67e92df463827b800d6c477d1 ]
+
+The request_queue may be NULL in a request, for example when it comes from
+scsi_ioctl_reset(). Check it before use.
+
+Fixes: f3fa33acca9f ("block: remove the ->rq_disk field in struct request")
+Link: https://lore.kernel.org/r/20220324134603.28463-1-thenzl@redhat.com
+Reported-by: Changhui Zhong <czhong@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_logging.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
+index 1f8f80b2dbfc..a9f8de5e9639 100644
+--- a/drivers/scsi/scsi_logging.c
++++ b/drivers/scsi/scsi_logging.c
+@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+ {
+ struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+
+- if (!rq->q->disk)
++ if (!rq->q || !rq->q->disk)
+ return NULL;
+ return rq->q->disk->disk_name;
+ }
+--
+2.35.1
+
--- /dev/null
+From 5d53d632be8ffa9917f3e597164e851ac0a02138 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Mar 2022 00:35:57 -0500
+Subject: scsi: sd: sd_read_cpr() requires VPD pages
+
+From: Martin K. Petersen <martin.petersen@oracle.com>
+
+[ Upstream commit 1700714b1ff252b634db21186db4d91e7e006043 ]
+
+As such it should be called inside the scsi_device_supports_vpd()
+conditional.
+
+Link: https://lore.kernel.org/r/20220302053559.32147-13-martin.petersen@oracle.com
+Fixes: e815d36548f0 ("scsi: sd: add concurrent positioning ranges support")
+Cc: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/sd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 66056806159a..8b5d2a4076c2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3320,6 +3320,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ sd_read_block_limits(sdkp);
+ sd_read_block_characteristics(sdkp);
+ sd_zbc_read_zones(sdkp, buffer);
++ sd_read_cpr(sdkp);
+ }
+
+ sd_print_capacity(sdkp, old_capacity);
+@@ -3329,7 +3330,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ sd_read_app_tag_own(sdkp, buffer);
+ sd_read_write_same(sdkp, buffer);
+ sd_read_security(sdkp, buffer);
+- sd_read_cpr(sdkp);
+ }
+
+ /*
+--
+2.35.1
+
--- /dev/null
+From 745715ede089a4bbd3bf73c70e23aa511c211cc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Mar 2022 20:22:42 -0400
+Subject: scsi: sr: Fix typo in CDROM(CLOSETRAY|EJECT) handling
+
+From: Kevin Groeneveld <kgroeneveld@lenbrook.com>
+
+[ Upstream commit bc5519c18a32ce855bb51b9f5eceb77a9489d080 ]
+
+Commit 2e27f576abc6 ("scsi: scsi_ioctl: Call scsi_cmd_ioctl() from
+scsi_ioctl()") seems to have a typo as it is checking ret instead of cmd in
+the if statement checking for CDROMCLOSETRAY and CDROMEJECT. This changes
+the behaviour of these ioctls as the cdrom_ioctl handling of these is more
+restrictive than the scsi_ioctl version.
+
+Link: https://lore.kernel.org/r/20220323002242.21157-1-kgroeneveld@lenbrook.com
+Fixes: 2e27f576abc6 ("scsi: scsi_ioctl: Call scsi_cmd_ioctl() from scsi_ioctl()")
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Kevin Groeneveld <kgroeneveld@lenbrook.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/sr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index f925b1f1f9ad..a0beb11abdc9 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -578,7 +578,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+
+ scsi_autopm_get_device(sdev);
+
+- if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
++ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+ goto put;
+--
+2.35.1
+
--- /dev/null
+From ddb00a952b2fe3afeab5db26267975e35dbae296 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 20 Mar 2022 23:07:33 +0800
+Subject: scsi: ufs: ufshpb: Fix a NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+[ Upstream commit bfb7789bcbd901caead43861461bc8f334c90d3b ]
+
+The list iterator is always non-NULL so the check 'if (!rgn)' is always
+false and the dev_err() is never called. Move the check outside the loop
+and determine if 'victim_rgn' is NULL, to fix this bug.
+
+Link: https://lore.kernel.org/r/20220320150733.21824-1-xiam0nd.tong@gmail.com
+Fixes: 4b5f49079c52 ("scsi: ufs: ufshpb: L2P map management for HPB read")
+Reviewed-by: Daejun Park <daejun7.park@samsung.com>
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshpb.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
+index 2d36a0715fca..b34feba1f53d 100644
+--- a/drivers/scsi/ufs/ufshpb.c
++++ b/drivers/scsi/ufs/ufshpb.c
+@@ -869,12 +869,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+ struct ufshpb_region *rgn, *victim_rgn = NULL;
+
+ list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
+- if (!rgn) {
+- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+- "%s: no region allocated\n",
+- __func__);
+- return NULL;
+- }
+ if (ufshpb_check_srgns_issue_state(hpb, rgn))
+ continue;
+
+@@ -890,6 +884,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+ break;
+ }
+
++ if (!victim_rgn)
++ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
++ "%s: no region allocated\n",
++ __func__);
++
+ return victim_rgn;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 45efb748801e500830295bb0915d6d5b318a0116 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Mar 2022 08:01:24 +0100
+Subject: scsi: zorro7xx: Fix a resource leak in zorro7xx_remove_one()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 16ed828b872d12ccba8f07bcc446ae89ba662f9c ]
+
+The error handling path of the probe releases a resource that is not freed
+in the remove function. In some cases, a ioremap() must be undone.
+
+Add the missing iounmap() call in the remove function.
+
+Link: https://lore.kernel.org/r/247066a3104d25f9a05de8b3270fc3c848763bcc.1647673264.git.christophe.jaillet@wanadoo.fr
+Fixes: 45804fbb00ee ("[SCSI] 53c700: Amiga Zorro NCR53c710 SCSI")
+Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/zorro7xx.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
+index 27b9e2baab1a..7acf9193a9e8 100644
+--- a/drivers/scsi/zorro7xx.c
++++ b/drivers/scsi/zorro7xx.c
+@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
++ if (host->base > 0x01000000)
++ iounmap(hostdata->base);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ zorro_release_device(z);
+--
+2.35.1
+
--- /dev/null
+From 23abc65f15115e76389246e48be88cbfaba33370 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 09:47:48 +1000
+Subject: sctp: count singleton chunks in assoc user stats
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit e3d37210df5c41c51147a2d5d465de1a4d77be7a ]
+
+Singleton chunks (INIT, HEARTBEAT PMTU probes, and SHUTDOWN-
+COMPLETE) are not counted in SCTP_GET_ASOC_STATS "sas_octrlchunks"
+counter available to the assoc owner.
+
+These are all control chunks so they should be counted as such.
+
+Add counting of singleton chunks so they are properly accounted for.
+
+Fixes: 196d67593439 ("sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Link: https://lore.kernel.org/r/c9ba8785789880cf07923b8a5051e174442ea9ee.1649029663.git.jamie.bainbridge@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/outqueue.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index a18609f608fb..e213aaf45d67 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -914,6 +914,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+ ctx->asoc->base.sk->sk_err = -error;
+ return;
+ }
++ ctx->asoc->stats.octrlchunks++;
+ break;
+
+ case SCTP_CID_ABORT:
+@@ -938,7 +939,10 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+
+ case SCTP_CID_HEARTBEAT:
+ if (chunk->pmtu_probe) {
+- sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
++ error = sctp_packet_singleton(ctx->transport,
++ chunk, ctx->gfp);
++ if (!error)
++ ctx->asoc->stats.octrlchunks++;
+ break;
+ }
+ fallthrough;
+--
+2.35.1
+
parisc-fix-cpu-affinity-for-lasi-wax-and-dino-chips.patch
parisc-fix-patch-code-locking-and-flushing.patch
mm-fix-race-between-madv_free-reclaim-and-blkdev-dir.patch
+drm-amdgpu-fix-off-by-one-in-amdgpu_gfx_kiq_acquire.patch
+drivers-hv-vmbus-fix-initialization-of-device-object.patch
+drivers-hv-vmbus-fix-potential-crash-on-module-unloa.patch
+netfilter-bitwise-fix-reduce-comparisons.patch
+revert-nfsv4-handle-the-special-linux-file-open-acce.patch
+nfsv4-fix-open-failure-with-o_accmode-flag.patch
+scsi-core-scsi_logging-fix-a-bug.patch
+scsi-sr-fix-typo-in-cdrom-closetray-eject-handling.patch
+scsi-core-fix-sbitmap-depth-in-scsi_realloc_sdev_bud.patch
+scsi-zorro7xx-fix-a-resource-leak-in-zorro7xx_remove.patch
+vdpa-mlx5-prevent-cvq-work-from-hogging-cpu.patch
+net-sfc-add-missing-xdp-queue-reinitialization.patch
+net-tls-fix-slab-out-of-bounds-bug-in-decrypt_intern.patch
+vrf-fix-packet-sniffing-for-traffic-originating-from.patch
+skbuff-fix-coalescing-for-page_pool-fragment-recycli.patch
+revert-net-dsa-stop-updating-master-mtu-from-master..patch
+ice-clear-default-forwarding-vsi-during-vsi-release.patch
+ice-fix-mac-address-setting.patch
+mctp-fix-check-for-dev_hard_header-result.patch
+mctp-use-output-netdev-to-allocate-skb-headroom.patch
+net-ipv4-fix-route-with-nexthop-object-delete-warnin.patch
+net-stmmac-fix-unset-max_speed-difference-between-dt.patch
+drm-imx-imx-ldb-check-for-null-pointer-after-calling.patch
+drm-imx-fix-memory-leak-in-imx_pd_connector_get_mode.patch
+drm-imx-dw_hdmi-imx-fix-bailout-in-error-cases-of-pr.patch
+regulator-rtq2134-fix-missing-active_discharge_on-se.patch
+spi-rpc-if-fix-rpm-imbalance-in-probe-error-path.patch
+regulator-atc260x-fix-missing-active_discharge_on-se.patch
+arch-arm64-fix-topology-initialization-for-core-sche.patch
+bnxt_en-synchronize-tx-when-xdp-redirects-happen-on-.patch
+bnxt_en-reserve-space-inside-receive-page-for-skb_sh.patch
+bnxt_en-prevent-xdp-redirect-from-running-when-stopp.patch
+sfc-do-not-free-an-empty-page_ring.patch
+rdma-mlx5-don-t-remove-cache-mrs-when-a-delay-is-nee.patch
+rdma-mlx5-add-a-missing-update-of-cache-last_add.patch
+ib-cm-cancel-mad-on-the-dreq-event-when-the-state-is.patch
+cifs-fix-potential-race-with-cifsd-thread.patch
+ib-rdmavt-add-lock-to-call-to-rvt_error_qp-to-preven.patch
+sctp-count-singleton-chunks-in-assoc-user-stats.patch
+dpaa2-ptp-fix-refcount-leak-in-dpaa2_ptp_probe.patch
+ice-set-txq_teid-to-ice_inval_teid-on-ring-creation.patch
+ice-do-not-skip-not-enabled-queues-in-ice_vc_dis_qs_.patch
+ipv6-fix-stats-accounting-in-ip6_pkt_drop.patch
+ice-synchronize_rcu-when-terminating-rings.patch
+ice-xsk-fix-vsi-state-check-in-ice_xsk_wakeup.patch
+ice-clear-cmd_type_offset_bsz-for-tx-rings.patch
+net-openvswitch-don-t-send-internal-clone-attribute-.patch
+net-ethernet-mv643xx-fix-over-zealous-checking-of_ge.patch
+net-openvswitch-fix-leak-of-nested-actions.patch
+rxrpc-fix-a-race-in-rxrpc_exit_net.patch
+net-sfc-fix-using-uninitialized-xdp-tx_queue.patch
+net-phy-mscc-miim-reject-clause-45-register-accesses.patch
+qede-confirm-skb-is-allocated-before-using.patch
+spi-bcm-qspi-fix-mspi-only-access-with-bcm_qspi_exec.patch
+drm-amd-display-fix-for-dmub-outbox-notification-ena.patch
+drm-amd-display-remove-redundant-dsc-power-gating-fr.patch
+bpf-support-dual-stack-sockets-in-bpf_tcp_check_sync.patch
+drbd-fix-five-use-after-free-bugs-in-get_initial_sta.patch
+scsi-sd-sd_read_cpr-requires-vpd-pages.patch
+scsi-ufs-ufshpb-fix-a-null-check-on-list-iterator.patch
+io_uring-nospec-index-for-tags-on-files-update.patch
+io_uring-don-t-touch-scm_fp_list-after-queueing-skb.patch
+sunrpc-handle-enomem-in-call_transmit_status.patch
+sunrpc-handle-low-memory-situations-in-call_status.patch
+sunrpc-svc_tcp_sendmsg-should-handle-errors-from-xdr.patch
+iommu-omap-fix-regression-in-probe-for-null-pointer-.patch
+perf-unwind-don-t-show-unwind-error-messages-when-au.patch
+perf-arm-spe-fix-perf-report-mem-mode.patch
+perf-tools-fix-perf-s-libperf_print-callback.patch
+perf-session-remap-buf-if-there-is-no-space-for-even.patch
--- /dev/null
+From 3e1cd8610fbc5bfc2343cde620ab1e8d07798b55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:48:51 +0100
+Subject: sfc: Do not free an empty page_ring
+
+From: Martin Habets <habetsm.xilinx@gmail.com>
+
+[ Upstream commit 458f5d92df4807e2a7c803ed928369129996bf96 ]
+
+When the page_ring is not used page_ptr_mask is 0.
+Do not dereference page_ring[0] in this case.
+
+Fixes: 2768935a4660 ("sfc: reuse pages to avoid DMA mapping/unmapping costs")
+Reported-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/rx_common.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index 633ca77a26fd..b925de9b4302 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -166,6 +166,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+ struct efx_nic *efx = rx_queue->efx;
+ int i;
+
++ if (unlikely(!rx_queue->page_ring))
++ return;
++
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+--
+2.35.1
+
--- /dev/null
+From a9a84c346ea353ea22c96879bb4e13a4955fac82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 11:24:41 +0100
+Subject: skbuff: fix coalescing for page_pool fragment recycling
+
+From: Jean-Philippe Brucker <jean-philippe@linaro.org>
+
+[ Upstream commit 1effe8ca4e34c34cdd9318436a4232dcb582ebf4 ]
+
+Fix a use-after-free when using page_pool with page fragments. We
+encountered this problem during normal RX in the hns3 driver:
+
+(1) Initially we have three descriptors in the RX queue. The first one
+ allocates PAGE1 through page_pool, and the other two allocate one
+ half of PAGE2 each. Page references look like this:
+
+ RX_BD1 _______ PAGE1
+ RX_BD2 _______ PAGE2
+ RX_BD3 _________/
+
+(2) Handle RX on the first descriptor. Allocate SKB1, eventually added
+ to the receive queue by tcp_queue_rcv().
+
+(3) Handle RX on the second descriptor. Allocate SKB2 and pass it to
+ netif_receive_skb():
+
+ netif_receive_skb(SKB2)
+ ip_rcv(SKB2)
+ SKB3 = skb_clone(SKB2)
+
+ SKB2 and SKB3 share a reference to PAGE2 through
+ skb_shinfo()->dataref. The other ref to PAGE2 is still held by
+ RX_BD3:
+
+ SKB2 ---+- PAGE2
+ SKB3 __/ /
+ RX_BD3 _________/
+
+ (3b) Now while handling TCP, coalesce SKB3 with SKB1:
+
+ tcp_v4_rcv(SKB3)
+ tcp_try_coalesce(to=SKB1, from=SKB3) // succeeds
+ kfree_skb_partial(SKB3)
+ skb_release_data(SKB3) // drops one dataref
+
+ SKB1 _____ PAGE1
+ \____
+ SKB2 _____ PAGE2
+ /
+ RX_BD3 _________/
+
+ In skb_try_coalesce(), __skb_frag_ref() takes a page reference to
+ PAGE2, where it should instead have increased the page_pool frag
+ reference, pp_frag_count. Without coalescing, when releasing both
+ SKB2 and SKB3, a single reference to PAGE2 would be dropped. Now
+ when releasing SKB1 and SKB2, two references to PAGE2 will be
+ dropped, resulting in underflow.
+
+ (3c) Drop SKB2:
+
+ af_packet_rcv(SKB2)
+ consume_skb(SKB2)
+ skb_release_data(SKB2) // drops second dataref
+ page_pool_return_skb_page(PAGE2) // drops one pp_frag_count
+
+ SKB1 _____ PAGE1
+ \____
+ PAGE2
+ /
+ RX_BD3 _________/
+
+(4) Userspace calls recvmsg()
+ Copies SKB1 and releases it. Since SKB3 was coalesced with SKB1, we
+ release the SKB3 page as well:
+
+ tcp_eat_recv_skb(SKB1)
+ skb_release_data(SKB1)
+ page_pool_return_skb_page(PAGE1)
+ page_pool_return_skb_page(PAGE2) // drops second pp_frag_count
+
+(5) PAGE2 is freed, but the third RX descriptor was still using it!
+ In our case this causes IOMMU faults, but it would silently corrupt
+ memory if the IOMMU was disabled.
+
+Change the logic that checks whether pp_recycle SKBs can be coalesced.
+We still reject differing pp_recycle between 'from' and 'to' SKBs, but
+in order to avoid the situation described above, we also reject
+coalescing when both 'from' and 'to' are pp_recycled and 'from' is
+cloned.
+
+The new logic allows coalescing a cloned pp_recycle SKB into a page
+refcounted one, because in this case the release (4) will drop the right
+reference, the one taken by skb_try_coalesce().
+
+Fixes: 53e0961da1c7 ("page_pool: add frag page recycling support in page pool")
+Suggested-by: Alexander Duyck <alexanderduyck@fb.com>
+Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Reviewed-by: Yunsheng Lin <linyunsheng@huawei.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a8a2fb745274..180fa6a26ad4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5275,11 +5275,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ if (skb_cloned(to))
+ return false;
+
+- /* The page pool signature of struct page will eventually figure out
+- * which pages can be recycled or not but for now let's prohibit slab
+- * allocated and page_pool allocated SKBs from being coalesced.
++ /* In general, avoid mixing slab allocated and page_pool allocated
++ * pages within the same SKB. However when @to is not pp_recycle and
++ * @from is cloned, we can transition frag pages from page_pool to
++ * reference counted.
++ *
++ * On the other hand, don't allow coalescing two pp_recycle SKBs if
++ * @from is cloned, in case the SKB is using page_pool fragment
++ * references (PP_FLAG_PAGE_FRAG). Since we only take full page
++ * references for cloned SKBs at the moment that would result in
++ * inconsistent reference counts.
+ */
+- if (to->pp_recycle != from->pp_recycle)
++ if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+ return false;
+
+ if (len <= skb_tailroom(to)) {
+--
+2.35.1
+
--- /dev/null
+From 9ab17339529794569921caa5c185d8f5a9556637 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Mar 2022 10:24:42 -0400
+Subject: spi: bcm-qspi: fix MSPI only access with bcm_qspi_exec_mem_op()
+
+From: Kamal Dasu <kdasu.kdev@gmail.com>
+
+[ Upstream commit 2c7d1b281286c46049cd22b43435cecba560edde ]
+
+This fixes case where MSPI controller is used to access spi-nor
+flash and BSPI block is not present.
+
+Fixes: 5f195ee7d830 ("spi: bcm-qspi: Implement the spi_mem interface")
+Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20220328142442.7553-1-kdasu.kdev@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-bcm-qspi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 86c76211b3d3..cad2d55dcd3d 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ addr = op->addr.val;
+ len = op->data.nbytes;
+
+- if (bcm_qspi_bspi_ver_three(qspi) == true) {
++ if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
+ /*
+ * The address coming into this function is a raw flash offset.
+ * But for BSPI <= V3, we need to convert it to a remapped BSPI
+@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ len < 4)
+ mspi_read = true;
+
+- if (mspi_read)
++ if (!has_bspi(qspi) || mspi_read)
+ return bcm_qspi_mspi_exec_mem_op(spi, op);
+
+ ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
+--
+2.35.1
+
--- /dev/null
+From 0789a71b270056ba2338dbe4531cd638eaa54059 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 16:00:39 +0200
+Subject: spi: rpc-if: Fix RPM imbalance in probe error path
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit 2f8cf5f642e80f8b6b0e660a9c86924a1f41cd80 ]
+
+If rpcif_hw_init() fails, Runtime PM is left enabled.
+
+Fixes: b04cc0d912eb80d3 ("memory: renesas-rpc-if: Add support for RZ/G2L")
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Link: https://lore.kernel.org/r/1c78a1f447d019bb66b6e7787f520ae78821e2ae.1648562287.git.geert+renesas@glider.be
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-rpc-if.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
+index fe82f3575df4..24ec1c83f379 100644
+--- a/drivers/spi/spi-rpc-if.c
++++ b/drivers/spi/spi-rpc-if.c
+@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
+
+ error = rpcif_hw_init(rpc, false);
+ if (error)
+- return error;
++ goto out_disable_rpm;
+
+ error = spi_register_controller(ctlr);
+ if (error) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+- rpcif_disable_rpm(rpc);
++ goto out_disable_rpm;
+ }
+
++ return 0;
++
++out_disable_rpm:
++ rpcif_disable_rpm(rpc);
+ return error;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From d381a0f5e4540b08b3036b339cdfabad098ac56e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 23:18:57 -0400
+Subject: SUNRPC: Handle ENOMEM in call_transmit_status()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit d3c15033b240767d0287f1c4a529cbbe2d5ded8a ]
+
+Both call_transmit() and call_bc_transmit() can now return ENOMEM, so
+let's make sure that we handle the errors gracefully.
+
+Fixes: 0472e4766049 ("SUNRPC: Convert socket page send code to use iov_iter()")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b36d235d2d6d..bf1fd6caaf92 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2197,6 +2197,7 @@ call_transmit_status(struct rpc_task *task)
+ * socket just returned a connection error,
+ * then hold onto the transport lock.
+ */
++ case -ENOMEM:
+ case -ENOBUFS:
+ rpc_delay(task, HZ>>2);
+ fallthrough;
+@@ -2280,6 +2281,7 @@ call_bc_transmit_status(struct rpc_task *task)
+ case -ENOTCONN:
+ case -EPIPE:
+ break;
++ case -ENOMEM:
+ case -ENOBUFS:
+ rpc_delay(task, HZ>>2);
+ fallthrough;
+--
+2.35.1
+
--- /dev/null
+From f71f0da1ebe15c5c316d28d8e9668f2a3cd91a7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Apr 2022 09:50:19 -0400
+Subject: SUNRPC: Handle low memory situations in call_status()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 9d82819d5b065348ce623f196bf601028e22ed00 ]
+
+We need to handle ENFILE, ENOBUFS, and ENOMEM, because
+xprt_wake_pending_tasks() can be called with any one of these due to
+socket creation failures.
+
+Fixes: b61d59fffd3e ("SUNRPC: xs_tcp_connect_worker{4,6}: merge common code")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index bf1fd6caaf92..0222ad4523a9 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2364,6 +2364,11 @@ call_status(struct rpc_task *task)
+ case -EPIPE:
+ case -EAGAIN:
+ break;
++ case -ENFILE:
++ case -ENOBUFS:
++ case -ENOMEM:
++ rpc_delay(task, HZ>>2);
++ break;
+ case -EIO:
+ /* shutdown or soft timeout */
+ goto out_exit;
+--
+2.35.1
+
--- /dev/null
+From 3edd5be2f222ad5be3c685fd4bd970955ee25c1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Apr 2022 14:10:23 -0400
+Subject: SUNRPC: svc_tcp_sendmsg() should handle errors from xdr_alloc_bvec()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit b056fa070814897be32d83b079dbc311375588e7 ]
+
+The allocation is done with GFP_KERNEL, but it could still fail in a low
+memory situation.
+
+Fixes: 4a85a6a3320b ("SUNRPC: Handle TCP socket sends with kernel_sendpage() again")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/svcsock.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 478f857cdaed..6ea3d87e1147 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1096,7 +1096,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+ int ret;
+
+ *sentp = 0;
+- xdr_alloc_bvec(xdr, GFP_KERNEL);
++ ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
+
+ ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
+ if (ret < 0)
+--
+2.35.1
+
--- /dev/null
+From 2e4dd3c852dec42fd8bcc4e19d85fc6f05950e1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 12:21:07 +0800
+Subject: vdpa: mlx5: prevent cvq work from hogging CPU
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 55ebf0d60e3cc6c9e8593399e185842c00e12f36 ]
+
+A userspace triggerable infinite loop could happen in
+mlx5_cvq_kick_handler() if userspace keeps sending a huge amount of
+cvq requests.
+
+Fixing this by introducing a quota and re-queue the work if we're out
+of the budget (currently the implicit budget is one) . While at it,
+using a per device work struct to avoid on demand memory allocation
+for cvq.
+
+Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Link: https://lore.kernel.org/r/20220329042109.4029-1-jasowang@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Eli Cohen <elic@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 9fe1071a9644..1b5de3af1a62 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
+ u32 cur_num_vqs;
+ struct notifier_block nb;
+ struct vdpa_callback config_cb;
++ struct mlx5_vdpa_wq_ent cvq_ent;
+ };
+
+ static void free_resources(struct mlx5_vdpa_net *ndev);
+@@ -1616,10 +1617,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ cvq = &mvdev->cvq;
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+- goto out;
++ return;
+
+ if (!cvq->ready)
+- goto out;
++ return;
+
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
+@@ -1653,9 +1654,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
+
+ if (vringh_need_notify_iotlb(&cvq->vring))
+ vringh_notify(&cvq->vring);
++
++ queue_work(mvdev->wq, &wqent->work);
++ break;
+ }
+-out:
+- kfree(wqent);
+ }
+
+ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+@@ -1663,7 +1665,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+- struct mlx5_vdpa_wq_ent *wqent;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+@@ -1672,13 +1673,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+ if (!mvdev->wq || !mvdev->cvq.ready)
+ return;
+
+- wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+- if (!wqent)
+- return;
+-
+- wqent->mvdev = mvdev;
+- INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
+- queue_work(mvdev->wq, &wqent->work);
++ queue_work(mvdev->wq, &ndev->cvq_ent.work);
+ return;
+ }
+
+@@ -2668,6 +2663,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ if (err)
+ goto err_mr;
+
++ ndev->cvq_ent.mvdev = mvdev;
++ INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
+ mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
+ if (!mvdev->wq) {
+ err = -ENOMEM;
+--
+2.35.1
+
--- /dev/null
+From c14d6ec7dff61188368c65bd177d270e271bc386 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Mar 2022 10:26:43 +0300
+Subject: vrf: fix packet sniffing for traffic originating from ip tunnels
+
+From: Eyal Birger <eyal.birger@gmail.com>
+
+[ Upstream commit 012d69fbfcc739f846766c1da56ef8b493b803b5 ]
+
+in commit 048939088220
+("vrf: add mac header for tunneled packets when sniffer is attached")
+an Ethernet header was cooked for traffic originating from tunnel devices.
+
+However, the header is added based on whether the mac_header is unset
+and ignores cases where the device doesn't expose a mac header to upper
+layers, such as in ip tunnels like ipip and gre.
+
+Traffic originating from such devices still appears garbled when capturing
+on the vrf device.
+
+Fix by observing whether the original device exposes a header to upper
+layers, similar to the logic done in af_packet.
+
+In addition, skb->mac_len needs to be adjusted after adding the Ethernet
+header for the skb_push/pull() surrounding dev_queue_xmit_nit() to work
+on these packets.
+
+Fixes: 048939088220 ("vrf: add mac header for tunneled packets when sniffer is attached")
+Signed-off-by: Eyal Birger <eyal.birger@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vrf.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index e0b1ab99a359..f37adcef4bef 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1266,6 +1266,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
+ eth = (struct ethhdr *)skb->data;
+
+ skb_reset_mac_header(skb);
++ skb_reset_mac_len(skb);
+
+ /* we set the ethernet destination and the source addresses to the
+ * address of the VRF device.
+@@ -1295,9 +1296,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
+ */
+ static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
+ struct net_device *vrf_dev,
+- u16 proto)
++ u16 proto, struct net_device *orig_dev)
+ {
+- if (skb_mac_header_was_set(skb))
++ if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
+ return 0;
+
+ return vrf_prepare_mac_header(skb, vrf_dev, proto);
+@@ -1403,6 +1404,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+
+ /* if packet is NDISC then keep the ingress interface */
+ if (!is_ndisc) {
++ struct net_device *orig_dev = skb->dev;
++
+ vrf_rx_stats(vrf_dev, skb->len);
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+@@ -1411,7 +1414,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ int err;
+
+ err = vrf_add_mac_header_if_unset(skb, vrf_dev,
+- ETH_P_IPV6);
++ ETH_P_IPV6,
++ orig_dev);
+ if (likely(!err)) {
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+@@ -1441,6 +1445,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+ {
++ struct net_device *orig_dev = skb->dev;
++
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
+@@ -1461,7 +1467,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ if (!list_empty(&vrf_dev->ptype_all)) {
+ int err;
+
+- err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
++ err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
++ orig_dev);
+ if (likely(!err)) {
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+--
+2.35.1
+