--- /dev/null
+From 6bc8d7d074319cb583621132152ba970acd1bb93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Jun 2022 16:23:25 -0700
+Subject: af_unix: Fix a data-race in unix_dgram_peer_wake_me().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 662a80946ce13633ae90a55379f1346c10f0c432 ]
+
+unix_dgram_poll() calls unix_dgram_peer_wake_me() without `other`'s
+lock held and checks if its receive queue is full. Here we need to
+use unix_recvq_full_lockless() instead of unix_recvq_full(), otherwise
+KCSAN will report a data-race.
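+
+As a sketch for reference (not a verbatim copy of the helper), the
+lockless variant performs the same comparison but reads both sides
+with data-race-safe accessors, roughly:
+
+    return skb_queue_len_lockless(&other->sk_receive_queue) >
+           READ_ONCE(other->sk_max_ack_backlog);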
+
+Fixes: 7d267278a9ec ("unix: avoid use-after-free in ep_remove_wait_queue")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20220605232325.11804-1-kuniyu@amazon.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e71a312faa1e..4aed12e94221 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -490,7 +490,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+ * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
+ * to other and its full, we will hang waiting for POLLOUT.
+ */
+- if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
++ if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
+ return 1;
+
+ if (connected)
+--
+2.35.1
+
--- /dev/null
+From 80e467e00c4c7cba5e18c8a56714e77266112461 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jun 2022 14:01:07 +0000
+Subject: amt: fix possible null-ptr-deref in amt_rcv()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit d16207f92a4a823c48b4ea953ad51f4483456768 ]
+
+When the amt interface receives an amt message, it tries to obtain the
+amt private data from the sock.
+If there is no amt private data, it frees the skb immediately.
+After kfree_skb(), it increases the rx_dropped stats.
+But in order to update rx_dropped, the amt private data is needed.
+So, make amt_rcv() not increase the rx_dropped stats when it cannot
+obtain the amt private data.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 1a1a0e80e005 ("amt: fix possible memory leak in amt_rcv()")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/amt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index fbb03562cb95..2815db7ee2a3 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -2698,7 +2698,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt) {
+ err = true;
+- goto drop;
++ kfree_skb(skb);
++ goto out;
+ }
+
+ skb->dev = amt->dev;
+--
+2.35.1
+
--- /dev/null
+From a77440d9e44aa0584218760a3f0773ea6465da32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jun 2022 14:01:08 +0000
+Subject: amt: fix wrong type string definition
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit d7970039d87c926bb648982e920cb9851c19f3e1 ]
+
+The amt message type definitions start from 1, not 0,
+but type_str[] starts from index 0.
+So, the wrong type information is printed.
+
+Fixes: cbc21dc1cfe9 ("amt: add data plane of amt interface")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/amt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index 2815db7ee2a3..14fe03dbd9b1 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -51,6 +51,7 @@ static char *status_str[] = {
+ };
+
+ static char *type_str[] = {
++ "", /* Type 0 is not defined */
+ "AMT_MSG_DISCOVERY",
+ "AMT_MSG_ADVERTISEMENT",
+ "AMT_MSG_REQUEST",
+--
+2.35.1
+
--- /dev/null
+From a7781fd3943ee8dd19359aec9244ce7d4383e39f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jun 2022 14:01:06 +0000
+Subject: amt: fix wrong usage of pskb_may_pull()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit f55a07074fdd38cab8c097ac5bd397d68eff733c ]
+
+This adds the missing pskb_may_pull() calls in amt_update_handler() and
+amt_multicast_data_handler(),
+and fixes the wrong parameter passed to pskb_may_pull() in
+amt_advertisement_handler() and amt_membership_query_handler().
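+
+Roughly, the corrected handlers follow the usual pattern of making sure
+the headers they are about to dereference are in the linear area before
+casting any pointers. A sketch based on the advertisement handler in the
+hunks below (the final cast is inferred from the analogous mcast-data
+handler):
+
+    hdr_size = sizeof(*amta) + sizeof(struct udphdr);
+    if (!pskb_may_pull(skb, hdr_size))
+            return true;    /* malformed, drop */
+    amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);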
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Fixes: cbc21dc1cfe9 ("amt: add data plane of amt interface")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/amt.c | 55 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 37 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index 22d7da749a24..fbb03562cb95 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -2220,8 +2220,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
+ struct amt_header_advertisement *amta;
+ int hdr_size;
+
+- hdr_size = sizeof(*amta) - sizeof(struct amt_header);
+-
++ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+@@ -2251,19 +2250,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
++ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
++ if (!pskb_may_pull(skb, hdr_size))
++ return true;
++
+ amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
+ if (amtmd->reserved || amtmd->version)
+ return true;
+
+- hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
+ return true;
++
+ skb_reset_network_header(skb);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = eth_hdr(skb);
++
++ if (!pskb_may_pull(skb, sizeof(*iph)))
++ return true;
+ iph = ip_hdr(skb);
++
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+@@ -2274,6 +2281,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h;
+
++ if (!pskb_may_pull(skb, sizeof(*ip6h)))
++ return true;
++
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+@@ -2306,8 +2316,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+ struct iphdr *iph;
+ int hdr_size, len;
+
+- hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
+-
++ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+@@ -2315,22 +2324,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+ if (amtmq->reserved || amtmq->version)
+ return true;
+
+- hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
++ hdr_size -= sizeof(*eth);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
+ return true;
++
+ oeth = eth_hdr(skb);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ eth = eth_hdr(skb);
++ if (!pskb_may_pull(skb, sizeof(*iph)))
++ return true;
++
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+- if (!ipv4_is_multicast(iph->daddr))
+- return true;
+ if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
+ sizeof(*ihv3)))
+ return true;
+
++ if (!ipv4_is_multicast(iph->daddr))
++ return true;
++
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+@@ -2345,15 +2359,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+ #if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+- struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld2_query *mld2q;
++ struct ipv6hdr *ip6h;
+
+- if (!ipv6_addr_is_multicast(&ip6h->daddr))
+- return true;
+ if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
+ sizeof(*mld2q)))
+ return true;
+
++ ip6h = ipv6_hdr(skb);
++ if (!ipv6_addr_is_multicast(&ip6h->daddr))
++ return true;
++
+ mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+@@ -2389,23 +2405,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
+ {
+ struct amt_header_membership_update *amtmu;
+ struct amt_tunnel_list *tunnel;
+- struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+- int len;
++ int len, hdr_size;
+
+ iph = ip_hdr(skb);
+- udph = udp_hdr(skb);
+
+- if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
+- false, false))
++ hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
++ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+- amtmu = (struct amt_header_membership_update *)skb->data;
++ amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
+ if (amtmu->reserved || amtmu->version)
+ return true;
+
+- skb_pull(skb, sizeof(*amtmu));
++ if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
++ return true;
++
+ skb_reset_network_header(skb);
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+@@ -2426,6 +2442,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
+ return true;
+
+ report:
++ if (!pskb_may_pull(skb, sizeof(*iph)))
++ return true;
++
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (ip_mc_check_igmp(skb)) {
+--
+2.35.1
+
--- /dev/null
+From 3465817e3ef5f225ef78ebb70df3f19ab672caf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 12:59:26 +0400
+Subject: ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 10d6bdf532902be1d8aa5900b3c03c5671612aa2 ]
+
+of_find_device_by_node() takes a reference; we should use put_device()
+to release it when it is no longer needed.
+Add the missing put_device() calls to avoid a refcount leak.
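+
+In other words, every exit path after a successful lookup has to drop
+the device reference as well as the node reference. A sketch of the
+pattern, reusing the dma_dev/dma_node names from the hunks below (the
+lookup call itself is only implied by the Fixes commit):
+
+    dma_dev = of_find_device_by_node(dma_node);  /* takes a reference */
+    if (dma_dev) {
+            /* ... use dma_dev ... */
+            put_device(&dma_dev->dev);           /* on every exit path */
+    }
+    of_node_put(dma_node);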
+
+Fixes: 43f01da0f279 ("MIPS/OCTEON/ata: Convert pata_octeon_cf.c to use device tree.")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/pata_octeon_cf.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
+index 6b5ed3046b44..35608a0cf552 100644
+--- a/drivers/ata/pata_octeon_cf.c
++++ b/drivers/ata/pata_octeon_cf.c
+@@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
+ int i;
+ res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+ if (!res_dma) {
++ put_device(&dma_dev->dev);
+ of_node_put(dma_node);
+ return -EINVAL;
+ }
+ cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
+ resource_size(res_dma));
+ if (!cf_port->dma_base) {
++ put_device(&dma_dev->dev);
+ of_node_put(dma_node);
+ return -EINVAL;
+ }
+@@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
+ irq = i;
+ irq_handler = octeon_cf_interrupt;
+ }
++ put_device(&dma_dev->dev);
+ }
+ of_node_put(dma_node);
+ }
+--
+2.35.1
+
--- /dev/null
+From 63fd7aa292e0933e756afa00435ae6a541471558 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 May 2022 14:51:13 -0700
+Subject: bpf, arm64: Clear prog->jited_len along prog->jited
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 10f3b29c65bb2fe0d47c2945cd0b4087be1c5218 ]
+
+syzbot reported an illegal copy_to_user() attempt
+from bpf_prog_get_info_by_fd() [1]
+
+There was no repro yet on this bug, but I think
+that commit 0aef499f3172 ("mm/usercopy: Detect vmalloc overruns")
+is exposing a prior bug in bpf arm64.
+
+bpf_prog_get_info_by_fd() looks at prog->jited_len
+to determine if the JIT image can be copied out to user space.
+
+My theory is that syzbot managed to get a prog where prog->jited_len
+has been set to 43, while prog->bpf_func has been cleared.
+
+It is not clear why copy_to_user(uinsns, NULL, ulen) is triggering
+this particular warning.
+
+I thought find_vma_area(NULL) would not find a vm_struct.
+As we do not hold vmap_area_lock spinlock, it might be possible
+that the found vm_struct was garbage.
+
+[1]
+usercopy: Kernel memory exposure attempt detected from vmalloc (offset 792633534417210172, size 43)!
+kernel BUG at mm/usercopy.c:101!
+Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 0 PID: 25002 Comm: syz-executor.1 Not tainted 5.18.0-syzkaller-10139-g8291eaafed36 #0
+Hardware name: linux,dummy-virt (DT)
+pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : usercopy_abort+0x90/0x94 mm/usercopy.c:101
+lr : usercopy_abort+0x90/0x94 mm/usercopy.c:89
+sp : ffff80000b773a20
+x29: ffff80000b773a30 x28: faff80000b745000 x27: ffff80000b773b48
+x26: 0000000000000000 x25: 000000000000002b x24: 0000000000000000
+x23: 00000000000000e0 x22: ffff80000b75db67 x21: 0000000000000001
+x20: 000000000000002b x19: ffff80000b75db3c x18: 00000000fffffffd
+x17: 2820636f6c6c616d x16: 76206d6f72662064 x15: 6574636574656420
+x14: 74706d6574746120 x13: 2129333420657a69 x12: 73202c3237313031
+x11: 3237313434333533 x10: 3336323937207465 x9 : 657275736f707865
+x8 : ffff80000a30c550 x7 : ffff80000b773830 x6 : ffff80000b773830
+x5 : 0000000000000000 x4 : ffff00007fbbaa10 x3 : 0000000000000000
+x2 : 0000000000000000 x1 : f7ff000028fc0000 x0 : 0000000000000064
+Call trace:
+ usercopy_abort+0x90/0x94 mm/usercopy.c:89
+ check_heap_object mm/usercopy.c:186 [inline]
+ __check_object_size mm/usercopy.c:252 [inline]
+ __check_object_size+0x198/0x36c mm/usercopy.c:214
+ check_object_size include/linux/thread_info.h:199 [inline]
+ check_copy_size include/linux/thread_info.h:235 [inline]
+ copy_to_user include/linux/uaccess.h:159 [inline]
+ bpf_prog_get_info_by_fd.isra.0+0xf14/0xfdc kernel/bpf/syscall.c:3993
+ bpf_obj_get_info_by_fd+0x12c/0x510 kernel/bpf/syscall.c:4253
+ __sys_bpf+0x900/0x2150 kernel/bpf/syscall.c:4956
+ __do_sys_bpf kernel/bpf/syscall.c:5021 [inline]
+ __se_sys_bpf kernel/bpf/syscall.c:5019 [inline]
+ __arm64_sys_bpf+0x28/0x40 kernel/bpf/syscall.c:5019
+ __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
+ invoke_syscall+0x48/0x114 arch/arm64/kernel/syscall.c:52
+ el0_svc_common.constprop.0+0x44/0xec arch/arm64/kernel/syscall.c:142
+ do_el0_svc+0xa0/0xc0 arch/arm64/kernel/syscall.c:206
+ el0_svc+0x44/0xb0 arch/arm64/kernel/entry-common.c:624
+ el0t_64_sync_handler+0x1ac/0x1b0 arch/arm64/kernel/entry-common.c:642
+ el0t_64_sync+0x198/0x19c arch/arm64/kernel/entry.S:581
+Code: aa0003e3 d00038c0 91248000 97fff65f (d4210000)
+
+Fixes: db496944fdaa ("bpf: arm64: add JIT support for multi-function programs")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20220531215113.1100754-1-eric.dumazet@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/net/bpf_jit_comp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index fcc675aa1670..c779e604edac 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1261,6 +1261,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
++ prog->jited_len = 0;
+ goto out_off;
+ }
+ bpf_jit_binary_lock_ro(header);
+--
+2.35.1
+
--- /dev/null
+From 5407b92e1a44307e422a8c1bdc87a2a4757f4c02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jun 2022 12:21:06 +0200
+Subject: drm/amdgpu: fix limiting AV1 to the first instance on VCN3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+[ Upstream commit 1d2afeb7983081ecf656c2338c7db6fd405c653c ]
+
+The job is not yet initialized here.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2037
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Fixes: cdc7893fc93f ("drm/amdgpu: use job and ib structures directly in CS parsers")
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index cb5f0a12333f..57a34e775da3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1821,23 +1821,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+- struct amdgpu_job *job)
++static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+ {
+ struct drm_gpu_scheduler **scheds;
+
+ /* The create msg must be in the first IB submitted */
+- if (atomic_read(&job->base.entity->fence_seq))
++ if (atomic_read(&p->entity->fence_seq))
+ return -EINVAL;
+
+ scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+ [AMDGPU_RING_PRIO_DEFAULT].sched;
+- drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
++ drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ return 0;
+ }
+
+-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+- uint64_t addr)
++static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+ {
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *map;
+@@ -1908,7 +1906,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+ continue;
+
+- r = vcn_v3_0_limit_sched(p, job);
++ r = vcn_v3_0_limit_sched(p);
+ if (r)
+ goto out;
+ }
+@@ -1922,7 +1920,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+ {
+- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
++ struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ uint32_t msg_lo = 0, msg_hi = 0;
+ unsigned i;
+ int r;
+@@ -1941,8 +1939,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ msg_hi = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ val == 0) {
+- r = vcn_v3_0_dec_msg(p, job,
+- ((u64)msg_hi) << 32 | msg_lo);
++ r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ if (r)
+ return r;
+ }
+--
+2.35.1
+
--- /dev/null
+From b34aa14d3dc2f77fe9017304cf143c0a69249458 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 May 2022 01:38:44 +0200
+Subject: drm/bridge: ti-sn65dsi83: Handle dsi_lanes == 0 as invalid
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit edbc7960bef7fd71ef1e44d0df15b864784b14c8 ]
+
+Handle an empty data-lanes = < >; property, which translates to
+dsi_lanes = 0, as invalid.
+
+Fixes: ceb515ba29ba6 ("drm/bridge: ti-sn65dsi83: Add TI SN65DSI83 and SN65DSI84 driver")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Jonas Karlman <jonas@kwiboo.se>
+Cc: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+Cc: Lucas Stach <l.stach@pengutronix.de>
+Cc: Marek Vasut <marex@denx.de>
+Cc: Maxime Ripard <maxime@cerno.tech>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Robert Foss <robert.foss@linaro.org>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
+Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220518233844.248504-1-marex@denx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi83.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 19daaddd29a4..3d58110465fe 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -573,7 +573,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
+ ctx->host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+
+- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
++ if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) {
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+--
+2.35.1
+
--- /dev/null
+From f1b498951a5f73a8326e4df140cb964e2c8160e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 16:59:29 -0700
+Subject: drm: imx: fix compiler warning with gcc-12
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 7aefd8b53815274f3ef398d370a3c9b27dd9f00c ]
+
+Gcc-12 correctly warned about this code using a non-NULL pointer as a
+truth value:
+
+ drivers/gpu/drm/imx/ipuv3-crtc.c: In function ‘ipu_crtc_disable_planes’:
+ drivers/gpu/drm/imx/ipuv3-crtc.c:72:21: error: the comparison will always evaluate as ‘true’ for the address of ‘plane’ will never be NULL [-Werror=address]
+ 72 | if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ | ^
+
+due to the extraneous '&' address-of operator.
+
+Philipp Zabel points out that the mistake had no adverse effect since
+the following condition doesn't actually dereference the NULL pointer,
+but the intent of the code was obviously to check for it, not to take
+the address of the member.
+
+Fixes: eb8c88808c83 ("drm/imx: add deferred plane disabling")
+Acked-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/ipuv3-crtc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index 9c8829f945b2..f7863d6dea80 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
+ drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
+ if (plane == &ipu_crtc->plane[0]->base)
+ disable_full = true;
+- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
++ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ disable_partial = true;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 76417a6a002b74b95d19647a8eacce19d0565e1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 May 2022 16:20:03 +0100
+Subject: drm/panfrost: Job should reference MMU not file_priv
+
+From: Steven Price <steven.price@arm.com>
+
+[ Upstream commit 6e516faf04317db2c46cbec4e3b78b4653a5b109 ]
+
+For a while now it's been allowed for an MMU context to outlive its
+corresponding panfrost_priv; however, the job structure still references
+panfrost_priv to get hold of the MMU context. If panfrost_priv has been
+freed this is a use-after-free which I've been able to trigger resulting
+in a splat.
+
+To fix this, drop the reference to panfrost_priv in the job structure
+and add a direct reference to the MMU structure which is what's actually
+needed.
+
+Fixes: 7fdc48cc63a3 ("drm/panfrost: Make sure MMU context lifetime is not bound to panfrost_priv")
+Signed-off-by: Steven Price <steven.price@arm.com>
+Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220519152003.81081-1-steven.price@arm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panfrost/panfrost_drv.c | 5 +++--
+ drivers/gpu/drm/panfrost/panfrost_job.c | 6 +++---
+ drivers/gpu/drm/panfrost/panfrost_job.h | 2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index 94b6f0a19c83..47780fe597f2 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+ {
+ struct panfrost_device *pfdev = dev->dev_private;
++ struct panfrost_file_priv *file_priv = file->driver_priv;
+ struct drm_panfrost_submit *args = data;
+ struct drm_syncobj *sync_out = NULL;
+ struct panfrost_job *job;
+@@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
+ job->jc = args->jc;
+ job->requirements = args->requirements;
+ job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+- job->file_priv = file->driver_priv;
++ job->mmu = file_priv->mmu;
+
+ slot = panfrost_job_get_slot(job);
+
+ ret = drm_sched_job_init(&job->base,
+- &job->file_priv->sched_entity[slot],
++ &file_priv->sched_entity[slot],
+ NULL);
+ if (ret)
+ goto out_put_job;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
+index a6925dbb6224..22c2af1a4627 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+ return;
+ }
+
+- cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
++ cfg = panfrost_mmu_as_get(pfdev, job->mmu);
+
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
+@@ -431,7 +431,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
+ job->jc = 0;
+ }
+
+- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
++ panfrost_mmu_as_put(pfdev, job->mmu);
+ panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
+
+ if (signal_fence)
+@@ -452,7 +452,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
+ * happen when we receive the DONE interrupt while doing a GPU reset).
+ */
+ job->jc = 0;
+- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
++ panfrost_mmu_as_put(pfdev, job->mmu);
+ panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
+
+ dma_fence_signal_locked(job->done_fence);
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
+index 77e6d0e6f612..8becc1ba0eb9 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.h
++++ b/drivers/gpu/drm/panfrost/panfrost_job.h
+@@ -17,7 +17,7 @@ struct panfrost_job {
+ struct kref refcount;
+
+ struct panfrost_device *pfdev;
+- struct panfrost_file_priv *file_priv;
++ struct panfrost_mmu *mmu;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+--
+2.35.1
+
--- /dev/null
+From c54aa03c95713d20045f0536ea8952a861eef8eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jun 2022 09:07:01 +0100
+Subject: iov_iter: Fix iter_xarray_get_pages{,_alloc}()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 6c77676645ad42993e0a8bdb8dafa517851a352a ]
+
+The maths at the end of iter_xarray_get_pages() to calculate the actual
+size doesn't work under some circumstances, such as when it's been asked to
+extract a partial single page. Various terms of the equation cancel out
+and you end up with actual == offset. The same issue exists in
+iter_xarray_get_pages_alloc().
+
+Fix these to just use min() to select the lesser amount from between the
+amount of page content transcribed into the buffer, minus the offset, and
+the size limit specified.
+
+This doesn't appear to have caused a problem yet upstream because network
+filesystems aren't getting the pages from an xarray iterator, but rather
+passing it directly to the socket, which just iterates over it. Cachefiles
+*does* do DIO to/from ext4/xfs/btrfs/etc., but it always asks for
+whole pages to be written or read.
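+
+As a rough worked example (illustrative numbers, assuming PAGE_SIZE is
+4096): an extraction of maxsize = 300 bytes starting 100 bytes into the
+first page grabs nr = 1 page at offset = 100, and the fixed calculation
+in the hunks below returns
+
+    min(nr * PAGE_SIZE - offset, maxsize) = min(4096 - 100, 300) = 300
+
+i.e. exactly the amount of page content described, capped at what the
+caller asked for.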
+
+Fixes: 7ff5062079ef ("iov_iter: Add ITER_XARRAY")
+Reported-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Alexander Viro <viro@zeniv.linux.org.uk>
+cc: Dominique Martinet <asmadeus@codewreck.org>
+cc: Mike Marshall <hubcap@omnibond.com>
+cc: Gao Xiang <xiang@kernel.org>
+cc: linux-afs@lists.infradead.org
+cc: v9fs-developer@lists.sourceforge.net
+cc: devel@lists.orangefs.org
+cc: linux-erofs@lists.ozlabs.org
+cc: linux-cachefs@redhat.com
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/iov_iter.c | 20 ++++----------------
+ 1 file changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 6dd5330f7a99..dda6d5f481c1 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -1434,7 +1434,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ {
+ unsigned nr, offset;
+ pgoff_t index, count;
+- size_t size = maxsize, actual;
++ size_t size = maxsize;
+ loff_t pos;
+
+ if (!size || !maxpages)
+@@ -1461,13 +1461,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ if (nr == 0)
+ return 0;
+
+- actual = PAGE_SIZE * nr;
+- actual -= offset;
+- if (nr == count && size > 0) {
+- unsigned last_offset = (nr > 1) ? 0 : offset;
+- actual -= PAGE_SIZE - (last_offset + size);
+- }
+- return actual;
++ return min(nr * PAGE_SIZE - offset, maxsize);
+ }
+
+ /* must be done on non-empty ITER_IOVEC one */
+@@ -1602,7 +1596,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
+ struct page **p;
+ unsigned nr, offset;
+ pgoff_t index, count;
+- size_t size = maxsize, actual;
++ size_t size = maxsize;
+ loff_t pos;
+
+ if (!size)
+@@ -1631,13 +1625,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
+ if (nr == 0)
+ return 0;
+
+- actual = PAGE_SIZE * nr;
+- actual -= offset;
+- if (nr == count && size > 0) {
+- unsigned last_offset = (nr > 1) ? 0 : offset;
+- actual -= PAGE_SIZE - (last_offset + size);
+- }
+- return actual;
++ return min(nr * PAGE_SIZE - offset, maxsize);
+ }
+
+ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+--
+2.35.1
+
--- /dev/null
+From f1f190e9362968590fadd3fe2c0d44ab06e1c5b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 09:21:07 -0400
+Subject: ip_gre: test csum_start instead of transport header
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 8d21e9963bec1aad2280cdd034c8993033ef2948 ]
+
+GRE with TUNNEL_CSUM will apply local checksum offload on
+CHECKSUM_PARTIAL packets.
+
+ipgre_xmit must validate csum_start after an optional skb_pull,
+else lco_csum may trigger an overflow. The original check was
+
+    if (csum && skb_checksum_start(skb) < skb->data)
+            return -EINVAL;
+
+This had false positives when skb_checksum_start is undefined:
+when ip_summed is not CHECKSUM_PARTIAL. A discussed refinement
+was straightforward
+
+    if (csum && skb->ip_summed == CHECKSUM_PARTIAL &&
+        skb_checksum_start(skb) < skb->data)
+            return -EINVAL;
+
+But was eventually revised more thoroughly:
+- restrict the check to the only branch where needed, in an
+ uncommon GRE path that uses header_ops and calls skb_pull.
+- test skb_transport_header, which is set along with csum_start
+ in skb_partial_csum_set in the normal header_ops datapath.
+
+Turns out skbs can arrive in this branch without the transport
+header set, e.g., through BPF redirection.
+
+Revise the check back to check csum_start directly, and only if
+CHECKSUM_PARTIAL. Do leave the check in the updated location.
+Check the field regardless of whether TUNNEL_CSUM is configured.
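+
+The resulting check, as it appears in the hunk below, now reads:
+
+    if (skb->ip_summed == CHECKSUM_PARTIAL &&
+        skb_checksum_start(skb) < skb->data)
+            goto free_skb;
+
+and is performed only in the header_ops branch, after the skb_pull().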
+
+Link: https://lore.kernel.org/netdev/YS+h%2FtqCJJiQei+W@shredder/
+Link: https://lore.kernel.org/all/20210902193447.94039-2-willemdebruijn.kernel@gmail.com/T/#u
+Fixes: 8a0ed250f911 ("ip_gre: validate csum_start only on pull")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Link: https://lore.kernel.org/r/20220606132107.3582565-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_gre.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index aacee9dd771b..bc8dfdf1c48a 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -629,21 +629,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ }
+
+ if (dev->header_ops) {
+- const int pull_len = tunnel->hlen + sizeof(struct iphdr);
+-
+ if (skb_cow_head(skb, 0))
+ goto free_skb;
+
+ tnl_params = (const struct iphdr *)skb->data;
+
+- if (pull_len > skb_transport_offset(skb))
+- goto free_skb;
+-
+ /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+ * to gre header.
+ */
+- skb_pull(skb, pull_len);
++ skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ skb_reset_mac_header(skb);
++
++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ skb_checksum_start(skb) < skb->data)
++ goto free_skb;
+ } else {
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto free_skb;
+--
+2.35.1
+
--- /dev/null
+From 87e777042d1cbe8d70c9b3aa692b9732fa9d4cdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 08:11:43 +0400
+Subject: net: altera: Fix refcount leak in altera_tse_mdio_create
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 11ec18b1d8d92b9df307d31950dcba0b3dd7283c ]
+
+Every iteration of for_each_child_of_node() decrements
+the reference count of the previous node.
+When breaking out of a for_each_child_of_node() loop early,
+we need to explicitly call of_node_put() on the child node once it
+is no longer needed.
+Add the missing of_node_put() to avoid a refcount leak.
+
+Fixes: bbd2190ce96d ("Altera TSE: Add main and header file for Altera Ethernet Driver")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Link: https://lore.kernel.org/r/20220607041144.7553-1-linmq006@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/altera/altera_tse_main.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
+index a3816264c35c..8c5828582c21 100644
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+ mdio = mdiobus_alloc();
+ if (mdio == NULL) {
+ netdev_err(dev, "Error allocating MDIO bus\n");
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto put_node;
+ }
+
+ mdio->name = ALTERA_TSE_RESOURCE_NAME;
+@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+ mdio->id);
+ goto out_free_mdio;
+ }
++ of_node_put(mdio_node);
+
+ if (netif_msg_drv(priv))
+ netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
+@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+ out_free_mdio:
+ mdiobus_free(mdio);
+ mdio = NULL;
++put_node:
++ of_node_put(mdio_node);
+ return ret;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From ae0a0f377f2103c24cd37ba9bbf278802c7c9ab2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Jun 2022 11:23:34 +0400
+Subject: net: dsa: lantiq_gswip: Fix refcount leak in gswip_gphy_fw_list
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 0737e018a05e2aa352828c52bdeed3b02cff2930 ]
+
+Every iteration of for_each_available_child_of_node() decrements
+the reference count of the previous node.
+When breaking early from a for_each_available_child_of_node() loop,
+we need to explicitly call of_node_put() on the gphy_fw_np.
+Add missing of_node_put() to avoid refcount leak.
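+
+The rule of thumb, as a sketch (parent, child and do_thing() are
+illustrative names, not from this driver):
+
+    for_each_available_child_of_node(parent, child) {
+            err = do_thing(child);
+            if (err) {
+                    of_node_put(child);  /* the iterator's reference */
+                    break;               /* or goto an error label */
+            }
+    }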
+
+Fixes: 14fceff4771e ("net: dsa: Add Lantiq / Intel DSA driver for vrx200")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Link: https://lore.kernel.org/r/20220605072335.11257-1-linmq006@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/lantiq_gswip.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 12c15da55664..9284373222fa 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -2069,8 +2069,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+- if (err)
++ if (err) {
++ of_node_put(gphy_fw_np);
+ goto remove_gphy;
++ }
+ i++;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 499225461fee62a1aedcfe854644022cbb3c4211 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 12:28:42 +0100
+Subject: net: dsa: mv88e6xxx: use BMSR_ANEGCOMPLETE bit for filling
+ an_complete
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Behún <kabel@kernel.org>
+
+[ Upstream commit 47e96930d6e6106d5252e85b868d3c7e29296de0 ]
+
+Commit ede359d8843a ("net: dsa: mv88e6xxx: Link in pcs_get_state() if AN
+is bypassed") added the ability to link if AN was bypassed, and added
+filling of state->an_complete field, but set it to true if AN was
+enabled in BMCR, not when AN was reported complete in BMSR.
+
+This was done because for some reason, when I wanted to use BMSR value
+to infer an_complete, I was looking at BMSR_ANEGCAPABLE bit (which was
+always 1), instead of BMSR_ANEGCOMPLETE bit.
+
+Use BMSR_ANEGCOMPLETE for filling state->an_complete.
+
+Fixes: ede359d8843a ("net: dsa: mv88e6xxx: Link in pcs_get_state() if AN is bypassed")
+Signed-off-by: Marek Behún <kabel@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/serdes.c | 27 +++++++++++----------------
+ 1 file changed, 11 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index 7b37d45bc9fb..1a19c5284f2c 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -50,22 +50,17 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
+ }
+
+ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
+- u16 ctrl, u16 status, u16 lpa,
++ u16 bmsr, u16 lpa, u16 status,
+ struct phylink_link_state *state)
+ {
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
++ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
+ /* The Spped and Duplex Resolved register is 1 if AN is enabled
+ * and complete, or if AN is disabled. So with disabled AN we
+- * still get here on link up. But we want to set an_complete
+- * only if AN was enabled, thus we look at BMCR_ANENABLE.
+- * (According to 802.3-2008 section 22.2.4.2.10, we should be
+- * able to get this same value from BMSR_ANEGCAPABLE, but tests
+- * show that these Marvell PHYs don't conform to this part of
+- * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
++ * still get here on link up.
+ */
+- state->an_complete = !!(ctrl & BMCR_ANENABLE);
+ state->duplex = status &
+ MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+@@ -191,12 +186,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+ {
+- u16 lpa, status, ctrl;
++ u16 bmsr, lpa, status;
+ int err;
+
+- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
++ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
+ if (err) {
+- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
++ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
+ return err;
+ }
+
+@@ -212,7 +207,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ return err;
+ }
+
+- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
++ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
+ }
+
+ int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+@@ -918,13 +913,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
+ int port, int lane, struct phylink_link_state *state)
+ {
+- u16 lpa, status, ctrl;
++ u16 bmsr, lpa, status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+- MV88E6390_SGMII_BMCR, &ctrl);
++ MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
++ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
+ return err;
+ }
+
+@@ -942,7 +937,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
+ return err;
+ }
+
+- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
++ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
+ }
+
+ static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+--
+2.35.1
+
--- /dev/null
+From f0a84caaf46ef7b04ace2b8621c5eb9289dc1c6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 20:46:24 +0200
+Subject: net: dsa: realtek: rtl8365mb: fix GMII caps for ports with internal
+ PHY
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alvin Å ipraga <alsi@bang-olufsen.dk>
+
+[ Upstream commit 487994ff75880569d32504d7e70da8b3328e0693 ]
+
+Since commit a18e6521a7d9 ("net: phylink: handle NA interface mode in
+phylink_fwnode_phy_connect()"), phylib defaults to GMII when no phy-mode
+or phy-connection-type property is specified in a DSA port node of the
+device tree. The same commit caused a regression in rtl8365mb whereby
+phylink would fail to connect, because the driver did not advertise
+support for GMII for ports with internal PHY.
+
+It should be noted that the aforementioned regression is not because the
+blamed commit was incorrect: on the contrary, the blamed commit is
+correcting the previous behaviour whereby unspecified phy-mode would
+cause the internal interface mode to be PHY_INTERFACE_MODE_NA. The
+rtl8365mb driver only worked by accident before because it _did_
+advertise support for PHY_INTERFACE_MODE_NA, despite NA being reserved
+for internal use by phylink. With one mistake fixed, the other was
+exposed.
+
+Commit a5dba0f207e5 ("net: dsa: rtl8365mb: add GMII as user port mode")
+then introduced implicit support for GMII mode on ports with internal
+PHY to allow a PHY connection for device trees where the phy-mode is not
+explicitly set to "internal". At this point everything was working OK
+again.
+
+Subsequently, commit 6ff6064605e9 ("net: dsa: realtek: convert to
+phylink_generic_validate()") broke this behaviour again by discarding
+the usage of rtl8365mb_phy_mode_supported() - where this GMII support
+was indicated - while switching to the new .phylink_get_caps API.
+
+With the new API, rtl8365mb_phy_mode_supported() is no longer needed.
+Remove it altogether and add back the GMII capability - this time to
+rtl8365mb_phylink_get_caps() - so that the above default behaviour works
+for ports with internal PHY again.
+
+Fixes: 6ff6064605e9 ("net: dsa: realtek: convert to phylink_generic_validate()")
+Signed-off-by: Alvin Å ipraga <alsi@bang-olufsen.dk>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://lore.kernel.org/r/20220607184624.417641-1-alvin@pqrs.dk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 38 +++++++----------------------
+ 1 file changed, 9 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index 3d70e8a77ecf..907c743370e3 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
+ return 0;
+ }
+
+-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
+- phy_interface_t interface)
+-{
+- int ext_int;
+-
+- ext_int = rtl8365mb_extint_port_map[port];
+-
+- if (ext_int < 0 &&
+- (interface == PHY_INTERFACE_MODE_NA ||
+- interface == PHY_INTERFACE_MODE_INTERNAL ||
+- interface == PHY_INTERFACE_MODE_GMII))
+- /* Internal PHY */
+- return true;
+- else if ((ext_int >= 1) &&
+- phy_interface_mode_is_rgmii(interface))
+- /* Extension MAC */
+- return true;
+-
+- return false;
+-}
+-
+ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+ {
+- if (dsa_is_user_port(ds, port))
++ if (dsa_is_user_port(ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+- else if (dsa_is_cpu_port(ds, port))
++
++ /* GMII is the default interface mode for phylib, so
++ * we have to support it for ports with integrated PHY.
++ */
++ __set_bit(PHY_INTERFACE_MODE_GMII,
++ config->supported_interfaces);
++ } else if (dsa_is_cpu_port(ds, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
++ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+@@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+- dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
+- phy_modes(state->interface), port);
+- return;
+- }
+-
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(priv->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+--
+2.35.1
+
--- /dev/null
+From e69b1d64733640ca9493f1a03ef7f6bf25aa5de0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jun 2022 17:32:38 +0400
+Subject: net: ethernet: bgmac: Fix refcount leak in bcma_mdio_mii_register
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit b8d91399775c55162073bb2aca061ec42e3d4bc1 ]
+
+of_get_child_by_name() returns a node pointer with its refcount
+incremented; we should use of_node_put() on it when it is no longer
+needed.
+Add the missing of_node_put() to avoid a refcount leak.
+
+Fixes: 55954f3bfdac ("net: ethernet: bgmac: move BCMA MDIO Phy code into a separate file")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220603133238.44114-1-linmq006@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+index 086739e4f40a..9b83d5361699 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
++++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
+ np = of_get_child_by_name(core->dev.of_node, "mdio");
+
+ err = of_mdiobus_register(mii_bus, np);
++ of_node_put(np);
+ if (err) {
+ dev_err(&core->dev, "Registration of mii bus failed\n");
+ goto err_free_bus;
+--
+2.35.1
+
--- /dev/null
+From 9f1775698c259666a8f979cf5d08bdbd071d0270 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 13:53:55 +0900
+Subject: net: ipv6: unexport __init-annotated seg6_hmac_init()
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 5801f064e35181c71857a80ff18af4dbec3c5f5c ]
+
+EXPORT_SYMBOL and __init is a bad combination because the .init.text
+section is freed up after the initialization. Hence, modules cannot
+use symbols annotated __init. The access to a freed symbol may end up
+with a kernel panic.
+
+modpost used to detect it, but it has been broken for a decade.
+
+Recently, I fixed modpost so it started to warn about it again, then this
+showed up in linux-next builds.
+
+There are two ways to fix it:
+
+ - Remove __init
+ - Remove EXPORT_SYMBOL
+
+I chose the latter for this case because the caller (net/ipv6/seg6.c)
+and the callee (net/ipv6/seg6_hmac.c) belong to the same module.
+It seems to be an internal function call in ipv6.ko.
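+
+To illustrate the hazard (a hypothetical example, names not from this
+patch): an exported __init symbol lives in .init.text, which is freed
+once boot finishes, so a module loaded later would call into discarded
+memory:
+
+    int __init foo_init(void)   /* hypothetical; placed in .init.text */
+    {
+            return 0;
+    }
+    EXPORT_SYMBOL(foo_init);    /* a module may resolve and call this
+                                 * after .init.text has been freed
+                                 */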
+
+Fixes: bf355b8d2c30 ("ipv6: sr: add core files for SR HMAC support")
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 29bc4e7c3046..6de01185cc68 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -399,7 +399,6 @@ int __init seg6_hmac_init(void)
+ {
+ return seg6_hmac_init_algo();
+ }
+-EXPORT_SYMBOL(seg6_hmac_init);
+
+ int __net_init seg6_hmac_net_init(struct net *net)
+ {
+--
+2.35.1
+
--- /dev/null
+From 8f727673a409f567a77ea3266302168d64bf6d90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 13:53:53 +0900
+Subject: net: mdio: unexport __init-annotated mdio_bus_init()
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 35b42dce619701f1300fb8498dae82c9bb1f0263 ]
+
+EXPORT_SYMBOL and __init is a bad combination because the .init.text
+section is freed up after the initialization. Hence, modules cannot
+use symbols annotated __init. The access to a freed symbol may end up
+with a kernel panic.
+
+modpost used to detect it, but it has been broken for a decade.
+
+Recently, I fixed modpost so it started to warn about it again, then this
+showed up in linux-next builds.
+
+There are two ways to fix it:
+
+ - Remove __init
+ - Remove EXPORT_SYMBOL
+
+I chose the latter for this case because the only in-tree call-site,
+drivers/net/phy/phy_device.c is never compiled as modular.
+(CONFIG_PHYLIB is boolean)
+
+Fixes: 90eff9096c01 ("net: phy: Allow splitting MDIO bus/device support from PHYs")
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mdio_bus.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 58d602985877..8a2dbe849866 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void)
+
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(mdio_bus_init);
+
+ #if IS_ENABLED(CONFIG_PHYLIB)
+ void mdio_bus_exit(void)
+--
+2.35.1
+
--- /dev/null
+From f34fca2b4593b7f6176b15718ac08bd3bf093bcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 14:57:18 +0300
+Subject: net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit f5826c8c9d57210a17031af5527056eefdc2b7eb ]
+
+The ioctl EEPROM query wrongly returns success on read failures; fix
+that by returning the appropriate error code.
+
+Fixes: 7202da8b7f71 ("ethtool, net/mlx4_en: Cable info, get_module_info/eeprom ethtool support")
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://lore.kernel.org/r/20220606115718.14233-1-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index ed5038d98ef6..6400a827173c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
+ en_err(priv,
+ "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
+ i, offset, ee->len - i, ret);
+- return 0;
++ return ret;
+ }
+
+ i += ret;
+--
+2.35.1
+
--- /dev/null
+From 6815213be0dca36e2d1249ee034f7894f83c2eb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Feb 2021 23:10:47 -0800
+Subject: net/mlx5: Fix mlx5_get_next_dev() peer device matching
+
+From: Saeed Mahameed <saeedm@nvidia.com>
+
+[ Upstream commit 1c5de097bea31760c3f0467ac0c84ba0dc3525d5 ]
+
+In some use-cases, mlx5 instances will need to search for their peer
+device (the other port on the same HCA). For that, mlx5 device matching
+mechanism relied on auxiliary_find_device() to search, and used a bad matching
+callback function.
+
+This approach has two issues:
+
+1) next_phys_dev(), the matching function, assumed all devices are
+   of the type mlx5_adev (mlx5 auxiliary device), which is wrong and
+   could lead to crashes. This worked for a while, since only lately
+   have other drivers started registering auxiliary devices.
+
+2) using the auxiliary class bus (auxiliary_find_device) to search for
+   mlx5_core_dev devices, which are actually PCIe device instances, is wrong.
+ This works since mlx5_core always has at least one mlx5_adev instance
+ hanging around in the aux bus.
+
+As suggested by others, we could fix 1) by comparing device name prefixes
+and checking whether they contain the string "mlx5_core", which is not a
+best practice! But even with that fixed, 2) still needs fixing: we are
+trying to match PCIe device peers, so we should look in the right bus
+(the PCI bus), hence this fix.
+
+The fix:
+1) search the pci bus for mlx5 peer devices, instead of the aux bus
+2) to validate that the devices are of the same type "mlx5_core_dev",
+   compare whether they have the same driver, which is bulletproof.
+
+ This wouldn't have worked with the aux bus since the various mlx5 aux
+ device types don't share the same driver, even if they share the same device
+ wrapper struct (mlx5_adev) "which helped to find the parent device"
+
+Fixes: a925b5e309c9 ("net/mlx5: Register mlx5 devices to auxiliary virtual bus")
+Reported-by: Alexander Lobakin <alexandr.lobakin@intel.com>
+Reported-by: Maher Sanalla <msanalla@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Maher Sanalla <msanalla@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/dev.c | 34 +++++++++++++------
+ 1 file changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 3e750b827a19..c5d7bf662784 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -571,18 +571,32 @@ static int _next_phys_dev(struct mlx5_core_dev *mdev,
+ return 1;
+ }
+
++static void *pci_get_other_drvdata(struct device *this, struct device *other)
++{
++ if (this->driver != other->driver)
++ return NULL;
++
++ return pci_get_drvdata(to_pci_dev(other));
++}
++
+ static int next_phys_dev(struct device *dev, const void *data)
+ {
+- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+- struct mlx5_core_dev *mdev = madev->mdev;
++ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
++
++ mdev = pci_get_other_drvdata(this->device, dev);
++ if (!mdev)
++ return 0;
+
+ return _next_phys_dev(mdev, data);
+ }
+
+ static int next_phys_dev_lag(struct device *dev, const void *data)
+ {
+- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+- struct mlx5_core_dev *mdev = madev->mdev;
++ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
++
++ mdev = pci_get_other_drvdata(this->device, dev);
++ if (!mdev)
++ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
+ !MLX5_CAP_GEN(mdev, lag_master) ||
+@@ -595,19 +609,17 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
+ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
+ int (*match)(struct device *dev, const void *data))
+ {
+- struct auxiliary_device *adev;
+- struct mlx5_adev *madev;
++ struct device *next;
+
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+- adev = auxiliary_find_device(NULL, dev, match);
+- if (!adev)
++ next = bus_find_device(&pci_bus_type, NULL, dev, match);
++ if (!next)
+ return NULL;
+
+- madev = container_of(adev, struct mlx5_adev, adev);
+- put_device(&adev->dev);
+- return madev->mdev;
++ put_device(next);
++ return pci_get_drvdata(to_pci_dev(next));
+ }
+
+ /* Must be called with intf_mutex held */
+--
+2.35.1
+
--- /dev/null
+From 89dc388d21a301add2edfe9aa781fa7b753421e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 May 2022 10:46:59 +0300
+Subject: net/mlx5: fs, fail conflicting actions
+
+From: Mark Bloch <mbloch@nvidia.com>
+
+[ Upstream commit 8fa5e7b20e01042b14f8cd684d2da9b638460c74 ]
+
+When combining two steering rules into one, check
+not only that they share the same action flags but
+that the actions themselves are the same. This resolves
+an issue where, when two different rules are created with
+the same match, the actions are overwritten; then, when one
+of the rules is deleted, a FW syndrome can be seen in dmesg.
+
+mlx5_core 0000:03:00.0: mlx5_cmd_check:819:(pid 2105): DEALLOC_MODIFY_HEADER_CONTEXT(0x941) op_mod(0x0) failed, status bad resource state(0x9), syndrome (0x1ab444)
+
+Fixes: 0d235c3fabb7 ("net/mlx5: Add hash table to search FTEs in a flow-group")
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 35 +++++++++++++++++--
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index ab184e154eea..beedaf5b03ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1560,9 +1560,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
+ return NULL;
+ }
+
+-static bool check_conflicting_actions(u32 action1, u32 action2)
++static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
++ const struct mlx5_fs_vlan *vlan1)
+ {
+- u32 xored_actions = action1 ^ action2;
++ return vlan0->ethtype != vlan1->ethtype ||
++ vlan0->vid != vlan1->vid ||
++ vlan0->prio != vlan1->prio;
++}
++
++static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
++ const struct mlx5_flow_act *act2)
++{
++ u32 action1 = act1->action;
++ u32 action2 = act2->action;
++ u32 xored_actions;
++
++ xored_actions = action1 ^ action2;
+
+ /* if one rule only wants to count, it's ok */
+ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
+@@ -1579,6 +1592,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
+ return true;
+
++ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
++ act1->pkt_reformat != act2->pkt_reformat)
++ return true;
++
++ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
++ act1->modify_hdr != act2->modify_hdr)
++ return true;
++
++ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
++ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
++ return true;
++
++ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
++ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
++ return true;
++
+ return false;
+ }
+
+@@ -1586,7 +1615,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
+ const struct mlx5_flow_context *flow_context,
+ const struct mlx5_flow_act *flow_act)
+ {
+- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
++ if (check_conflicting_actions(flow_act, &fte->action)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Found two FTEs with conflicting actions\n");
+ return -EEXIST;
+--
+2.35.1
+
--- /dev/null
+From 3b5ea5786d886c295384325df9ee4f8503f20690 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Feb 2022 12:40:39 +0000
+Subject: net/mlx5: Lag, filter non compatible devices
+
+From: Mark Bloch <mbloch@nvidia.com>
+
+[ Upstream commit bc4c2f2e017949646b43fdcad005a03462d437c6 ]
+
+When searching for a peer lag device we can filter based on that
+device's capabilities.
+
+A downstream patch will be less strict when filtering compatible devices:
+it will remove the limitation that requires exactly MLX5_MAX_PORTS ports
+and change it to a range.
+
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/dev.c | 48 +++++++++++++++----
+ .../net/ethernet/mellanox/mlx5/core/lag/lag.c | 12 ++---
+ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 1 +
+ 3 files changed, 47 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index ba6dad97e308..3e750b827a19 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -555,12 +555,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
+ PCI_SLOT(dev->pdev->devfn));
+ }
+
+-static int next_phys_dev(struct device *dev, const void *data)
++static int _next_phys_dev(struct mlx5_core_dev *mdev,
++ const struct mlx5_core_dev *curr)
+ {
+- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+- struct mlx5_core_dev *mdev = madev->mdev;
+- const struct mlx5_core_dev *curr = data;
+-
+ if (!mlx5_core_is_pf(mdev))
+ return 0;
+
+@@ -574,8 +571,29 @@ static int next_phys_dev(struct device *dev, const void *data)
+ return 1;
+ }
+
+-/* Must be called with intf_mutex held */
+-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
++static int next_phys_dev(struct device *dev, const void *data)
++{
++ struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
++ struct mlx5_core_dev *mdev = madev->mdev;
++
++ return _next_phys_dev(mdev, data);
++}
++
++static int next_phys_dev_lag(struct device *dev, const void *data)
++{
++ struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
++ struct mlx5_core_dev *mdev = madev->mdev;
++
++ if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
++ !MLX5_CAP_GEN(mdev, lag_master) ||
++ MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS)
++ return 0;
++
++ return _next_phys_dev(mdev, data);
++}
++
++static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
++ int (*match)(struct device *dev, const void *data))
+ {
+ struct auxiliary_device *adev;
+ struct mlx5_adev *madev;
+@@ -583,7 +601,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+- adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
++ adev = auxiliary_find_device(NULL, dev, match);
+ if (!adev)
+ return NULL;
+
+@@ -592,6 +610,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+ return madev->mdev;
+ }
+
++/* Must be called with intf_mutex held */
++struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
++{
++ lockdep_assert_held(&mlx5_intf_mutex);
++ return mlx5_get_next_dev(dev, &next_phys_dev);
++}
++
++/* Must be called with intf_mutex held */
++struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
++{
++ lockdep_assert_held(&mlx5_intf_mutex);
++ return mlx5_get_next_dev(dev, &next_phys_dev_lag);
++}
++
+ void mlx5_dev_list_lock(void)
+ {
+ mutex_lock(&mlx5_intf_mutex);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 6cad3b72c133..a8b98242edb1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -924,12 +924,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
+ struct mlx5_lag *ldev = NULL;
+ struct mlx5_core_dev *tmp_dev;
+
+- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+- !MLX5_CAP_GEN(dev, lag_master) ||
+- MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
+- return 0;
+-
+- tmp_dev = mlx5_get_next_phys_dev(dev);
++ tmp_dev = mlx5_get_next_phys_dev_lag(dev);
+ if (tmp_dev)
+ ldev = tmp_dev->priv.lag;
+
+@@ -974,6 +969,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
+ {
+ int err;
+
++ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
++ !MLX5_CAP_GEN(dev, lag_master) ||
++ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
++ return;
++
+ recheck:
+ mlx5_dev_list_lock();
+ err = __mlx5_lag_dev_add_mdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 9026be1d6223..484cb1e4fc7f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -210,6 +210,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev);
+ int mlx5_register_device(struct mlx5_core_dev *dev);
+ void mlx5_unregister_device(struct mlx5_core_dev *dev);
+ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
++struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
+ void mlx5_dev_list_lock(void);
+ void mlx5_dev_list_unlock(void);
+ int mlx5_dev_list_trylock(void);
+--
+2.35.1
+
--- /dev/null
+From 6897591301dec371f9c78f5c52cb23cf4bcd05a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Mar 2022 21:47:48 +0200
+Subject: net/mlx5: Rearm the FW tracer after each tracer event
+
+From: Feras Daoud <ferasda@nvidia.com>
+
+[ Upstream commit 8bf94e6414c9481bfa28269022688ab445d0081d ]
+
+The current design does not arm the tracer if traces are available before
+the tracer string database is fully loaded, leading to a non-functional tracer.
+This fix rearms the tracer every time the FW triggers a tracer event,
+regardless of the tracer string database status.
+
+Fixes: c71ad41ccb0c ("net/mlx5: FW tracer, events handling")
+Signed-off-by: Feras Daoud <ferasda@nvidia.com>
+Signed-off-by: Roy Novich <royno@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index eae9aa9c0811..978a2bb8e122 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+ if (!tracer->owner)
+ return;
+
++ if (unlikely(!tracer->str_db.loaded))
++ goto arm;
++
+ block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+ &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+ }
+
++arm:
+ mlx5_fw_tracer_arm(dev);
+ }
+
+@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ break;
+ case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
+- if (likely(tracer->str_db.loaded))
+- queue_work(tracer->work_queue, &tracer->handle_traces_work);
++ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ break;
+ default:
+ mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
+--
+2.35.1
+
--- /dev/null
+From f3205b8914e88252474753fde706a69c227e1cc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 18:37:18 +0300
+Subject: net/mlx5e: CT: Fix cleanup of CT before cleanup of TC ct rules
+
+From: Paul Blakey <paulb@nvidia.com>
+
+[ Upstream commit 15ef9efa855cf405fadd78272e1e5d04e09a1cf3 ]
+
+CT cleanup assumes that all tc rules were deleted first, and so
+is free to delete the CT shared resources (e.g. the dr_action
+fwd_action, which is shared for all tuples). But currently for
+uplink this happens in reverse, causing the trace below.
+
+CT cleanup is called from:
+mlx5e_cleanup_rep_tx()->mlx5e_cleanup_uplink_rep_tx()->
+mlx5e_rep_tc_cleanup()->mlx5e_tc_esw_cleanup()->
+mlx5_tc_ct_clean()
+
+Only afterwards is tc cleanup called from:
+mlx5e_cleanup_rep_tx()->mlx5e_tc_ht_cleanup()
+which would have deleted all the tc ct rules, and with them
+all the offloaded tuples.
+
+Fix this by reversing the order of init and cleanup, which
+will result in tc cleanup running before ct cleanup.
+
+[ 9443.593347] WARNING: CPU: 2 PID: 206774 at drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c:1882 mlx5dr_action_destroy+0x188/0x1a0 [mlx5_core]
+[ 9443.593349] Modules linked in: act_ct nf_flow_table rdma_ucm(O) rdma_cm(O) iw_cm(O) ib_ipoib(O) ib_cm(O) ib_umad(O) mlx5_core(O-) mlxfw(O) mlxdevm(O) auxiliary(O) ib_uverbs(O) psample ib_core(O) mlx_compat(O) ip_gre gre ip_tunnel act_vlan bonding geneve esp6_offload esp6 esp4_offload esp4 act_tunnel_key vxlan ip6_udp_tunnel udp_tunnel act_mirred act_skbedit act_gact cls_flower sch_ingress nfnetlink_cttimeout nfnetlink xfrm_user xfrm_algo 8021q garp stp ipmi_devintf mrp ipmi_msghandler llc openvswitch nsh nf_conncount nf_nat mst_pciconf(O) dm_multipath sbsa_gwdt uio_pdrv_genirq uio mlxbf_pmc mlxbf_pka mlx_trio mlx_bootctl(O) bluefield_edac sch_fq_codel ip_tables ipv6 crc_ccitt btrfs zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor xor_neon raid6_pq raid1 raid0 crct10dif_ce i2c_mlxbf gpio_mlxbf2 mlxbf_gige aes_neon_bs aes_neon_blk [last unloaded: mlx5_ib]
+[ 9443.593419] CPU: 2 PID: 206774 Comm: modprobe Tainted: G O 5.4.0-1023.24.gc14613d-bluefield #1
+[ 9443.593422] Hardware name: https://www.mellanox.com BlueField SoC/BlueField SoC, BIOS BlueField:143ebaf Jan 11 2022
+[ 9443.593424] pstate: 20000005 (nzCv daif -PAN -UAO)
+[ 9443.593489] pc : mlx5dr_action_destroy+0x188/0x1a0 [mlx5_core]
+[ 9443.593545] lr : mlx5_ct_fs_smfs_destroy+0x24/0x30 [mlx5_core]
+[ 9443.593546] sp : ffff8000135dbab0
+[ 9443.593548] x29: ffff8000135dbab0 x28: ffff0003a6ab8e80
+[ 9443.593550] x27: 0000000000000000 x26: ffff0003e07d7000
+[ 9443.593552] x25: ffff800009609de0 x24: ffff000397fb2120
+[ 9443.593554] x23: ffff0003975c0000 x22: 0000000000000000
+[ 9443.593556] x21: ffff0003975f08c0 x20: ffff800009609de0
+[ 9443.593558] x19: ffff0003c8a13380 x18: 0000000000000014
+[ 9443.593560] x17: 0000000067f5f125 x16: 000000006529c620
+[ 9443.593561] x15: 000000000000000b x14: 0000000000000000
+[ 9443.593563] x13: 0000000000000002 x12: 0000000000000001
+[ 9443.593565] x11: ffff800011108868 x10: 0000000000000000
+[ 9443.593567] x9 : 0000000000000000 x8 : ffff8000117fb270
+[ 9443.593569] x7 : ffff0003ebc01288 x6 : 0000000000000000
+[ 9443.593571] x5 : ffff800009591ab8 x4 : fffffe000f6d9a20
+[ 9443.593572] x3 : 0000000080040001 x2 : fffffe000f6d9a20
+[ 9443.593574] x1 : ffff8000095901d8 x0 : 0000000000000025
+[ 9443.593577] Call trace:
+[ 9443.593634] mlx5dr_action_destroy+0x188/0x1a0 [mlx5_core]
+[ 9443.593688] mlx5_ct_fs_smfs_destroy+0x24/0x30 [mlx5_core]
+[ 9443.593743] mlx5_tc_ct_clean+0x34/0xa8 [mlx5_core]
+[ 9443.593797] mlx5e_tc_esw_cleanup+0x58/0x88 [mlx5_core]
+[ 9443.593851] mlx5e_rep_tc_cleanup+0x24/0x30 [mlx5_core]
+[ 9443.593905] mlx5e_cleanup_rep_tx+0x6c/0x78 [mlx5_core]
+[ 9443.593959] mlx5e_detach_netdev+0x74/0x98 [mlx5_core]
+[ 9443.594013] mlx5e_netdev_change_profile+0x70/0x180 [mlx5_core]
+[ 9443.594067] mlx5e_netdev_attach_nic_profile+0x34/0x40 [mlx5_core]
+[ 9443.594122] mlx5e_vport_rep_unload+0x15c/0x1a8 [mlx5_core]
+[ 9443.594177] mlx5_eswitch_unregister_vport_reps+0x228/0x298 [mlx5_core]
+[ 9443.594231] mlx5e_rep_remove+0x2c/0x38 [mlx5_core]
+[ 9443.594236] auxiliary_bus_remove+0x30/0x50 [auxiliary]
+[ 9443.594246] device_release_driver_internal+0x108/0x1d0
+[ 9443.594248] driver_detach+0x5c/0xe8
+[ 9443.594250] bus_remove_driver+0x64/0xd8
+[ 9443.594253] driver_unregister+0x38/0x60
+[ 9443.594255] auxiliary_driver_unregister+0x24/0x38 [auxiliary]
+[ 9443.594311] mlx5e_rep_cleanup+0x20/0x38 [mlx5_core]
+[ 9443.594365] mlx5e_cleanup+0x18/0x30 [mlx5_core]
+[ 9443.594419] cleanup+0xc/0x20cc [mlx5_core]
+[ 9443.594424] __arm64_sys_delete_module+0x154/0x2b0
+[ 9443.594429] el0_svc_common.constprop.0+0xf4/0x200
+[ 9443.594432] el0_svc_handler+0x38/0xa8
+[ 9443.594435] el0_svc+0x10/0x26c
+
+Fixes: d1a3138f7913 ("net/mlx5e: TC, Move flow hashtable to be per rep")
+Signed-off-by: Paul Blakey <paulb@nvidia.com>
+Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_rep.c | 31 ++++++++++---------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index a464461f1418..52caefdbabb1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -937,6 +937,13 @@ static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+ return err;
+ }
+
++static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
++{
++ mlx5e_rep_tc_netdevice_event_unregister(rpriv);
++ mlx5e_rep_bond_cleanup(rpriv);
++ mlx5e_rep_tc_cleanup(rpriv);
++}
++
+ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+@@ -948,42 +955,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+ return err;
+ }
+
+- err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+- if (err)
+- goto err_ht_init;
+-
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
+ err = mlx5e_init_uplink_rep_tx(rpriv);
+ if (err)
+ goto err_init_tx;
+ }
+
++ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
++ if (err)
++ goto err_ht_init;
++
+ return 0;
+
+-err_init_tx:
+- mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+ err_ht_init:
++ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
++ mlx5e_cleanup_uplink_rep_tx(rpriv);
++err_init_tx:
+ mlx5e_destroy_tises(priv);
+ return err;
+ }
+
+-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+-{
+- mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+- mlx5e_rep_bond_cleanup(rpriv);
+- mlx5e_rep_tc_cleanup(rpriv);
+-}
+-
+ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+- mlx5e_destroy_tises(priv);
++ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
+
+- mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
++ mlx5e_destroy_tises(priv);
+ }
+
+ static void mlx5e_rep_enable(struct mlx5e_priv *priv)
+--
+2.35.1
+
--- /dev/null
+From a16d3dd7a3d80c40b5c0ea64cc78b44a1af65eac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 11:19:17 +0200
+Subject: net: seg6: fix seg6_lookup_any_nexthop() to handle VRFs using
+ flowi_l3mdev
+
+From: Andrea Mayer <andrea.mayer@uniroma2.it>
+
+[ Upstream commit a3bd2102e464202b58d57390a538d96f57ffc361 ]
+
+Commit 40867d74c374 ("net: Add l3mdev index to flow struct and avoid oif
+reset for port devices") adds a new entry (flowi_l3mdev) in the common
+flow struct used for indicating the l3mdev index for later rule and
+table matching.
+The l3mdev_update_flow() has been adapted to properly set the
+flowi_l3mdev based on the flowi_oif/flowi_iif. In fact, when a valid
+flowi_iif is supplied to the l3mdev_update_flow(), this function can
+update the flowi_l3mdev entry only if it has not yet been set (i.e., the
+flowi_l3mdev entry is equal to 0).
+
+The SRv6 End.DT6 behavior in VRF mode leverages a VRF device in order to
+force the routing lookup into the associated routing table. This routing
+operation is performed by seg6_lookup_any_nexthop(), which prepares a flowi6
+data structure used by ip6_route_input_lookup() which, in turn,
+(indirectly) invokes l3mdev_update_flow().
+
+However, seg6_lookup_any_nexthop() does not initialize the new
+flowi_l3mdev entry which is filled with random garbage data. This
+prevents l3mdev_update_flow() from properly updating the flowi_l3mdev
+with the VRF index, and thus SRv6 End.DT6 (VRF mode)/DT46 behaviors are
+broken.
+
+This patch correctly initializes the flowi6 instance allocated and used
+by seg6_lookup_any_nexthop(). Specifically, the entire flowi6 instance
+is wiped out: in case new entries are added to flowi/flowi6 (as happened
+with the flowi_l3mdev entry), we should no longer have incorrectly
+initialized values. As a result of this operation, the value of
+flowi_l3mdev is also set to 0.
+
+The proposed fix can be tested easily. Starting from the commit
+referenced in the Fixes, selftests [1],[2] indicate that the SRv6
+End.DT6 (VRF mode)/DT46 behaviors no longer work correctly. With this
+patch applied, those behaviors work properly again.
+
+[1] - tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+[2] - tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh
+
+Fixes: 40867d74c374 ("net: Add l3mdev index to flow struct and avoid oif reset for port devices")
+Reported-by: Anton Makarov <am@3a-alliance.com>
+Signed-off-by: Andrea Mayer <andrea.mayer@uniroma2.it>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20220608091917.20345-1-andrea.mayer@uniroma2.it
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_local.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 9fbe243a0e81..98a34287439c 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -218,6 +218,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ struct flowi6 fl6;
+ int dev_flags = 0;
+
++ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_iif = skb->dev->ifindex;
+ fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
+ fl6.saddr = hdr->saddr;
+--
+2.35.1
+
--- /dev/null
+From b0c8e6ea671cea8424c3d1a81b02dbc10b2c211a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 13:53:54 +0900
+Subject: net: xfrm: unexport __init-annotated xfrm4_protocol_init()
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 4a388f08d8784af48f352193d2b72aaf167a57a1 ]
+
+EXPORT_SYMBOL and __init is a bad combination because the .init.text
+section is freed up after initialization. Hence, modules cannot
+use symbols annotated __init. Access to a freed symbol may end up
+in a kernel panic.
+
+modpost used to detect it, but it has been broken for a decade.
+
+Recently, I fixed modpost so it started to warn about it again, and this
+showed up in linux-next builds.
+
+There are two ways to fix it:
+
+ - Remove __init
+ - Remove EXPORT_SYMBOL
+
+I chose the latter for this case because the only in-tree call site,
+net/ipv4/xfrm4_policy.c, is never compiled as modular.
+(CONFIG_XFRM is boolean)
+
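+A minimal sketch of the problematic pattern (hypothetical symbol name, not
+taken from this patch; needs <linux/init.h> and <linux/export.h>):
+
+    void __init foo_init_helper(void)   /* placed in .init.text */
+    {
+            /* one-time boot setup */
+    }
+    EXPORT_SYMBOL(foo_init_helper);      /* bad: .init.text is freed after boot,
+                                          * yet a module could still call this */
+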
+Fixes: 2f32b51b609f ("xfrm: Introduce xfrm_input_afinfo to access the the callbacks properly")
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/xfrm4_protocol.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
+index 2fe5860c21d6..b146ce88c5d0 100644
+--- a/net/ipv4/xfrm4_protocol.c
++++ b/net/ipv4/xfrm4_protocol.c
+@@ -304,4 +304,3 @@ void __init xfrm4_protocol_init(void)
+ {
+ xfrm_input_register_afinfo(&xfrm4_input_afinfo);
+ }
+-EXPORT_SYMBOL(xfrm4_protocol_init);
+--
+2.35.1
+
--- /dev/null
+From 57e369ab4e629aad5b302b94ff025305b0ba73b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 10:47:35 +0200
+Subject: netfilter: nat: really support inet nat without l3 address
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 282e5f8fe907dc3f2fbf9f2103b0e62ffc3a68a5 ]
+
+When no l3 address is given, priv->family is set to NFPROTO_INET and
+the evaluation function isn't called.
+
+Call it in that case too so the l4-only rewrite can work.
+Also add a test case for this.
+
+Fixes: a33f387ecd5aa ("netfilter: nft_nat: allow to specify layer 4 protocol NAT only")
+Reported-by: Yi Chen <yiche@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_nat.c | 3 +-
+ tools/testing/selftests/netfilter/nft_nat.sh | 43 ++++++++++++++++++++
+ 2 files changed, 45 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 4394df4bc99b..e5fd6995e4bf 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -335,7 +335,8 @@ static void nft_nat_inet_eval(const struct nft_expr *expr,
+ {
+ const struct nft_nat *priv = nft_expr_priv(expr);
+
+- if (priv->family == nft_pf(pkt))
++ if (priv->family == nft_pf(pkt) ||
++ priv->family == NFPROTO_INET)
+ nft_nat_eval(expr, regs, pkt);
+ }
+
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+index eb8543b9a5c4..924ecb3f1f73 100755
+--- a/tools/testing/selftests/netfilter/nft_nat.sh
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -374,6 +374,45 @@ EOF
+ return $lret
+ }
+
++test_local_dnat_portonly()
++{
++ local family=$1
++ local daddr=$2
++ local lret=0
++ local sr_s
++ local sr_r
++
++ip netns exec "$ns0" nft -f /dev/stdin <<EOF
++table $family nat {
++ chain output {
++ type nat hook output priority 0; policy accept;
++ meta l4proto tcp dnat to :2000
++
++ }
++}
++EOF
++ if [ $? -ne 0 ]; then
++ if [ $family = "inet" ];then
++ echo "SKIP: inet port test"
++ test_inet_nat=false
++ return
++ fi
++ echo "SKIP: Could not add $family dnat hook"
++ return
++ fi
++
++ echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
++ sc_s=$!
++
++ result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
++
++ if [ "$result" = "SERVER-inet" ];then
++ echo "PASS: inet port rewrite without l3 address"
++ else
++ echo "ERROR: inet port rewrite"
++ ret=1
++ fi
++}
+
+ test_masquerade6()
+ {
+@@ -1148,6 +1187,10 @@ fi
+ reset_counters
+ test_local_dnat ip
+ test_local_dnat6 ip6
++
++reset_counters
++test_local_dnat_portonly inet 10.0.1.99
++
+ reset_counters
+ $test_inet_nat && test_local_dnat inet
+ $test_inet_nat && test_local_dnat6 inet
+--
+2.35.1
+
--- /dev/null
+From 264df56cd35408eb04f80be6d7981a84f7230378 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 17:49:36 +0200
+Subject: netfilter: nf_tables: always initialize flowtable hook list in
+ transaction
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 2c9e4559773c261900c674a86b8e455911675d71 ]
+
+The hook list is used if nft_trans_flowtable_update(trans) == true. However,
+initialize this list for the other cases too, for safety reasons.
+
+Fixes: 78d9f48f7f44 ("netfilter: nf_tables: add devices to existing flowtable")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f23c40e6caa6..e515fa7d1ca2 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -544,6 +544,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ if (msg_type == NFT_MSG_NEWFLOWTABLE)
+ nft_activate_next(ctx->net, flowtable);
+
++ INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+ nft_trans_flowtable(trans) = flowtable;
+ nft_trans_commit_list_add_tail(ctx->net, trans);
+
+--
+2.35.1
+
--- /dev/null
+From 4b362def9782972fed034d3359f393f70410af61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 17:31:29 +0200
+Subject: netfilter: nf_tables: bail out early if hardware offload is not
+ supported
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 3a41c64d9c1185a2f3a184015e2a9b78bfc99c71 ]
+
+If the user requests NFT_CHAIN_HW_OFFLOAD, check that either the device
+provides the .ndo_setup_tc interface or an indirect flow block has been
+registered. Otherwise, bail out early from the preparation
+phase. Moreover, validate that family == NFPROTO_NETDEV and the hook is
+NF_NETDEV_INGRESS.
+
+Fixes: c9626a2cbdb2 ("netfilter: nf_tables: add hardware offload support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/flow_offload.h | 1 +
+ include/net/netfilter/nf_tables_offload.h | 2 +-
+ net/core/flow_offload.c | 6 ++++++
+ net/netfilter/nf_tables_api.c | 2 +-
+ net/netfilter/nf_tables_offload.c | 23 ++++++++++++++++++++++-
+ 5 files changed, 31 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
+index 021778a7e1af..6484095a8c01 100644
+--- a/include/net/flow_offload.h
++++ b/include/net/flow_offload.h
+@@ -612,5 +612,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
+ enum tc_setup_type type, void *data,
+ struct flow_block_offload *bo,
+ void (*cleanup)(struct flow_block_cb *block_cb));
++bool flow_indr_dev_exists(void);
+
+ #endif /* _NET_FLOW_OFFLOAD_H */
+diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
+index 797147843958..3568b6a2f5f0 100644
+--- a/include/net/netfilter/nf_tables_offload.h
++++ b/include/net/netfilter/nf_tables_offload.h
+@@ -92,7 +92,7 @@ int nft_flow_rule_offload_commit(struct net *net);
+ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+ memset(&(__reg)->mask, 0xff, (__reg)->len);
+
+-int nft_chain_offload_priority(struct nft_base_chain *basechain);
++bool nft_chain_offload_support(const struct nft_base_chain *basechain);
+
+ int nft_offload_init(void);
+ void nft_offload_exit(void);
+diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
+index 73f68d4625f3..929f6379a279 100644
+--- a/net/core/flow_offload.c
++++ b/net/core/flow_offload.c
+@@ -595,3 +595,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
+ return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
+ }
+ EXPORT_SYMBOL(flow_indr_dev_setup_offload);
++
++bool flow_indr_dev_exists(void)
++{
++ return !list_empty(&flow_block_indr_dev_list);
++}
++EXPORT_SYMBOL(flow_indr_dev_exists);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index bce7da870bce..81243c834abb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2166,7 +2166,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
+ chain->flags |= NFT_CHAIN_BASE | flags;
+ basechain->policy = NF_ACCEPT;
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+- nft_chain_offload_priority(basechain) < 0)
++ !nft_chain_offload_support(basechain))
+ return -EOPNOTSUPP;
+
+ flow_block_init(&basechain->flow_block);
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 2d36952b1392..910ef881c3b8 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -208,7 +208,7 @@ static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
+ return 0;
+ }
+
+-int nft_chain_offload_priority(struct nft_base_chain *basechain)
++static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
+ {
+ if (basechain->ops.priority <= 0 ||
+ basechain->ops.priority > USHRT_MAX)
+@@ -217,6 +217,27 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain)
+ return 0;
+ }
+
++bool nft_chain_offload_support(const struct nft_base_chain *basechain)
++{
++ struct net_device *dev;
++ struct nft_hook *hook;
++
++ if (nft_chain_offload_priority(basechain) < 0)
++ return false;
++
++ list_for_each_entry(hook, &basechain->hook_list, list) {
++ if (hook->ops.pf != NFPROTO_NETDEV ||
++ hook->ops.hooknum != NF_NETDEV_INGRESS)
++ return false;
++
++ dev = hook->ops.dev;
++ if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists())
++ return false;
++ }
++
++ return true;
++}
++
+ static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
+ const struct nft_base_chain *basechain,
+ const struct nft_rule *rule,
+--
+2.35.1
+
--- /dev/null
+From cfe41cd2b2b8235bd62945b27df5874024825d77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 May 2022 18:40:06 +0200
+Subject: netfilter: nf_tables: delete flowtable hooks via transaction list
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit b6d9014a3335194590abdd2a2471ef5147a67645 ]
+
+Remove the inactive bool field in the nft_hook object that was introduced in
+abadb2f865d7 ("netfilter: nf_tables: delete devices from flowtable").
+Move stale flowtable hooks to the transaction list instead.
+
+Deleting the same device twice does not result in ENOENT.
+
+Fixes: abadb2f865d7 ("netfilter: nf_tables: delete devices from flowtable")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 1 -
+ net/netfilter/nf_tables_api.c | 31 ++++++-------------------------
+ 2 files changed, 6 insertions(+), 26 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 20af9d3557b9..279ae0fff7ad 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1090,7 +1090,6 @@ struct nft_stats {
+
+ struct nft_hook {
+ struct list_head list;
+- bool inactive;
+ struct nf_hook_ops ops;
+ struct rcu_head rcu;
+ };
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index a0981e7cb211..f23c40e6caa6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1914,7 +1914,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
+ goto err_hook_dev;
+ }
+ hook->ops.dev = dev;
+- hook->inactive = false;
+
+ return hook;
+
+@@ -7612,6 +7611,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+ {
+ const struct nlattr * const *nla = ctx->nla;
+ struct nft_flowtable_hook flowtable_hook;
++ LIST_HEAD(flowtable_del_list);
+ struct nft_hook *this, *hook;
+ struct nft_trans *trans;
+ int err;
+@@ -7627,7 +7627,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+ err = -ENOENT;
+ goto err_flowtable_del_hook;
+ }
+- hook->inactive = true;
++ list_move(&hook->list, &flowtable_del_list);
+ }
+
+ trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
+@@ -7640,6 +7640,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+ nft_trans_flowtable(trans) = flowtable;
+ nft_trans_flowtable_update(trans) = true;
+ INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
++ list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans));
+ nft_flowtable_hook_release(&flowtable_hook);
+
+ nft_trans_commit_list_add_tail(ctx->net, trans);
+@@ -7647,13 +7648,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+ return 0;
+
+ err_flowtable_del_hook:
+- list_for_each_entry(this, &flowtable_hook.list, list) {
+- hook = nft_hook_list_find(&flowtable->hook_list, this);
+- if (!hook)
+- break;
+-
+- hook->inactive = false;
+- }
++ list_splice(&flowtable_del_list, &flowtable->hook_list);
+ nft_flowtable_hook_release(&flowtable_hook);
+
+ return err;
+@@ -8559,17 +8554,6 @@ void nft_chain_del(struct nft_chain *chain)
+ list_del_rcu(&chain->list);
+ }
+
+-static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable,
+- struct list_head *hook_list)
+-{
+- struct nft_hook *hook, *next;
+-
+- list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+- if (hook->inactive)
+- list_move(&hook->list, hook_list);
+- }
+-}
+-
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -8914,8 +8898,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ break;
+ case NFT_MSG_DELFLOWTABLE:
+ if (nft_trans_flowtable_update(trans)) {
+- nft_flowtable_hooks_del(nft_trans_flowtable(trans),
+- &nft_trans_flowtable_hooks(trans));
+ nf_tables_flowtable_notify(&trans->ctx,
+ nft_trans_flowtable(trans),
+ &nft_trans_flowtable_hooks(trans),
+@@ -8996,7 +8978,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nft_trans *trans, *next;
+ struct nft_trans_elem *te;
+- struct nft_hook *hook;
+
+ if (action == NFNL_ABORT_VALIDATE &&
+ nf_tables_validate(net) < 0)
+@@ -9127,8 +9108,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ break;
+ case NFT_MSG_DELFLOWTABLE:
+ if (nft_trans_flowtable_update(trans)) {
+- list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list)
+- hook->inactive = false;
++ list_splice(&nft_trans_flowtable_hooks(trans),
++ &nft_trans_flowtable(trans)->hook_list);
+ } else {
+ trans->ctx.table->use++;
+ nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+--
+2.35.1
+
--- /dev/null
+From 9e20132246f376f084e9eafd4c93b420228dd103 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 17:15:57 +0200
+Subject: netfilter: nf_tables: memleak flow rule from commit path
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 9dd732e0bdf538b1b76dc7c157e2b5e560ff30d3 ]
+
+The abort path releases the flow rule object; however, the commit path does
+not. Update the code to destroy these objects before releasing the transaction.
+
+Fixes: c9626a2cbdb2 ("netfilter: nf_tables: add hardware offload support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5c9a53729a28..bce7da870bce 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8323,6 +8323,9 @@ static void nft_commit_release(struct nft_trans *trans)
+ nf_tables_chain_destroy(&trans->ctx);
+ break;
+ case NFT_MSG_DELRULE:
++ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
++
+ nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ break;
+ case NFT_MSG_DELSET:
+@@ -8813,6 +8816,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ nf_tables_rule_notify(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_MSG_NEWRULE);
++ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
++
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_DELRULE:
+--
+2.35.1
+
--- /dev/null
+From 7a43ce23cace5729217b1800689ed5ce06a9cfde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Jun 2022 13:40:06 +0200
+Subject: netfilter: nf_tables: release new hooks on unsupported flowtable
+ flags
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit c271cc9febaaa1bcbc0842d1ee30466aa6148ea8 ]
+
+Release the list of new hooks that are pending registration in case
+unsupported flowtable flags are provided.
+
+Fixes: 78d9f48f7f44 ("netfilter: nf_tables: add devices to existing flowtable")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index e515fa7d1ca2..5c9a53729a28 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7427,11 +7427,15 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+
+ if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+- if (flags & ~NFT_FLOWTABLE_MASK)
+- return -EOPNOTSUPP;
++ if (flags & ~NFT_FLOWTABLE_MASK) {
++ err = -EOPNOTSUPP;
++ goto err_flowtable_update_hook;
++ }
+ if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
+- (flags & NFT_FLOWTABLE_HW_OFFLOAD))
+- return -EOPNOTSUPP;
++ (flags & NFT_FLOWTABLE_HW_OFFLOAD)) {
++ err = -EOPNOTSUPP;
++ goto err_flowtable_update_hook;
++ }
+ } else {
+ flags = flowtable->data.flags;
+ }
+--
+2.35.1
+
--- /dev/null
+From 997b5ca4f1b88b6a87d5559558f216a24b3de6ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 16:00:00 +0200
+Subject: netfilter: nf_tables: use kfree_rcu(ptr, rcu) to release hooks in
+ clean_net path
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit ab5e5c062f67c5ae8cd07f0632ffa62dc0e7d169 ]
+
+Use the kfree_rcu(ptr, rcu) variant instead, as described by ae089831ff28
+("netfilter: nf_tables: prefer kfree_rcu(ptr, rcu) variant").
+
+Fixes: f9a43007d3f7 ("netfilter: nf_tables: double hook unregistration in netns path")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index b6a920813005..a0981e7cb211 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7326,7 +7326,7 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ nf_unregister_net_hook(net, &hook->ops);
+ if (release_netdev) {
+ list_del(&hook->list);
+- kfree_rcu(hook);
++ kfree_rcu(hook, rcu);
+ }
+ }
+ }
+--
+2.35.1
+
--- /dev/null
+From ecf63e30aaad6952294c51bfc63708c0387f435d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 11:29:01 +0200
+Subject: nfp: flower: restructure flow-key for gre+vlan combination
+
+From: Etienne van der Linde <etienne.vanderlinde@corigine.com>
+
+[ Upstream commit a0b843340dae704e17c1ddfad0f85c583c36757f ]
+
+Swap around the GRE and VLAN parts in the flow-key offloaded by
+the driver to fit in with other tunnel types and the firmware.
+Without this change, use cases with GRE+VLAN on the outer header
+do not get offloaded, as the flow-key mismatches what the
+firmware expects.
+
+Fixes: 0d630f58989a ("nfp: flower: add support to offload QinQ match")
+Fixes: 5a2b93041646 ("nfp: flower-ct: compile match sections of flow_payload")
+Signed-off-by: Etienne van der Linde <etienne.vanderlinde@corigine.com>
+Signed-off-by: Louis Peens <louis.peens@corigine.com>
+Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
+Signed-off-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/netronome/nfp/flower/conntrack.c | 32 +++++++++----------
+ .../net/ethernet/netronome/nfp/flower/match.c | 16 +++++-----
+ 2 files changed, 24 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index bfd7d1c35076..7e9fcc16286e 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -442,6 +442,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
+ key_size += sizeof(struct nfp_flower_ipv6);
+ }
+
++ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
++ map[FLOW_PAY_QINQ] = key_size;
++ key_size += sizeof(struct nfp_flower_vlan);
++ }
++
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ map[FLOW_PAY_GRE] = key_size;
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+@@ -450,11 +455,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
+ key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
+ }
+
+- if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+- map[FLOW_PAY_QINQ] = key_size;
+- key_size += sizeof(struct nfp_flower_vlan);
+- }
+-
+ if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
+ (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
+ map[FLOW_PAY_UDP_TUN] = key_size;
+@@ -693,6 +693,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+ }
+ }
+
++ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
++ offset = key_map[FLOW_PAY_QINQ];
++ key = kdata + offset;
++ msk = mdata + offset;
++ for (i = 0; i < _CT_TYPE_MAX; i++) {
++ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
++ (struct nfp_flower_vlan *)msk,
++ rules[i]);
++ }
++ }
++
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ offset = key_map[FLOW_PAY_GRE];
+ key = kdata + offset;
+@@ -733,17 +744,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+ }
+ }
+
+- if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+- offset = key_map[FLOW_PAY_QINQ];
+- key = kdata + offset;
+- msk = mdata + offset;
+- for (i = 0; i < _CT_TYPE_MAX; i++) {
+- nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+- (struct nfp_flower_vlan *)msk,
+- rules[i]);
+- }
+- }
+-
+ if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ offset = key_map[FLOW_PAY_UDP_TUN];
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
+index 9d86eea4dc16..fb8bd2135c63 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
+@@ -602,6 +602,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
+ msk += sizeof(struct nfp_flower_ipv6);
+ }
+
++ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
++ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
++ (struct nfp_flower_vlan *)msk,
++ rule);
++ ext += sizeof(struct nfp_flower_vlan);
++ msk += sizeof(struct nfp_flower_vlan);
++ }
++
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+@@ -637,14 +645,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
+ }
+ }
+
+- if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+- nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+- (struct nfp_flower_vlan *)msk,
+- rule);
+- ext += sizeof(struct nfp_flower_vlan);
+- msk += sizeof(struct nfp_flower_vlan);
+- }
+-
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+--
+2.35.1
+
--- /dev/null
+From c5d1fd28967c681a632f33a53cbca260462e1656 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 May 2022 19:49:01 -0400
+Subject: NFSD: Fix potential use-after-free in nfsd_file_put()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit b6c71c66b0ad8f2b59d9bc08c7a5079b110bec01 ]
+
+nfsd_file_put_noref() can free @nf, so don't dereference @nf
+immediately upon return from nfsd_file_put_noref().
+
+Suggested-by: Trond Myklebust <trondmy@hammerspace.com>
+Fixes: 999397926ab3 ("nfsd: Clean up nfsd_file_put()")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/filecache.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 2c1b027774d4..0326bdec5de7 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -306,11 +306,12 @@ nfsd_file_put(struct nfsd_file *nf)
+ if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+ nfsd_file_flush(nf);
+ nfsd_file_put_noref(nf);
+- } else {
++ } else if (nf->nf_file) {
+ nfsd_file_put_noref(nf);
+- if (nf->nf_file)
+- nfsd_file_schedule_laundrette();
+- }
++ nfsd_file_schedule_laundrette();
++ } else
++ nfsd_file_put_noref(nf);
++
+ if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
+ nfsd_file_gc();
+ }
+--
+2.35.1
+
--- /dev/null
+From 1918345bb02d2867509ea84faef193e442fe64b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jun 2022 00:31:14 +1000
+Subject: powerpc/kasan: Force thread size increase with KASAN
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 3e8635fb2e072672cbc650989ffedf8300ad67fb ]
+
+KASAN causes increased stack usage, which can lead to stack overflows.
+
+The logic in Kconfig to suggest a larger default doesn't work if a user
+has CONFIG_EXPERT enabled and has an existing .config with a smaller
+value.
+
+Follow the lead of x86 and arm64, and force the thread size to be
+increased when KASAN is enabled.
+
+That also has the effect of enlarging the stack for 64-bit KASAN builds,
+which is also desirable.
+
+Fixes: edbadaf06710 ("powerpc/kasan: Fix stack overflow by increasing THREAD_SHIFT")
+Reported-by: Erhard Furtner <erhard_f@mailbox.org>
+Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+[mpe: Use MIN_THREAD_SHIFT as suggested by Christophe]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220601143114.133524-1-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 1 -
+ arch/powerpc/include/asm/thread_info.h | 10 ++++++++--
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 174edabb74fa..efb03d8d1f8b 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -771,7 +771,6 @@ config THREAD_SHIFT
+ range 13 15
+ default "15" if PPC_256K_PAGES
+ default "14" if PPC64
+- default "14" if KASAN
+ default "13"
+ help
+ Used to define the stack size. The default is almost always what you
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 125328d1b980..af58f1ed3952 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -14,10 +14,16 @@
+
+ #ifdef __KERNEL__
+
+-#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
++#ifdef CONFIG_KASAN
++#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
++#else
++#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
++#endif
++
++#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
+ #define THREAD_SHIFT PAGE_SHIFT
+ #else
+-#define THREAD_SHIFT CONFIG_THREAD_SHIFT
++#define THREAD_SHIFT MIN_THREAD_SHIFT
+ #endif
+
+ #define THREAD_SIZE (1 << THREAD_SHIFT)
+--
+2.35.1
+
--- /dev/null
+From 8690d2d8739ef3845ce867bbf6b1faf6cd970698 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 May 2022 16:53:53 +0530
+Subject: powerpc/papr_scm: don't requests stats with '0' sized stats buffer
+
+From: Vaibhav Jain <vaibhav@linux.ibm.com>
+
+[ Upstream commit 07bf9431b1590d1cd7a8d62075d0b50b073f0495 ]
+
+Sachin reported [1] that on a POWER10 LPAR he is seeing a kernel panic
+with vPMEM when the papr_scm probe is called. The panic is of the
+form below and is observed only when the 'Enable Performance Information
+Collection' option (profile) is disabled for the said LPAR in the HMC:
+
+ Kernel attempted to write user page (1c) - exploit attempt? (uid: 0)
+ BUG: Kernel NULL pointer dereference on write at 0x0000001c
+ Faulting instruction address: 0xc008000001b90844
+ Oops: Kernel access of bad area, sig: 11 [#1]
+<snip>
+ NIP [c008000001b90844] drc_pmem_query_stats+0x5c/0x270 [papr_scm]
+ LR [c008000001b92794] papr_scm_probe+0x2ac/0x6ec [papr_scm]
+ Call Trace:
+ 0xc00000000941bca0 (unreliable)
+ papr_scm_probe+0x2ac/0x6ec [papr_scm]
+ platform_probe+0x98/0x150
+ really_probe+0xfc/0x510
+ __driver_probe_device+0x17c/0x230
+<snip>
+ ---[ end trace 0000000000000000 ]---
+ Kernel panic - not syncing: Fatal exception
+
+On investigation it looks like this panic was caused by a 'stat_buffer' of
+size == 0 being provided to drc_pmem_query_stats() to fetch all performance
+stat-ids of an NVDIMM. However, drc_pmem_query_stats() shouldn't have been called,
+since the vPMEM NVDIMM doesn't support any performance stat-ids. This was caused
+by a missing check for 'p->stat_buffer_len' at the beginning of
+papr_scm_pmu_check_events(); a zero length indicates that the NVDIMM doesn't
+support performance stats.
+
+Fix this by introducing the check for 'p->stat_buffer_len' at the beginning of
+papr_scm_pmu_check_events().
+
+[1] https://lore.kernel.org/all/6B3A522A-6A5F-4CC9-B268-0C63AA6E07D3@linux.ibm.com
+
+Fixes: 0e0946e22f3665d2732 ("powerpc/papr_scm: Fix leaking nvdimm_events_map elements")
+Reported-by: Sachin Sant <sachinp@linux.ibm.com>
+Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
+Tested-by: Sachin Sant <sachinp@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220524112353.1718454-1-vaibhav@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/platforms/pseries/papr_scm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 181b855b3050..82cae08976bc 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -465,6 +465,9 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
+ u32 available_events;
+ int index, rc = 0;
+
++ if (!p->stat_buffer_len)
++ return -ENOENT;
++
+ available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
+ / sizeof(struct papr_scm_perf_stat);
+ if (available_events == 0)
+--
+2.35.1
+
--- /dev/null
+From 0be8eaac562587f6312a99e085b500fd85c5e7dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 14:45:17 +0800
+Subject: selftests net: fix bpf build error
+
+From: Lina Wang <lina.wang@mediatek.com>
+
+[ Upstream commit cf67838c4422eab826679b076dad99f96152b4de ]
+
+bpf_helpers.h has been moved to tools/lib/bpf since 5.10, so add an
+additional include path.
+
+Fixes: edae34a3ed92 ("selftests net: add UDP GRO fraglist + bpf self-tests")
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Signed-off-by: Lina Wang <lina.wang@mediatek.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/r/20220606064517.8175-1-lina.wang@mediatek.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/bpf/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
+index f91bf14bbee7..8a69c91fcca0 100644
+--- a/tools/testing/selftests/net/bpf/Makefile
++++ b/tools/testing/selftests/net/bpf/Makefile
+@@ -2,6 +2,7 @@
+
+ CLANG ?= clang
+ CCINCLUDE += -I../../bpf
++CCINCLUDE += -I../../../lib
+ CCINCLUDE += -I../../../../../usr/include/
+
+ TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
+@@ -10,5 +11,4 @@ all: $(TEST_CUSTOM_PROGS)
+ $(OUTPUT)/%.o: %.c
+ $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
+
+-clean:
+- rm -f $(TEST_CUSTOM_PROGS)
++EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+--
+2.35.1
+
rtla-makefile-properly-handle-dependencies.patch
f2fs-fix-to-tag-gcing-flag-on-page-during-file-defra.patch
xprtrdma-treat-all-calls-not-a-bcall-when-bc_serv-is.patch
+drm-bridge-ti-sn65dsi83-handle-dsi_lanes-0-as-invali.patch
+drm-panfrost-job-should-reference-mmu-not-file_priv.patch
+powerpc-papr_scm-don-t-requests-stats-with-0-sized-s.patch
+netfilter-nat-really-support-inet-nat-without-l3-add.patch
+netfilter-nf_tables-use-kfree_rcu-ptr-rcu-to-release.patch
+netfilter-nf_tables-delete-flowtable-hooks-via-trans.patch
+powerpc-kasan-force-thread-size-increase-with-kasan.patch
+nfsd-fix-potential-use-after-free-in-nfsd_file_put.patch
+sunrpc-trap-rdma-segment-overflows.patch
+netfilter-nf_tables-always-initialize-flowtable-hook.patch
+ata-pata_octeon_cf-fix-refcount-leak-in-octeon_cf_pr.patch
+netfilter-nf_tables-release-new-hooks-on-unsupported.patch
+netfilter-nf_tables-memleak-flow-rule-from-commit-pa.patch
+netfilter-nf_tables-bail-out-early-if-hardware-offlo.patch
+amt-fix-wrong-usage-of-pskb_may_pull.patch
+amt-fix-possible-null-ptr-deref-in-amt_rcv.patch
+amt-fix-wrong-type-string-definition.patch
+net-ethernet-bgmac-fix-refcount-leak-in-bcma_mdio_mi.patch
+xen-unexport-__init-annotated-xen_xlate_map_balloone.patch
+stmmac-intel-fix-an-error-handling-path-in-intel_eth.patch
+af_unix-fix-a-data-race-in-unix_dgram_peer_wake_me.patch
+selftests-net-fix-bpf-build-error.patch
+x86-drop-bogus-cc-clobber-from-__try_cmpxchg_user_as.patch
+bpf-arm64-clear-prog-jited_len-along-prog-jited.patch
+net-dsa-lantiq_gswip-fix-refcount-leak-in-gswip_gphy.patch
+net-mlx4_en-fix-wrong-return-value-on-ioctl-eeprom-q.patch
+xsk-fix-handling-of-invalid-descriptors-in-xsk-tx-ba.patch
+drm-amdgpu-fix-limiting-av1-to-the-first-instance-on.patch
+sunrpc-fix-the-calculation-of-xdr-end-in-xdr_get_nex.patch
+net-mdio-unexport-__init-annotated-mdio_bus_init.patch
+net-xfrm-unexport-__init-annotated-xfrm4_protocol_in.patch
+net-ipv6-unexport-__init-annotated-seg6_hmac_init.patch
+net-mlx5e-ct-fix-cleanup-of-ct-before-cleanup-of-tc-.patch
+net-mlx5-lag-filter-non-compatible-devices.patch
+net-mlx5-fix-mlx5_get_next_dev-peer-device-matching.patch
+net-mlx5-rearm-the-fw-tracer-after-each-tracer-event.patch
+net-mlx5-fs-fail-conflicting-actions.patch
+ip_gre-test-csum_start-instead-of-transport-header.patch
+net-altera-fix-refcount-leak-in-altera_tse_mdio_crea.patch
+net-dsa-mv88e6xxx-use-bmsr_anegcomplete-bit-for-fill.patch
+net-dsa-realtek-rtl8365mb-fix-gmii-caps-for-ports-wi.patch
+tcp-use-alloc_large_system_hash-to-allocate-table_pe.patch
+drm-imx-fix-compiler-warning-with-gcc-12.patch
+nfp-flower-restructure-flow-key-for-gre-vlan-combina.patch
+net-seg6-fix-seg6_lookup_any_nexthop-to-handle-vrfs-.patch
+iov_iter-fix-iter_xarray_get_pages-_alloc.patch
--- /dev/null
+From 79fe8c2ebbe83ddaa02714532e437f1043ac29fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Jun 2022 22:50:48 +0200
+Subject: stmmac: intel: Fix an error handling path in intel_eth_pci_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 5e74a4b3ec1816e3bbfd715d46ae29d2508079cb ]
+
+When the managed API is used, there is no need to explicitly call
+pci_free_irq_vectors().
+
+This looks to be a left-over from the commit in the Fixes tag. Only the
+.remove() function had been updated.
+
+So remove this unneeded function call and update the goto label accordingly.
+
+Fixes: 8accc467758e ("stmmac: intel: use managed PCI function on probe and resume")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Wong Vee Khee <vee.khee.wong@linux.intel.com>
+Link: https://lore.kernel.org/r/1ac9b6787b0db83b0095711882c55c77c8ea8da0.1654462241.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 0b0be0898ac5..f6d8109e7edc 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret) {
+- goto err_dvr_probe;
++ goto err_alloc_irq;
+ }
+
+ return 0;
+
+-err_dvr_probe:
+- pci_free_irq_vectors(pdev);
+ err_alloc_irq:
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+--
+2.35.1
+
--- /dev/null
+From b1275a195650aa9014f5e299e0ea798ae9598233 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 16:47:52 -0400
+Subject: SUNRPC: Fix the calculation of xdr->end in
+ xdr_get_next_encode_buffer()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 6c254bf3b637dd4ef4f78eb78c7447419c0161d7 ]
+
+I found that NFSD's new NFSv3 READDIRPLUS XDR encoder was screwing up
+right at the end of the page array. xdr_get_next_encode_buffer() does
+not compute the value of xdr->end correctly:
+
+ * The check to see if we're on the final available page in xdr->buf
+ needs to account for the space consumed by @nbytes.
+
+ * The new xdr->end value needs to account for the portion of @nbytes
+ that is to be encoded into the previous buffer.
+
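+A rough worked illustration of the two points above, with numbers made
+up for this example rather than taken from a real trace: assume
+PAGE_SIZE is 4096, space_left (buflen - len) is 4100, @nbytes is 100,
+frag1bytes is 40 and thus frag2bytes is 60. The old code computed
+
+    xdr->end = p + min(4100, 4096) = p + 4096
+
+even though only 4100 - 40 = 4060 bytes remain beyond @p (the other 40
+bytes sit at the tail of the previous buffer). With the fix,
+4100 - 100 < 4096 selects the final-page branch and xdr->end becomes
+p + 4060, so the encoder can no longer run up to 36 bytes past the end
+of the buffer.
+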
+Fixes: 2825a7f90753 ("nfsd4: allow encoding across page boundaries")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Reviewed-by: NeilBrown <neilb@suse.de>
+Reviewed-by: J. Bruce Fields <bfields@fieldses.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xdr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index df194cc07035..b57cf9df4de8 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -979,7 +979,11 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
+ */
+ xdr->p = (void *)p + frag2bytes;
+ space_left = xdr->buf->buflen - xdr->buf->len;
+- xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
++ if (space_left - nbytes >= PAGE_SIZE)
++ xdr->end = (void *)p + PAGE_SIZE;
++ else
++ xdr->end = (void *)p + space_left - frag1bytes;
++
+ xdr->buf->page_len += frag2bytes;
+ xdr->buf->len += nbytes;
+ return p;
+--
+2.35.1
+
--- /dev/null
+From cc966f8d2026bc38f89a8311efbd3e3f54c275e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 12:46:52 -0400
+Subject: SUNRPC: Trap RDMA segment overflows
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit f012e95b377c73c0283f009823c633104dedb337 ]
+
+Prevent svc_rdma_build_writes() from walking off the end of a Write
+chunk's segment array. Caught with KASAN.
+
+The test that this fix replaces is invalid, and might have been left
+over from an earlier prototype of the PCL work.
+
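+A minimal sketch of the invalid pattern (names simplified here, not the
+actual svcrdma code): taking the address of an array element can never
+yield NULL, so only an explicit bound on the index protects the array:
+
+    struct segment segs[NSEGS];
+    unsigned int i = next_seg_no;
+
+    if (&segs[i] == NULL)   /* always false, even when i >= NSEGS */
+        goto out_overflow;
+
+    if (i >= NSEGS)         /* the check that actually traps the overflow */
+        goto out_overflow;
+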
+Fixes: 7a1cbfa18059 ("svcrdma: Use parsed chunk lists to construct RDMA Writes")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/svc_rdma_rw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+index 5f0155fdefc7..11cf7c646644 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -478,10 +478,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
+ unsigned int write_len;
+ u64 offset;
+
+- seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
+- if (!seg)
++ if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
+ goto out_overflow;
+
++ seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
+ write_len = min(remaining, seg->rs_length - info->wi_seg_off);
+ if (!write_len)
+ goto out_overflow;
+--
+2.35.1
+
--- /dev/null
+From c038c7654b34068d6e3a77d1988977f556554d78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 15:02:14 +0800
+Subject: tcp: use alloc_large_system_hash() to allocate table_perturb
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+[ Upstream commit e67b72b90b7e19a4be4d9c29f3feea6f58ab43f8 ]
+
+On our servers there may be no high-order (>= 6) memory available, since
+we reserve lots of HugeTLB pages when booting; the allocation then fails
+and the system panics. So use alloc_large_system_hash() to allocate
+table_perturb instead.
+
+Fixes: e9261476184b ("tcp: dynamically allocate the perturb table used by source ports")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20220607070214.94443-1-songmuchun@bytedance.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/inet_hashtables.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index a5d57fa679ca..55654e335d43 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -917,10 +917,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ init_hashinfo_lhash2(h);
+
+ /* this one is used for source ports of outgoing connections */
+- table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
+- sizeof(*table_perturb), GFP_KERNEL);
+- if (!table_perturb)
+- panic("TCP: failed to alloc table_perturb");
++ table_perturb = alloc_large_system_hash("Table-perturb",
++ sizeof(*table_perturb),
++ INET_TABLE_PERTURB_SIZE,
++ 0, 0, NULL, NULL,
++ INET_TABLE_PERTURB_SIZE,
++ INET_TABLE_PERTURB_SIZE);
+ }
+
+ int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
+--
+2.35.1
+
--- /dev/null
+From 43cf98aa01c405e57ade5c0b2075ad533bcb8386 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 17:00:53 +0200
+Subject: x86: drop bogus "cc" clobber from __try_cmpxchg_user_asm()
+
+From: Jan Beulich <jbeulich@suse.com>
+
+[ Upstream commit 1df931d95f4dc1c11db1123e85d4e08156e46ef9 ]
+
+As noted (and fixed) a couple of times in the past, "=@cc<cond>" outputs
+and clobbering of "cc" don't work well together. The compiler is
+apparently meant to reject such a combination, but in its upstream form
+it doesn't yet quite manage to do so for "cc". Furthermore, two similar
+macros don't clobber "cc", and clobbering "cc" is pointless in asm()-s
+for x86 anyway - the compiler always assumes the status flags to be
+clobbered there.
+
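+A minimal sketch of the construct in question (not the uaccess macro
+itself), relying on GCC's x86 flag-output operands:
+
+    int val = 42;
+    bool zero;
+
+    /* ZF is delivered through the "=@ccz" output; also listing "cc" in
+     * the clobbers is what the compiler is meant to reject, and on x86
+     * it is redundant anyway because the status flags are always
+     * assumed clobbered by an asm().
+     */
+    asm ("testl %1, %1" : "=@ccz" (zero) : "r" (val));
+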
+Fixes: 989b5db215a2 ("x86/uaccess: Implement macros for CMPXCHG on user addresses")
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Message-Id: <485c0c0b-a3a7-0b7c-5264-7d00c01de032@suse.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/uaccess.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 35f222aa66bf..913e593a3b45 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -439,7 +439,7 @@ do { \
+ [ptr] "+m" (*_ptr), \
+ [old] "+a" (__old) \
+ : [new] ltype (__new) \
+- : "memory", "cc"); \
++ : "memory"); \
+ if (unlikely(__err)) \
+ goto label; \
+ if (unlikely(!success)) \
+--
+2.35.1
+
--- /dev/null
+From 1ea62fd71dc7834674c5b4d53249b4d8510bb269 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 13:59:20 +0900
+Subject: xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit dbac14a5a05ff8e1ce7c0da0e1f520ce39ec62ea ]
+
+EXPORT_SYMBOL and __init are a bad combination because the .init.text
+section is freed up after initialization. Hence, modules cannot use
+symbols annotated __init. Accessing a freed symbol may end up in a
+kernel panic.
+
+modpost used to detect it, but it has been broken for a decade.
+
+Recently, I fixed modpost so it started to warn about it again, and this
+showed up in linux-next builds.
+
+There are two ways to fix it:
+
+ - Remove __init
+ - Remove EXPORT_SYMBOL
+
+I chose the latter for this case because none of the in-tree call-sites
+(arch/arm/xen/enlighten.c, arch/x86/xen/grant-table.c) is compiled as
+modular.
+
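+A minimal sketch of the pattern being removed (illustrative names, not
+the Xen code):
+
+    /* lives in .init.text and is discarded once boot finishes */
+    int __init my_setup(void)
+    {
+            return 0;
+    }
+    /* exporting it would let a module jump into freed memory */
+    EXPORT_SYMBOL_GPL(my_setup);
+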
+Fixes: 243848fc018c ("xen/grant-table: Move xlated_setup_gnttab_pages to common place")
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+Acked-by: Stefano Stabellini <sstabellini@kernel.org>
+Link: https://lore.kernel.org/r/20220606045920.4161881-1-masahiroy@kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xlate_mmu.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
+index 34742c6e189e..f17c4c03db30 100644
+--- a/drivers/xen/xlate_mmu.c
++++ b/drivers/xen/xlate_mmu.c
+@@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+ struct remap_pfn {
+ struct mm_struct *mm;
+--
+2.35.1
+
--- /dev/null
+From 26e52fb741e4bb4da680e33690b19f651570df9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 16:22:00 +0200
+Subject: xsk: Fix handling of invalid descriptors in XSK TX batching API
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit d678cbd2f867a564a3c5b276c454e873f43f02f8 ]
+
+xdpxceiver run on an AF_XDP ZC enabled driver revealed a problem with
+the XSK Tx batching API. There is a test that checks how invalid Tx
+descriptors are handled by AF_XDP. Each valid descriptor is followed by
+an invalid one on the Tx side, whereas the Rx side expects to receive
+only a set of valid descriptors.
+
+In the current xsk_tx_peek_release_desc_batch() function, the amount of
+available descriptors is hidden inside xskq_cons_peek_desc_batch(). This
+can be problematic when invalid descriptors are present, because
+xskq_cons_peek_desc_batch() returns only a count of valid descriptors.
+This makes it impossible to properly update the XSK ring state when
+calling xskq_cons_release_n().
+
+To address this issue, pull out the contents of
+xskq_cons_peek_desc_batch() so that callers (currently only
+xsk_tx_peek_release_desc_batch()) are always able to update the state
+of the ring properly, as the total count of entries is now available,
+and use this value as an argument to xskq_cons_release_n(). By doing
+so, xskq_cons_peek_desc_batch() can be dropped altogether.
+
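+The resulting shape of the Tx path is roughly the following (a sketch of
+the idea, not a verbatim copy of the new code):
+
+    /* how many entries were actually peeked, valid or not */
+    nb_entries = xskq_cons_nb_entries(xs->tx, max_entries);
+    /* how many of those carry valid descriptors */
+    nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_entries);
+    ...
+    /* release everything that was peeked, not only the valid part */
+    xskq_cons_release_n(xs->tx, nb_entries);
+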
+Fixes: 9349eb3a9d2a ("xsk: Introduce batched Tx descriptor interfaces")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20220607142200.576735-1-maciej.fijalkowski@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 5 +++--
+ net/xdp/xsk_queue.h | 8 --------
+ 2 files changed, 3 insertions(+), 10 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 3a9348030e20..d6bcdbfd0fc5 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -373,7 +373,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
+ goto out;
+ }
+
+- nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
++ max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
++ nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
+ if (!nb_pkts) {
+ xs->tx->queue_empty_descs++;
+ goto out;
+@@ -389,7 +390,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
+ if (!nb_pkts)
+ goto out;
+
+- xskq_cons_release_n(xs->tx, nb_pkts);
++ xskq_cons_release_n(xs->tx, max_entries);
+ __xskq_cons_release(xs->tx);
+ xs->sk.sk_write_space(&xs->sk);
+
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 801cda5d1938..64b43f31942f 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -282,14 +282,6 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
+ return xskq_cons_read_desc(q, desc, pool);
+ }
+
+-static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
+- u32 max)
+-{
+- u32 entries = xskq_cons_nb_entries(q, max);
+-
+- return xskq_cons_read_desc_batch(q, pool, entries);
+-}
+-
+ /* To improve performance in the xskq_cons_release functions, only update local state here.
+ * Reflect this to global state when we get new entries from the ring in
+ * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop.
+--
+2.35.1
+