--- /dev/null
+From 94d5838602655a70aab2ba97885936e81cf24497 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 15:59:57 +0100
+Subject: afs: Fix dynamic root getattr
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit cb78d1b5efffe4cf97e16766329dd7358aed3deb ]
+
+The recent patch to make afs_getattr consult the server didn't account
+for the pseudo-inodes employed by the dynamic root-type afs superblock
+not having a volume or a server to access, and thus an oops occurs if
+such a directory is stat'd.
+
+Fix this by checking to see if the vnode->volume pointer actually points
+anywhere before following it in afs_getattr().
+
+This can be tested by stat'ing a directory in /afs. It may be
+sufficient just to do "ls /afs" and the oops looks something like:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000020
+ ...
+ RIP: 0010:afs_getattr+0x8b/0x14b
+ ...
+ Call Trace:
+ <TASK>
+ vfs_statx+0x79/0xf5
+ vfs_fstatat+0x49/0x62
+
+Fixes: 2aeb8c86d499 ("afs: Fix afs_getattr() to refetch file status if callback break occurred")
+Reported-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+Tested-by: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Link: https://lore.kernel.org/r/165408450783.1031787.7941404776393751186.stgit@warthog.procyon.org.uk/
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 22811e9eacf5..c4c9f6dff0a2 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -745,7 +745,8 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+
+ _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
+
+- if (!(query_flags & AT_STATX_DONT_SYNC) &&
++ if (vnode->volume &&
++ !(query_flags & AT_STATX_DONT_SYNC) &&
+ !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key))
+--
+2.35.1
+
--- /dev/null
+From 641abae63d1299dba10b55c6a2c80e3b715ec759 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 09:48:24 +0200
+Subject: block: disable the elevator int del_gendisk
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 50e34d78815e474d410f342fbe783b18192ca518 ]
+
+The elevator is only used for file system requests, which are stopped in
+del_gendisk. Move disabling the elevator and freeing the scheduler tags
+to the end of del_gendisk instead of doing that work in disk_release and
+blk_cleanup_queue to avoid a use after free on q->tag_set from
+disk_release as the tag_set might not be alive at that point.
+
+Move the blk_qos_exit call as well, as it just depends on the elevator
+exit and would be the only reason to keep the not exactly cheap queue
+freeze in disk_release.
+
+Fixes: e155b0c238b2 ("blk-mq: Use shared tags for shared sbitmap support")
+Reported-by: syzbot+3e3f419f4a7816471838@syzkaller.appspotmail.com
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Tested-by: syzbot+3e3f419f4a7816471838@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20220614074827.458955-2-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-core.c | 13 -------------
+ block/genhd.c | 39 +++++++++++----------------------------
+ 2 files changed, 11 insertions(+), 41 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 84f7b7884d07..a7329475aba2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q)
+ blk_mq_exit_queue(q);
+ }
+
+- /*
+- * In theory, request pool of sched_tags belongs to request queue.
+- * However, the current implementation requires tag_set for freeing
+- * requests, so free the pool now.
+- *
+- * Queue has become frozen, there can't be any in-queue requests, so
+- * it is safe to free requests now.
+- */
+- mutex_lock(&q->sysfs_lock);
+- if (q->elevator)
+- blk_mq_sched_free_rqs(q);
+- mutex_unlock(&q->sysfs_lock);
+-
+ /* @q is and will stay empty, shutdown and put */
+ blk_put_queue(q);
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 3008ec213654..13daac1a9aef 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -652,6 +652,17 @@ void del_gendisk(struct gendisk *disk)
+
+ blk_sync_queue(q);
+ blk_flush_integrity();
++ blk_mq_cancel_work_sync(q);
++
++ blk_mq_quiesce_queue(q);
++ if (q->elevator) {
++ mutex_lock(&q->sysfs_lock);
++ elevator_exit(q);
++ mutex_unlock(&q->sysfs_lock);
++ }
++ rq_qos_exit(q);
++ blk_mq_unquiesce_queue(q);
++
+ /*
+ * Allow using passthrough request again after the queue is torn down.
+ */
+@@ -1120,31 +1131,6 @@ static const struct attribute_group *disk_attr_groups[] = {
+ NULL
+ };
+
+-static void disk_release_mq(struct request_queue *q)
+-{
+- blk_mq_cancel_work_sync(q);
+-
+- /*
+- * There can't be any non non-passthrough bios in flight here, but
+- * requests stay around longer, including passthrough ones so we
+- * still need to freeze the queue here.
+- */
+- blk_mq_freeze_queue(q);
+-
+- /*
+- * Since the I/O scheduler exit code may access cgroup information,
+- * perform I/O scheduler exit before disassociating from the block
+- * cgroup controller.
+- */
+- if (q->elevator) {
+- mutex_lock(&q->sysfs_lock);
+- elevator_exit(q);
+- mutex_unlock(&q->sysfs_lock);
+- }
+- rq_qos_exit(q);
+- __blk_mq_unfreeze_queue(q, true);
+-}
+-
+ /**
+ * disk_release - releases all allocated resources of the gendisk
+ * @dev: the device representing this disk
+@@ -1166,9 +1152,6 @@ static void disk_release(struct device *dev)
+ might_sleep();
+ WARN_ON_ONCE(disk_live(disk));
+
+- if (queue_is_mq(disk->queue))
+- disk_release_mq(disk->queue);
+-
+ blkcg_exit_queue(disk->queue);
+
+ disk_release_events(disk);
+--
+2.35.1
+
--- /dev/null
+From 450750df0a575f36337994d3cbfa35d0343ee85a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 10:03:57 -0600
+Subject: block: pop cached rq before potentially blocking rq_qos_throttle()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit 2645672ffe21f0a1c139bfbc05ad30fd4e4f2583 ]
+
+If rq_qos_throttle() ends up blocking, then we will have invalidated and
+flushed our current plug. Since blk_mq_get_cached_request() hasn't
+popped the cached request off the plug list just yet, we end holding a
+pointer to a request that is no longer valid. This insta-crashes with
+rq->mq_hctx being NULL in the validity checks just after.
+
+Pop the request off the cached list before doing rq_qos_throttle() to
+avoid using a potentially stale request.
+
+Fixes: 0a5aa8d161d1 ("block: fix blk_mq_attempt_bio_merge and rq_qos_throttle protection")
+Reported-by: Dylan Yudaken <dylany@fb.com>
+Tested-by: Dylan Yudaken <dylany@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 631fb87b4976..37caa73bff89 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2777,15 +2777,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ return NULL;
+ }
+
+- rq_qos_throttle(q, *bio);
+-
+ if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+ return NULL;
+ if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+ return NULL;
+
+- rq->cmd_flags = (*bio)->bi_opf;
++ /*
++ * If any qos ->throttle() end up blocking, we will have flushed the
++ * plug and hence killed the cached_rq list as well. Pop this entry
++ * before we throttle.
++ */
+ plug->cached_rq = rq_list_next(rq);
++ rq_qos_throttle(q, *bio);
++
++ rq->cmd_flags = (*bio)->bi_opf;
+ INIT_LIST_HEAD(&rq->queuelist);
+ return rq;
+ }
+--
+2.35.1
+
--- /dev/null
+From 9fc03d918d38caabeff1b2b8b7ae2adf609fb277 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 12:32:40 -0700
+Subject: bonding: ARP monitor spams NETDEV_NOTIFY_PEERS notifiers
+
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+
+[ Upstream commit 7a9214f3d88cfdb099f3896e102a306b316d8707 ]
+
+The bonding ARP monitor fails to decrement send_peer_notif, the
+number of peer notifications (gratuitous ARP or ND) to be sent. This
+results in a continuous series of notifications.
+
+Correct this by decrementing the counter for each notification.
+
+Reported-by: Jonathan Toppins <jtoppins@redhat.com>
+Signed-off-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Fixes: b0929915e035 ("bonding: Fix RTNL: assertion failed at net/core/rtnetlink.c for ab arp monitor")
+Link: https://lore.kernel.org/netdev/b2fd4147-8f50-bebd-963a-1a3e8d1d9715@redhat.com/
+Tested-by: Jonathan Toppins <jtoppins@redhat.com>
+Reviewed-by: Jonathan Toppins <jtoppins@redhat.com>
+Link: https://lore.kernel.org/r/9400.1655407960@famine
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 26a6573adf0f..93c7a551264e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3684,9 +3684,11 @@ static void bond_activebackup_arp_mon(struct bonding *bond)
+ if (!rtnl_trylock())
+ return;
+
+- if (should_notify_peers)
++ if (should_notify_peers) {
++ bond->send_peer_notif--;
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
++ }
+ if (should_notify_rtnl) {
+ bond_slave_state_notify(bond);
+ bond_slave_link_notify(bond);
+--
+2.35.1
+
--- /dev/null
+From 0464a7bc5649d220105ac2236089be9defedf05c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 11:15:40 +1000
+Subject: bpf: Fix request_sock leak in sk lookup helpers
+
+From: Jon Maxwell <jmaxwell37@gmail.com>
+
+[ Upstream commit 3046a827316c0e55fc563b4fb78c93b9ca5c7c37 ]
+
+A customer reported a request_socket leak in a Calico cloud environment. We
+found that a BPF program was doing a socket lookup with takes a refcnt on
+the socket and that it was finding the request_socket but returning the parent
+LISTEN socket via sk_to_full_sk() without decrementing the child request socket
+1st, resulting in request_sock slab object leak. This patch retains the
+existing behaviour of returning full socks to the caller but it also decrements
+the child request_socket if one is present before doing so to prevent the leak.
+
+Thanks to Curtis Taylor for all the help in diagnosing and testing this. And
+thanks to Antoine Tenart for the reproducer and patch input.
+
+v2 of this patch contains, refactor as per Daniel Borkmann's suggestions to
+validate RCU flags on the listen socket so that it balances with bpf_sk_release()
+and update comments as per Martin KaFai Lau's suggestion. One small change to
+Daniels suggestion, put "sk = sk2" under "if (sk2 != sk)" to avoid an extra
+instruction.
+
+Fixes: f7355a6c0497 ("bpf: Check sk_fullsock() before returning from bpf_sk_lookup()")
+Fixes: edbf8c01de5a ("bpf: add skc_lookup_tcp helper")
+Co-developed-by: Antoine Tenart <atenart@kernel.org>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Signed-off-by: Jon Maxwell <jmaxwell37@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Curtis Taylor <cutaylor-pub@yahoo.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/56d6f898-bde0-bb25-3427-12a330b29fb8@iogearbox.net
+Link: https://lore.kernel.org/bpf/20220615011540.813025-1-jmaxwell37@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 8847316ee20e..af1e77f2f24a 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6506,10 +6506,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ ifindex, proto, netns_id, flags);
+
+ if (sk) {
+- sk = sk_to_full_sk(sk);
+- if (!sk_fullsock(sk)) {
++ struct sock *sk2 = sk_to_full_sk(sk);
++
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
++ * sock refcnt is decremented to prevent a request_sock leak.
++ */
++ if (!sk_fullsock(sk2))
++ sk2 = NULL;
++ if (sk2 != sk) {
+ sock_gen_put(sk);
+- return NULL;
++ /* Ensure there is no need to bump sk2 refcnt */
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
++ return NULL;
++ }
++ sk = sk2;
+ }
+ }
+
+@@ -6543,10 +6554,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ flags);
+
+ if (sk) {
+- sk = sk_to_full_sk(sk);
+- if (!sk_fullsock(sk)) {
++ struct sock *sk2 = sk_to_full_sk(sk);
++
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
++ * sock refcnt is decremented to prevent a request_sock leak.
++ */
++ if (!sk_fullsock(sk2))
++ sk2 = NULL;
++ if (sk2 != sk) {
+ sock_gen_put(sk);
+- return NULL;
++ /* Ensure there is no need to bump sk2 refcnt */
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
++ return NULL;
++ }
++ sk = sk2;
+ }
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 4af70faba867466e5aa2651cfb1c4948771041a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 18:20:36 +0200
+Subject: bpf, x86: Fix tail call count offset calculation on bpf2bpf call
+
+From: Jakub Sitnicki <jakub@cloudflare.com>
+
+[ Upstream commit ff672c67ee7635ca1e28fb13729e8ef0d1f08ce5 ]
+
+On x86-64 the tail call count is passed from one BPF function to another
+through %rax. Additionally, on function entry, the tail call count value
+is stored on stack right after the BPF program stack, due to register
+shortage.
+
+The stored count is later loaded from stack either when performing a tail
+call - to check if we have not reached the tail call limit - or before
+calling another BPF function call in order to pass it via %rax.
+
+In the latter case, we miscalculate the offset at which the tail call count
+was stored on function entry. The JIT does not take into account that the
+allocated BPF program stack is always a multiple of 8 on x86, while the
+actual stack depth does not have to be.
+
+This leads to a load from an offset that belongs to the BPF stack, as shown
+in the example below:
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ /* Have data on stack which size is not a multiple of 8 */
+ volatile char arr[1] = {};
+ return subprog_tail(skb);
+}
+
+int entry(struct __sk_buff * skb):
+ 0: (b4) w2 = 0
+ 1: (73) *(u8 *)(r10 -1) = r2
+ 2: (85) call pc+1#bpf_prog_ce2f79bb5f3e06dd_F
+ 3: (95) exit
+
+int entry(struct __sk_buff * skb):
+ 0xffffffffa0201788: nop DWORD PTR [rax+rax*1+0x0]
+ 0xffffffffa020178d: xor eax,eax
+ 0xffffffffa020178f: push rbp
+ 0xffffffffa0201790: mov rbp,rsp
+ 0xffffffffa0201793: sub rsp,0x8
+ 0xffffffffa020179a: push rax
+ 0xffffffffa020179b: xor esi,esi
+ 0xffffffffa020179d: mov BYTE PTR [rbp-0x1],sil
+ 0xffffffffa02017a1: mov rax,QWORD PTR [rbp-0x9] !!! tail call count
+ 0xffffffffa02017a8: call 0xffffffffa02017d8 !!! is at rbp-0x10
+ 0xffffffffa02017ad: leave
+ 0xffffffffa02017ae: ret
+
+Fix it by rounding up the BPF stack depth to a multiple of 8, when
+calculating the tail call count offset on stack.
+
+Fixes: ebf7d1f508a7 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT")
+Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20220616162037.535469-2-jakub@cloudflare.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 16b6efacf7c6..4c71fa04e784 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1415,8 +1415,9 @@ st: if (is_imm8(insn->off))
+ case BPF_JMP | BPF_CALL:
+ func = (u8 *) __bpf_call_base + imm32;
+ if (tail_call_reachable) {
++ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+ EMIT3_off32(0x48, 0x8B, 0x85,
+- -(bpf_prog->aux->stack_depth + 8));
++ -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ return -EINVAL;
+ } else {
+--
+2.35.1
+
--- /dev/null
+From 51e22810c4c544f06f4a20288be5af0f8a43a4ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 17:30:05 -0500
+Subject: drm/amd: Revert "drm/amd/display: keep eDP Vdd on when eDP stream is
+ already enabled"
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 937e24b7f5595566a64e0f856ebab9147f2e4d1b ]
+
+A variety of Lenovo machines with Rembrandt APUs and OLED panels have
+stopped showing the display at login. This behavior clears up after
+leaving it idle and moving the mouse or touching keyboard.
+
+It was bisected to be caused by commit 559e2655220d ("drm/amd/display:
+keep eDP Vdd on when eDP stream is already enabled"). Revert this commit
+to fix the issue.
+
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2047
+Reported-by: Aaron Ma <aaron.ma@canonical.com>
+Fixes: 559e2655220d ("drm/amd/display: keep eDP Vdd on when eDP stream is already enabled")
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Tested-by: Mark Pearson <markpearson@lenovo.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../display/dc/dce110/dce110_hw_sequencer.c | 24 ++-----------------
+ 1 file changed, 2 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 248602c15f3a..6007b847b54f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1771,29 +1771,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ break;
+ }
+ }
+-
+- /*
+- * TO-DO: So far the code logic below only addresses single eDP case.
+- * For dual eDP case, there are a few things that need to be
+- * implemented first:
+- *
+- * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
+- * stream[0 or 1] will all be checked.
+- *
+- * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+- * for each eDP.
+- *
+- * Once above 2 things are completed, we can then change the logic below
+- * correspondingly, so dual eDP case will be fully covered.
+- */
+-
+- // We are trying to enable eDP, don't power down VDD if eDP stream is existing
+- if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
++ // We are trying to enable eDP, don't power down VDD
++ if (can_apply_edp_fast_boot)
+ keep_edp_vdd_on = true;
+- DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+- } else {
+- DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+- }
+ }
+
+ // Check seamless boot support
+--
+2.35.1
+
--- /dev/null
+From 96c08b713d2bbb847a3b893684418d075ab8b176 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 10:55:39 -0700
+Subject: drm/msm/dp: check core_initialized before disable interrupts at
+ dp_display_unbind()
+
+From: Kuogee Hsieh <quic_khsieh@quicinc.com>
+
+[ Upstream commit d80c3ba0ac247791a4ed7a0cd865a64906c8906a ]
+
+During msm initialize phase, dp_display_unbind() will be called to undo
+initializations had been done by dp_display_bind() previously if there is
+error happen at msm_drm_bind. In this case, core_initialized flag had to
+be check to make sure clocks is on before update DP controller register
+to disable HPD interrupts. Otherwise system will crash due to below NOC
+fatal error.
+
+QTISECLIB [01f01a7ad]CNOC2 ERROR: ERRLOG0_LOW = 0x00061007
+QTISECLIB [01f01a7ad]GEM_NOC ERROR: ERRLOG0_LOW = 0x00001007
+QTISECLIB [01f0371a0]CNOC2 ERROR: ERRLOG0_HIGH = 0x00000003
+QTISECLIB [01f055297]GEM_NOC ERROR: ERRLOG0_HIGH = 0x00000003
+QTISECLIB [01f072beb]CNOC2 ERROR: ERRLOG1_LOW = 0x00000024
+QTISECLIB [01f0914b8]GEM_NOC ERROR: ERRLOG1_LOW = 0x00000042
+QTISECLIB [01f0ae639]CNOC2 ERROR: ERRLOG1_HIGH = 0x00004002
+QTISECLIB [01f0cc73f]GEM_NOC ERROR: ERRLOG1_HIGH = 0x00004002
+QTISECLIB [01f0ea092]CNOC2 ERROR: ERRLOG2_LOW = 0x0009020c
+QTISECLIB [01f10895f]GEM_NOC ERROR: ERRLOG2_LOW = 0x0ae9020c
+QTISECLIB [01f125ae1]CNOC2 ERROR: ERRLOG2_HIGH = 0x00000000
+QTISECLIB [01f143be7]GEM_NOC ERROR: ERRLOG2_HIGH = 0x00000000
+QTISECLIB [01f16153a]CNOC2 ERROR: ERRLOG3_LOW = 0x00000000
+QTISECLIB [01f17fe07]GEM_NOC ERROR: ERRLOG3_LOW = 0x00000000
+QTISECLIB [01f19cf89]CNOC2 ERROR: ERRLOG3_HIGH = 0x00000000
+QTISECLIB [01f1bb08e]GEM_NOC ERROR: ERRLOG3_HIGH = 0x00000000
+QTISECLIB [01f1d8a31]CNOC2 ERROR: SBM1 FAULTINSTATUS0_LOW = 0x00000002
+QTISECLIB [01f1f72a4]GEM_NOC ERROR: SBM0 FAULTINSTATUS0_LOW = 0x00000001
+QTISECLIB [01f21a217]CNOC3 ERROR: ERRLOG0_LOW = 0x00000006
+QTISECLIB [01f23dfd3]NOC error fatal
+
+changes in v2:
+-- drop the first patch (drm/msm: enable msm irq after all initializations are done successfully at msm_drm_init()) since the problem had been fixed by other patch
+
+Fixes: 570d3e5d28db ("drm/msm/dp: stop event kernel thread when DP unbind")
+Signed-off-by: Kuogee Hsieh <quic_khsieh@quicinc.com>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Patchwork: https://patchwork.freedesktop.org/patch/488387/
+Link: https://lore.kernel.org/r/1654538139-7450-1-git-send-email-quic_khsieh@quicinc.com
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dp/dp_display.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 8deb92bddfde..d11c81d8a5db 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -308,7 +308,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+
+ /* disable all HPD interrupts */
+- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
++ if (dp->core_initialized)
++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
+--
+2.35.1
+
--- /dev/null
+From 8c1ab4edc45eb747215c61746dcc06c2e39b7b76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 13:26:40 -0700
+Subject: drm/msm/dp: force link training for display resolution change
+
+From: Kuogee Hsieh <quic_khsieh@quicinc.com>
+
+[ Upstream commit a6e2af64a79afa7f1b29375b5231e840a84bb845 ]
+
+Display resolution change is implemented through drm modeset. Older
+modeset (resolution) has to be disabled first before newer modeset
+(resolution) can be enabled. Display disable will turn off both
+pixel clock and main link clock so that main link have to be
+re-trained during display enable to have new video stream flow
+again. At current implementation, display enable function manually
+kicks up irq_hpd_handle which will read panel link status and start
+link training if link status is not in sync state.
+
+However, there is rare case that a particular panel links status keep
+staying in sync for some period of time after main link had been shut
+down previously at display disabled. In this case, main link retraining
+will not be executed by irq_hdp_handle(). Hence video stream of newer
+display resolution will fail to be transmitted to panel due to main
+link is not in sync between host and panel.
+
+This patch will bypass irq_hpd_handle() in favor of directly call
+dp_ctrl_on_stream() to always perform link training in regardless of
+main link status. So that no unexpected exception resolution change
+failure cases will happen. Also this implementation are more efficient
+than manual kicking off irq_hpd_handle function.
+
+Changes in v2:
+-- set force_link_train flag on DP only (is_edp == false)
+
+Changes in v3:
+-- revise commit text
+-- add Fixes tag
+
+Changes in v4:
+-- revise commit text
+
+Changes in v5:
+-- fix spelling at commit text
+
+Changes in v6:
+-- split dp_ctrl_on_stream() for phy test case
+-- revise commit text for modeset
+
+Changes in v7:
+-- drop 0 assignment at local variable (ret = 0)
+
+Changes in v8:
+-- add patch to remove pixel_rate from dp_ctrl
+
+Changes in v9:
+-- forward declare dp_ctrl_on_stream_phy_test_report()
+
+Fixes: 62671d2ef24b ("drm/msm/dp: fixes wrong connection state caused by failure of link train")
+Signed-off-by: Kuogee Hsieh <quic_khsieh@quicinc.com>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Patchwork: https://patchwork.freedesktop.org/patch/489895/
+Link: https://lore.kernel.org/r/1655411200-7255-1-git-send-email-quic_khsieh@quicinc.com
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dp/dp_ctrl.c | 33 ++++++++++++++++++++++-------
+ drivers/gpu/drm/msm/dp/dp_ctrl.h | 2 +-
+ drivers/gpu/drm/msm/dp/dp_display.c | 13 ++++++------
+ 3 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index de1974916ad2..499d0bbc442c 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1523,6 +1523,8 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+ return ret;
+ }
+
++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
++
+ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+ {
+ int ret = 0;
+@@ -1545,7 +1547,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (!ret)
+- ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
++ ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
+ else
+ DRM_ERROR("failed to enable DP link controller\n");
+
+@@ -1800,7 +1802,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+ return dp_ctrl_setup_main_link(ctrl, &training_step);
+ }
+
+-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
++{
++ int ret;
++ struct dp_ctrl_private *ctrl;
++
++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
++
++ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
++
++ ret = dp_ctrl_enable_stream_clocks(ctrl);
++ if (ret) {
++ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
++ return ret;
++ }
++
++ dp_ctrl_send_phy_test_pattern(ctrl);
++
++ return 0;
++}
++
++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
+ {
+ int ret = 0;
+ bool mainlink_ready = false;
+@@ -1831,12 +1853,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+ goto end;
+ }
+
+- if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+- dp_ctrl_send_phy_test_pattern(ctrl);
+- return 0;
+- }
+-
+- if (!dp_ctrl_channel_eq_ok(ctrl))
++ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop txing train pattern to end link training */
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+index 2433edbc70a6..dcc7af21a5f0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+@@ -20,7 +20,7 @@ struct dp_ctrl {
+ };
+
+ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
+ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index d11c81d8a5db..12270bd3cff9 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -903,7 +903,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
+ return 0;
+ }
+
+- rc = dp_ctrl_on_stream(dp->ctrl);
++ rc = dp_ctrl_on_stream(dp->ctrl, data);
+ if (!rc)
+ dp_display->power_on = true;
+
+@@ -1590,6 +1590,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+ int rc = 0;
+ struct dp_display_private *dp_display;
+ u32 state;
++ bool force_link_train = false;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ if (!dp_display->dp_mode.drm_mode.clock) {
+@@ -1618,10 +1619,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+
+ state = dp_display->hpd_state;
+
+- if (state == ST_DISPLAY_OFF)
++ if (state == ST_DISPLAY_OFF) {
+ dp_display_host_phy_init(dp_display);
++ force_link_train = true;
++ }
+
+- dp_display_enable(dp_display, 0);
++ dp_display_enable(dp_display, force_link_train);
+
+ rc = dp_display_post_enable(dp);
+ if (rc) {
+@@ -1630,10 +1633,6 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+ dp_display_unprepare(dp);
+ }
+
+- /* manual kick off plug event to train link */
+- if (state == ST_DISPLAY_OFF)
+- dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
+-
+ /* completed connection */
+ dp_display->hpd_state = ST_CONNECTED;
+
+--
+2.35.1
+
--- /dev/null
+From 0873d5b7ce06fb0a0ad70a33c1eb6f167da9d6f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 May 2022 13:08:56 -0700
+Subject: drm/msm: Ensure mmap offset is initialized
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 036d20726c30267724416e966c9f92db07de8081 ]
+
+If a GEM object is allocated, and then exported as a dma-buf fd which is
+mmap'd before or without the GEM buffer being directly mmap'd, the
+vma_node could be unitialized. This leads to a situation where the CPU
+mapping is not correctly torn down in drm_vma_node_unmap().
+
+Fixes: e5516553999f ("drm: call drm_gem_object_funcs.mmap with fake offset")
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20220531200857.136547-1-robdclark@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_drv.c | 2 +-
+ drivers/gpu/drm/msm/msm_drv.h | 1 +
+ drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
+ 3 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index f2c46116df55..b5f6acfe7c6e 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -967,7 +967,7 @@ static const struct drm_driver msm_driver = {
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+- .gem_prime_mmap = drm_gem_prime_mmap,
++ .gem_prime_mmap = msm_gem_prime_mmap,
+ #ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+ #endif
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index d661debb50f1..9b985b641319 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -288,6 +288,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
+ void msm_gem_shrinker_init(struct drm_device *dev);
+ void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
+index 94ab705e9b8a..dcc8a573bc76 100644
+--- a/drivers/gpu/drm/msm/msm_gem_prime.c
++++ b/drivers/gpu/drm/msm/msm_gem_prime.c
+@@ -11,6 +11,21 @@
+ #include "msm_drv.h"
+ #include "msm_gem.h"
+
++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
++{
++ int ret;
++
++ /* Ensure the mmap offset is initialized. We lazily initialize it,
++ * so if it has not been first mmap'd directly as a GEM object, the
++ * mmap offset will not be already initialized.
++ */
++ ret = drm_gem_create_mmap_offset(obj);
++ if (ret)
++ return ret;
++
++ return drm_gem_prime_mmap(obj, vma);
++}
++
+ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ {
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+--
+2.35.1
+
--- /dev/null
+From b0b9242f2e6d2afb8ca9e26560a6924a780244ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 23:13:05 +0200
+Subject: drm/msm: Fix double pm_runtime_disable() call
+
+From: Maximilian Luz <luzmaximilian@gmail.com>
+
+[ Upstream commit ce0db505bc0c51ef5e9ba446c660de7e26f78f29 ]
+
+Following commit 17e822f7591f ("drm/msm: fix unbalanced
+pm_runtime_enable in adreno_gpu_{init, cleanup}"), any call to
+adreno_unbind() will disable runtime PM twice, as indicated by the call
+trees below:
+
+ adreno_unbind()
+ -> pm_runtime_force_suspend()
+ -> pm_runtime_disable()
+
+ adreno_unbind()
+ -> gpu->funcs->destroy() [= aNxx_destroy()]
+ -> adreno_gpu_cleanup()
+ -> pm_runtime_disable()
+
+Note that pm_runtime_force_suspend() is called right before
+gpu->funcs->destroy() and both functions are called unconditionally.
+
+With recent addition of the eDP AUX bus code, this problem manifests
+itself when the eDP panel cannot be found yet and probing is deferred.
+On the first probe attempt, we disable runtime PM twice as described
+above. This then causes any later probe attempt to fail with
+
+ [drm:adreno_load_gpu [msm]] *ERROR* Couldn't power up the GPU: -13
+
+preventing the driver from loading.
+
+As there seem to be scenarios where the aNxx_destroy() functions are not
+called from adreno_unbind(), simply removing pm_runtime_disable() from
+inside adreno_unbind() does not seem to be the proper fix. This is what
+commit 17e822f7591f ("drm/msm: fix unbalanced pm_runtime_enable in
+adreno_gpu_{init, cleanup}") intended to fix. Therefore, instead check
+whether runtime PM is still enabled, and only disable it in that case.
+
+Fixes: 17e822f7591f ("drm/msm: fix unbalanced pm_runtime_enable in adreno_gpu_{init, cleanup}")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Tested-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Link: https://lore.kernel.org/r/20220606211305.189585-1-luzmaximilian@gmail.com
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/adreno/adreno_gpu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 1219f71629a5..1ced7b108f2c 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -1002,7 +1002,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
+
+- pm_runtime_disable(&priv->gpu_pdev->dev);
++ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
++ pm_runtime_disable(&priv->gpu_pdev->dev);
+
+ msm_gpu_cleanup(&adreno_gpu->base);
+ }
+--
+2.35.1
+
--- /dev/null
+From bdf65d61213683323fba9420a10d62a8087fb648 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 15:08:38 +0400
+Subject: drm/msm/mdp4: Fix refcount leak in mdp4_modeset_init_intf
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit b9cc4598607cb7f7eae5c75fc1e3209cd52ff5e0 ]
+
+of_graph_get_remote_node() returns remote device node pointer with
+refcount incremented, we should use of_node_put() on it
+when it is no longer needed.
+Add missing of_node_put() to avoid refcount leak.
+
+Fixes: 86418f90a4c1 ("drm: convert drivers to use of_graph_get_remote_node")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/488473/
+Link: https://lore.kernel.org/r/20220607110841.53889-1-linmq006@gmail.com
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+index 3cf476c55158..d92193db7eb2 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+@@ -217,6 +217,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
++ of_node_put(panel_node);
+ return PTR_ERR(encoder);
+ }
+
+@@ -226,6 +227,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
++ of_node_put(panel_node);
+ return PTR_ERR(connector);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 99b324b4e8650ab43e1447557475e18e3d6e3eb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 09:13:34 -0700
+Subject: drm/msm: Switch ordering of runpm put vs devfreq_idle
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 49e477610087a02c3604061b8f3ee3a25a493987 ]
+
+In msm_devfreq_suspend() we cancel idle_work synchronously so that it
+doesn't run after we power off the hw or in the resume path. But this
+means that we want to ensure that idle_work is not scheduled *after* we
+no longer hold a runpm ref. So switch the ordering of pm_runtime_put()
+vs msm_devfreq_idle().
+
+v2. Only move the runpm _put_autosuspend, and not the _mark_last_busy()
+
+Fixes: 9bc95570175a ("drm/msm: Devfreq tuning")
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Link: https://lore.kernel.org/r/20210927152928.831245-1-robdclark@gmail.com
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Link: https://lore.kernel.org/r/20220608161334.2140611-1-robdclark@gmail.com
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_gpu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 58eb3e1662cb..7d27d7cee688 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -664,7 +664,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ msm_submit_retire(submit);
+
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+- pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_del(&submit->node);
+@@ -678,6 +677,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ msm_devfreq_idle(gpu);
+ mutex_unlock(&gpu->active_lock);
+
++ pm_runtime_put_autosuspend(&gpu->pdev->dev);
++
+ msm_gem_submit_put(submit);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 4068af99a7acf7bbb62f5aebe1c8266f09c018fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jun 2022 18:10:19 -0400
+Subject: drm/msm: use for_each_sgtable_sg to iterate over scatterlist
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+[ Upstream commit 62b5e322fb6cc5a5a91fdeba0e4e57e75d9f4387 ]
+
+The dma_map_sgtable() call (used to invalidate cache) overwrites sgt->nents
+with 1, so msm_iommu_pagetable_map maps only the first physical segment.
+
+To fix this problem use for_each_sgtable_sg(), which uses orig_nents.
+
+Fixes: b145c6e65eb0 ("drm/msm: Add support to create a local pagetable")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Link: https://lore.kernel.org/r/20220613221019.11399-1-jonathan@marek.ca
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index bcaddbba564d..a54ed354578b 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+ u64 addr = iova;
+ unsigned int i;
+
+- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
++ for_each_sgtable_sg(sgt, sg, i) {
+ size_t size = sg->length;
+ phys_addr_t phys = sg_phys(sg);
+
+--
+2.35.1
+
--- /dev/null
+From e58ef77cbfba70ae0e144e6cacaf78ae665d289b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 00:42:53 -0500
+Subject: drm/sun4i: Fix crash during suspend after component bind failure
+
+From: Samuel Holland <samuel@sholland.org>
+
+[ Upstream commit 1342b5b23da9559a1578978eaff7f797d8a87d91 ]
+
+If the component driver fails to bind, or is unbound, the driver data
+for the top-level platform device points to a freed drm_device. If the
+system is then suspended, the driver passes this dangling pointer to
+drm_mode_config_helper_suspend(), which crashes.
+
+Fix this by only setting the driver data while the platform driver holds
+a reference to the drm_device.
+
+Fixes: 624b4b48d9d8 ("drm: sun4i: Add support for suspending the display driver")
+Signed-off-by: Samuel Holland <samuel@sholland.org>
+Reviewed-by: Jernej Skrabec <jernej.skrabec@gmail.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://lore.kernel.org/r/20220615054254.16352-1-samuel@sholland.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/sun4i/sun4i_drv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 6a9ba8a77c77..4b29de65a563 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -73,7 +73,6 @@ static int sun4i_drv_bind(struct device *dev)
+ goto free_drm;
+ }
+
+- dev_set_drvdata(dev, drm);
+ drm->dev_private = drv;
+ INIT_LIST_HEAD(&drv->frontend_list);
+ INIT_LIST_HEAD(&drv->engine_list);
+@@ -114,6 +113,8 @@ static int sun4i_drv_bind(struct device *dev)
+
+ drm_fbdev_generic_setup(drm, 32);
+
++ dev_set_drvdata(dev, drm);
++
+ return 0;
+
+ finish_poll:
+@@ -130,6 +131,7 @@ static void sun4i_drv_unbind(struct device *dev)
+ {
+ struct drm_device *drm = dev_get_drvdata(dev);
+
++ dev_set_drvdata(dev, NULL);
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+--
+2.35.1
+
--- /dev/null
+From cb37def236a36e89a57cd45d39180dc1593377a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 01:35:06 -0700
+Subject: erspan: do not assume transport header is always set
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 301bd140ed0b24f0da660874c7e8a47dad8c8222 ]
+
+Rewrite tests in ip6erspan_tunnel_xmit() and
+erspan_fb_xmit() to not assume transport header is set.
+
+syzbot reported:
+
+WARNING: CPU: 0 PID: 1350 at include/linux/skbuff.h:2911 skb_transport_header include/linux/skbuff.h:2911 [inline]
+WARNING: CPU: 0 PID: 1350 at include/linux/skbuff.h:2911 ip6erspan_tunnel_xmit+0x15af/0x2eb0 net/ipv6/ip6_gre.c:963
+Modules linked in:
+CPU: 0 PID: 1350 Comm: aoe_tx0 Not tainted 5.19.0-rc2-syzkaller-00160-g274295c6e53f #0
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.14.0-2 04/01/2014
+RIP: 0010:skb_transport_header include/linux/skbuff.h:2911 [inline]
+RIP: 0010:ip6erspan_tunnel_xmit+0x15af/0x2eb0 net/ipv6/ip6_gre.c:963
+Code: 0f 47 f0 40 88 b5 7f fe ff ff e8 8c 16 4b f9 89 de bf ff ff ff ff e8 a0 12 4b f9 66 83 fb ff 0f 85 1d f1 ff ff e8 71 16 4b f9 <0f> 0b e9 43 f0 ff ff e8 65 16 4b f9 48 8d 85 30 ff ff ff ba 60 00
+RSP: 0018:ffffc90005daf910 EFLAGS: 00010293
+RAX: 0000000000000000 RBX: 000000000000ffff RCX: 0000000000000000
+RDX: ffff88801f032100 RSI: ffffffff882e8d3f RDI: 0000000000000003
+RBP: ffffc90005dafab8 R08: 0000000000000003 R09: 000000000000ffff
+R10: 000000000000ffff R11: 0000000000000000 R12: ffff888024f21d40
+R13: 000000000000a288 R14: 00000000000000b0 R15: ffff888025a2e000
+FS: 0000000000000000(0000) GS:ffff88802c800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b2e425000 CR3: 000000006d099000 CR4: 0000000000152ef0
+Call Trace:
+<TASK>
+__netdev_start_xmit include/linux/netdevice.h:4805 [inline]
+netdev_start_xmit include/linux/netdevice.h:4819 [inline]
+xmit_one net/core/dev.c:3588 [inline]
+dev_hard_start_xmit+0x188/0x880 net/core/dev.c:3604
+sch_direct_xmit+0x19f/0xbe0 net/sched/sch_generic.c:342
+__dev_xmit_skb net/core/dev.c:3815 [inline]
+__dev_queue_xmit+0x14a1/0x3900 net/core/dev.c:4219
+dev_queue_xmit include/linux/netdevice.h:2994 [inline]
+tx+0x6a/0xc0 drivers/block/aoe/aoenet.c:63
+kthread+0x1e7/0x3b0 drivers/block/aoe/aoecmd.c:1229
+kthread+0x2e9/0x3a0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:302
+</TASK>
+
+Fixes: d5db21a3e697 ("erspan: auto detect truncated ipv6 packets.")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: William Tu <u9012063@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/ip_gre.c | 15 ++++++++++-----
+ net/ipv6/ip6_gre.c | 15 ++++++++++-----
+ 2 files changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index bc8dfdf1c48a..318673517976 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -524,7 +524,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ int tunnel_hlen;
+ int version;
+ int nhoff;
+- int thoff;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+@@ -558,10 +557,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ truncate = true;
+
+- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+- if (skb->protocol == htons(ETH_P_IPV6) &&
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
+- truncate = true;
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ int thoff;
++
++ if (skb_transport_header_was_set(skb))
++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
++ else
++ thoff = nhoff + sizeof(struct ipv6hdr);
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
++ truncate = true;
++ }
+
+ if (version == 1) {
+ erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 5136959b3dc5..b996ccaff56e 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -944,7 +944,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ __be16 proto;
+ __u32 mtu;
+ int nhoff;
+- int thoff;
+
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+@@ -965,10 +964,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ truncate = true;
+
+- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+- if (skb->protocol == htons(ETH_P_IPV6) &&
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
+- truncate = true;
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ int thoff;
++
++ if (skb_transport_header_was_set(skb))
++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
++ else
++ thoff = nhoff + sizeof(struct ipv6hdr);
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
++ truncate = true;
++ }
+
+ if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
+ goto tx_err;
+--
+2.35.1
+
--- /dev/null
+From 6d83ee56aadc5f296c1d7707270e32bed69573ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 18:08:55 +0200
+Subject: ethtool: Fix get module eeprom fallback
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit a3bb7b63813f674fb62bac321cdd897cc62de094 ]
+
+Function fallback_set_params() checks if the module type returned
+by a driver is ETH_MODULE_SFF_8079 and in this case it assumes
+that buffer returns a concatenated content of page A0h and A2h.
+The check is wrong because the correct type is ETH_MODULE_SFF_8472.
+
+Fixes: 96d971e307cc ("ethtool: Add fallback to get_module_eeprom from netlink command")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://lore.kernel.org/r/20220616160856.3623273-1-ivecera@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ethtool/eeprom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
+index 7e6b37a54add..1c94bb8ea03f 100644
+--- a/net/ethtool/eeprom.c
++++ b/net/ethtool/eeprom.c
+@@ -36,7 +36,7 @@ static int fallback_set_params(struct eeprom_req_info *request,
+ if (request->page)
+ offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;
+
+- if (modinfo->type == ETH_MODULE_SFF_8079 &&
++ if (modinfo->type == ETH_MODULE_SFF_8472 &&
+ request->i2c_address == 0x51)
+ offset += ETH_MODULE_EEPROM_PAGE_LEN * 2;
+
+--
+2.35.1
+
--- /dev/null
+From f7875ab7aa7f6cb551e53ef91c6e6e6f24e2304d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 19:05:36 +1000
+Subject: filemap: Fix serialization adding transparent huge pages to page
+ cache
+
+From: Alistair Popple <apopple@nvidia.com>
+
+[ Upstream commit 00fa15e0d56482e32d8ca1f51d76b0ee00afb16b ]
+
+Commit 793917d997df ("mm/readahead: Add large folio readahead")
+introduced support for using large folios for filebacked pages if the
+filesystem supports it.
+
+page_cache_ra_order() was introduced to allocate and add these large
+folios to the page cache. However adding pages to the page cache should
+be serialized against truncation and hole punching by taking
+invalidate_lock. Not doing so can lead to data races resulting in stale
+data getting added to the page cache and marked up-to-date. See commit
+730633f0b7f9 ("mm: Protect operations adding pages to page cache with
+invalidate_lock") for more details.
+
+This issue was found by inspection but a testcase revealed it was
+possible to observe in practice on XFS. Fix this by taking
+invalidate_lock in page_cache_ra_order(), to mirror what is done for the
+non-thp case in page_cache_ra_unbounded().
+
+Signed-off-by: Alistair Popple <apopple@nvidia.com>
+Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/readahead.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 4a60cdb64262..38635af5bab7 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -508,6 +508,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ new_order--;
+ }
+
++ filemap_invalidate_lock_shared(mapping);
+ while (index <= limit) {
+ unsigned int order = new_order;
+
+@@ -534,6 +535,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ }
+
+ read_pages(ractl);
++ filemap_invalidate_unlock_shared(mapping);
+
+ /*
+ * If there were already pages in the page cache, then we may have
+--
+2.35.1
+
--- /dev/null
+From 9af0334d51b835ebd28ee7833d9cab0c698662ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jun 2022 11:29:48 +0300
+Subject: gpio: winbond: Fix error code in winbond_gpio_get()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 9ca766eaea2e87b8b773bff04ee56c055cb76d4e ]
+
+This error path returns 1, but it should instead propagate the negative
+error code from winbond_sio_enter().
+
+Fixes: a0d65009411c ("gpio: winbond: Add driver")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-winbond.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
+index 7f8f5b02e31d..4b61d975cc0e 100644
+--- a/drivers/gpio/gpio-winbond.c
++++ b/drivers/gpio/gpio-winbond.c
+@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
++ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+- val = winbond_sio_enter(*base);
+- if (val)
+- return val;
++ ret = winbond_sio_enter(*base);
++ if (ret)
++ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+--
+2.35.1
+
--- /dev/null
+From 7ac8a2fddf383c06306ec470149efd1360857c88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 09:47:05 +0200
+Subject: ice: ethtool: advertise 1000M speeds properly
+
+From: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+
+[ Upstream commit c3d184c83ff4b80167e34edfc3d21df424bf27ff ]
+
+In current implementation ice_update_phy_type enables all link modes
+for selected speed. This approach doesn't work for 1000M speeds,
+because both copper (1000baseT) and optical (1000baseX) standards
+cannot be enabled at once.
+
+Fix this, by adding the function `ice_set_phy_type_from_speed()`
+for 1000M speeds.
+
+Fixes: 48cb27f2fd18 ("ice: Implement handlers for ethtool PHY/link operations")
+Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 39 +++++++++++++++++++-
+ 1 file changed, 38 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 24cda7e1f916..2a6f30c26592 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -2191,6 +2191,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
+ return err;
+ }
+
++/**
++ * ice_set_phy_type_from_speed - set phy_types based on speeds
++ * and advertised modes
++ * @ks: ethtool link ksettings struct
++ * @phy_type_low: pointer to the lower part of phy_type
++ * @phy_type_high: pointer to the higher part of phy_type
++ * @adv_link_speed: targeted link speeds bitmap
++ */
++static void
++ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
++ u64 *phy_type_low, u64 *phy_type_high,
++ u16 adv_link_speed)
++{
++ /* Handle 1000M speed in a special way because ice_update_phy_type
++ * enables all link modes, but having mixed copper and optical
++ * standards is not supported.
++ */
++ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseT_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
++ ICE_PHY_TYPE_LOW_1G_SGMII;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseKX_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
++
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
++ 1000baseX_Full))
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
++ ICE_PHY_TYPE_LOW_1000BASE_LX;
++
++ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
++}
++
+ /**
+ * ice_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+@@ -2322,7 +2358,8 @@ ice_set_link_ksettings(struct net_device *netdev,
+ adv_link_speed = curr_link_speed;
+
+ /* Convert the advertise link speeds to their corresponded PHY_TYPE */
+- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
++ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
++ adv_link_speed);
+
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+--
+2.35.1
+
--- /dev/null
+From b9358364702e3641c25c4e9f3007076c5109d89f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 09:01:21 +0200
+Subject: ice: ethtool: Prohibit improper channel config for DCB
+
+From: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+
+[ Upstream commit a632b2a4c920ce5af29410fb091f7ee6d2e77dc6 ]
+
+Do not allow setting less channels, than Traffic Classes there are
+via ethtool. There must be at least one channel per Traffic Class.
+
+If you set less channels, than Traffic Classes there are, then during
+ice_vsi_rebuild there would be allocated only the requested amount
+of tx/rx rings in ice_vsi_alloc_arrays. But later in ice_vsi_setup_q_map
+there would be requested at least one channel per Traffic Class. This
+results in setting num_rxq > alloc_rxq and num_txq > alloc_txq.
+Later, there would be a NULL pointer dereference in
+ice_vsi_map_rings_to_vectors, because we go beyond of rx_rings or
+tx_rings arrays.
+
+Change ice_set_channels() to return error if you try to allocate less
+channels, than Traffic Classes there are.
+Change ice_vsi_setup_q_map() and ice_vsi_setup_q_map_mqprio() to return
+status code instead of void.
+Add error handling for ice_vsi_setup_q_map() and
+ice_vsi_setup_q_map_mqprio() in ice_vsi_init() and ice_vsi_cfg_tc().
+
+[53753.889983] INFO: Flow control is disabled for this traffic class (0) on this vsi.
+[53763.984862] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
+[53763.992915] PGD 14b45f5067 P4D 0
+[53763.996444] Oops: 0002 [#1] SMP NOPTI
+[53764.000312] CPU: 12 PID: 30661 Comm: ethtool Kdump: loaded Tainted: GOE --------- - - 4.18.0-240.el8.x86_64 #1
+[53764.011825] Hardware name: Intel Corporation WilsonCity/WilsonCity, BIOS WLYDCRB1.SYS.0020.P21.2012150710 12/15/2020
+[53764.022584] RIP: 0010:ice_vsi_map_rings_to_vectors+0x7e/0x120 [ice]
+[53764.029089] Code: 41 0d 0f b7 b7 12 05 00 00 0f b6 d0 44 29 de 44 0f b7 c6 44 01 c2 41 39 d0 7d 2d 4c 8b 47 28 44 0f b7 ce 83 c6 01 4f 8b 04 c8 <49> 89 48 28 4 c 8b 89 b8 01 00 00 4d 89 08 4c 89 81 b8 01 00 00 44
+[53764.048379] RSP: 0018:ff550dd88ea47b20 EFLAGS: 00010206
+[53764.053884] RAX: 0000000000000002 RBX: 0000000000000004 RCX: ff385ea42fa4a018
+[53764.061301] RDX: 0000000000000006 RSI: 0000000000000005 RDI: ff385e9baeedd018
+[53764.068717] RBP: 0000000000000010 R08: 0000000000000000 R09: 0000000000000004
+[53764.076133] R10: 0000000000000002 R11: 0000000000000004 R12: 0000000000000000
+[53764.083553] R13: 0000000000000000 R14: ff385e658fdd9000 R15: ff385e9baeedd018
+[53764.090976] FS: 000014872c5b5740(0000) GS:ff385e847f100000(0000) knlGS:0000000000000000
+[53764.099362] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[53764.105409] CR2: 0000000000000028 CR3: 0000000a820fa002 CR4: 0000000000761ee0
+[53764.112851] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[53764.120301] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[53764.127747] PKRU: 55555554
+[53764.130781] Call Trace:
+[53764.133564] ice_vsi_rebuild+0x611/0x870 [ice]
+[53764.138341] ice_vsi_recfg_qs+0x94/0x100 [ice]
+[53764.143116] ice_set_channels+0x1a8/0x3e0 [ice]
+[53764.147975] ethtool_set_channels+0x14e/0x240
+[53764.152667] dev_ethtool+0xd74/0x2a10
+[53764.156665] ? __mod_lruvec_state+0x44/0x110
+[53764.161280] ? __mod_lruvec_state+0x44/0x110
+[53764.165893] ? page_add_file_rmap+0x15/0x170
+[53764.170518] ? inet_ioctl+0xd1/0x220
+[53764.174445] ? netdev_run_todo+0x5e/0x290
+[53764.178808] dev_ioctl+0xb5/0x550
+[53764.182485] sock_do_ioctl+0xa0/0x140
+[53764.186512] sock_ioctl+0x1a8/0x300
+[53764.190367] ? selinux_file_ioctl+0x161/0x200
+[53764.195090] do_vfs_ioctl+0xa4/0x640
+[53764.199035] ksys_ioctl+0x60/0x90
+[53764.202722] __x64_sys_ioctl+0x16/0x20
+[53764.206845] do_syscall_64+0x5b/0x1a0
+[53764.210887] entry_SYSCALL_64_after_hwframe+0x65/0xca
+
+Fixes: 87324e747fde ("ice: Implement ethtool ops for channels")
+Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 10 +++++
+ drivers/net/ethernet/intel/ice/ice_lib.c | 42 +++++++++++++++++---
+ 2 files changed, 47 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 2a6f30c26592..8aee4ae4cc8c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3477,6 +3477,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ new_rx = ch->combined_count + ch->rx_count;
+ new_tx = ch->combined_count + ch->tx_count;
+
++ if (new_rx < vsi->tc_cfg.numtc) {
++ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
++ vsi->tc_cfg.numtc);
++ return -EINVAL;
++ }
++ if (new_tx < vsi->tc_cfg.numtc) {
++ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
++ vsi->tc_cfg.numtc);
++ return -EINVAL;
++ }
+ if (new_rx > ice_get_max_rxq(pf)) {
+ netdev_err(dev, "Maximum allowed Rx channels is %d\n",
+ ice_get_max_rxq(pf));
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 454e01ae09b9..f7f9c973ec54 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
++static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ {
+ u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 num_txq_per_tc, num_rxq_per_tc;
+@@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ else
+ vsi->num_rxq = num_rxq_per_tc;
+
++ if (vsi->num_rxq > vsi->alloc_rxq) {
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
++ vsi->num_rxq, vsi->alloc_rxq);
++ return -EINVAL;
++ }
++
+ vsi->num_txq = tx_count;
++ if (vsi->num_txq > vsi->alloc_txq) {
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
++ vsi->num_txq, vsi->alloc_txq);
++ return -EINVAL;
++ }
+
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+@@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ */
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
++
++ return 0;
+ }
+
+ /**
+@@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+ if (vsi->type == ICE_VSI_CHNL) {
+ ice_chnl_vsi_setup_q_map(vsi, ctxt);
+ } else {
+- ice_vsi_setup_q_map(vsi, ctxt);
++ ret = ice_vsi_setup_q_map(vsi, ctxt);
++ if (ret)
++ goto out;
++
+ if (!init_vsi) /* means VSI being updated */
+ /* must to indicate which section of VSI context are
+ * being modified
+@@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ */
+-static void
++static int
+ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u8 ena_tc)
+ {
+@@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = offset + qcount_tx;
++ if (vsi->num_txq > vsi->alloc_txq) {
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
++ vsi->num_txq, vsi->alloc_txq);
++ return -EINVAL;
++ }
++
+ vsi->num_rxq = offset + qcount_rx;
++ if (vsi->num_rxq > vsi->alloc_rxq) {
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
++ vsi->num_rxq, vsi->alloc_rxq);
++ return -EINVAL;
++ }
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+@@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
++
++ return 0;
+ }
+
+ /**
+@@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
++ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ else
+- ice_vsi_setup_q_map(vsi, ctx);
++ ret = ice_vsi_setup_q_map(vsi, ctx);
++
++ if (ret)
++ goto out;
+
+ /* must to indicate which section of VSI context are being modified */
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+--
+2.35.1
+
--- /dev/null
+From e3f206a0aafb344403166e6bd8fd7aea2e846866 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 May 2022 11:41:55 +0200
+Subject: ice: Fix switchdev rules book keeping
+
+From: Wojciech Drewek <wojciech.drewek@intel.com>
+
+[ Upstream commit 3578dc90013b1fa20da996cdadd8515802716132 ]
+
+Adding two filters with same matching criteria ends up with
+one rule in hardware with act = ICE_FWD_TO_VSI_LIST.
+In order to remove them properly we have to keep the
+information about vsi handle which is used in VSI bitmap
+(ice_adv_fltr_mgmt_list_entry::vsi_list_info::vsi_map).
+
+Fixes: 0d08a441fb1a ("ice: ndo_setup_tc implementation for PF")
+Reported-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_tc_lib.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 734bfa121e24..2ce2694fcbd7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ */
+ fltr->rid = rule_added.rid;
+ fltr->rule_id = rule_added.rule_id;
++ fltr->dest_id = rule_added.vsi_handle;
+
+ exit:
+ kfree(list);
+--
+2.35.1
+
--- /dev/null
+From 8d29adc3900af18d59ac4ffe5216e3b44acfb1dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 21:01:18 +0200
+Subject: ice: ignore protocol field in GTP offload
+
+From: Marcin Szycik <marcin.szycik@linux.intel.com>
+
+[ Upstream commit d4ea6f6373ef56d1d795a24f1f5874f4a6019199 ]
+
+Commit 34a897758efe ("ice: Add support for inner etype in switchdev")
+added the ability to match on inner ethertype. A side effect of that change
+is that it is now impossible to add some filters for protocols which do not
+contain inner ethtype field. tc requires the protocol field to be specified
+when providing certain other options, e.g. src_ip. This is a problem in
+case of GTP - when user wants to specify e.g. src_ip, they also need to
+specify protocol in tc command (otherwise tc fails with: Illegal "src_ip").
+Because GTP is a tunnel, the protocol field is treated as inner protocol.
+GTP does not contain inner ethtype field and the filter cannot be added.
+
+To fix this, ignore the ethertype field in case of GTP filters.
+
+Fixes: 9a225f81f540 ("ice: Support GTP-U and GTP-C offload in switchdev")
+Signed-off-by: Marcin Szycik <marcin.szycik@linux.intel.com>
+Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_tc_lib.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 3acd9f921c44..734bfa121e24 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -994,7 +994,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
++ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
++ fltr->tunnel_type == TNL_GTPU ||
++ fltr->tunnel_type == TNL_GTPC) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ } else {
+--
+2.35.1
+
--- /dev/null
+From 85075f66c584bd9982b890ba04aedbb6a38465c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 16:13:20 +0200
+Subject: igb: fix a use-after-free issue in igb_clean_tx_ring
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 3f6a57ee8544ec3982f8a3cbcbf4aea7d47eb9ec ]
+
+Fix the following use-after-free bug in igb_clean_tx_ring routine when
+the NIC is running in XDP mode. The issue can be triggered redirecting
+traffic into the igb NIC and then closing the device while the traffic
+is flowing.
+
+[ 73.322719] CPU: 1 PID: 487 Comm: xdp_redirect Not tainted 5.18.3-apu2 #9
+[ 73.330639] Hardware name: PC Engines APU2/APU2, BIOS 4.0.7 02/28/2017
+[ 73.337434] RIP: 0010:refcount_warn_saturate+0xa7/0xf0
+[ 73.362283] RSP: 0018:ffffc9000081f798 EFLAGS: 00010282
+[ 73.367761] RAX: 0000000000000000 RBX: ffffc90000420f80 RCX: 0000000000000000
+[ 73.375200] RDX: ffff88811ad22d00 RSI: ffff88811ad171e0 RDI: ffff88811ad171e0
+[ 73.382590] RBP: 0000000000000900 R08: ffffffff82298f28 R09: 0000000000000058
+[ 73.390008] R10: 0000000000000219 R11: ffffffff82280f40 R12: 0000000000000090
+[ 73.397356] R13: ffff888102343a40 R14: ffff88810359e0e4 R15: 0000000000000000
+[ 73.404806] FS: 00007ff38d31d740(0000) GS:ffff88811ad00000(0000) knlGS:0000000000000000
+[ 73.413129] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 73.419096] CR2: 000055cff35f13f8 CR3: 0000000106391000 CR4: 00000000000406e0
+[ 73.426565] Call Trace:
+[ 73.429087] <TASK>
+[ 73.431314] igb_clean_tx_ring+0x43/0x140 [igb]
+[ 73.436002] igb_down+0x1d7/0x220 [igb]
+[ 73.439974] __igb_close+0x3c/0x120 [igb]
+[ 73.444118] igb_xdp+0x10c/0x150 [igb]
+[ 73.447983] ? igb_pci_sriov_configure+0x70/0x70 [igb]
+[ 73.453362] dev_xdp_install+0xda/0x110
+[ 73.457371] dev_xdp_attach+0x1da/0x550
+[ 73.461369] do_setlink+0xfd0/0x10f0
+[ 73.465166] ? __nla_validate_parse+0x89/0xc70
+[ 73.469714] rtnl_setlink+0x11a/0x1e0
+[ 73.473547] rtnetlink_rcv_msg+0x145/0x3d0
+[ 73.477709] ? rtnl_calcit.isra.0+0x130/0x130
+[ 73.482258] netlink_rcv_skb+0x8d/0x110
+[ 73.486229] netlink_unicast+0x230/0x340
+[ 73.490317] netlink_sendmsg+0x215/0x470
+[ 73.494395] __sys_sendto+0x179/0x190
+[ 73.498268] ? move_addr_to_user+0x37/0x70
+[ 73.502547] ? __sys_getsockname+0x84/0xe0
+[ 73.506853] ? netlink_setsockopt+0x1c1/0x4a0
+[ 73.511349] ? __sys_setsockopt+0xc8/0x1d0
+[ 73.515636] __x64_sys_sendto+0x20/0x30
+[ 73.519603] do_syscall_64+0x3b/0x80
+[ 73.523399] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 73.528712] RIP: 0033:0x7ff38d41f20c
+[ 73.551866] RSP: 002b:00007fff3b945a68 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+[ 73.559640] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ff38d41f20c
+[ 73.567066] RDX: 0000000000000034 RSI: 00007fff3b945b30 RDI: 0000000000000003
+[ 73.574457] RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
+[ 73.581852] R10: 0000000000000000 R11: 0000000000000246 R12: 00007fff3b945ab0
+[ 73.589179] R13: 0000000000000000 R14: 0000000000000003 R15: 00007fff3b945b30
+[ 73.596545] </TASK>
+[ 73.598842] ---[ end trace 0000000000000000 ]---
+
+Fixes: 9cbc948b5a20c ("igb: add XDP support")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Link: https://lore.kernel.org/r/e5c01d549dc37bff18e46aeabd6fb28a7bcf84be.1655388571.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 68be2976f539..1c26bec7d6fa 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+ while (i != tx_ring->next_to_use) {
+ union e1000_adv_tx_desc *eop_desc, *tx_desc;
+
+- /* Free all the Tx ring sk_buffs */
+- dev_kfree_skb_any(tx_buffer->skb);
++ /* Free all the Tx ring sk_buffs or xdp frames */
++ if (tx_buffer->type == IGB_TYPE_SKB)
++ dev_kfree_skb_any(tx_buffer->skb);
++ else
++ xdp_return_frame(tx_buffer->xdpf);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+--
+2.35.1
+
--- /dev/null
+From 7183bbb657b77e7c9eca7d9f49782118cfaea126 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 15:10:56 -0700
+Subject: igb: Make DMA faster when CPU is active on the PCIe link
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+[ Upstream commit 4e0effd9007ea0be31f7488611eb3824b4541554 ]
+
+Intel I210 on some Intel Alder Lake platforms can only achieve ~750Mbps
+Tx speed via iperf. The RR2DCDELAY shows around 0x2xxx DMA delay, which
+will be significantly lower when 1) ASPM is disabled or 2) SoC package
+c-state stays above PC3. When the RR2DCDELAY is around 0x1xxx the Tx
+speed can reach to ~950Mbps.
+
+According to the I210 datasheet "8.26.1 PCIe Misc. Register - PCIEMISC",
+"DMA Idle Indication" doesn't seem to tie to DMA coalesce anymore, so
+set it to 1b for "DMA is considered idle when there is no Rx or Tx AND
+when there are no TLPs indicating that CPU is active detected on the
+PCIe link (such as the host executes CSR or Configuration register read
+or write operation)" and performing Tx should also fall under "active
+CPU on PCIe link" case.
+
+In addition to that, commit b6e0c419f040 ("igb: Move DMA Coalescing init
+code to separate function.") seems to wrongly changed from enabling
+E1000_PCIEMISC_LX_DECISION to disabling it, also fix that.
+
+Fixes: b6e0c419f040 ("igb: Move DMA Coalescing init code to separate function.")
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20220621221056.604304-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 1c26bec7d6fa..c5f04c40284b 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -9901,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
++ u32 reg;
+
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+- u32 reg;
+-
+ /* force threshold to 0. */
+ wr32(E1000_DMCTXTH, 0);
+
+@@ -9938,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ /* Disable BMC-to-OS Watchdog Enable */
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+-
+ wr32(E1000_DMACR, reg);
+
+ /* no lower threshold to disable
+@@ -9955,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+ */
+ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
++ }
+
+- /* make low power state decision controlled
+- * by DMA coal
+- */
++ if (hw->mac.type >= e1000_i210 ||
++ (adapter->flags & IGB_FLAG_DMAC)) {
+ reg = rd32(E1000_PCIEMISC);
+- reg &= ~E1000_PCIEMISC_LX_DECISION;
++ reg |= E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+--
+2.35.1
+
--- /dev/null
+From fd8eedb475ee3df6a7e3cb46feb655d76db92b7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 May 2022 11:50:26 +0300
+Subject: iio: adc: vf610: fix conversion mode sysfs node name
+
+From: Baruch Siach <baruch@tkos.co.il>
+
+[ Upstream commit f1a633b15cd5371a2a83f02c513984e51132dd68 ]
+
+The documentation missed the "in_" prefix for this IIO_SHARED_BY_DIR
+entry.
+
+Fixes: bf04c1a367e3 ("iio: adc: vf610: implement configurable conversion modes")
+Signed-off-by: Baruch Siach <baruch@tkos.co.il>
+Acked-by: Haibo Chen <haibo.chen@nxp.com>
+Link: https://lore.kernel.org/r/560dc93fafe5ef7e9a409885fd20b6beac3973d8.1653900626.git.baruch@tkos.co.il
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/ABI/testing/sysfs-bus-iio-vf610 | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610
+index 308a6756d3bf..491ead804488 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio-vf610
++++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610
+@@ -1,4 +1,4 @@
+-What: /sys/bus/iio/devices/iio:deviceX/conversion_mode
++What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode
+ KernelVersion: 4.2
+ Contact: linux-iio@vger.kernel.org
+ Description:
+--
+2.35.1
+
--- /dev/null
+From b80c70c7391cef5d713b4bc05cbef49b8ceaf83c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 07:24:05 +0000
+Subject: iio: adc: xilinx-ams: fix return error variable
+
+From: Lv Ruyi <lv.ruyi@zte.com.cn>
+
+[ Upstream commit f8ef475aa069cd72e9e7bdb2d60dc6a89e2bafad ]
+
+Return irq instead of ret which always equals to zero here.
+
+Fixes: d5c70627a794 ("iio: adc: Add Xilinx AMS driver")
+Reported-by: Zeal Robot <zealci@zte.com.cn>
+Signed-off-by: Lv Ruyi <lv.ruyi@zte.com.cn>
+Reviewed-by: Michal Simek <michal.simek@amd.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/xilinx-ams.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index a55396c1f8b2..a7687706012d 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev)
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+- return ret;
++ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
+ indio_dev);
+--
+2.35.1
+
--- /dev/null
+From f33ba5dcda401c767974e2fa60193085f70e4f9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 May 2022 21:50:29 +0200
+Subject: iio: magnetometer: yas530: Fix memchr_inv() misuse
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit bb52d3691db8cf24cea049235223f3599778f264 ]
+
+The call to check if the calibration is all zeroes is doing
+it wrong: memchr_inv() returns NULL if the the calibration
+contains all zeroes, but the check is for != NULL.
+
+Fix it up. It's probably not an urgent fix because the inner
+check for BIT(7) in data[13] will save us. But fix it.
+
+Fixes: de8860b1ed47 ("iio: magnetometer: Add driver for Yamaha YAS530")
+Reported-by: Jakob Hauser <jahau@rocketmail.com>
+Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20220501195029.151852-1-linus.walleij@linaro.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/magnetometer/yamaha-yas530.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
+index 9ff7b0e56cf6..b2bc637150bf 100644
+--- a/drivers/iio/magnetometer/yamaha-yas530.c
++++ b/drivers/iio/magnetometer/yamaha-yas530.c
+@@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+
+ /* Sanity check, is this all zeroes? */
+- if (memchr_inv(data, 0x00, 13)) {
++ if (memchr_inv(data, 0x00, 13) == NULL) {
+ if (!(data[13] & BIT(7)))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+ }
+--
+2.35.1
+
--- /dev/null
+From fcb0c66b98331ff1f142898fdd62ebc406ae5901 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Apr 2022 16:41:00 +0800
+Subject: iio: mma8452: fix probe fail when device tree compatible is used.
+
+From: Haibo Chen <haibo.chen@nxp.com>
+
+[ Upstream commit fe18894930a025617114aa8ca0adbf94d5bffe89 ]
+
+Correct the logic for the probe. First check of_match_table, if
+not meet, then check i2c_driver.id_table. If both not meet, then
+return fail.
+
+Fixes: a47ac019e7e8 ("iio: mma8452: Fix probe failing when an i2c_device_id is used")
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Link: https://lore.kernel.org/r/1650876060-17577-1-git-send-email-haibo.chen@nxp.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/accel/mma8452.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index 9c02c681c84c..4156d216c640 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -1556,11 +1556,13 @@ static int mma8452_probe(struct i2c_client *client,
+ mutex_init(&data->lock);
+
+ data->chip_info = device_get_match_data(&client->dev);
+- if (!data->chip_info && id) {
+- data->chip_info = &mma_chip_info_table[id->driver_data];
+- } else {
+- dev_err(&client->dev, "unknown device model\n");
+- return -ENODEV;
++ if (!data->chip_info) {
++ if (id) {
++ data->chip_info = &mma_chip_info_table[id->driver_data];
++ } else {
++ dev_err(&client->dev, "unknown device model\n");
++ return -ENODEV;
++ }
+ }
+
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+--
+2.35.1
+
--- /dev/null
+From a3a1e51d18736979d1083cd0e0a607e0070d2781 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 00:00:35 +0100
+Subject: io_uring: fail links when poll fails
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit c487a5ad48831afa6784b368ec40d0ee50f2fe1b ]
+
+Don't forget to cancel all linked requests of poll request when
+__io_arm_poll_handler() failed.
+
+Fixes: aa43477b04025 ("io_uring: poll rework")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/a78aad962460f9fdfe4aa4c0b62425c88f9415bc.1655852245.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 68aab48838e4..ca9ed3d899e6 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6399,6 +6399,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
+ ipt.pt._qproc = io_poll_queue_proc;
+
+ ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
++ if (!ret && ipt.error)
++ req_set_fail(req);
+ ret = ret ?: ipt.error;
+ if (ret)
+ __io_req_complete(req, issue_flags, ret, 0);
+--
+2.35.1
+
--- /dev/null
+From 64dfab31418f72f02c810c2fe3d2c385a1612f1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 13:25:06 +0100
+Subject: io_uring: fix req->apoll_events
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit aacf2f9f382c91df73f33317e28a4c34c8038986 ]
+
+apoll_events should be set once in the beginning of poll arming just as
+poll->events and not change after. However, currently io_uring resets it
+on each __io_poll_execute() for no clear reason. There is also a place
+in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade a
+multishot, but forget to do the same thing with ->apoll_events, which is
+buggy.
+
+Fixes: 81459350d581e ("io_uring: cache req->apoll->events in req->cflags")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Reviewed-by: Hao Xu <howeyxu@tencent.com>
+Link: https://lore.kernel.org/r/0aef40399ba75b1a4d2c2e85e6e8fd93c02fc6e4.1655814213.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 1070d22a1c2b..38ecea726254 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5984,7 +5984,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+ io_req_complete_failed(req, ret);
+ }
+
+-static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
++static void __io_poll_execute(struct io_kiocb *req, int mask,
++ __poll_t __maybe_unused events)
+ {
+ req->result = mask;
+ /*
+@@ -5993,7 +5994,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
+ * CPU. We want to avoid pulling in req->apoll->events for that
+ * case.
+ */
+- req->apoll_events = events;
+ if (req->opcode == IORING_OP_POLL_ADD)
+ req->io_task_work.func = io_poll_task_func;
+ else
+@@ -6143,6 +6143,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
+ io_init_poll_iocb(poll, mask, io_poll_wake);
+ poll->file = req->file;
+
++ req->apoll_events = poll->events;
++
+ ipt->pt._key = mask;
+ ipt->req = req;
+ ipt->error = 0;
+@@ -6173,8 +6175,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
+
+ if (mask) {
+ /* can't multishot if failed, just queue the event we've got */
+- if (unlikely(ipt->error || !ipt->nr_entries))
++ if (unlikely(ipt->error || !ipt->nr_entries)) {
+ poll->events |= EPOLLONESHOT;
++ req->apoll_events |= EPOLLONESHOT;
++ }
+ __io_poll_execute(req, mask, poll->events);
+ return 0;
+ }
+@@ -6387,7 +6391,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ return -EINVAL;
+
+ io_req_set_refcount(req);
+- req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
++ poll->events = io_poll_parse_events(sqe, flags);
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From e7898604262a19749120b73553a11fbf3c3a17e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 00:00:36 +0100
+Subject: io_uring: fix wrong arm_poll error handling
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 9d2ad2947a53abf5e5e6527a9eeed50a3a4cbc72 ]
+
+Leaving ip.error set when a request was punted to task_work execution is
+problematic, don't forget to clear it.
+
+Fixes: aa43477b04025 ("io_uring: poll rework")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/a6c84ef4182c6962380aebe11b35bdcb25b0ccfb.1655852245.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 38ecea726254..e4186635aaa8 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6178,6 +6178,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
+ if (unlikely(ipt->error || !ipt->nr_entries)) {
+ poll->events |= EPOLLONESHOT;
+ req->apoll_events |= EPOLLONESHOT;
++ ipt->error = 0;
+ }
+ __io_poll_execute(req, mask, poll->events);
+ return 0;
+--
+2.35.1
+
--- /dev/null
+From 5adc100ca25fca9ef15f452a9c02b8d38390e105 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 May 2022 10:40:03 +0200
+Subject: io_uring: make apoll_events a __poll_t
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 58f5c8d39e0ea07fdaaea6a85c49000da83dc0cc ]
+
+apoll_events is fed to vfs_poll and the poll tables, so it should be
+a __poll_t.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220518084005.3255380-5-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index ca9ed3d899e6..1070d22a1c2b 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -926,7 +926,7 @@ struct io_kiocb {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+- int apoll_events;
++ __poll_t apoll_events;
+ };
+ atomic_t refs;
+ atomic_t poll_refs;
+@@ -5984,7 +5984,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+ io_req_complete_failed(req, ret);
+ }
+
+-static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
++static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
+ {
+ req->result = mask;
+ /*
+@@ -6003,7 +6003,8 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
+ io_req_task_work_add(req, false);
+ }
+
+-static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
++static inline void io_poll_execute(struct io_kiocb *req, int res,
++ __poll_t events)
+ {
+ if (io_poll_get_ownership(req))
+ __io_poll_execute(req, res, events);
+--
+2.35.1
+
--- /dev/null
+From 2696f44b04039030a4bdd3d40d126b765304d046 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 10:01:07 +0900
+Subject: iommu/ipmmu-vmsa: Fix compatible for rcar-gen4
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit 9f7d09fe23a0112c08d2326d9116fccb5a912660 ]
+
+Fix compatible string for R-Car Gen4.
+
+Fixes: ae684caf465b ("iommu/ipmmu-vmsa: Add support for R-Car Gen4")
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://lore.kernel.org/r/20220617010107.3229784-1-yoshihiro.shimoda.uh@renesas.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/ipmmu-vmsa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index 8fdb84b3642b..1d42084d0276 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = {
+ .compatible = "renesas,ipmmu-r8a779a0",
+ .data = &ipmmu_features_rcar_gen4,
+ }, {
+- .compatible = "renesas,rcar-gen4-ipmmu",
++ .compatible = "renesas,rcar-gen4-ipmmu-vmsa",
+ .data = &ipmmu_features_rcar_gen4,
+ }, {
+ /* Terminator */
+--
+2.35.1
+
--- /dev/null
+From 7062be7bcec221e2d32617030bb4055cca286c47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Jun 2022 18:27:35 +0200
+Subject: ipv4: fix bind address validity regression tests
+
+From: Riccardo Paolo Bestetti <pbl@bestov.io>
+
+[ Upstream commit 313c502fa3b3494159cb8f18d4a6444d06c5c9a5 ]
+
+Commit 8ff978b8b222 ("ipv4/raw: support binding to nonlocal addresses")
+introduces support for binding to nonlocal addresses, as well as some
+basic test coverage for some of the related cases.
+
+Commit b4a028c4d031 ("ipv4: ping: fix bind address validity check")
+fixes a regression which incorrectly removed some checks for bind
+address validation. In addition, it introduces regression tests for
+those specific checks. However, those regression tests are defective, in
+that they perform the tests using an incorrect combination of bind
+flags. As a result, those tests fail when they should succeed.
+
+This commit introduces additional regression tests for nonlocal binding
+and fixes the defective regression tests. It also introduces new
+set_sysctl calls for the ipv4_bind test group, as to perform the ICMP
+binding tests it is necessary to allow ICMP socket creation by setting
+the net.ipv4.ping_group_range knob.
+
+Fixes: b4a028c4d031 ("ipv4: ping: fix bind address validity check")
+Reported-by: Riccardo Paolo Bestetti <pbl@bestov.io>
+Signed-off-by: Riccardo Paolo Bestetti <pbl@bestov.io>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/fcnal-test.sh | 36 +++++++++++++++++------
+ 1 file changed, 27 insertions(+), 9 deletions(-)
+
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
+index 75223b63e3c8..03b586760164 100755
+--- a/tools/testing/selftests/net/fcnal-test.sh
++++ b/tools/testing/selftests/net/fcnal-test.sh
+@@ -1800,24 +1800,32 @@ ipv4_addr_bind_novrf()
+ done
+
+ #
+- # raw socket with nonlocal bind
++ # tests for nonlocal bind
+ #
+ a=${NL_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b
+- log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind"
++ run_cmd nettest -s -R -f -l ${a} -b
++ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address"
++
++ log_start
++ run_cmd nettest -s -f -l ${a} -b
++ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address"
++
++ log_start
++ run_cmd nettest -s -D -P icmp -f -l ${a} -b
++ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address"
+
+ #
+ # check that ICMP sockets cannot bind to broadcast and multicast addresses
+ #
+ a=${BCAST_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -l ${a} -b
++ run_cmd nettest -s -D -P icmp -l ${a} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address"
+
+ a=${MCAST_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -f -l ${a} -b
++ run_cmd nettest -s -D -P icmp -l ${a} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address"
+
+ #
+@@ -1870,24 +1878,32 @@ ipv4_addr_bind_vrf()
+ log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind"
+
+ #
+- # raw socket with nonlocal bind
++ # tests for nonlocal bind
+ #
+ a=${NL_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b
++ run_cmd nettest -s -R -f -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind"
+
++ log_start
++ run_cmd nettest -s -f -l ${a} -I ${VRF} -b
++ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address after VRF bind"
++
++ log_start
++ run_cmd nettest -s -D -P icmp -f -l ${a} -I ${VRF} -b
++ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address after VRF bind"
++
+ #
+ # check that ICMP sockets cannot bind to broadcast and multicast addresses
+ #
+ a=${BCAST_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -l ${a} -I ${VRF} -b
++ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address after VRF bind"
+
+ a=${MCAST_IP}
+ log_start
+- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b
++ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address after VRF bind"
+
+ #
+@@ -1922,10 +1938,12 @@ ipv4_addr_bind()
+
+ log_subsection "No VRF"
+ setup
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv4_addr_bind_novrf
+
+ log_subsection "With VRF"
+ setup "yes"
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv4_addr_bind_vrf
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 1ee762a3f29dc41c4f5870229ff7c3c712755730 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 16:11:34 +0000
+Subject: KVM: arm64: Prevent kmemleak from accessing pKVM memory
+
+From: Quentin Perret <qperret@google.com>
+
+[ Upstream commit 56961c6331463cce2d84d0f973177a517fb33a82 ]
+
+Commit a7259df76702 ("memblock: make memblock_find_in_range method
+private") changed the API using which memory is reserved for the pKVM
+hypervisor. However, memblock_phys_alloc() differs from the original API in
+terms of kmemleak semantics -- the old one didn't report the reserved
+regions to kmemleak while the new one does. Unfortunately, when protected
+KVM is enabled, all kernel accesses to pKVM-private memory result in a
+fatal exception, which can now happen because of kmemleak scans:
+
+$ echo scan > /sys/kernel/debug/kmemleak
+[ 34.991354] kvm [304]: nVHE hyp BUG at: [<ffff800008fa3750>] __kvm_nvhe_handle_host_mem_abort+0x270/0x290!
+[ 34.991580] kvm [304]: Hyp Offset: 0xfffe8be807e00000
+[ 34.991813] Kernel panic - not syncing: HYP panic:
+[ 34.991813] PS:600003c9 PC:0000f418011a3750 ESR:00000000f2000800
+[ 34.991813] FAR:ffff000439200000 HPFAR:0000000004792000 PAR:0000000000000000
+[ 34.991813] VCPU:0000000000000000
+[ 34.993660] CPU: 0 PID: 304 Comm: bash Not tainted 5.19.0-rc2 #102
+[ 34.994059] Hardware name: linux,dummy-virt (DT)
+[ 34.994452] Call trace:
+[ 34.994641] dump_backtrace.part.0+0xcc/0xe0
+[ 34.994932] show_stack+0x18/0x6c
+[ 34.995094] dump_stack_lvl+0x68/0x84
+[ 34.995276] dump_stack+0x18/0x34
+[ 34.995484] panic+0x16c/0x354
+[ 34.995673] __hyp_pgtable_total_pages+0x0/0x60
+[ 34.995933] scan_block+0x74/0x12c
+[ 34.996129] scan_gray_list+0xd8/0x19c
+[ 34.996332] kmemleak_scan+0x2c8/0x580
+[ 34.996535] kmemleak_write+0x340/0x4a0
+[ 34.996744] full_proxy_write+0x60/0xbc
+[ 34.996967] vfs_write+0xc4/0x2b0
+[ 34.997136] ksys_write+0x68/0xf4
+[ 34.997311] __arm64_sys_write+0x20/0x2c
+[ 34.997532] invoke_syscall+0x48/0x114
+[ 34.997779] el0_svc_common.constprop.0+0x44/0xec
+[ 34.998029] do_el0_svc+0x2c/0xc0
+[ 34.998205] el0_svc+0x2c/0x84
+[ 34.998421] el0t_64_sync_handler+0xf4/0x100
+[ 34.998653] el0t_64_sync+0x18c/0x190
+[ 34.999252] SMP: stopping secondary CPUs
+[ 35.000034] Kernel Offset: disabled
+[ 35.000261] CPU features: 0x800,00007831,00001086
+[ 35.000642] Memory Limit: none
+[ 35.001329] ---[ end Kernel panic - not syncing: HYP panic:
+[ 35.001329] PS:600003c9 PC:0000f418011a3750 ESR:00000000f2000800
+[ 35.001329] FAR:ffff000439200000 HPFAR:0000000004792000 PAR:0000000000000000
+[ 35.001329] VCPU:0000000000000000 ]---
+
+Fix this by explicitly excluding the hypervisor's memory pool from
+kmemleak like we already do for the hyp BSS.
+
+Cc: Mike Rapoport <rppt@kernel.org>
+Fixes: a7259df76702 ("memblock: make memblock_find_in_range method private")
+Signed-off-by: Quentin Perret <qperret@google.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20220616161135.3997786-1-qperret@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/arm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index a66d83540c15..f88919a793ad 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -2011,11 +2011,11 @@ static int finalize_hyp_mode(void)
+ return 0;
+
+ /*
+- * Exclude HYP BSS from kmemleak so that it doesn't get peeked
+- * at, which would end badly once the section is inaccessible.
+- * None of other sections should ever be introspected.
++ * Exclude HYP sections from kmemleak so that they don't get peeked
++ * at, which would end badly once inaccessible.
+ */
+ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
++ kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
+ return pkvm_drop_host_privileges();
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 4717a84a9f4d8a1c2b385c63958055f7f864f289 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jun 2022 10:34:06 -0700
+Subject: KVM: SEV: Init target VMCBs in sev_migrate_from
+
+From: Peter Gonda <pgonda@google.com>
+
+[ Upstream commit 6defa24d3b12bbd418bc8526dea1cbc605265c06 ]
+
+The target VMCBs during an intra-host migration need to correctly setup
+for running SEV and SEV-ES guests. Add sev_init_vmcb() function and make
+sev_es_init_vmcb() static. sev_init_vmcb() uses the now private function
+to init SEV-ES guests VMCBs when needed.
+
+Fixes: 0b020f5af092 ("KVM: SEV: Add support for SEV-ES intra host migration")
+Fixes: b56639318bb2 ("KVM: SEV: Add support for SEV intra host migration")
+Signed-off-by: Peter Gonda <pgonda@google.com>
+Cc: Marc Orr <marcorr@google.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Message-Id: <20220623173406.744645-1-pgonda@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/sev.c | 68 ++++++++++++++++++++++++++++--------------
+ arch/x86/kvm/svm/svm.c | 11 ++-----
+ arch/x86/kvm/svm/svm.h | 2 +-
+ 3 files changed, 48 insertions(+), 33 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 4b7d490c0b63..76e9e6eb71d6 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
+ {
+ struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
+ struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
++ struct kvm_vcpu *dst_vcpu, *src_vcpu;
++ struct vcpu_svm *dst_svm, *src_svm;
+ struct kvm_sev_info *mirror;
++ unsigned long i;
+
+ dst->active = true;
+ dst->asid = src->asid;
+ dst->handle = src->handle;
+ dst->pages_locked = src->pages_locked;
+ dst->enc_context_owner = src->enc_context_owner;
++ dst->es_active = src->es_active;
+
+ src->asid = 0;
+ src->active = false;
+ src->handle = 0;
+ src->pages_locked = 0;
+ src->enc_context_owner = NULL;
++ src->es_active = false;
+
+ list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
+
+@@ -1704,26 +1709,21 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
+ list_del(&src->mirror_entry);
+ list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
+ }
+-}
+
+-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
+-{
+- unsigned long i;
+- struct kvm_vcpu *dst_vcpu, *src_vcpu;
+- struct vcpu_svm *dst_svm, *src_svm;
++ kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
++ dst_svm = to_svm(dst_vcpu);
+
+- if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+- return -EINVAL;
++ sev_init_vmcb(dst_svm);
+
+- kvm_for_each_vcpu(i, src_vcpu, src) {
+- if (!src_vcpu->arch.guest_state_protected)
+- return -EINVAL;
+- }
++ if (!dst->es_active)
++ continue;
+
+- kvm_for_each_vcpu(i, src_vcpu, src) {
++ /*
++ * Note, the source is not required to have the same number of
++ * vCPUs as the destination when migrating a vanilla SEV VM.
++ */
++ src_vcpu = kvm_get_vcpu(dst_kvm, i);
+ src_svm = to_svm(src_vcpu);
+- dst_vcpu = kvm_get_vcpu(dst, i);
+- dst_svm = to_svm(dst_vcpu);
+
+ /*
+ * Transfer VMSA and GHCB state to the destination. Nullify and
+@@ -1740,8 +1740,23 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
+ src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
+ src_vcpu->arch.guest_state_protected = false;
+ }
+- to_kvm_svm(src)->sev_info.es_active = false;
+- to_kvm_svm(dst)->sev_info.es_active = true;
++}
++
++static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
++{
++ struct kvm_vcpu *src_vcpu;
++ unsigned long i;
++
++ if (!sev_es_guest(src))
++ return 0;
++
++ if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
++ return -EINVAL;
++
++ kvm_for_each_vcpu(i, src_vcpu, src) {
++ if (!src_vcpu->arch.guest_state_protected)
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+@@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
+ if (ret)
+ goto out_dst_vcpu;
+
+- if (sev_es_guest(source_kvm)) {
+- ret = sev_es_migrate_from(kvm, source_kvm);
+- if (ret)
+- goto out_source_vcpu;
+- }
++ ret = sev_check_source_vcpus(kvm, source_kvm);
++ if (ret)
++ goto out_source_vcpu;
+
+ sev_migrate_from(kvm, source_kvm);
+ kvm_vm_dead(source_kvm);
+@@ -2910,7 +2923,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ count, in);
+ }
+
+-void sev_es_init_vmcb(struct vcpu_svm *svm)
++static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ {
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+@@ -2955,6 +2968,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ }
+
++void sev_init_vmcb(struct vcpu_svm *svm)
++{
++ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
++ clr_exception_intercept(svm, UD_VECTOR);
++
++ if (sev_es_guest(svm->vcpu.kvm))
++ sev_es_init_vmcb(svm);
++}
++
+ void sev_es_vcpu_reset(struct vcpu_svm *svm)
+ {
+ /*
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 0c0a09b43b10..6bfb0b0e66bd 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1125,15 +1125,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
+ svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
+ }
+
+- if (sev_guest(vcpu->kvm)) {
+- svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+- clr_exception_intercept(svm, UD_VECTOR);
+-
+- if (sev_es_guest(vcpu->kvm)) {
+- /* Perform SEV-ES specific VMCB updates */
+- sev_es_init_vmcb(svm);
+- }
+- }
++ if (sev_guest(vcpu->kvm))
++ sev_init_vmcb(svm);
+
+ svm_hv_init_vmcb(svm->vmcb);
+ init_vmcb_after_set_cpuid(vcpu);
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 34babf9185fe..8ec8fb58b924 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -616,10 +616,10 @@ void __init sev_set_cpu_caps(void);
+ void __init sev_hardware_setup(void);
+ void sev_hardware_unsetup(void);
+ int sev_cpu_init(struct svm_cpu_data *sd);
++void sev_init_vmcb(struct vcpu_svm *svm);
+ void sev_free_vcpu(struct kvm_vcpu *vcpu);
+ int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
+-void sev_es_init_vmcb(struct vcpu_svm *svm);
+ void sev_es_vcpu_reset(struct vcpu_svm *svm);
+ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+ void sev_es_prepare_switch_to_guest(struct vmcb_save_area *hostsa);
+--
+2.35.1
+
--- /dev/null
+From 74e8137473b45dc4f5d1dc97e9458977b308a195 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jun 2022 19:14:20 +0800
+Subject: MIPS: Remove repetitive increase irq_err_count
+
+From: huhai <huhai@kylinos.cn>
+
+[ Upstream commit c81aba8fde2aee4f5778ebab3a1d51bd2ef48e4c ]
+
+commit 979934da9e7a ("[PATCH] mips: update IRQ handling for vr41xx") added
+a function irq_dispatch, and it'll increase irq_err_count when the get_irq
+callback returns a negative value, but increase irq_err_count in get_irq
+was not removed.
+
+And also, modpost complains once gpio-vr41xx drivers become modules.
+ ERROR: modpost: "irq_err_count" [drivers/gpio/gpio-vr41xx.ko] undefined!
+
+So it would be a good idea to remove repetitive increase irq_err_count in
+get_irq callback.
+
+Fixes: 27fdd325dace ("MIPS: Update VR41xx GPIO driver to use gpiolib")
+Fixes: 979934da9e7a ("[PATCH] mips: update IRQ handling for vr41xx")
+Reported-by: k2ci <kernel-bot@kylinos.cn>
+Signed-off-by: huhai <huhai@kylinos.cn>
+Signed-off-by: Genjian Zhang <zhanggenjian@kylinos.cn>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/vr41xx/common/icu.c | 2 --
+ drivers/gpio/gpio-vr41xx.c | 2 --
+ 2 files changed, 4 deletions(-)
+
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 7b7f25b4b057..9240bcdbe74e 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq)
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+- atomic_inc(&irq_err_count);
+-
+ return -1;
+ }
+
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
+index 98cd715ccc33..8d09b619c166 100644
+--- a/drivers/gpio/gpio-vr41xx.c
++++ b/drivers/gpio/gpio-vr41xx.c
+@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq)
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+- atomic_inc(&irq_err_count);
+-
+ return -EINVAL;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From b41e060fb0824fb0d86d6abb42bf2f40db894fc9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 17:16:33 +0200
+Subject: net: dsa: qca8k: reduce mgmt ethernet timeout
+
+From: Christian Marangi <ansuelsmth@gmail.com>
+
+[ Upstream commit 85467f7da18992311deafdbf32a8d163cb1e98d7 ]
+
+The current mgmt ethernet timeout is set to 100ms. This value is too
+big and would slow down any mdio command in case the mgmt ethernet
+packet has some problems on the receiving part.
+Reduce it to just 5ms to handle the case when some operations are done on the
+master port that would cause the mgmt ethernet to not work temporarily.
+
+Fixes: 5950c7c0a68c ("net: dsa: qca8k: add support for mgmt read/write in Ethernet packet")
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Link: https://lore.kernel.org/r/20220621151633.11741-1-ansuelsmth@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/qca8k.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
+index f375627174c8..e553e3e6fa0f 100644
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -15,7 +15,7 @@
+
+ #define QCA8K_ETHERNET_MDIO_PRIORITY 7
+ #define QCA8K_ETHERNET_PHY_PRIORITY 6
+-#define QCA8K_ETHERNET_TIMEOUT 100
++#define QCA8K_ETHERNET_TIMEOUT 5
+
+ #define QCA8K_NUM_PORTS 7
+ #define QCA8K_NUM_CPU_PORTS 2
+--
+2.35.1
+
--- /dev/null
+From ef86f5879dde9cd3edc28fc34f0a9863c9d3e554 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 00:34:34 -0700
+Subject: net: fix data-race in dev_isalive()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cc26c2661fefea215f41edb665193324a5f99021 ]
+
+dev_isalive() is called under RTNL or dev_base_lock protection.
+
+This means that changes to dev->reg_state should be done with both locks held.
+
+syzbot reported:
+
+BUG: KCSAN: data-race in register_netdevice / type_show
+
+write to 0xffff888144ecf518 of 1 bytes by task 20886 on cpu 0:
+register_netdevice+0xb9f/0xdf0 net/core/dev.c:10050
+lapbeth_new_device drivers/net/wan/lapbether.c:414 [inline]
+lapbeth_device_event+0x4a0/0x6c0 drivers/net/wan/lapbether.c:456
+notifier_call_chain kernel/notifier.c:87 [inline]
+raw_notifier_call_chain+0x53/0xb0 kernel/notifier.c:455
+__dev_notify_flags+0x1d6/0x3a0
+dev_change_flags+0xa2/0xc0 net/core/dev.c:8607
+do_setlink+0x778/0x2230 net/core/rtnetlink.c:2780
+__rtnl_newlink net/core/rtnetlink.c:3546 [inline]
+rtnl_newlink+0x114c/0x16a0 net/core/rtnetlink.c:3593
+rtnetlink_rcv_msg+0x811/0x8c0 net/core/rtnetlink.c:6089
+netlink_rcv_skb+0x13e/0x240 net/netlink/af_netlink.c:2501
+rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:6107
+netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+netlink_unicast+0x58a/0x660 net/netlink/af_netlink.c:1345
+netlink_sendmsg+0x661/0x750 net/netlink/af_netlink.c:1921
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg net/socket.c:734 [inline]
+__sys_sendto+0x21e/0x2c0 net/socket.c:2119
+__do_sys_sendto net/socket.c:2131 [inline]
+__se_sys_sendto net/socket.c:2127 [inline]
+__x64_sys_sendto+0x74/0x90 net/socket.c:2127
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+read to 0xffff888144ecf518 of 1 bytes by task 20423 on cpu 1:
+dev_isalive net/core/net-sysfs.c:38 [inline]
+netdev_show net/core/net-sysfs.c:50 [inline]
+type_show+0x24/0x90 net/core/net-sysfs.c:112
+dev_attr_show+0x35/0x90 drivers/base/core.c:2095
+sysfs_kf_seq_show+0x175/0x240 fs/sysfs/file.c:59
+kernfs_seq_show+0x75/0x80 fs/kernfs/file.c:162
+seq_read_iter+0x2c3/0x8e0 fs/seq_file.c:230
+kernfs_fop_read_iter+0xd1/0x2f0 fs/kernfs/file.c:235
+call_read_iter include/linux/fs.h:2052 [inline]
+new_sync_read fs/read_write.c:401 [inline]
+vfs_read+0x5a5/0x6a0 fs/read_write.c:482
+ksys_read+0xe8/0x1a0 fs/read_write.c:620
+__do_sys_read fs/read_write.c:630 [inline]
+__se_sys_read fs/read_write.c:628 [inline]
+__x64_sys_read+0x3e/0x50 fs/read_write.c:628
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+value changed: 0x00 -> 0x01
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 20423 Comm: udevd Tainted: G W 5.19.0-rc2-syzkaller-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 25 +++++++++++++++----------
+ net/core/net-sysfs.c | 1 +
+ 2 files changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0784c339cd7d..842917883adb 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -396,16 +396,18 @@ static void list_netdevice(struct net_device *dev)
+ /* Device list removal
+ * caller must respect a RCU grace period before freeing/reusing dev
+ */
+-static void unlist_netdevice(struct net_device *dev)
++static void unlist_netdevice(struct net_device *dev, bool lock)
+ {
+ ASSERT_RTNL();
+
+ /* Unlink dev from the device chain */
+- write_lock(&dev_base_lock);
++ if (lock)
++ write_lock(&dev_base_lock);
+ list_del_rcu(&dev->dev_list);
+ netdev_name_node_del(dev->name_node);
+ hlist_del_rcu(&dev->index_hlist);
+- write_unlock(&dev_base_lock);
++ if (lock)
++ write_unlock(&dev_base_lock);
+
+ dev_base_seq_inc(dev_net(dev));
+ }
+@@ -9963,11 +9965,11 @@ int register_netdevice(struct net_device *dev)
+ goto err_uninit;
+
+ ret = netdev_register_kobject(dev);
+- if (ret) {
+- dev->reg_state = NETREG_UNREGISTERED;
++ write_lock(&dev_base_lock);
++ dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
++ write_unlock(&dev_base_lock);
++ if (ret)
+ goto err_uninit;
+- }
+- dev->reg_state = NETREG_REGISTERED;
+
+ __netdev_update_features(dev);
+
+@@ -10249,7 +10251,9 @@ void netdev_run_todo(void)
+ continue;
+ }
+
++ write_lock(&dev_base_lock);
+ dev->reg_state = NETREG_UNREGISTERED;
++ write_unlock(&dev_base_lock);
+ linkwatch_forget_dev(dev);
+ }
+
+@@ -10727,9 +10731,10 @@ void unregister_netdevice_many(struct list_head *head)
+
+ list_for_each_entry(dev, head, unreg_list) {
+ /* And unlink it from device chain. */
+- unlist_netdevice(dev);
+-
++ write_lock(&dev_base_lock);
++ unlist_netdevice(dev, false);
+ dev->reg_state = NETREG_UNREGISTERING;
++ write_unlock(&dev_base_lock);
+ }
+ flush_all_backlogs();
+
+@@ -10876,7 +10881,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ dev_close(dev);
+
+ /* And unlink it from device chain */
+- unlist_netdevice(dev);
++ unlist_netdevice(dev, true);
+
+ synchronize_net();
+
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 9cbc1c8289bc..9ee57997354a 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -32,6 +32,7 @@ static const char fmt_dec[] = "%d\n";
+ static const char fmt_ulong[] = "%lu\n";
+ static const char fmt_u64[] = "%llu\n";
+
++/* Caller holds RTNL or dev_base_lock */
+ static inline int dev_isalive(const struct net_device *dev)
+ {
+ return dev->reg_state <= NETREG_REGISTERED;
+--
+2.35.1
+
--- /dev/null
+From e66f7973e0bc5be690e6b84a93e1f1c13651f7ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Jun 2022 14:23:33 +0200
+Subject: net: phy: at803x: fix NULL pointer dereference on AR9331 PHY
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+[ Upstream commit 9926de7315be3d606cc011a305ad9adb9e8e14c9 ]
+
+Latest kernel will explode on the PHY interrupt config, since it depends
+now on allocated priv. So, run probe to allocate priv to fix it.
+
+ ar9331_switch ethernet.1:10 lan0 (uninitialized): PHY [!ahb!ethernet@1a000000!mdio!switch@10:00] driver [Qualcomm Atheros AR9331 built-in PHY] (irq=13)
+ CPU 0 Unable to handle kernel paging request at virtual address 0000000a, epc == 8050e8a8, ra == 80504b34
+ ...
+ Call Trace:
+ [<8050e8a8>] at803x_config_intr+0x5c/0xd0
+ [<80504b34>] phy_request_interrupt+0xa8/0xd0
+ [<8050289c>] phylink_bringup_phy+0x2d8/0x3ac
+ [<80502b68>] phylink_fwnode_phy_connect+0x118/0x130
+ [<8074d8ec>] dsa_slave_create+0x270/0x420
+ [<80743b04>] dsa_port_setup+0x12c/0x148
+ [<8074580c>] dsa_register_switch+0xaf0/0xcc0
+ [<80511344>] ar9331_sw_probe+0x370/0x388
+ [<8050cb78>] mdio_probe+0x44/0x70
+ [<804df300>] really_probe+0x200/0x424
+ [<804df7b4>] __driver_probe_device+0x290/0x298
+ [<804df810>] driver_probe_device+0x54/0xe4
+ [<804dfd50>] __device_attach_driver+0xe4/0x130
+ [<804dcb00>] bus_for_each_drv+0xb4/0xd8
+ [<804dfac4>] __device_attach+0x104/0x1a4
+ [<804ddd24>] bus_probe_device+0x48/0xc4
+ [<804deb44>] deferred_probe_work_func+0xf0/0x10c
+ [<800a0ffc>] process_one_work+0x314/0x4d4
+ [<800a17fc>] worker_thread+0x2a4/0x354
+ [<800a9a54>] kthread+0x134/0x13c
+ [<8006306c>] ret_from_kernel_thread+0x14/0x1c
+
+Same Issue would affect some other PHYs (QCA8081, QCA9561), so fix it
+too.
+
+Fixes: 3265f4218878 ("net: phy: at803x: add fiber support")
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/at803x.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 6a467e7817a6..59fe356942b5 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
++ .probe = at803x_probe,
++ .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
++ .probe = at803x_probe,
++ .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
++ .probe = at803x_probe,
++ .remove = at803x_remove,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+--
+2.35.1
+
--- /dev/null
+From 25f1cf91fcd8e92d86b063db6e5e2c4e8be86e28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 16:43:36 -0700
+Subject: net/sched: sch_netem: Fix arithmetic in netem_dump() for 32-bit
+ platforms
+
+From: Peilin Ye <peilin.ye@bytedance.com>
+
+[ Upstream commit a2b1a5d40bd12b44322c2ccd40bb0ec1699708b6 ]
+
+As reported by Yuming, currently tc always show a latency of UINT_MAX
+for netem Qdisc's on 32-bit platforms:
+
+ $ tc qdisc add dev dummy0 root netem latency 100ms
+ $ tc qdisc show dev dummy0
+ qdisc netem 8001: root refcnt 2 limit 1000 delay 275s 275s
+ ^^^^^^^^^^^^^^^^
+
+Let us take a closer look at netem_dump():
+
+ qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
+ UINT_MAX);
+
+qopt.latency is __u32, psched_tdiff_t is signed long,
+(psched_tdiff_t)(UINT_MAX) is negative for 32-bit platforms, so
+qopt.latency is always UINT_MAX.
+
+Fix it by using psched_time_t (u64) instead.
+
+Note: confusingly, users have two ways to specify 'latency':
+
+ 1. normally, via '__u32 latency' in struct tc_netem_qopt;
+ 2. via the TCA_NETEM_LATENCY64 attribute, which is s64.
+
+For the second case, theoretically 'latency' could be negative. This
+patch ignores that corner case, since it is broken (i.e. assigning a
+negative s64 to __u32) anyways, and should be handled separately.
+
+Thanks Ted Lin for the analysis [1] .
+
+[1] https://github.com/raspberrypi/linux/issues/3512
+
+Reported-by: Yuming Chen <chenyuming.junnan@bytedance.com>
+Fixes: 112f9cb65643 ("netem: convert to qdisc_watchdog_schedule_ns")
+Reviewed-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
+Acked-by: Stephen Hemminger <stephen@networkplumber.org>
+Link: https://lore.kernel.org/r/20220616234336.2443-1-yepeilin.cs@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_netem.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index ed4ccef5d6a8..5449ed114e40 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
+ struct tc_netem_rate rate;
+ struct tc_netem_slot slot;
+
+- qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
++ qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
+ UINT_MAX);
+- qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
++ qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
+ UINT_MAX);
+ qopt.limit = q->limit;
+ qopt.loss = q->loss;
+--
+2.35.1
+
--- /dev/null
+From 586a4d06ab7b1f3242c051a0cc7fd4abf121fa1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 12:35:08 +0800
+Subject: net/tls: fix tls_sk_proto_close executed repeatedly
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit 69135c572d1f84261a6de2a1268513a7e71753e2 ]
+
+After setting the sock ktls, update ctx->sk_proto to sock->sk_prot by
+tls_update(), so now ctx->sk_proto->close is tls_sk_proto_close(). When
+close the sock, tls_sk_proto_close() is called for sock->sk_prot->close
+is tls_sk_proto_close(). But ctx->sk_proto->close() will be executed later
+in tls_sk_proto_close(). Thus tls_sk_proto_close() executed repeatedly
+occurred. That will trigger the following bug.
+
+=================================================================
+KASAN: null-ptr-deref in range [0x0000000000000010-0x0000000000000017]
+RIP: 0010:tls_sk_proto_close+0xd8/0xaf0 net/tls/tls_main.c:306
+Call Trace:
+ <TASK>
+ tls_sk_proto_close+0x356/0xaf0 net/tls/tls_main.c:329
+ inet_release+0x12e/0x280 net/ipv4/af_inet.c:428
+ __sock_release+0xcd/0x280 net/socket.c:650
+ sock_close+0x18/0x20 net/socket.c:1365
+
+Updating a proto which is the same as sock->sk_prot is incorrect. Add proto
+and sock->sk_prot equality check at the head of tls_update() to fix it.
+
+Fixes: 95fa145479fb ("bpf: sockmap/tls, close can race with map free")
+Reported-by: syzbot+29c3c12f3214b85ad081@syzkaller.appspotmail.com
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 7b2b0e7ffee4..fc60bef83f90 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -873,6 +873,9 @@ static void tls_update(struct sock *sk, struct proto *p,
+ {
+ struct tls_context *ctx;
+
++ if (sk->sk_prot == p)
++ return;
++
+ ctx = tls_get_ctx(sk);
+ if (likely(ctx)) {
+ ctx->sk_write_space = write_space;
+--
+2.35.1
+
--- /dev/null
+From 6735a174e711f87ea58c6efd95818e947a606fb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 16:17:31 +0200
+Subject: netfilter: nf_dup_netdev: add and use recursion counter
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit fcd53c51d03709bc429822086f1e9b3e88904284 ]
+
+Now that the egress function can be called from egress hook, we need
+to avoid recursive calls into the nf_tables traverser, else crash.
+
+Fixes: f87b9464d152 ("netfilter: nft_fwd_netdev: Support egress hook")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_dup_netdev.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
+index 13b7f6a66086..a8e2425e43b0 100644
+--- a/net/netfilter/nf_dup_netdev.c
++++ b/net/netfilter/nf_dup_netdev.c
+@@ -13,20 +13,31 @@
+ #include <net/netfilter/nf_tables_offload.h>
+ #include <net/netfilter/nf_dup_netdev.h>
+
++#define NF_RECURSION_LIMIT 2
++
++static DEFINE_PER_CPU(u8, nf_dup_skb_recursion);
++
+ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev,
+ enum nf_dev_hooks hook)
+ {
++ if (__this_cpu_read(nf_dup_skb_recursion) > NF_RECURSION_LIMIT)
++ goto err;
++
+ if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) {
+- if (skb_cow_head(skb, skb->mac_len)) {
+- kfree_skb(skb);
+- return;
+- }
++ if (skb_cow_head(skb, skb->mac_len))
++ goto err;
++
+ skb_push(skb, skb->mac_len);
+ }
+
+ skb->dev = dev;
+ skb_clear_tstamp(skb);
++ __this_cpu_inc(nf_dup_skb_recursion);
+ dev_queue_xmit(skb);
++ __this_cpu_dec(nf_dup_skb_recursion);
++ return;
++err:
++ kfree_skb(skb);
+ }
+
+ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
+--
+2.35.1
+
--- /dev/null
+From eac150702149ec9cfe90ccfb1fff363529d285e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 16:17:30 +0200
+Subject: netfilter: nf_dup_netdev: do not push mac header a second time
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 574a5b85dc3b9ab672ff3fba0ee020f927960648 ]
+
+Eric reports skb_under_panic when using dup/fwd via bond+egress hook.
+Before pushing mac header, we should make sure that we're called from
+ingress to put back what was pulled earlier.
+
+In egress case, the MAC header is already there; we should leave skb
+alone.
+
+While at it be more careful here: skb might have been altered and
+headroom reduced, so add a skb_cow() before so that headroom is
+increased if necessary.
+
+nf_do_netdev_egress() assumes skb ownership (it normally ends with
+a call to dev_queue_xmit), so we must free the packet on error.
+
+Fixes: f87b9464d152 ("netfilter: nft_fwd_netdev: Support egress hook")
+Reported-by: Eric Garver <eric@garver.life>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_dup_netdev.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
+index 7873bd1389c3..13b7f6a66086 100644
+--- a/net/netfilter/nf_dup_netdev.c
++++ b/net/netfilter/nf_dup_netdev.c
+@@ -13,10 +13,16 @@
+ #include <net/netfilter/nf_tables_offload.h>
+ #include <net/netfilter/nf_dup_netdev.h>
+
+-static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
++static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev,
++ enum nf_dev_hooks hook)
+ {
+- if (skb_mac_header_was_set(skb))
++ if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) {
++ if (skb_cow_head(skb, skb->mac_len)) {
++ kfree_skb(skb);
++ return;
++ }
+ skb_push(skb, skb->mac_len);
++ }
+
+ skb->dev = dev;
+ skb_clear_tstamp(skb);
+@@ -33,7 +39,7 @@ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
+ return;
+ }
+
+- nf_do_netdev_egress(pkt->skb, dev);
++ nf_do_netdev_egress(pkt->skb, dev, nft_hook(pkt));
+ }
+ EXPORT_SYMBOL_GPL(nf_fwd_netdev_egress);
+
+@@ -48,7 +54,7 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
+
+ skb = skb_clone(pkt->skb, GFP_ATOMIC);
+ if (skb)
+- nf_do_netdev_egress(skb, dev);
++ nf_do_netdev_egress(skb, dev, nft_hook(pkt));
+ }
+ EXPORT_SYMBOL_GPL(nf_dup_netdev_egress);
+
+--
+2.35.1
+
--- /dev/null
+From b23ab7cdccb02351a5a5d66cf74e9a9a6cc75b26 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 May 2022 20:15:31 +0200
+Subject: netfilter: use get_random_u32 instead of prandom
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit b1fd94e704571f98b21027340eecf821b2bdffba ]
+
+bh might occur while updating per-cpu rnd_state from user context,
+ie. local_out path.
+
+BUG: using smp_processor_id() in preemptible [00000000] code: nginx/2725
+caller is nft_ng_random_eval+0x24/0x54 [nft_numgen]
+Call Trace:
+ check_preemption_disabled+0xde/0xe0
+ nft_ng_random_eval+0x24/0x54 [nft_numgen]
+
+Use the random driver instead, this also avoids need for local prandom
+state. Moreover, prandom now uses the random driver since d4150779e60f
+("random32: use real rng for non-deterministic randomness").
+
+Based on earlier patch from Pablo Neira.
+
+Fixes: 6b2faee0ca91 ("netfilter: nft_meta: place prandom handling in a helper")
+Fixes: 978d8f9055c3 ("netfilter: nft_numgen: add map lookups for numgen random operations")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_meta.c | 13 ++-----------
+ net/netfilter/nft_numgen.c | 12 +++---------
+ 2 files changed, 5 insertions(+), 20 deletions(-)
+
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index ac4859241e17..55d2d49c3425 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -14,6 +14,7 @@
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/random.h>
+ #include <linux/smp.h>
+ #include <linux/static_key.h>
+ #include <net/dst.h>
+@@ -32,8 +33,6 @@
+ #define NFT_META_SECS_PER_DAY 86400
+ #define NFT_META_DAYS_PER_WEEK 7
+
+-static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
+-
+ static u8 nft_meta_weekday(void)
+ {
+ time64_t secs = ktime_get_real_seconds();
+@@ -271,13 +270,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
+ return true;
+ }
+
+-static noinline u32 nft_prandom_u32(void)
+-{
+- struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
+-
+- return prandom_u32_state(state);
+-}
+-
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ static noinline bool
+ nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
+@@ -389,7 +381,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
+ break;
+ #endif
+ case NFT_META_PRANDOM:
+- *dest = nft_prandom_u32();
++ *dest = get_random_u32();
+ break;
+ #ifdef CONFIG_XFRM
+ case NFT_META_SECPATH:
+@@ -518,7 +510,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
+ len = IFNAMSIZ;
+ break;
+ case NFT_META_PRANDOM:
+- prandom_init_once(&nft_prandom_state);
+ len = sizeof(u32);
+ break;
+ #ifdef CONFIG_XFRM
+diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
+index 81b40c663d86..45d3dc9e96f2 100644
+--- a/net/netfilter/nft_numgen.c
++++ b/net/netfilter/nft_numgen.c
+@@ -9,12 +9,11 @@
+ #include <linux/netlink.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter/nf_tables.h>
++#include <linux/random.h>
+ #include <linux/static_key.h>
+ #include <net/netfilter/nf_tables.h>
+ #include <net/netfilter/nf_tables_core.h>
+
+-static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
+-
+ struct nft_ng_inc {
+ u8 dreg;
+ u32 modulus;
+@@ -135,12 +134,9 @@ struct nft_ng_random {
+ u32 offset;
+ };
+
+-static u32 nft_ng_random_gen(struct nft_ng_random *priv)
++static u32 nft_ng_random_gen(const struct nft_ng_random *priv)
+ {
+- struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
+-
+- return reciprocal_scale(prandom_u32_state(state), priv->modulus) +
+- priv->offset;
++ return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;
+ }
+
+ static void nft_ng_random_eval(const struct nft_expr *expr,
+@@ -168,8 +164,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- prandom_init_once(&nft_numgen_prandom_state);
+-
+ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+--
+2.35.1
+
--- /dev/null
+From 6e27645df9ea0166c75d7087d9e0c25c8ce54713 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 10:29:42 +0200
+Subject: nvme: move the Samsung X5 quirk entry to the core quirks
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit e6487833182a8a0187f0292aca542fc163ccd03e ]
+
+This device shares the PCI ID with the Samsung 970 Evo Plus that
+does not need or want the quirks. Move the the quirk entry to the
+core table based on the model number instead.
+
+Fixes: bc360b0b1611 ("nvme-pci: add quirks for Samsung X5 SSDs")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 14 ++++++++++++++
+ drivers/nvme/host/pci.c | 4 ----
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 1ea85c88d795..a2862a56fadc 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2487,6 +2487,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
++ },
++ {
++ /*
++ * The external Samsung X5 SSD fails initialization without a
++ * delay before checking if it is ready and has a whole set of
++ * other problems. To make this even more interesting, it
++ * shares the PCI ID with internal Samsung 970 Evo Plus that
++ * does not need or want these quirks.
++ */
++ .vid = 0x144d,
++ .mn = "Samsung Portable SSD X5",
++ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
++ NVME_QUIRK_NO_DEEPEST_PS |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN,
+ }
+ };
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 17aeb7d5c485..ddea0fb90c28 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3475,10 +3475,6 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_128_BYTES_SQES |
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
+- { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
+- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
+- NVME_QUIRK_NO_DEEPEST_PS |
+- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+ };
+--
+2.35.1
+
--- /dev/null
+From 33e4fb31266f638a2bd86b92f2842944efb74e00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 02:03:25 +0000
+Subject: perf arm-spe: Don't set data source if it's not a memory operation
+
+From: Leo Yan <leo.yan@linaro.org>
+
+[ Upstream commit 51ba539f5bdb5a8cc7b1dedd5e73ac54564a7602 ]
+
+Except for memory load and store operations, ARM SPE records also can
+support other operation types, bug when set the data source field the
+current code assumes a record is a either load operation or store
+operation, this leads to wrongly synthesize memory samples.
+
+This patch strictly checks the record operation type, it only sets data
+source only for the operation types ARM_SPE_LD and ARM_SPE_ST,
+otherwise, returns zero for data source. Therefore, we can synthesize
+memory samples only when data source is a non-zero value, the function
+arm_spe__is_memory_event() is useless and removed.
+
+Fixes: e55ed3423c1bb29f ("perf arm-spe: Synthesize memory event")
+Reviewed-by: Ali Saidi <alisaidi@amazon.com>
+Reviewed-by: German Gomez <german.gomez@arm.com>
+Signed-off-by: Leo Yan <leo.yan@linaro.org>
+Tested-by: Ali Saidi <alisaidi@amazon.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: alisaidi@amazon.com
+Cc: Andrew Kilroy <andrew.kilroy@arm.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: James Clark <james.clark@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Li Huafei <lihuafei1@huawei.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Nick Forrington <nick.forrington@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: http://lore.kernel.org/lkml/20220517020326.18580-5-alisaidi@amazon.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/arm-spe.c | 22 ++++++++--------------
+ 1 file changed, 8 insertions(+), 14 deletions(-)
+
+diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
+index 1a80151baed9..d040406f3314 100644
+--- a/tools/perf/util/arm-spe.c
++++ b/tools/perf/util/arm-spe.c
+@@ -387,26 +387,16 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
+ return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ }
+
+-#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \
+- ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \
+- ARM_SPE_REMOTE_ACCESS)
+-
+-static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
+-{
+- if (type & SPE_MEM_TYPE)
+- return true;
+-
+- return false;
+-}
+-
+ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
+ {
+ union perf_mem_data_src data_src = { 0 };
+
+ if (record->op == ARM_SPE_LD)
+ data_src.mem_op = PERF_MEM_OP_LOAD;
+- else
++ else if (record->op == ARM_SPE_ST)
+ data_src.mem_op = PERF_MEM_OP_STORE;
++ else
++ return 0;
+
+ if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
+ data_src.mem_lvl = PERF_MEM_LVL_L3;
+@@ -510,7 +500,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
+ return err;
+ }
+
+- if (spe->sample_memory && arm_spe__is_memory_event(record->type)) {
++ /*
++ * When data_src is zero it means the record is not a memory operation,
++ * skip to synthesize memory sample for this case.
++ */
++ if (spe->sample_memory && data_src) {
+ err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
+ if (err)
+ return err;
+--
+2.35.1
+
--- /dev/null
+From b6ceb5316137472ea18b45de1ed4f97f032a54ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 12:52:07 +0200
+Subject: perf test: Record only user callchains on the "Check Arm64 callgraphs
+ are complete in fp mode" test
+
+From: Michael Petlan <mpetlan@redhat.com>
+
+[ Upstream commit 72dcae8efd42699bbfd55e1ef187310c4e2e5dcb ]
+
+The testcase 'Check Arm64 callgraphs are complete in fp mode' wants to
+see the following output:
+
+ 610 leaf
+ 62f parent
+ 648 main
+
+However, without excluding kernel callchains, the output might look like:
+
+ ffffc2ff40ef1b5c arch_local_irq_enable
+ ffffc2ff419d032c __schedule
+ ffffc2ff419d06c0 schedule
+ ffffc2ff40e4da30 do_notify_resume
+ ffffc2ff40e421b0 work_pending
+ 610 leaf
+ 62f parent
+ 648 main
+
+Adding '--user-callchains' leaves only the wanted symbols in the chain.
+
+Fixes: cd6382d82752737e ("perf test arm64: Test unwinding using fame-pointer (fp) mode")
+Suggested-by: German Gomez <german.gomez@arm.com>
+Reviewed-by: German Gomez <german.gomez@arm.com>
+Reviewed-by: Leo Yan <leo.yan@linaro.org>
+Signed-off-by: Michael Petlan <mpetlan@redhat.com>
+Cc: German Gomez <german.gomez@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20220614105207.26223-1-mpetlan@redhat.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/shell/test_arm_callgraph_fp.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+index 6ffbb27afaba..ec108d45d3c6 100755
+--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
++++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+@@ -43,7 +43,7 @@ CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
+ cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
+
+ # Add a 1 second delay to skip samples that are not in the leaf() function
+-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null &
++perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+ PID=$!
+
+ echo " + Recording (PID=$PID)..."
+--
+2.35.1
+
--- /dev/null
+From b92c9cf7f9966dc64305fa1325ffd0a495657f1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jun 2022 19:29:39 +0530
+Subject: perf test topology: Use !strncmp(right platform) to fix guest PPC
+ comparision check
+
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+
+[ Upstream commit b236371421df57b93fc49c4b9d0e53bd1aab2b2e ]
+
+commit cfd7092c31aed728 ("perf test session topology: Fix test to skip
+the test in guest environment") added check to skip the testcase if the
+socket_id can't be fetched from topology info.
+
+But the condition check uses strncmp which should be changed to !strncmp
+and to correctly match platform.
+
+Fix this condition check.
+
+Fixes: cfd7092c31aed728 ("perf test session topology: Fix test to skip the test in guest environment")
+Reported-by: Thomas Richter <tmricht@linux.ibm.com>
+Signed-off-by: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+Cc: Disha Goel <disgoel@linux.vnet.ibm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220610135939.63361-1-atrajeev@linux.vnet.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/topology.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
+index d23a9e322ff5..0b4f61b6cc6b 100644
+--- a/tools/perf/tests/topology.c
++++ b/tools/perf/tests/topology.c
+@@ -115,7 +115,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
+ * physical_package_id will be set to -1. Hence skip this
+ * test if physical_package_id returns -1 for cpu from perf_cpu_map.
+ */
+- if (strncmp(session->header.env.arch, "powerpc", 7)) {
++ if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
+ if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
+ return TEST_SKIP;
+ }
+--
+2.35.1
+
--- /dev/null
+From dec64a74acbe48ec1dcb2bab296b1da72a90bb52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jun 2022 11:40:37 +0300
+Subject: phy: aquantia: Fix AN when higher speeds than 1G are not advertised
+
+From: Claudiu Manoil <claudiu.manoil@nxp.com>
+
+[ Upstream commit 9b7fd1670a94a57d974795acebde843a5c1a354e ]
+
+Even when the eth port is resticted to work with speeds not higher than 1G,
+and so the eth driver is requesting the phy (via phylink) to advertise up
+to 1000BASET support, the aquantia phy device is still advertising for 2.5G
+and 5G speeds.
+Clear these advertising defaults when requested.
+
+Cc: Ondrej Spacek <ondrej.spacek@nxp.com>
+Fixes: 09c4c57f7bc41 ("net: phy: aquantia: add support for auto-negotiation configuration")
+Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Link: https://lore.kernel.org/r/20220610084037.7625-1-claudiu.manoil@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/aquantia_main.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
+index a8db1a19011b..c7047f5d7a9b 100644
+--- a/drivers/net/phy/aquantia_main.c
++++ b/drivers/net/phy/aquantia_main.c
+@@ -34,6 +34,8 @@
+ #define MDIO_AN_VEND_PROV 0xc400
+ #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
+ #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
++#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
++#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
+@@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
++ /* Handle the case when the 2.5G and 5G speeds are not advertised */
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++ phydev->advertising))
++ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
++
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
++ phydev->advertising))
++ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
++
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
++ MDIO_AN_VEND_PROV_1000BASET_FULL |
++ MDIO_AN_VEND_PROV_2500BASET_FULL |
++ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+--
+2.35.1
+
--- /dev/null
+From 1361be52051ac0e03396564a6512d7bb96ca135d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 21:05:56 +0100
+Subject: regmap-irq: Fix a bug in regmap_irq_enable() for type_in_mask chips
+
+From: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+
+[ Upstream commit 485037ae9a095491beb7f893c909a76cc4f9d1e7 ]
+
+When enabling a type_in_mask irq, the type_buf contents must be
+AND'd with the mask of the IRQ we're enabling to avoid enabling
+other IRQs by accident, which can happen if several type_in_mask
+irqs share a mask register.
+
+Fixes: bc998a730367 ("regmap: irq: handle HW using separate rising/falling edge interrupts")
+Signed-off-by: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+Link: https://lore.kernel.org/r/20220620200644.1961936-2-aidanmacdonald.0x0@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-irq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 400c7412a7dc..4f785bc7981c 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data)
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
++ unsigned int reg = irq_data->reg_offset / map->reg_stride;
+ unsigned int mask, type;
+
+ type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
+@@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data)
+ * at the corresponding offset in regmap_irq_set_type().
+ */
+ if (d->chip->type_in_mask && type)
+- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
++ mask = d->type_buf[reg] & irq_data->mask;
+ else
+ mask = irq_data->mask;
+
+ if (d->chip->clear_on_unmask)
+ d->clear_status = true;
+
+- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
++ d->mask_buf[reg] &= ~mask;
+ }
+
+ static void regmap_irq_disable(struct irq_data *data)
+--
+2.35.1
+
--- /dev/null
+From 08a6f1e31aa2383e253b82878c708360f22a2464 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 21:05:57 +0100
+Subject: regmap-irq: Fix offset/index mismatch in read_sub_irq_data()
+
+From: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+
+[ Upstream commit 3f05010f243be06478a9b11cfce0ce994f5a0890 ]
+
+We need to divide the sub-irq status register offset by register
+stride to get an index for the status buffer to avoid an out of
+bounds write when the register stride is greater than 1.
+
+Fixes: a2d21848d921 ("regmap: regmap-irq: Add main status register support")
+Signed-off-by: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+Link: https://lore.kernel.org/r/20220620200644.1961936-3-aidanmacdonald.0x0@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-irq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 4f785bc7981c..a6db605707b0 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -387,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
+ subreg = &chip->sub_reg_offsets[b];
+ for (i = 0; i < subreg->num_regs; i++) {
+ unsigned int offset = subreg->offset[i];
++ unsigned int index = offset / map->reg_stride;
+
+ if (chip->not_fixed_stride)
+ ret = regmap_read(map,
+@@ -395,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
+ else
+ ret = regmap_read(map,
+ chip->status_base + offset,
+- &data->status_buf[offset]);
++ &data->status_buf[index]);
+
+ if (ret)
+ break;
+--
+2.35.1
+
--- /dev/null
+From d84842d07ec9fef86435fea7240f33ee9140fa47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 01:11:12 +0900
+Subject: rethook: Reject getting a rethook if RCU is not watching
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+[ Upstream commit c0f3bb4054ef036e5f67e27f2e3cad9e6512cf00 ]
+
+Since the rethook_recycle() will involve the call_rcu() for reclaiming
+the rethook_instance, the rethook must be set up at the RCU available
+context (non idle). This rethook_recycle() in the rethook trampoline
+handler is inevitable, thus the RCU available check must be done before
+setting the rethook trampoline.
+
+This adds a rcu_is_watching() check in the rethook_try_get() so that
+it will return NULL if it is called when !rcu_is_watching().
+
+Fixes: 54ecbe6f1ed5 ("rethook: Add a generic return hook")
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/bpf/165461827269.280167.7379263615545598958.stgit@devnote2
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/rethook.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index b56833700d23..c69d82273ce7 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh)
+ if (unlikely(!handler))
+ return NULL;
+
++ /*
++ * This expects the caller will set up a rethook on a function entry.
++ * When the function returns, the rethook will eventually be reclaimed
++ * or released in the rethook_recycle() with call_rcu().
++ * This means the caller must be run in the RCU-availabe context.
++ */
++ if (unlikely(!rcu_is_watching()))
++ return NULL;
++
+ fn = freelist_try_get(&rh->pool);
+ if (!fn)
+ return NULL;
+--
+2.35.1
+
--- /dev/null
+From a482bae34cb0632cbf10cadfc80e003635cc4c0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 12:13:52 -0700
+Subject: Revert "net/tls: fix tls_sk_proto_close executed repeatedly"
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 1b205d948fbb06a7613d87dcea0ff5fd8a08ed91 ]
+
+This reverts commit 69135c572d1f84261a6de2a1268513a7e71753e2.
+
+This commit was just papering over the issue, ULP should not
+get ->update() called with its own sk_prot. Each ULP would
+need to add this check.
+
+Fixes: 69135c572d1f ("net/tls: fix tls_sk_proto_close executed repeatedly")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/r/20220620191353.1184629-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_main.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index fc60bef83f90..7b2b0e7ffee4 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -873,9 +873,6 @@ static void tls_update(struct sock *sk, struct proto *p,
+ {
+ struct tls_context *ctx;
+
+- if (sk->sk_prot == p)
+- return;
+-
+ ctx = tls_get_ctx(sk);
+ if (likely(ctx)) {
+ ctx->sk_write_space = write_space;
+--
+2.35.1
+
--- /dev/null
+From 56302a3e0c080b9bbbe9421b51e5a8a678486a3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jun 2022 15:19:00 +0200
+Subject: s390/cpumf: Handle events cycles and instructions identical
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+[ Upstream commit be857b7f77d130dbbd47c91fc35198b040f35865 ]
+
+Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
+perf_event attribute::type values:
+ - PERF_TYPE_HARDWARE: when invoked via perf tool predefined events name
+ cycles or cpu-cycles or instructions.
+ - pmu->type: when invoked via perf tool event name cpu_cf/CPU_CYLCES/ or
+ cpu_cf/INSTRUCTIONS/. This invocation also selects the PMU to which
+ the event belongs.
+Handle both type of invocations identical for events CPU_CYLCES and
+INSTRUCTIONS. They address the same hardware.
+The result is different when event modifier exclude_kernel is also set.
+Invocation with event modifier for user space event counting fails.
+
+Output before:
+
+ # perf stat -e cpum_cf/cpu_cycles/u -- true
+
+ Performance counter stats for 'true':
+
+ <not supported> cpum_cf/cpu_cycles/u
+
+ 0.000761033 seconds time elapsed
+
+ 0.000076000 seconds user
+ 0.000725000 seconds sys
+
+ #
+
+Output after:
+ # perf stat -e cpum_cf/cpu_cycles/u -- true
+
+ Performance counter stats for 'true':
+
+ 349,613 cpum_cf/cpu_cycles/u
+
+ 0.000844143 seconds time elapsed
+
+ 0.000079000 seconds user
+ 0.000800000 seconds sys
+ #
+
+Fixes: 6a82e23f45fe ("s390/cpumf: Adjust registration of s390 PMU device drivers")
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+[agordeev@linux.ibm.com corrected commit ID of Fixes commit]
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/perf_cpum_cf.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 483ab5e10164..f7dd3c849e68 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ return err;
+ }
+
++/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
++ * attribute::type values:
++ * - PERF_TYPE_HARDWARE:
++ * - pmu->type:
++ * Handle both type of invocations identical. They address the same hardware.
++ * The result is different when event modifiers exclude_kernel and/or
++ * exclude_user are also set.
++ */
++static int cpumf_pmu_event_type(struct perf_event *event)
++{
++ u64 ev = event->attr.config;
++
++ if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
++ cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
++ cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
++ cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
++ return PERF_TYPE_HARDWARE;
++ return PERF_TYPE_RAW;
++}
++
+ static int cpumf_pmu_event_init(struct perf_event *event)
+ {
+ unsigned int type = event->attr.type;
+@@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event)
+ err = __hw_perf_event_init(event, type);
+ else if (event->pmu->type == type)
+ /* Registered as unknown PMU */
+- err = __hw_perf_event_init(event, PERF_TYPE_RAW);
++ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
+ else
+ return -ENOENT;
+
+--
+2.35.1
+
--- /dev/null
+From 12e6ace7ec87872916ff1bb3fd63c4d6361cb69a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jun 2022 11:04:36 +0200
+Subject: s390/crash: add missing iterator advance in copy_oldmem_page()
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+[ Upstream commit cc02e6e21aa5f2ac0defe8c15e5a9d024da6e73d ]
+
+In case old memory was successfully copied the passed iterator
+should be advanced as well. Currently copy_oldmem_page() is
+always called with single-segment iterator. Should that ever
+change - copy_oldmem_user and copy_oldmem_kernel() functions
+would need a rework to deal with multi-segment iterators.
+
+Fixes: 5d8de293c224 ("vmcore: convert copy_oldmem_page() to take an iov_iter")
+Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Tested-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/crash_dump.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index a2c1c55daec0..2534a31d2550 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -219,6 +219,11 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ unsigned long src;
+ int rc;
+
++ if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter)))
++ return -EINVAL;
++ /* Multi-segment iterators are not supported */
++ if (iter->nr_segs > 1)
++ return -EINVAL;
+ if (!csize)
+ return 0;
+ src = pfn_to_phys(pfn) + offset;
+@@ -228,6 +233,8 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
+ else
+ rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
++ if (!rc)
++ iov_iter_advance(iter, csize);
+ return rc;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From dff912d0683493488078f0365cc11d1ab40ef8df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jun 2022 21:32:39 +0200
+Subject: s390/crash: make copy_oldmem_page() return number of bytes copied
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+[ Upstream commit af2debd58bd769e38f538143f0d332e15d753396 ]
+
+Callback copy_oldmem_page() returns either error code or zero.
+Instead, it should return the error code or number of bytes copied.
+
+Fixes: df9694c7975f ("s390/dump: streamline oldmem copy functions")
+Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Tested-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/crash_dump.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 2534a31d2550..28124d0fa1d5 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -233,9 +233,10 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
+ else
+ rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
+- if (!rc)
+- iov_iter_advance(iter, csize);
+- return rc;
++ if (rc < 0)
++ return rc;
++ iov_iter_advance(iter, csize);
++ return csize;
+ }
+
+ /*
+--
+2.35.1
+
--- /dev/null
+From 77aa3698baae035538cfc868685c282f340ba903 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jun 2022 15:38:54 +0300
+Subject: scsi: iscsi: Exclude zero from the endpoint ID range
+
+From: Sergey Gorenko <sergeygo@nvidia.com>
+
+[ Upstream commit f6eed15f3ea76596ccc689331e1cc850b999133b ]
+
+The kernel returns an endpoint ID as r.ep_connect_ret.handle in the
+iscsi_uevent. The iscsid validates a received endpoint ID and treats zero
+as an error. The commit referenced in the fixes line changed the endpoint
+ID range, and zero is always assigned to the first endpoint ID. So, the
+first attempt to create a new iSER connection always fails.
+
+Link: https://lore.kernel.org/r/20220613123854.55073-1-sergeygo@nvidia.com
+Fixes: 3c6ae371b8a1 ("scsi: iscsi: Release endpoint ID when its freed")
+Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
+Reviewed-by: Mike Christie <michael.christie@oracle.com>
+Reviewed-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Sergey Gorenko <sergeygo@nvidia.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 2c0dd64159b0..5d21f07456c6 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size)
+ return NULL;
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+- id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
++
++ /*
++ * First endpoint id should be 1 to comply with user space
++ * applications (iscsid).
++ */
++ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+--
+2.35.1
+
--- /dev/null
+From 52a4eb614e0633a11f1d178c62de0ca1f062ce16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 10:13:02 +0900
+Subject: scsi: scsi_debug: Fix zone transition to full condition
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+[ Upstream commit 566d3c57eb526f32951af15866086e236ce1fc8a ]
+
+When a write command to a sequential write required or sequential write
+preferred zone result in the zone write pointer reaching the end of the
+zone, the zone condition must be set to full AND the number of implicitly
+or explicitly open zones updated to have a correct accounting for zone
+resources. However, the function zbc_inc_wp() only sets the zone condition
+to full without updating the open zone counters, resulting in a zone state
+machine breakage.
+
+Introduce the helper function zbc_set_zone_full() and use it in
+zbc_inc_wp() to correctly transition zones to the full condition.
+
+Link: https://lore.kernel.org/r/20220608011302.92061-1-damien.lemoal@opensource.wdc.com
+Fixes: f0d1cf9378bd ("scsi: scsi_debug: Add ZBC zone commands")
+Reviewed-by: Niklas Cassel <niklas.cassel@wdc.com>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_debug.c | 22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 592a290e6cfa..6cdd67f2a08e 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2788,6 +2788,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
+ }
+ }
+
++static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
++ struct sdeb_zone_state *zsp)
++{
++ switch (zsp->z_cond) {
++ case ZC2_IMPLICIT_OPEN:
++ devip->nr_imp_open--;
++ break;
++ case ZC3_EXPLICIT_OPEN:
++ devip->nr_exp_open--;
++ break;
++ default:
++ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
++ zsp->z_start, zsp->z_cond);
++ break;
++ }
++ zsp->z_cond = ZC5_FULL;
++}
++
+ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+ {
+@@ -2800,7 +2818,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+- zsp->z_cond = ZC5_FULL;
++ zbc_set_zone_full(devip, zsp);
+ return;
+ }
+
+@@ -2819,7 +2837,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+- zsp->z_cond = ZC5_FULL;
++ zbc_set_zone_full(devip, zsp);
+
+ num -= n;
+ lba += n;
+--
+2.35.1
+
--- /dev/null
+From 715f060ff5f7537fc2ab8d057f6821410db98f7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 00:05:55 -0700
+Subject: scsi: storvsc: Correct reporting of Hyper-V I/O size limits
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Saurabh Sengar <ssengar@linux.microsoft.com>
+
+[ Upstream commit 1d3e0980782fbafaf93285779fd3905e4f866802 ]
+
+Current code is based on the idea that the max number of SGL entries
+also determines the max size of an I/O request. While this idea was
+true in older versions of the storvsc driver when SGL entry length
+was limited to 4 Kbytes, commit 3d9c3dcc58e9 ("scsi: storvsc: Enable
+scatterlist entry lengths > 4Kbytes") removed that limitation. It's
+now theoretically possible for the block layer to send requests that
+exceed the maximum size supported by Hyper-V. This problem doesn't
+currently happen in practice because the block layer defaults to a
+512 Kbyte maximum, while Hyper-V in Azure supports 2 Mbyte I/O sizes.
+But some future configuration of Hyper-V could have a smaller max I/O
+size, and the block layer could exceed that max.
+
+Fix this by correctly setting max_sectors as well as sg_tablesize to
+reflect the maximum I/O size that Hyper-V reports. While allowing
+I/O sizes larger than the block layer default of 512 Kbytes doesn’t
+provide any noticeable performance benefit in the tests we ran, it's
+still appropriate to report the correct underlying Hyper-V capabilities
+to the Linux block layer.
+
+Also tweak the virt_boundary_mask to reflect that the required
+alignment derives from Hyper-V communication using a 4 Kbyte page size,
+and not on the guest page size, which might be bigger (eg. ARM64).
+
+Link: https://lore.kernel.org/r/1655190355-28722-1-git-send-email-ssengar@linux.microsoft.com
+Fixes: 3d9c3dcc58e9 ("scsi: storvsc: Enable scatter list entry lengths > 4Kbytes")
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/storvsc_drv.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 9a0bba5a51a7..4b1f1d73eee8 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1916,7 +1916,7 @@ static struct scsi_host_template scsi_driver = {
+ .cmd_per_lun = 2048,
+ .this_id = -1,
+ /* Ensure there are no gaps in presented sgls */
+- .virt_boundary_mask = PAGE_SIZE-1,
++ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
+ .no_write_same = 1,
+ .track_queue_depth = 1,
+ .change_queue_depth = storvsc_change_queue_depth,
+@@ -1970,6 +1970,7 @@ static int storvsc_probe(struct hv_device *device,
+ int max_targets;
+ int max_channels;
+ int max_sub_channels = 0;
++ u32 max_xfer_bytes;
+
+ /*
+ * Based on the windows host we are running on,
+@@ -2059,12 +2060,28 @@ static int storvsc_probe(struct hv_device *device,
+ }
+ /* max cmd length */
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+-
+ /*
+- * set the table size based on the info we got
+- * from the host.
++ * Any reasonable Hyper-V configuration should provide
++ * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE,
++ * protecting it from any weird value.
++ */
++ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
++ /* max_hw_sectors_kb */
++ host->max_sectors = max_xfer_bytes >> 9;
++ /*
++ * There are 2 requirements for Hyper-V storvsc sgl segments,
++ * based on which the below calculation for max segments is
++ * done:
++ *
++ * 1. Except for the first and last sgl segment, all sgl segments
++ * should be align to HV_HYP_PAGE_SIZE, that also means the
++ * maximum number of segments in a sgl can be calculated by
++ * dividing the total max transfer length by HV_HYP_PAGE_SIZE.
++ *
++ * 2. Except for the first and last, each entry in the SGL must
++ * have an offset that is a multiple of HV_HYP_PAGE_SIZE.
+ */
+- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
++ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
+ /*
+ * For non-IDE disks, the host supports multiple channels.
+ * Set the number of HW queues we are supporting.
+--
+2.35.1
+
--- /dev/null
+From c4d97a3805a787eb3109898147f6af5fbcb132a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 20:02:35 +0800
+Subject: selftests dma: fix compile error for dma_map_benchmark
+
+From: Yu Liao <liaoyu15@huawei.com>
+
+[ Upstream commit 12a29115be72dfc72372af9ded4bc4ae7113a729 ]
+
+When building selftests/dma:
+$ make -C tools/testing/selftests TARGETS=dma
+I hit the following compilation error:
+
+dma_map_benchmark.c:13:10: fatal error: linux/map_benchmark.h: No such file or directory
+ #include <linux/map_benchmark.h>
+ ^~~~~~~~~~~~~~~~~~~~~~~
+
+dma/Makefile does not include the map_benchmark.h path, so add
+more including path, and fix include order in dma_map_benchmark.c
+
+Fixes: 8ddde07a3d28 ("dma-mapping: benchmark: extract a common header file for map_benchmark definition")
+Signed-off-by: Yu Liao <liaoyu15@huawei.com>
+Tested-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/dma/Makefile | 1 +
+ tools/testing/selftests/dma/dma_map_benchmark.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/dma/Makefile b/tools/testing/selftests/dma/Makefile
+index aa8e8b5b3864..cd8c5ece1cba 100644
+--- a/tools/testing/selftests/dma/Makefile
++++ b/tools/testing/selftests/dma/Makefile
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ CFLAGS += -I../../../../usr/include/
++CFLAGS += -I../../../../include/
+
+ TEST_GEN_PROGS := dma_map_benchmark
+
+diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
+index c3b3c09e995e..5c997f17fcbd 100644
+--- a/tools/testing/selftests/dma/dma_map_benchmark.c
++++ b/tools/testing/selftests/dma/dma_map_benchmark.c
+@@ -10,8 +10,8 @@
+ #include <unistd.h>
+ #include <sys/ioctl.h>
+ #include <sys/mman.h>
+-#include <linux/map_benchmark.h>
+ #include <linux/types.h>
++#include <linux/map_benchmark.h>
+
+ #define NSEC_PER_MSEC 1000000L
+
+--
+2.35.1
+
--- /dev/null
+From b4e50adc48bf2571454bc5e969ddaf0286aba8e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 15:40:46 +0800
+Subject: selftests: netfilter: correct PKTGEN_SCRIPT_PATHS in
+ nft_concat_range.sh
+
+From: Jie2x Zhou <jie2x.zhou@intel.com>
+
+[ Upstream commit 5d79d8af8dec58bf709b3124d09d9572edd9c617 ]
+
+Before change:
+make -C netfilter
+ TEST: performance
+ net,port [SKIP]
+ perf not supported
+ port,net [SKIP]
+ perf not supported
+ net6,port [SKIP]
+ perf not supported
+ port,proto [SKIP]
+ perf not supported
+ net6,port,mac [SKIP]
+ perf not supported
+ net6,port,mac,proto [SKIP]
+ perf not supported
+ net,mac [SKIP]
+ perf not supported
+
+After change:
+ net,mac [ OK ]
+ baseline (drop from netdev hook): 2061098pps
+ baseline hash (non-ranged entries): 1606741pps
+ baseline rbtree (match on first field only): 1191607pps
+ set with 1000 full, ranged entries: 1639119pps
+ok 8 selftests: netfilter: nft_concat_range.sh
+
+Fixes: 611973c1e06f ("selftests: netfilter: Introduce tests for sets with range concatenation")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Jie2x Zhou <jie2x.zhou@intel.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/netfilter/nft_concat_range.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
+index b35010cc7f6a..a6991877e50c 100755
+--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
+@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload"
+
+ # List of possible paths to pktgen script from kernel tree for performance tests
+ PKTGEN_SCRIPT_PATHS="
+- ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
++ ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+ pktgen/pktgen_bench_xmit_mode_netif_receive.sh"
+
+ # Definition of set types:
+--
+2.35.1
+
usb-serial-option-add-telit-le910cx-0x1250-composition.patch
usb-serial-option-add-quectel-em05-g-modem.patch
usb-serial-option-add-quectel-rm500k-module-support.patch
+drm-msm-ensure-mmap-offset-is-initialized.patch
+drm-msm-fix-double-pm_runtime_disable-call.patch
+netfilter-use-get_random_u32-instead-of-prandom.patch
+scsi-scsi_debug-fix-zone-transition-to-full-conditio.patch
+drm-msm-switch-ordering-of-runpm-put-vs-devfreq_idle.patch
+scsi-iscsi-exclude-zero-from-the-endpoint-id-range.patch
+xsk-fix-generic-transmit-when-completion-queue-reser.patch
+drm-msm-use-for_each_sgtable_sg-to-iterate-over-scat.patch
+bpf-fix-request_sock-leak-in-sk-lookup-helpers.patch
+drm-sun4i-fix-crash-during-suspend-after-component-b.patch
+bpf-x86-fix-tail-call-count-offset-calculation-on-bp.patch
+selftests-dma-fix-compile-error-for-dma_map_benchmar.patch
+scsi-storvsc-correct-reporting-of-hyper-v-i-o-size-l.patch
+phy-aquantia-fix-an-when-higher-speeds-than-1g-are-n.patch
+kvm-arm64-prevent-kmemleak-from-accessing-pkvm-memor.patch
+net-fix-data-race-in-dev_isalive.patch
+veth-add-updating-of-trans_start.patch
+tipc-fix-use-after-free-read-in-tipc_named_reinit.patch
+block-disable-the-elevator-int-del_gendisk.patch
+rethook-reject-getting-a-rethook-if-rcu-is-not-watch.patch
+igb-fix-a-use-after-free-issue-in-igb_clean_tx_ring.patch
+bonding-arp-monitor-spams-netdev_notify_peers-notifi.patch
+ethtool-fix-get-module-eeprom-fallback.patch
+net-sched-sch_netem-fix-arithmetic-in-netem_dump-for.patch
+drm-msm-mdp4-fix-refcount-leak-in-mdp4_modeset_init_.patch
+drm-msm-dp-check-core_initialized-before-disable-int.patch
+drm-msm-dp-force-link-training-for-display-resolutio.patch
+net-phy-at803x-fix-null-pointer-dereference-on-ar933.patch
+perf-test-record-only-user-callchains-on-the-check-a.patch
+perf-test-topology-use-strncmp-right-platform-to-fix.patch
+perf-arm-spe-don-t-set-data-source-if-it-s-not-a-mem.patch
+ipv4-fix-bind-address-validity-regression-tests.patch
+erspan-do-not-assume-transport-header-is-always-set.patch
+net-tls-fix-tls_sk_proto_close-executed-repeatedly.patch
+udmabuf-add-back-sanity-check.patch
+selftests-netfilter-correct-pktgen_script_paths-in-n.patch
+netfilter-nf_dup_netdev-do-not-push-mac-header-a-sec.patch
+netfilter-nf_dup_netdev-add-and-use-recursion-counte.patch
+xen-blkfront-handle-null-gendisk.patch
+x86-xen-remove-undefined-behavior-in-setup_features.patch
+mips-remove-repetitive-increase-irq_err_count.patch
+afs-fix-dynamic-root-getattr.patch
+block-pop-cached-rq-before-potentially-blocking-rq_q.patch
+ice-ignore-protocol-field-in-gtp-offload.patch
+ice-fix-switchdev-rules-book-keeping.patch
+ice-ethtool-advertise-1000m-speeds-properly.patch
+ice-ethtool-prohibit-improper-channel-config-for-dcb.patch
+io_uring-fail-links-when-poll-fails.patch
+regmap-irq-fix-a-bug-in-regmap_irq_enable-for-type_i.patch
+regmap-irq-fix-offset-index-mismatch-in-read_sub_irq.patch
+iommu-ipmmu-vmsa-fix-compatible-for-rcar-gen4.patch
+drm-amd-revert-drm-amd-display-keep-edp-vdd-on-when-.patch
+net-dsa-qca8k-reduce-mgmt-ethernet-timeout.patch
+igb-make-dma-faster-when-cpu-is-active-on-the-pcie-l.patch
+virtio_net-fix-xdp_rxq_info-bug-after-suspend-resume.patch
+revert-net-tls-fix-tls_sk_proto_close-executed-repea.patch
+sock-redo-the-psock-vs-ulp-protection-check.patch
+nvme-move-the-samsung-x5-quirk-entry-to-the-core-qui.patch
+gpio-winbond-fix-error-code-in-winbond_gpio_get.patch
+s390-cpumf-handle-events-cycles-and-instructions-ide.patch
+filemap-fix-serialization-adding-transparent-huge-pa.patch
+kvm-sev-init-target-vmcbs-in-sev_migrate_from.patch
+iio-mma8452-fix-probe-fail-when-device-tree-compatib.patch
+iio-magnetometer-yas530-fix-memchr_inv-misuse.patch
+iio-adc-xilinx-ams-fix-return-error-variable.patch
+iio-adc-vf610-fix-conversion-mode-sysfs-node-name.patch
+io_uring-make-apoll_events-a-__poll_t.patch
+io_uring-fix-req-apoll_events.patch
+usb-typec-wcove-drop-wrong-dependency-to-intel_soc_p.patch
+io_uring-fix-wrong-arm_poll-error-handling.patch
+vmcore-convert-copy_oldmem_page-to-take-an-iov_iter.patch
+s390-crash-add-missing-iterator-advance-in-copy_oldm.patch
+s390-crash-make-copy_oldmem_page-return-number-of-by.patch
--- /dev/null
+From 6e8ec4a6b17bd232c32b934a21ef9e4de7ea5b4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 12:13:53 -0700
+Subject: sock: redo the psock vs ULP protection check
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit e34a07c0ae3906f97eb18df50902e2a01c1015b6 ]
+
+Commit 8a59f9d1e3d4 ("sock: Introduce sk->sk_prot->psock_update_sk_prot()")
+has moved the inet_csk_has_ulp(sk) check from sk_psock_init() to
+the new tcp_bpf_update_proto() function. I'm guessing that this
+was done to allow creating psocks for non-inet sockets.
+
+Unfortunately the destruction path for psock includes the ULP
+unwind, so we need to fail the sk_psock_init() itself.
+Otherwise if ULP is already present we'll notice that later,
+and call tcp_update_ulp() with the sk_proto of the ULP
+itself, which will most likely result in the ULP looping
+its callbacks.
+
+Fixes: 8a59f9d1e3d4 ("sock: Introduce sk->sk_prot->psock_update_sk_prot()")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Tested-by: Jakub Sitnicki <jakub@cloudflare.com>
+Link: https://lore.kernel.org/r/20220620191353.1184629-2-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/inet_sock.h | 5 +++++
+ net/core/skmsg.c | 5 +++++
+ net/ipv4/tcp_bpf.c | 3 ---
+ net/tls/tls_main.c | 2 ++
+ 4 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 234d70ae5f4c..48e4c59d85e2 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -252,6 +252,11 @@ struct inet_sock {
+ #define IP_CMSG_CHECKSUM BIT(7)
+ #define IP_CMSG_RECVFRAGSIZE BIT(8)
+
++static inline bool sk_is_inet(struct sock *sk)
++{
++ return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
++}
++
+ /**
+ * sk_to_full_sk - Access to a full socket
+ * @sk: pointer to a socket
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index cc381165ea08..ede0af308f40 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -695,6 +695,11 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
+
+ write_lock_bh(&sk->sk_callback_lock);
+
++ if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
++ psock = ERR_PTR(-EINVAL);
++ goto out;
++ }
++
+ if (sk->sk_user_data) {
+ psock = ERR_PTR(-EBUSY);
+ goto out;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 1cdcb4df0eb7..2c597a4e429a 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -612,9 +612,6 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ return 0;
+ }
+
+- if (inet_csk_has_ulp(sk))
+- return -EINVAL;
+-
+ if (sk->sk_family == AF_INET6) {
+ if (tcp_bpf_assert_proto_ops(psock->sk_proto))
+ return -EINVAL;
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 7b2b0e7ffee4..5c9697840ef7 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -873,6 +873,8 @@ static void tls_update(struct sock *sk, struct proto *p,
+ {
+ struct tls_context *ctx;
+
++ WARN_ON_ONCE(sk->sk_prot == p);
++
+ ctx = tls_get_ctx(sk);
+ if (likely(ctx)) {
+ ctx->sk_write_space = write_space;
+--
+2.35.1
+
--- /dev/null
+From 2f80b22c0d90d3cc82d8da73d9e6f032f60d4c77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 08:45:51 +0700
+Subject: tipc: fix use-after-free Read in tipc_named_reinit
+
+From: Hoang Le <hoang.h.le@dektech.com.au>
+
+[ Upstream commit 911600bf5a5e84bfda4d33ee32acc75ecf6159f0 ]
+
+syzbot found the following issue on:
+==================================================================
+BUG: KASAN: use-after-free in tipc_named_reinit+0x94f/0x9b0
+net/tipc/name_distr.c:413
+Read of size 8 at addr ffff88805299a000 by task kworker/1:9/23764
+
+CPU: 1 PID: 23764 Comm: kworker/1:9 Not tainted
+5.18.0-rc4-syzkaller-00878-g17d49e6e8012 #0
+Hardware name: Google Compute Engine/Google Compute Engine,
+BIOS Google 01/01/2011
+Workqueue: events tipc_net_finalize_work
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+ print_address_description.constprop.0.cold+0xeb/0x495
+mm/kasan/report.c:313
+ print_report mm/kasan/report.c:429 [inline]
+ kasan_report.cold+0xf4/0x1c6 mm/kasan/report.c:491
+ tipc_named_reinit+0x94f/0x9b0 net/tipc/name_distr.c:413
+ tipc_net_finalize+0x234/0x3d0 net/tipc/net.c:138
+ process_one_work+0x996/0x1610 kernel/workqueue.c:2289
+ worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+ kthread+0x2e9/0x3a0 kernel/kthread.c:376
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
+ </TASK>
+[...]
+==================================================================
+
+In the commit
+d966ddcc3821 ("tipc: fix a deadlock when flushing scheduled work"),
+the cancel_work_sync() function just to make sure ONLY the work
+tipc_net_finalize_work() is executing/pending on any CPU completed before
+tipc namespace is destroyed through tipc_exit_net(). But this function
+is not guaranteed the work is the last queued. So, the destroyed instance
+may be accessed in the work which will try to enqueue later.
+
+In order to completely fix, we re-order the calling of cancel_work_sync()
+to make sure the work tipc_net_finalize_work() was last queued and it
+must be completed by calling cancel_work_sync().
+
+Reported-by: syzbot+47af19f3307fc9c5c82e@syzkaller.appspotmail.com
+Fixes: d966ddcc3821 ("tipc: fix a deadlock when flushing scheduled work")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Signed-off-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Hoang Le <hoang.h.le@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 3f4542e0f065..434e70eabe08 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -109,10 +109,9 @@ static void __net_exit tipc_exit_net(struct net *net)
+ struct tipc_net *tn = tipc_net(net);
+
+ tipc_detach_loopback(net);
++ tipc_net_stop(net);
+ /* Make sure the tipc_net_finalize_work() finished */
+ cancel_work_sync(&tn->work);
+- tipc_net_stop(net);
+-
+ tipc_bcast_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
+--
+2.35.1
+
--- /dev/null
+From 95810a66df323b41e67eba912eb93098db9231eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 09:15:47 +0200
+Subject: udmabuf: add back sanity check
+
+From: Gerd Hoffmann <kraxel@redhat.com>
+
+[ Upstream commit 05b252cccb2e5c3f56119d25de684b4f810ba40a ]
+
+Check vm_fault->pgoff before using it. When we removed the warning, we
+also removed the check.
+
+Fixes: 7b26e4e2119d ("udmabuf: drop WARN_ON() check.")
+Reported-by: zdi-disclosures@trendmicro.com
+Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma-buf/udmabuf.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index e7330684d3b8..9631f2fd2faf 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct udmabuf *ubuf = vma->vm_private_data;
++ pgoff_t pgoff = vmf->pgoff;
+
+- vmf->page = ubuf->pages[vmf->pgoff];
++ if (pgoff >= ubuf->pagecount)
++ return VM_FAULT_SIGBUS;
++ vmf->page = ubuf->pages[pgoff];
+ get_page(vmf->page);
+ return 0;
+ }
+--
+2.35.1
+
--- /dev/null
+From 749474fee44a1a63f3a2c508eb28829293f0064b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jun 2022 13:43:16 +0300
+Subject: usb: typec: wcove: Drop wrong dependency to INTEL_SOC_PMIC
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 9ef165406308515dcf2e3f6e97b39a1c56d86db5 ]
+
+Intel SoC PMIC is a generic name for all PMICs that are used
+on Intel platforms. In particular, INTEL_SOC_PMIC kernel configuration
+option refers to Crystal Cove PMIC, which has never been a part
+of any Intel Broxton hardware. Drop wrong dependency from Kconfig.
+
+Note, the correct dependency is satisfied via ACPI PMIC OpRegion driver,
+which the Type-C depends on.
+
+Fixes: d2061f9cc32d ("usb: typec: add driver for Intel Whiskey Cove PMIC USB Type-C PHY")
+Reported-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220620104316.57592-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/tcpm/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
+index 557f392fe24d..073fd2ea5e0b 100644
+--- a/drivers/usb/typec/tcpm/Kconfig
++++ b/drivers/usb/typec/tcpm/Kconfig
+@@ -56,7 +56,6 @@ config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on MFD_INTEL_PMC_BXT
+- depends on INTEL_SOC_PMIC
+ depends on BXT_WC_PMIC_OPREGION
+ help
+ This driver adds support for USB Type-C on Intel Broxton platforms
+--
+2.35.1
+
--- /dev/null
+From b861fb3a639a3f0d6776588793809d634408ea9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 12:26:30 -0700
+Subject: veth: Add updating of trans_start
+
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+
+[ Upstream commit e66e257a5d8368d9c0ba13d4630f474436533e8b ]
+
+Since commit 21a75f0915dd ("bonding: Fix ARP monitor validation"),
+the bonding ARP / ND link monitors depend on the trans_start time to
+determine link availability. NETIF_F_LLTX drivers must update trans_start
+directly, which veth does not do. This prevents use of the ARP or ND link
+monitors with veth interfaces in a bond.
+
+ Resolve this by having veth_xmit update the trans_start time.
+
+Reported-by: Jonathan Toppins <jtoppins@redhat.com>
+Tested-by: Jonathan Toppins <jtoppins@redhat.com>
+Signed-off-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Fixes: 21a75f0915dd ("bonding: Fix ARP monitor validation")
+Link: https://lore.kernel.org/netdev/b2fd4147-8f50-bebd-963a-1a3e8d1d9715@redhat.com/
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/veth.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index eb0121a64d6d..1d1dea07d932 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
+ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
++ struct netdev_queue *queue = NULL;
+ struct veth_rq *rq = NULL;
+ struct net_device *rcv;
+ int length = skb->len;
+@@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq < rcv->real_num_rx_queues) {
+ rq = &rcv_priv->rq[rxq];
++ queue = netdev_get_tx_queue(dev, rxq);
+
+ /* The napi pointer is available when an XDP program is
+ * attached or when GRO is enabled
+@@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ skb_tx_timestamp(skb);
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
++ if (queue)
++ txq_trans_cond_update(queue);
+ if (!use_napi)
+ dev_lstats_add(dev, length);
+ } else {
+--
+2.35.1
+
--- /dev/null
+From 2895ea92fce2619dfaca4e09113dca834b20eebe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jun 2022 13:48:44 +0200
+Subject: virtio_net: fix xdp_rxq_info bug after suspend/resume
+
+From: Stephan Gerhold <stephan.gerhold@kernkonzept.com>
+
+[ Upstream commit 8af52fe9fd3bf5e7478da99193c0632276e1dfce ]
+
+The following sequence currently causes a driver bug warning
+when using virtio_net:
+
+ # ip link set eth0 up
+ # echo mem > /sys/power/state (or e.g. # rtcwake -s 10 -m mem)
+ <resume>
+ # ip link set eth0 down
+
+ Missing register, driver bug
+ WARNING: CPU: 0 PID: 375 at net/core/xdp.c:138 xdp_rxq_info_unreg+0x58/0x60
+ Call trace:
+ xdp_rxq_info_unreg+0x58/0x60
+ virtnet_close+0x58/0xac
+ __dev_close_many+0xac/0x140
+ __dev_change_flags+0xd8/0x210
+ dev_change_flags+0x24/0x64
+ do_setlink+0x230/0xdd0
+ ...
+
+This happens because virtnet_freeze() frees the receive_queue
+completely (including struct xdp_rxq_info) but does not call
+xdp_rxq_info_unreg(). Similarly, virtnet_restore() sets up the
+receive_queue again but does not call xdp_rxq_info_reg().
+
+Actually, parts of virtnet_freeze_down() and virtnet_restore_up()
+are almost identical to virtnet_close() and virtnet_open(): only
+the calls to xdp_rxq_info_(un)reg() are missing. This means that
+we can fix this easily and avoid such problems in the future by
+just calling virtnet_close()/open() from the freeze/restore handlers.
+
+Aside from adding the missing xdp_rxq_info calls the only difference
+is that the refill work is only cancelled if netif_running(). However,
+this should not make any functional difference since the refill work
+should only be active if the network interface is actually up.
+
+Fixes: 754b8a21a96d ("virtio_net: setup xdp_rxq_info")
+Signed-off-by: Stephan Gerhold <stephan.gerhold@kernkonzept.com>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Link: https://lore.kernel.org/r/20220621114845.3650258-1-stephan.gerhold@kernkonzept.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 25 ++++++-------------------
+ 1 file changed, 6 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index cbba9d2e8f32..10d548b07b9c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
+ static void virtnet_freeze_down(struct virtio_device *vdev)
+ {
+ struct virtnet_info *vi = vdev->priv;
+- int i;
+
+ /* Make sure no work handler is accessing the device */
+ flush_work(&vi->config_work);
+@@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
+ netif_tx_lock_bh(vi->dev);
+ netif_device_detach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
+- cancel_delayed_work_sync(&vi->refill);
+-
+- if (netif_running(vi->dev)) {
+- for (i = 0; i < vi->max_queue_pairs; i++) {
+- napi_disable(&vi->rq[i].napi);
+- virtnet_napi_tx_disable(&vi->sq[i].napi);
+- }
+- }
++ if (netif_running(vi->dev))
++ virtnet_close(vi->dev);
+ }
+
+ static int init_vqs(struct virtnet_info *vi);
+@@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi);
+ static int virtnet_restore_up(struct virtio_device *vdev)
+ {
+ struct virtnet_info *vi = vdev->priv;
+- int err, i;
++ int err;
+
+ err = init_vqs(vi);
+ if (err)
+@@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
+ virtio_device_ready(vdev);
+
+ if (netif_running(vi->dev)) {
+- for (i = 0; i < vi->curr_queue_pairs; i++)
+- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+- schedule_delayed_work(&vi->refill, 0);
+-
+- for (i = 0; i < vi->max_queue_pairs; i++) {
+- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+- &vi->sq[i].napi);
+- }
++ err = virtnet_open(vi->dev);
++ if (err)
++ return err;
+ }
+
+ netif_tx_lock_bh(vi->dev);
+--
+2.35.1
+
--- /dev/null
+From a4562abf36d082779ada087172a916cd1ec749c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Apr 2022 14:37:59 -0700
+Subject: vmcore: convert copy_oldmem_page() to take an iov_iter
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 5d8de293c224896a4da99763fce4f9794308caf4 ]
+
+Patch series "Convert vmcore to use an iov_iter", v5.
+
+For some reason several people have been sending bad patches to fix
+compiler warnings in vmcore recently. Here's how it should be done.
+Compile-tested only on x86. As noted in the first patch, s390 should take
+this conversion a bit further, but I'm not inclined to do that work
+myself.
+
+This patch (of 3):
+
+Instead of passing in a 'buf' and 'userbuf' argument, pass in an iov_iter.
+s390 needs more work to pass the iov_iter down further, or refactor, but
+I'd be more comfortable if someone who can test on s390 did that work.
+
+It's more convenient to convert the whole of read_from_oldmem() to take an
+iov_iter at the same time, so rename it to read_from_oldmem_iter() and add
+a temporary read_from_oldmem() wrapper that creates an iov_iter.
+
+Link: https://lkml.kernel.org/r/20220408090636.560886-1-bhe@redhat.com
+Link: https://lkml.kernel.org/r/20220408090636.560886-2-bhe@redhat.com
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/crash_dump.c | 27 +++-------------
+ arch/arm64/kernel/crash_dump.c | 29 +++--------------
+ arch/ia64/kernel/crash_dump.c | 32 +++----------------
+ arch/mips/kernel/crash_dump.c | 27 +++-------------
+ arch/powerpc/kernel/crash_dump.c | 35 +++------------------
+ arch/riscv/kernel/crash_dump.c | 26 +++------------
+ arch/s390/kernel/crash_dump.c | 13 +++++---
+ arch/sh/kernel/crash_dump.c | 29 +++--------------
+ arch/x86/kernel/crash_dump_32.c | 29 +++--------------
+ arch/x86/kernel/crash_dump_64.c | 41 +++++++-----------------
+ fs/proc/vmcore.c | 54 ++++++++++++++++++++------------
+ include/linux/crash_dump.h | 9 +++---
+ 12 files changed, 91 insertions(+), 260 deletions(-)
+
+diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
+index 53cb92435392..938bd932df9a 100644
+--- a/arch/arm/kernel/crash_dump.c
++++ b/arch/arm/kernel/crash_dump.c
+@@ -14,22 +14,10 @@
+ #include <linux/crash_dump.h>
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
++#include <linux/uio.h>
+
+-/**
+- * copy_oldmem_page() - copy one page from old kernel memory
+- * @pfn: page frame number to be copied
+- * @buf: buffer where the copied page is placed
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page
+- * @userbuf: if set, @buf is int he user address space
+- *
+- * This function copies one page from old kernel memory into buffer pointed by
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+- * copied or negative error in case of failure.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset,
+- int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+
+@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ if (!vaddr)
+ return -ENOMEM;
+
+- if (userbuf) {
+- if (copy_to_user(buf, vaddr + offset, csize)) {
+- iounmap(vaddr);
+- return -EFAULT;
+- }
+- } else {
+- memcpy(buf, vaddr + offset, csize);
+- }
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ iounmap(vaddr);
+ return csize;
+diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
+index 58303a9ec32c..670e4ce81822 100644
+--- a/arch/arm64/kernel/crash_dump.c
++++ b/arch/arm64/kernel/crash_dump.c
+@@ -9,25 +9,11 @@
+ #include <linux/crash_dump.h>
+ #include <linux/errno.h>
+ #include <linux/io.h>
+-#include <linux/memblock.h>
+-#include <linux/uaccess.h>
++#include <linux/uio.h>
+ #include <asm/memory.h>
+
+-/**
+- * copy_oldmem_page() - copy one page from old kernel memory
+- * @pfn: page frame number to be copied
+- * @buf: buffer where the copied page is placed
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page
+- * @userbuf: if set, @buf is in a user address space
+- *
+- * This function copies one page from old kernel memory into buffer pointed by
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+- * copied or negative error in case of failure.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset,
+- int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+
+@@ -38,14 +24,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ if (!vaddr)
+ return -ENOMEM;
+
+- if (userbuf) {
+- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+- memunmap(vaddr);
+- return -EFAULT;
+- }
+- } else {
+- memcpy(buf, vaddr + offset, csize);
+- }
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ memunmap(vaddr);
+
+diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
+index 0ed3c3dee4cd..4ef68e2aa757 100644
+--- a/arch/ia64/kernel/crash_dump.c
++++ b/arch/ia64/kernel/crash_dump.c
+@@ -10,42 +10,18 @@
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/crash_dump.h>
+-
++#include <linux/uio.h>
+ #include <asm/page.h>
+-#include <linux/uaccess.h>
+
+-/**
+- * copy_oldmem_page - copy one page from "oldmem"
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- *
+- * Calling copy_to_user() in atomic context is not desirable. Hence first
+- * copying the data to a pre-allocated kernel page and then copying to user
+- * space in non-atomic context.
+- */
+-ssize_t
+-copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+ vaddr = __va(pfn<<PAGE_SHIFT);
+- if (userbuf) {
+- if (copy_to_user(buf, (vaddr + offset), csize)) {
+- return -EFAULT;
+- }
+- } else
+- memcpy(buf, (vaddr + offset), csize);
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ return csize;
+ }
+
+diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
+index 2e50f55185a6..6e50f4902409 100644
+--- a/arch/mips/kernel/crash_dump.c
++++ b/arch/mips/kernel/crash_dump.c
+@@ -1,22 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/highmem.h>
+ #include <linux/crash_dump.h>
++#include <linux/uio.h>
+
+-/**
+- * copy_oldmem_page - copy one page from "oldmem"
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+
+@@ -24,14 +12,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ return 0;
+
+ vaddr = kmap_local_pfn(pfn);
+-
+- if (!userbuf) {
+- memcpy(buf, vaddr + offset, csize);
+- } else {
+- if (copy_to_user(buf, vaddr + offset, csize))
+- csize = -EFAULT;
+- }
+-
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ kunmap_local(vaddr);
+
+ return csize;
+diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
+index 5693e1c67c2b..32b4a97f1b79 100644
+--- a/arch/powerpc/kernel/crash_dump.c
++++ b/arch/powerpc/kernel/crash_dump.c
+@@ -16,7 +16,7 @@
+ #include <asm/kdump.h>
+ #include <asm/prom.h>
+ #include <asm/firmware.h>
+-#include <linux/uaccess.h>
++#include <linux/uio.h>
+ #include <asm/rtas.h>
+ #include <asm/inst.h>
+
+@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void)
+ }
+ #endif /* CONFIG_NONSTATIC_KERNEL */
+
+-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
+-{
+- if (userbuf) {
+- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
+- return -EFAULT;
+- } else
+- memcpy(buf, (vaddr + offset), csize);
+-
+- return csize;
+-}
+-
+-/**
+- * copy_oldmem_page - copy one page from "oldmem"
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+ phys_addr_t paddr;
+@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
+- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ } else {
+ vaddr = ioremap_cache(paddr, PAGE_SIZE);
+- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ iounmap(vaddr);
+ }
+
+diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c
+index 86cc0ada5752..ea2158cee97b 100644
+--- a/arch/riscv/kernel/crash_dump.c
++++ b/arch/riscv/kernel/crash_dump.c
+@@ -7,22 +7,10 @@
+
+ #include <linux/crash_dump.h>
+ #include <linux/io.h>
++#include <linux/uio.h>
+
+-/**
+- * copy_oldmem_page() - copy one page from old kernel memory
+- * @pfn: page frame number to be copied
+- * @buf: buffer where the copied page is placed
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page
+- * @userbuf: if set, @buf is in a user address space
+- *
+- * This function copies one page from old kernel memory into buffer pointed by
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+- * copied or negative error in case of failure.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset,
+- int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void *vaddr;
+
+@@ -33,13 +21,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ if (!vaddr)
+ return -ENOMEM;
+
+- if (userbuf) {
+- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+- memunmap(vaddr);
+- return -EFAULT;
+- }
+- } else
+- memcpy(buf, vaddr + offset, csize);
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ memunmap(vaddr);
+ return csize;
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 69819b765250..a2c1c55daec0 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -15,6 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/memblock.h>
+ #include <linux/elf.h>
++#include <linux/uio.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/os_info.h>
+ #include <asm/elf.h>
+@@ -212,8 +213,8 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
+ /*
+ * Copy one page from "oldmem"
+ */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
++ unsigned long offset)
+ {
+ unsigned long src;
+ int rc;
+@@ -221,10 +222,12 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ if (!csize)
+ return 0;
+ src = pfn_to_phys(pfn) + offset;
+- if (userbuf)
+- rc = copy_oldmem_user((void __force __user *) buf, src, csize);
++
++ /* XXX: pass the iov_iter down to a common function */
++ if (iter_is_iovec(iter))
++ rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
+ else
+- rc = copy_oldmem_kernel((void *) buf, src, csize);
++ rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
+ return rc;
+ }
+
+diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
+index 5b41b59698c1..19ce6a950aac 100644
+--- a/arch/sh/kernel/crash_dump.c
++++ b/arch/sh/kernel/crash_dump.c
+@@ -8,23 +8,11 @@
+ #include <linux/errno.h>
+ #include <linux/crash_dump.h>
+ #include <linux/io.h>
++#include <linux/uio.h>
+ #include <linux/uaccess.h>
+
+-/**
+- * copy_oldmem_page - copy one page from "oldmem"
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+ void __iomem *vaddr;
+
+@@ -32,15 +20,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ return 0;
+
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+-
+- if (userbuf) {
+- if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
+- iounmap(vaddr);
+- return -EFAULT;
+- }
+- } else
+- memcpy(buf, (vaddr + offset), csize);
+-
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ iounmap(vaddr);
++
+ return csize;
+ }
+diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
+index 5fcac46aaf6b..5f4ae5476e19 100644
+--- a/arch/x86/kernel/crash_dump_32.c
++++ b/arch/x86/kernel/crash_dump_32.c
+@@ -10,8 +10,7 @@
+ #include <linux/errno.h>
+ #include <linux/highmem.h>
+ #include <linux/crash_dump.h>
+-
+-#include <linux/uaccess.h>
++#include <linux/uio.h>
+
+ static inline bool is_crashed_pfn_valid(unsigned long pfn)
+ {
+@@ -29,21 +28,8 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
+ #endif
+ }
+
+-/**
+- * copy_oldmem_page - copy one page from "oldmem"
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from "oldmem". For this page, there might be no pte mapped
+- * in the current kernel.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
++ unsigned long offset)
+ {
+ void *vaddr;
+
+@@ -54,14 +40,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ return -EFAULT;
+
+ vaddr = kmap_local_pfn(pfn);
+-
+- if (!userbuf) {
+- memcpy(buf, vaddr + offset, csize);
+- } else {
+- if (copy_to_user(buf, vaddr + offset, csize))
+- csize = -EFAULT;
+- }
+-
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+ kunmap_local(vaddr);
+
+ return csize;
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
+index 97529552dd24..94fe4aff9694 100644
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -8,12 +8,12 @@
+
+ #include <linux/errno.h>
+ #include <linux/crash_dump.h>
+-#include <linux/uaccess.h>
++#include <linux/uio.h>
+ #include <linux/io.h>
+ #include <linux/cc_platform.h>
+
+-static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf,
++static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset,
+ bool encrypted)
+ {
+ void *vaddr;
+@@ -29,46 +29,27 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ if (!vaddr)
+ return -ENOMEM;
+
+- if (userbuf) {
+- if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
+- iounmap((void __iomem *)vaddr);
+- return -EFAULT;
+- }
+- } else
+- memcpy(buf, vaddr + offset, csize);
++ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ iounmap((void __iomem *)vaddr);
+ return csize;
+ }
+
+-/**
+- * copy_oldmem_page - copy one page of memory
+- * @pfn: page frame number to be copied
+- * @buf: target memory address for the copy; this can be in kernel address
+- * space or user address space (see @userbuf)
+- * @csize: number of bytes to copy
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+- * otherwise @buf is in kernel address space, use memcpy().
+- *
+- * Copy a page from the old kernel's memory. For this page, there is no pte
+- * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
++ unsigned long offset)
+ {
+- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
++ return __copy_oldmem_page(iter, pfn, csize, offset, false);
+ }
+
+-/**
++/*
+ * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
+ * machines.
+ */
+-ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset)
+ {
+- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
++ return __copy_oldmem_page(iter, pfn, csize, offset, true);
+ }
+
+ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 6f1b8ddc6f7a..54dda2e19ed1 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -26,6 +26,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/pagemap.h>
+ #include <linux/uaccess.h>
++#include <linux/uio.h>
+ #include <linux/cc_platform.h>
+ #include <asm/io.h>
+ #include "internal.h"
+@@ -128,9 +129,8 @@ static int open_vmcore(struct inode *inode, struct file *file)
+ }
+
+ /* Reads a page from the oldmem device from given offset. */
+-ssize_t read_from_oldmem(char *buf, size_t count,
+- u64 *ppos, int userbuf,
+- bool encrypted)
++static ssize_t read_from_oldmem_iter(struct iov_iter *iter, size_t count,
++ u64 *ppos, bool encrypted)
+ {
+ unsigned long pfn, offset;
+ size_t nr_bytes;
+@@ -152,29 +152,23 @@ ssize_t read_from_oldmem(char *buf, size_t count,
+
+ /* If pfn is not ram, return zeros for sparse dump files */
+ if (!pfn_is_ram(pfn)) {
+- tmp = 0;
+- if (!userbuf)
+- memset(buf, 0, nr_bytes);
+- else if (clear_user(buf, nr_bytes))
+- tmp = -EFAULT;
++ tmp = iov_iter_zero(nr_bytes, iter);
+ } else {
+ if (encrypted)
+- tmp = copy_oldmem_page_encrypted(pfn, buf,
++ tmp = copy_oldmem_page_encrypted(iter, pfn,
+ nr_bytes,
+- offset,
+- userbuf);
++ offset);
+ else
+- tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+- offset, userbuf);
++ tmp = copy_oldmem_page(iter, pfn, nr_bytes,
++ offset);
+ }
+- if (tmp < 0) {
++ if (tmp < nr_bytes) {
+ srcu_read_unlock(&vmcore_cb_srcu, idx);
+- return tmp;
++ return -EFAULT;
+ }
+
+ *ppos += nr_bytes;
+ count -= nr_bytes;
+- buf += nr_bytes;
+ read += nr_bytes;
+ ++pfn;
+ offset = 0;
+@@ -184,6 +178,27 @@ ssize_t read_from_oldmem(char *buf, size_t count,
+ return read;
+ }
+
++ssize_t read_from_oldmem(char *buf, size_t count,
++ u64 *ppos, int userbuf,
++ bool encrypted)
++{
++ struct iov_iter iter;
++ struct iovec iov;
++ struct kvec kvec;
++
++ if (userbuf) {
++ iov.iov_base = (__force void __user *)buf;
++ iov.iov_len = count;
++ iov_iter_init(&iter, READ, &iov, 1, count);
++ } else {
++ kvec.iov_base = buf;
++ kvec.iov_len = count;
++ iov_iter_kvec(&iter, READ, &kvec, 1, count);
++ }
++
++ return read_from_oldmem_iter(&iter, count, ppos, encrypted);
++}
++
+ /*
+ * Architectures may override this function to allocate ELF header in 2nd kernel
+ */
+@@ -228,11 +243,10 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ /*
+ * Architectures which support memory encryption override this.
+ */
+-ssize_t __weak
+-copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+- unsigned long offset, int userbuf)
++ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
++ unsigned long pfn, size_t csize, unsigned long offset)
+ {
+- return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
++ return copy_oldmem_page(iter, pfn, csize, offset);
+ }
+
+ /*
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 620821549b23..a1cf7d5c03c7 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -24,11 +24,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
+-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+- unsigned long, int);
+-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset,
+- int userbuf);
++ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
++ unsigned long offset);
++ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
++ size_t csize, unsigned long offset);
+
+ void vmcore_cleanup(void);
+
+--
+2.35.1
+
--- /dev/null
+From 45a992d00c4889dab4f16eb48b920538a05587b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 11:30:37 +0100
+Subject: x86/xen: Remove undefined behavior in setup_features()
+
+From: Julien Grall <jgrall@amazon.com>
+
+[ Upstream commit ecb6237fa397b7b810d798ad19322eca466dbab1 ]
+
+1 << 31 is undefined. So switch to 1U << 31.
+
+Fixes: 5ead97c84fa7 ("xen: Core Xen implementation")
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Link: https://lore.kernel.org/r/20220617103037.57828-1-julien@xen.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/features.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/xen/features.c b/drivers/xen/features.c
+index 7b591443833c..87f1828d40d5 100644
+--- a/drivers/xen/features.c
++++ b/drivers/xen/features.c
+@@ -42,7 +42,7 @@ void xen_setup_features(void)
+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+ break;
+ for (j = 0; j < 32; j++)
+- xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
++ xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
+ }
+
+ if (xen_pv_domain()) {
+--
+2.35.1
+
--- /dev/null
+From 054f9fed0364e3b0dee1b7e1379ade6821d0c23f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 15:53:41 -0400
+Subject: xen-blkfront: Handle NULL gendisk
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jason Andryuk <jandryuk@gmail.com>
+
+[ Upstream commit f9710c357e5bbf64d7ce45ba0bc75a52222491c1 ]
+
+When a VBD is not fully created and then closed, the kernel can have a
+NULL pointer dereference:
+
+The reproducer is trivial:
+
+[user@dom0 ~]$ sudo xl block-attach work backend=sys-usb vdev=xvdi target=/dev/sdz
+[user@dom0 ~]$ xl block-list work
+Vdev BE handle state evt-ch ring-ref BE-path
+51712 0 241 4 -1 -1 /local/domain/0/backend/vbd/241/51712
+51728 0 241 4 -1 -1 /local/domain/0/backend/vbd/241/51728
+51744 0 241 4 -1 -1 /local/domain/0/backend/vbd/241/51744
+51760 0 241 4 -1 -1 /local/domain/0/backend/vbd/241/51760
+51840 3 241 3 -1 -1 /local/domain/3/backend/vbd/241/51840
+ ^ note state, the /dev/sdz doesn't exist in the backend
+
+[user@dom0 ~]$ sudo xl block-detach work xvdi
+[user@dom0 ~]$ xl block-list work
+Vdev BE handle state evt-ch ring-ref BE-path
+work is an invalid domain identifier
+
+And its console has:
+
+BUG: kernel NULL pointer dereference, address: 0000000000000050
+PGD 80000000edebb067 P4D 80000000edebb067 PUD edec2067 PMD 0
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 1 PID: 52 Comm: xenwatch Not tainted 5.16.18-2.43.fc32.qubes.x86_64 #1
+RIP: 0010:blk_mq_stop_hw_queues+0x5/0x40
+Code: 00 48 83 e0 fd 83 c3 01 48 89 85 a8 00 00 00 41 39 5c 24 50 77 c0 5b 5d 41 5c 41 5d c3 c3 0f 1f 80 00 00 00 00 0f 1f 44 00 00 <8b> 47 50 85 c0 74 32 41 54 49 89 fc 55 53 31 db 49 8b 44 24 48 48
+RSP: 0018:ffffc90000bcfe98 EFLAGS: 00010293
+RAX: ffffffffc0008370 RBX: 0000000000000005 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: 0000000000000005 RDI: 0000000000000000
+RBP: ffff88800775f000 R08: 0000000000000001 R09: ffff888006e620b8
+R10: ffff888006e620b0 R11: f000000000000000 R12: ffff8880bff39000
+R13: ffff8880bff39000 R14: 0000000000000000 R15: ffff88800604be00
+FS: 0000000000000000(0000) GS:ffff8880f3300000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000050 CR3: 00000000e932e002 CR4: 00000000003706e0
+Call Trace:
+ <TASK>
+ blkback_changed+0x95/0x137 [xen_blkfront]
+ ? read_reply+0x160/0x160
+ xenwatch_thread+0xc0/0x1a0
+ ? do_wait_intr_irq+0xa0/0xa0
+ kthread+0x16b/0x190
+ ? set_kthread_struct+0x40/0x40
+ ret_from_fork+0x22/0x30
+ </TASK>
+Modules linked in: snd_seq_dummy snd_hrtimer snd_seq snd_seq_device snd_timer snd soundcore ipt_REJECT nf_reject_ipv4 xt_state xt_conntrack nft_counter nft_chain_nat xt_MASQUERADE nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nft_compat nf_tables nfnetlink intel_rapl_msr intel_rapl_common crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel xen_netfront pcspkr xen_scsiback target_core_mod xen_netback xen_privcmd xen_gntdev xen_gntalloc xen_blkback xen_evtchn ipmi_devintf ipmi_msghandler fuse bpf_preload ip_tables overlay xen_blkfront
+CR2: 0000000000000050
+---[ end trace 7bc9597fd06ae89d ]---
+RIP: 0010:blk_mq_stop_hw_queues+0x5/0x40
+Code: 00 48 83 e0 fd 83 c3 01 48 89 85 a8 00 00 00 41 39 5c 24 50 77 c0 5b 5d 41 5c 41 5d c3 c3 0f 1f 80 00 00 00 00 0f 1f 44 00 00 <8b> 47 50 85 c0 74 32 41 54 49 89 fc 55 53 31 db 49 8b 44 24 48 48
+RSP: 0018:ffffc90000bcfe98 EFLAGS: 00010293
+RAX: ffffffffc0008370 RBX: 0000000000000005 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: 0000000000000005 RDI: 0000000000000000
+RBP: ffff88800775f000 R08: 0000000000000001 R09: ffff888006e620b8
+R10: ffff888006e620b0 R11: f000000000000000 R12: ffff8880bff39000
+R13: ffff8880bff39000 R14: 0000000000000000 R15: ffff88800604be00
+FS: 0000000000000000(0000) GS:ffff8880f3300000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000050 CR3: 00000000e932e002 CR4: 00000000003706e0
+Kernel panic - not syncing: Fatal exception
+Kernel Offset: disabled
+
+info->rq and info->gd are only set in blkfront_connect(), which is
+called for state 4 (XenbusStateConnected). Guard against using NULL
+variables in blkfront_closing() to avoid the issue.
+
+The rest of blkfront_closing looks okay. If info->nr_rings is 0, then
+for_each_rinfo won't do anything.
+
+blkfront_remove also needs to check for non-NULL pointers before
+cleaning up the gendisk and request queue.
+
+Fixes: 05d69d950d9d "xen-blkfront: sanitize the removal state machine"
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: Jason Andryuk <jandryuk@gmail.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Link: https://lore.kernel.org/r/20220601195341.28581-1-jandryuk@gmail.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/xen-blkfront.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 003056d4f7f5..966a6bf4c162 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2137,9 +2137,11 @@ static void blkfront_closing(struct blkfront_info *info)
+ return;
+
+ /* No more blkif_request(). */
+- blk_mq_stop_hw_queues(info->rq);
+- blk_mark_disk_dead(info->gd);
+- set_capacity(info->gd, 0);
++ if (info->rq && info->gd) {
++ blk_mq_stop_hw_queues(info->rq);
++ blk_mark_disk_dead(info->gd);
++ set_capacity(info->gd, 0);
++ }
+
+ for_each_rinfo(info, rinfo, i) {
+ /* No more gnttab callback work. */
+@@ -2480,16 +2482,19 @@ static int blkfront_remove(struct xenbus_device *xbdev)
+
+ dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
+
+- del_gendisk(info->gd);
++ if (info->gd)
++ del_gendisk(info->gd);
+
+ mutex_lock(&blkfront_mutex);
+ list_del(&info->info_list);
+ mutex_unlock(&blkfront_mutex);
+
+ blkif_free(info, 0);
+- xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+- blk_cleanup_disk(info->gd);
+- blk_mq_free_tag_set(&info->tag_set);
++ if (info->gd) {
++ xlbd_release_minors(info->gd->first_minor, info->gd->minors);
++ blk_cleanup_disk(info->gd);
++ blk_mq_free_tag_set(&info->tag_set);
++ }
+
+ kfree(info);
+ return 0;
+--
+2.35.1
+
--- /dev/null
+From a6931b4b5042348aec9f037e8ee85f2473997a64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 07:07:46 +0000
+Subject: xsk: Fix generic transmit when completion queue reservation fails
+
+From: Ciara Loftus <ciara.loftus@intel.com>
+
+[ Upstream commit a6e944f25cdbe6b82275402b8bc9a55ad7aac10b ]
+
+Two points of potential failure in the generic transmit function are:
+
+ 1. completion queue (cq) reservation failure.
+ 2. skb allocation failure
+
+Originally the cq reservation was performed first, followed by the skb
+allocation. Commit 675716400da6 ("xdp: fix possible cq entry leak")
+reversed the order because at the time there was no mechanism available
+to undo the cq reservation which could have led to possible cq entry leaks
+in the event of skb allocation failure. However if the skb allocation is
+performed first and the cq reservation then fails, the xsk skb destructor
+is called which blindly adds the skb address to the already full cq leading
+to undefined behavior.
+
+This commit restores the original order (cq reservation followed by skb
+allocation) and uses the xskq_prod_cancel helper to undo the cq reserve
+in event of skb allocation failure.
+
+Fixes: 675716400da6 ("xdp: fix possible cq entry leak")
+Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20220614070746.8871-1-ciara.loftus@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index d6bcdbfd0fc5..9b12ea3ab85a 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk)
+ goto out;
+ }
+
+- skb = xsk_build_skb(xs, &desc);
+- if (IS_ERR(skb)) {
+- err = PTR_ERR(skb);
+- goto out;
+- }
+-
+ /* This is the backpressure mechanism for the Tx path.
+ * Reserve space in the completion queue and only proceed
+ * if there is space in it. This avoids having to implement
+@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk)
+ spin_lock_irqsave(&xs->pool->cq_lock, flags);
+ if (xskq_prod_reserve(xs->pool->cq)) {
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+- kfree_skb(skb);
+ goto out;
+ }
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+
++ skb = xsk_build_skb(xs, &desc);
++ if (IS_ERR(skb)) {
++ err = PTR_ERR(skb);
++ spin_lock_irqsave(&xs->pool->cq_lock, flags);
++ xskq_prod_cancel(xs->pool->cq);
++ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
++ goto out;
++ }
++
+ err = __dev_direct_xmit(skb, xs->queue_id);
+ if (err == NETDEV_TX_BUSY) {
+ /* Tell user-space to retry the send */
+--
+2.35.1
+