--- /dev/null
+From aae50227aaeea5fada17f158e27c7120cdb01bfc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 May 2023 13:28:57 -0700
+Subject: af_packet: Don't send zero-byte data in packet_sendmsg_spkt().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 6a341729fb31b4c5df9f74f24b4b1c98410c9b87 ]
+
+syzkaller reported a warning below [0].
+
+We can reproduce it by sending 0-byte data from the (AF_PACKET,
+SOCK_PACKET) socket via some devices whose dev->hard_header_len
+is 0.
+
+ struct sockaddr_pkt addr = {
+ .spkt_family = AF_PACKET,
+ .spkt_device = "tun0",
+ };
+ int fd;
+
+ fd = socket(AF_PACKET, SOCK_PACKET, 0);
+ sendto(fd, NULL, 0, 0, (struct sockaddr *)&addr, sizeof(addr));
+
+We have a similar fix for the (AF_PACKET, SOCK_RAW) socket as
+commit dc633700f00f ("net/af_packet: check len when min_header_len
+equals to 0").
+
+Let's add the same test for the SOCK_PACKET socket.
+
+[0]:
+skb_assert_len
+WARNING: CPU: 1 PID: 19945 at include/linux/skbuff.h:2552 skb_assert_len include/linux/skbuff.h:2552 [inline]
+WARNING: CPU: 1 PID: 19945 at include/linux/skbuff.h:2552 __dev_queue_xmit+0x1f26/0x31d0 net/core/dev.c:4159
+Modules linked in:
+CPU: 1 PID: 19945 Comm: syz-executor.0 Not tainted 6.3.0-rc7-02330-gca6270c12e20 #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+RIP: 0010:skb_assert_len include/linux/skbuff.h:2552 [inline]
+RIP: 0010:__dev_queue_xmit+0x1f26/0x31d0 net/core/dev.c:4159
+Code: 89 de e8 1d a2 85 fd 84 db 75 21 e8 64 a9 85 fd 48 c7 c6 80 2a 1f 86 48 c7 c7 c0 06 1f 86 c6 05 23 cf 27 04 01 e8 fa ee 56 fd <0f> 0b e8 43 a9 85 fd 0f b6 1d 0f cf 27 04 31 ff 89 de e8 e3 a1 85
+RSP: 0018:ffff8880217af6e0 EFLAGS: 00010282
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffc90001133000
+RDX: 0000000000040000 RSI: ffffffff81186922 RDI: 0000000000000001
+RBP: ffff8880217af8b0 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000001 R11: 0000000000000001 R12: ffff888030045640
+R13: ffff8880300456b0 R14: ffff888030045650 R15: ffff888030045718
+FS: 00007fc5864da640(0000) GS:ffff88806cd00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000020005740 CR3: 000000003f856003 CR4: 0000000000770ee0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ dev_queue_xmit include/linux/netdevice.h:3085 [inline]
+ packet_sendmsg_spkt+0xc4b/0x1230 net/packet/af_packet.c:2066
+ sock_sendmsg_nosec net/socket.c:724 [inline]
+ sock_sendmsg+0x1b4/0x200 net/socket.c:747
+ ____sys_sendmsg+0x331/0x970 net/socket.c:2503
+ ___sys_sendmsg+0x11d/0x1c0 net/socket.c:2557
+ __sys_sendmmsg+0x18c/0x430 net/socket.c:2643
+ __do_sys_sendmmsg net/socket.c:2672 [inline]
+ __se_sys_sendmmsg net/socket.c:2669 [inline]
+ __x64_sys_sendmmsg+0x9c/0x100 net/socket.c:2669
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x3c/0x90 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x72/0xdc
+RIP: 0033:0x7fc58791de5d
+Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48
+RSP: 002b:00007fc5864d9cc8 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
+RAX: ffffffffffffffda RBX: 00000000004bbf80 RCX: 00007fc58791de5d
+RDX: 0000000000000001 RSI: 0000000020005740 RDI: 0000000000000004
+RBP: 00000000004bbf80 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 000000000000000b R14: 00007fc58797e530 R15: 0000000000000000
+ </TASK>
+---[ end trace 0000000000000000 ]---
+skb len=0 headroom=16 headlen=0 tailroom=304
+mac=(16,0) net=(16,-1) trans=-1
+shinfo(txflags=0 nr_frags=0 gso(size=0 type=0 segs=0))
+csum(0x0 ip_summed=0 complete_sw=0 valid=0 level=0)
+hash(0x0 sw=0 l4=0) proto=0x0000 pkttype=0 iif=0
+dev name=sit0 feat=0x00000006401d7869
+sk family=17 type=10 proto=0
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 0db871edd3a18..f5d430bd372ce 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1999,7 +1999,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
+ goto retry;
+ }
+
+- if (!dev_validate_header(dev, skb->data, len)) {
++ if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+--
+2.39.2
+
--- /dev/null
+From 33de66b715cbe899dc2e1f8e2fe269ea21ecf626 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 May 2023 14:50:53 +0800
+Subject: ALSA: caiaq: input: Add error handling for unsupported input methods
+ in `snd_usb_caiaq_input_init`
+
+From: Ruliang Lin <u202112092@hust.edu.cn>
+
+[ Upstream commit 0d727e1856ef22dd9337199430258cb64cbbc658 ]
+
+Smatch complains that:
+snd_usb_caiaq_input_init() warn: missing error code 'ret'
+
+This patch adds a new case to handle the situation where the
+device does not support any input methods in the
+`snd_usb_caiaq_input_init` function. It returns an `-EINVAL` error code
+to indicate that no input methods are supported on the device.
+
+Fixes: 523f1dce3743 ("[ALSA] Add Native Instrument usb audio device support")
+Signed-off-by: Ruliang Lin <u202112092@hust.edu.cn>
+Reviewed-by: Dongliang Mu <dzm91@hust.edu.cn>
+Acked-by: Daniel Mack <daniel@zonque.org>
+Link: https://lore.kernel.org/r/20230504065054.3309-1-u202112092@hust.edu.cn
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/caiaq/input.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
+index 1e2cf2f08eecd..84f26dce7f5d0 100644
+--- a/sound/usb/caiaq/input.c
++++ b/sound/usb/caiaq/input.c
+@@ -804,6 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
+
+ default:
+ /* no input methods supported on this device */
++ ret = -EINVAL;
+ goto exit_free_idev;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From bdcac4c8875fd8fb5c10561a52e2e650f6c08149 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 16:01:45 +0800
+Subject: crypto: api - Add scaffolding to change completion function signature
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit c35e03eaece71101ff6cbf776b86403860ac8cc3 ]
+
+The crypto completion function currently takes a pointer to a
+struct crypto_async_request object. However, in reality the API
+does not allow the use of any part of the object apart from the
+data field. For example, ahash/shash will create a fake object
+on the stack to pass along a different data field.
+
+This leads to potential bugs where the user may try to dereference
+or otherwise use the crypto_async_request object.
+
+This patch adds some temporary scaffolding so that the completion
+function can take a void * instead. Once affected users have been
+converted this can be removed.
+
+The helper crypto_request_complete will remain even after the
+conversion is complete. It should be used instead of calling
+the completion function directly.
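+
+As a minimal illustration of the intended usage (a sketch only; the
+actual conversions are done in follow-up patches such as the engine
+change later in this series), a caller switches from invoking the
+callback directly to the new helper:
+
+    /* before: call the completion callback by hand */
+    req->complete(req, err);
+
+    /* after: go through the helper */
+    crypto_request_complete(req, err);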
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 4140aafcff16 ("crypto: engine - fix crypto_queue backlog handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/crypto/algapi.h | 7 +++++++
+ include/linux/crypto.h | 6 ++++++
+ 2 files changed, 13 insertions(+)
+
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 5f6841c73e5a7..0ffd61930e180 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -256,4 +256,11 @@ enum {
+ CRYPTO_MSG_ALG_LOADED,
+ };
+
++static inline void crypto_request_complete(struct crypto_async_request *req,
++ int err)
++{
++ crypto_completion_t complete = req->complete;
++ complete(req, err);
++}
++
+ #endif /* _CRYPTO_ALGAPI_H */
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 855869e1fd327..987eeb94bb70b 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -167,6 +167,7 @@ struct crypto_async_request;
+ struct crypto_tfm;
+ struct crypto_type;
+
++typedef struct crypto_async_request crypto_completion_data_t;
+ typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+
+ /**
+@@ -586,6 +587,11 @@ struct crypto_wait {
+ /*
+ * Async ops completion helper functioons
+ */
++static inline void *crypto_get_completion_data(crypto_completion_data_t *req)
++{
++ return req->data;
++}
++
+ void crypto_req_done(struct crypto_async_request *req, int err);
+
+ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+--
+2.39.2
+
--- /dev/null
+From f0efed381e89c78671714ea364c2e4545e66c99c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Feb 2022 12:08:33 +0000
+Subject: crypto: engine - check if BH is disabled during completion
+
+From: Corentin Labbe <clabbe@baylibre.com>
+
+[ Upstream commit 4058cf08945c18a6de193f4118fd05d83d3d4285 ]
+
+When doing iperf over ipsec with the sun8i-ce crypto hardware, I hit a
+spinlock recursion bug.
+
+This is due to the completion function being called with BH enabled.
+
+Add a check to detect this.
+
+Fixes: 735d37b5424b ("crypto: engine - Introduce the block request crypto engine framework")
+Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 4140aafcff16 ("crypto: engine - fix crypto_queue backlog handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ crypto/crypto_engine.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
+index cff21f4e03e32..fecf6baaa4f7d 100644
+--- a/crypto/crypto_engine.c
++++ b/crypto/crypto_engine.c
+@@ -53,6 +53,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
+ dev_err(engine->dev, "failed to unprepare request\n");
+ }
+ }
++ lockdep_assert_in_softirq();
+ req->complete(req, err);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+--
+2.39.2
+
--- /dev/null
+From d620106cd3d4c4870d22cfe1c094d9359b242865 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Apr 2023 11:00:35 -0400
+Subject: crypto: engine - fix crypto_queue backlog handling
+
+From: Olivier Bacon <olivierb89@gmail.com>
+
+[ Upstream commit 4140aafcff167b5b9e8dae6a1709a6de7cac6f74 ]
+
+CRYPTO_TFM_REQ_MAY_BACKLOG tells the crypto driver that it should
+internally backlog requests until the crypto hw's queue becomes
+full. At that point, crypto_engine backlogs the request and returns
+-EBUSY. A calling driver such as dm-crypt then waits until the
+complete() function is called with a status of -EINPROGRESS before
+sending a new request.
+
+The problem lies in the call to complete() with a value of -EINPROGRESS
+that is made when a backlog item is present on the queue. The call is
+done before the successful execution of the crypto request. In the case
+that do_one_request() returns < 0 and the retry support is available,
+the request is put back in the queue. This leads upper drivers to send
+a new request even if the queue is still full.
+
+The problem can be reproduced by doing a large dd into a crypto
+dm-crypt device. This is pretty easy to see when using
+the Freescale CAAM crypto driver and SWIOTLB DMA. Since the actual
+number of requests that can be held in the queue is unlimited, we get
+I/O errors and DMA allocation failures.
+
+The fix is to call complete with a value of -EINPROGRESS only if
+the request is not enqueued back in crypto_queue. This is done
+by calling complete() later in the code. In order to delay the decision,
+crypto_queue is modified to correctly set the backlog pointer
+when a request is enqueued back.
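+
+For reference, a rough sketch of the caller contract this relies on
+(illustrative only; my_complete and ctx->backlog_done are made-up
+names, not taken from dm-crypt):
+
+    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  my_complete, ctx);
+    err = crypto_skcipher_encrypt(req);
+    if (err == -EBUSY) {
+        /* Backlogged: wait until my_complete() is invoked with
+         * -EINPROGRESS, which after this fix only happens once the
+         * request has really left the backlog queue.
+         */
+        wait_for_completion(&ctx->backlog_done);
+    }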
+
+Fixes: 6a89f492f8e5 ("crypto: engine - support for parallel requests based on retry mechanism")
+Co-developed-by: Sylvain Ouellet <souellet@genetec.com>
+Signed-off-by: Sylvain Ouellet <souellet@genetec.com>
+Signed-off-by: Olivier Bacon <obacon@genetec.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ crypto/algapi.c | 3 +++
+ crypto/crypto_engine.c | 6 +++---
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index c1af76ec65f51..3920c4b1e9c13 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -920,6 +920,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
+ void crypto_enqueue_request_head(struct crypto_queue *queue,
+ struct crypto_async_request *request)
+ {
++ if (unlikely(queue->qlen >= queue->max_qlen))
++ queue->backlog = queue->backlog->prev;
++
+ queue->qlen++;
+ list_add(&request->list, &queue->list);
+ }
+diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
+index 43fe324b9521b..34effd4826c03 100644
+--- a/crypto/crypto_engine.c
++++ b/crypto/crypto_engine.c
+@@ -129,9 +129,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
+ if (!engine->retry_support)
+ engine->cur_req = async_req;
+
+- if (backlog)
+- crypto_request_complete(backlog, -EINPROGRESS);
+-
+ if (engine->busy)
+ was_busy = true;
+ else
+@@ -217,6 +214,9 @@ static void crypto_pump_requests(struct crypto_engine *engine,
+ crypto_request_complete(async_req, ret);
+
+ retry:
++ if (backlog)
++ crypto_request_complete(backlog, -EINPROGRESS);
++
+ /* If retry mechanism is supported, send new requests to engine */
+ if (engine->retry_support) {
+ spin_lock_irqsave(&engine->queue_lock, flags);
+--
+2.39.2
+
--- /dev/null
+From 0a2ae14c0ecfdb489e28498fbaf9327312bf08c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 16:02:02 +0800
+Subject: crypto: engine - Use crypto_request_complete
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 6909823d47c17cba84e9244d04050b5db8d53789 ]
+
+Use the crypto_request_complete helper instead of calling the
+completion function directly.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 4140aafcff16 ("crypto: engine - fix crypto_queue backlog handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ crypto/crypto_engine.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
+index fecf6baaa4f7d..43fe324b9521b 100644
+--- a/crypto/crypto_engine.c
++++ b/crypto/crypto_engine.c
+@@ -54,7 +54,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
+ }
+ }
+ lockdep_assert_in_softirq();
+- req->complete(req, err);
++ crypto_request_complete(req, err);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+ }
+@@ -130,7 +130,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
+ engine->cur_req = async_req;
+
+ if (backlog)
+- backlog->complete(backlog, -EINPROGRESS);
++ crypto_request_complete(backlog, -EINPROGRESS);
+
+ if (engine->busy)
+ was_busy = true;
+@@ -214,7 +214,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
+ }
+
+ req_err_2:
+- async_req->complete(async_req, ret);
++ crypto_request_complete(async_req, ret);
+
+ retry:
+ /* If retry mechanism is supported, send new requests to engine */
+--
+2.39.2
+
--- /dev/null
+From b96cd8468ec5b7b52e8070477a3b7f2303dc4ea0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Apr 2023 22:25:09 +0200
+Subject: crypto: sun8i-ss - Fix a test in sun8i_ss_setup_ivs()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 8fd91151ebcb21b3f2f2bf158ac6092192550b2b ]
+
+SS_ENCRYPTION is (0 << 7 = 0), so the test can never be true.
+Use a direct comparison to SS_ENCRYPTION instead.
+
+The same kind of test is already done the same way in sun8i_ss_run_task().
+
+Fixes: 359e893e8af4 ("crypto: sun8i-ss - rework handling of IV")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 005eefecfdf59..0cc8cafdde27c 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -132,7 +132,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ }
+ rctx->p_iv[i] = a;
+ /* we need to setup all others IVs only in the decrypt way */
+- if (rctx->op_dir & SS_ENCRYPTION)
++ if (rctx->op_dir == SS_ENCRYPTION)
+ return 0;
+ todo = min(len, sg_dma_len(sg));
+ len -= todo;
+--
+2.39.2
+
--- /dev/null
+From 1c0beccd21683d2a495d7c7ac3330dc7488ab94e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 15:54:55 -0700
+Subject: drm/amdgpu: add a missing lock for AMDGPU_SCHED
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chia-I Wu <olvaffe@gmail.com>
+
+[ Upstream commit 2397e3d8d2e120355201a8310b61929f5a8bd2c0 ]
+
+mgr->ctx_handles should be protected by mgr->lock.
+
+v2: improve commit message
+v3: add a Fixes tag
+
+Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Fixes: 52c6a62c64fa ("drm/amdgpu: add interface for editing a foreign process's priority v3")
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index b7d861ed52849..88f986a61c93a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -66,6 +66,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ {
+ struct fd f = fdget(fd);
+ struct amdgpu_fpriv *fpriv;
++ struct amdgpu_ctx_mgr *mgr;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+ int r;
+@@ -79,8 +80,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ return r;
+ }
+
+- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
++ mgr = &fpriv->ctx_mgr;
++ mutex_lock(&mgr->lock);
++ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
++ mutex_unlock(&mgr->lock);
+
+ fdput(f);
+ return 0;
+--
+2.39.2
+
--- /dev/null
+From f2d04c627c85e8c549246d11e70cbbf3d20ee447 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 May 2023 15:20:50 +0300
+Subject: ethtool: Fix uninitialized number of lanes
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 9ad685dbfe7e856bbf17a7177b64676d324d6ed7 ]
+
+It is not possible to set the number of lanes when setting link modes
+using the legacy IOCTL ethtool interface. Since 'struct
+ethtool_link_ksettings' is not initialized in this path, drivers receive
+an uninitialized number of lanes in 'struct
+ethtool_link_ksettings::lanes'.
+
+When this information is later queried from drivers, it results in the
+ethtool code making decisions based on uninitialized memory, leading to
+the following KMSAN splat [1]. In practice, this most likely only
+happens with the tun driver that simply returns whatever it got in the
+set operation.
+
+As far as I can tell, this uninitialized memory is not leaked to user
+space thanks to the 'ethtool_ops->cap_link_lanes_supported' check in
+linkmodes_prepare_data().
+
+Fix by initializing the structure in the IOCTL path. Did not find any
+more call sites that pass an uninitialized structure when calling
+'ethtool_ops::set_link_ksettings()'.
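+
+The fix itself is the usual zero-initialization idiom (sketch matching
+the diff below):
+
+    struct ethtool_link_ksettings link_ksettings = {};
+    /* every member, including link_ksettings.lanes, now starts at 0 */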
+
+[1]
+BUG: KMSAN: uninit-value in ethnl_update_linkmodes net/ethtool/linkmodes.c:273 [inline]
+BUG: KMSAN: uninit-value in ethnl_set_linkmodes+0x190b/0x19d0 net/ethtool/linkmodes.c:333
+ ethnl_update_linkmodes net/ethtool/linkmodes.c:273 [inline]
+ ethnl_set_linkmodes+0x190b/0x19d0 net/ethtool/linkmodes.c:333
+ ethnl_default_set_doit+0x88d/0xde0 net/ethtool/netlink.c:640
+ genl_family_rcv_msg_doit net/netlink/genetlink.c:968 [inline]
+ genl_family_rcv_msg net/netlink/genetlink.c:1048 [inline]
+ genl_rcv_msg+0x141a/0x14c0 net/netlink/genetlink.c:1065
+ netlink_rcv_skb+0x3f8/0x750 net/netlink/af_netlink.c:2577
+ genl_rcv+0x40/0x60 net/netlink/genetlink.c:1076
+ netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+ netlink_unicast+0xf41/0x1270 net/netlink/af_netlink.c:1365
+ netlink_sendmsg+0x127d/0x1430 net/netlink/af_netlink.c:1942
+ sock_sendmsg_nosec net/socket.c:724 [inline]
+ sock_sendmsg net/socket.c:747 [inline]
+ ____sys_sendmsg+0xa24/0xe40 net/socket.c:2501
+ ___sys_sendmsg+0x2a1/0x3f0 net/socket.c:2555
+ __sys_sendmsg net/socket.c:2584 [inline]
+ __do_sys_sendmsg net/socket.c:2593 [inline]
+ __se_sys_sendmsg net/socket.c:2591 [inline]
+ __x64_sys_sendmsg+0x36b/0x540 net/socket.c:2591
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Uninit was stored to memory at:
+ tun_get_link_ksettings+0x37/0x60 drivers/net/tun.c:3544
+ __ethtool_get_link_ksettings+0x17b/0x260 net/ethtool/ioctl.c:441
+ ethnl_set_linkmodes+0xee/0x19d0 net/ethtool/linkmodes.c:327
+ ethnl_default_set_doit+0x88d/0xde0 net/ethtool/netlink.c:640
+ genl_family_rcv_msg_doit net/netlink/genetlink.c:968 [inline]
+ genl_family_rcv_msg net/netlink/genetlink.c:1048 [inline]
+ genl_rcv_msg+0x141a/0x14c0 net/netlink/genetlink.c:1065
+ netlink_rcv_skb+0x3f8/0x750 net/netlink/af_netlink.c:2577
+ genl_rcv+0x40/0x60 net/netlink/genetlink.c:1076
+ netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
+ netlink_unicast+0xf41/0x1270 net/netlink/af_netlink.c:1365
+ netlink_sendmsg+0x127d/0x1430 net/netlink/af_netlink.c:1942
+ sock_sendmsg_nosec net/socket.c:724 [inline]
+ sock_sendmsg net/socket.c:747 [inline]
+ ____sys_sendmsg+0xa24/0xe40 net/socket.c:2501
+ ___sys_sendmsg+0x2a1/0x3f0 net/socket.c:2555
+ __sys_sendmsg net/socket.c:2584 [inline]
+ __do_sys_sendmsg net/socket.c:2593 [inline]
+ __se_sys_sendmsg net/socket.c:2591 [inline]
+ __x64_sys_sendmsg+0x36b/0x540 net/socket.c:2591
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Uninit was stored to memory at:
+ tun_set_link_ksettings+0x37/0x60 drivers/net/tun.c:3553
+ ethtool_set_link_ksettings+0x600/0x690 net/ethtool/ioctl.c:609
+ __dev_ethtool net/ethtool/ioctl.c:3024 [inline]
+ dev_ethtool+0x1db9/0x2a70 net/ethtool/ioctl.c:3078
+ dev_ioctl+0xb07/0x1270 net/core/dev_ioctl.c:524
+ sock_do_ioctl+0x295/0x540 net/socket.c:1213
+ sock_ioctl+0x729/0xd90 net/socket.c:1316
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:870 [inline]
+ __se_sys_ioctl+0x222/0x400 fs/ioctl.c:856
+ __x64_sys_ioctl+0x96/0xe0 fs/ioctl.c:856
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Local variable link_ksettings created at:
+ ethtool_set_link_ksettings+0x54/0x690 net/ethtool/ioctl.c:577
+ __dev_ethtool net/ethtool/ioctl.c:3024 [inline]
+ dev_ethtool+0x1db9/0x2a70 net/ethtool/ioctl.c:3078
+
+Fixes: 012ce4dd3102 ("ethtool: Extend link modes settings uAPI with lanes")
+Reported-and-tested-by: syzbot+ef6edd9f1baaa54d6235@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/netdev/0000000000004bb41105fa70f361@google.com/
+Reviewed-by: Danielle Ratson <danieller@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ethtool/ioctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 939c63d6e74b7..53e2ef6ada8f3 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -568,8 +568,8 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
+ static int ethtool_set_link_ksettings(struct net_device *dev,
+ void __user *useraddr)
+ {
++ struct ethtool_link_ksettings link_ksettings = {};
+ int err;
+- struct ethtool_link_ksettings link_ksettings;
+
+ ASSERT_RTNL();
+
+--
+2.39.2
+
--- /dev/null
+From 9ea062bc319178e0f4bc7e94989e6ecc0aee80ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 May 2023 11:35:36 -0700
+Subject: ionic: catch failure from devlink_alloc
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit 4a54903ff68ddb33b6463c94b4eb37fc584ef760 ]
+
+Add a check for NULL on the alloc return. If devlink_alloc() fails and
+we try to use devlink_priv() on the NULL return, the kernel gets very
+unhappy and panics. With this fix, the driver load will still fail,
+but at least it won't panic the kernel.
+
+Fixes: df69ba43217d ("ionic: Add basic framework for IONIC Network device driver")
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+index c7d0e195d1760..5c06decc868c4 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+@@ -65,6 +65,8 @@ struct ionic *ionic_devlink_alloc(struct device *dev)
+ struct devlink *dl;
+
+ dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev);
++ if (!dl)
++ return NULL;
+
+ return devlink_priv(dl);
+ }
+--
+2.39.2
+
--- /dev/null
+From df3a1616e4c03cf273f6b23c333362c399b00d1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 May 2023 11:47:40 -0700
+Subject: ionic: remove noise from ethtool rxnfc error msg
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit 3711d44fac1f80ea69ecb7315fed05b3812a7401 ]
+
+It seems that ethtool is calling into .get_rxnfc more often with
+ETHTOOL_GRXCLSRLCNT, which ionic doesn't know about. We don't
+need to log a message about it; just return not supported.
+
+Fixes: aa3198819bea6 ("ionic: Add RSS support")
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index 3de1a03839e25..2fa116c3694c4 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -724,7 +724,7 @@ static int ionic_get_rxnfc(struct net_device *netdev,
+ info->data = lif->nxqs;
+ break;
+ default:
+- netdev_err(netdev, "Command parameter %d is not supported\n",
++ netdev_dbg(netdev, "Command parameter %d is not supported\n",
+ info->cmd);
+ err = -EOPNOTSUPP;
+ }
+--
+2.39.2
+
--- /dev/null
+From c81dd0c57eac8918bc162b79a18f49308e91f362 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 11:27:53 +0200
+Subject: KVM: s390: fix race in gmap_make_secure()
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit c148dc8e2fa403be501612ee409db866eeed35c0 ]
+
+Fix a potential race in gmap_make_secure() and remove the last user of
+follow_page() without FOLL_GET.
+
+The old code is locking something it doesn't have a reference to, and
+as explained by Jason and David in this discussion:
+https://lore.kernel.org/linux-mm/Y9J4P%2FRNvY1Ztn0Q@nvidia.com/
+it can lead to all kinds of bad things, including the page getting
+unmapped (MADV_DONTNEED), freed, or reallocated as a larger folio, in
+which case the unlock_page() would target the wrong bit.
+There is also another race involving FOLL_WRITE, between the
+follow_page() and the get_locked_pte().
+
+The main point is to remove the last use of follow_page() without
+FOLL_GET or FOLL_PIN, removing the races can be considered a nice
+bonus.
+
+Link: https://lore.kernel.org/linux-mm/Y9J4P%2FRNvY1Ztn0Q@nvidia.com/
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Fixes: 214d9bbcd3a6 ("s390/mm: provide memory management functions for protected KVM guests")
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Message-Id: <20230428092753.27913-2-imbrenda@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/uv.c | 32 +++++++++++---------------------
+ 1 file changed, 11 insertions(+), 21 deletions(-)
+
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index afb9bbfc475b9..1d49744614326 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -160,21 +160,10 @@ static int expected_page_refs(struct page *page)
+ return res;
+ }
+
+-static int make_secure_pte(pte_t *ptep, unsigned long addr,
+- struct page *exp_page, struct uv_cb_header *uvcb)
++static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+ {
+- pte_t entry = READ_ONCE(*ptep);
+- struct page *page;
+ int expected, cc = 0;
+
+- if (!pte_present(entry))
+- return -ENXIO;
+- if (pte_val(entry) & _PAGE_INVALID)
+- return -ENXIO;
+-
+- page = pte_page(entry);
+- if (page != exp_page)
+- return -ENXIO;
+ if (PageWriteback(page))
+ return -EAGAIN;
+ expected = expected_page_refs(page);
+@@ -265,17 +254,18 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ goto out;
+
+ rc = -ENXIO;
+- page = follow_page(vma, uaddr, FOLL_WRITE);
+- if (IS_ERR_OR_NULL(page))
+- goto out;
+-
+- lock_page(page);
+ ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+- if (should_export_before_import(uvcb, gmap->mm))
+- uv_convert_from_secure(page_to_phys(page));
+- rc = make_secure_pte(ptep, uaddr, page, uvcb);
++ if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
++ page = pte_page(*ptep);
++ rc = -EAGAIN;
++ if (trylock_page(page)) {
++ if (should_export_before_import(uvcb, gmap->mm))
++ uv_convert_from_secure(page_to_phys(page));
++ rc = make_page_secure(page, uvcb);
++ unlock_page(page);
++ }
++ }
+ pte_unmap_unlock(ptep, ptelock);
+- unlock_page(page);
+ out:
+ mmap_read_unlock(gmap->mm);
+
+--
+2.39.2
+
--- /dev/null
+From 048e2024597b7061e660aab60619d149d453166b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jun 2022 15:56:07 +0200
+Subject: KVM: s390: pv: add export before import
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit 72b1daff2671cef2c8cccc6c4e52f8d5ce4ebe58 ]
+
+Due to upcoming changes, it will be possible to temporarily have
+multiple protected VMs in the same address space, although only one
+will be actually active.
+
+In that scenario, it is necessary to perform an export of every page
+that is to be imported, since the hardware does not allow a page
+belonging to a protected guest to be imported into a different
+protected guest.
+
+This also applies to pages that are shared, and thus accessible by the
+host.
+
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220628135619.32410-7-imbrenda@linux.ibm.com
+Message-Id: <20220628135619.32410-7-imbrenda@linux.ibm.com>
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Stable-dep-of: c148dc8e2fa4 ("KVM: s390: fix race in gmap_make_secure()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/uv.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index 09b80d371409b..afb9bbfc475b9 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -202,6 +202,32 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
+ return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
+ }
+
++/**
++ * should_export_before_import - Determine whether an export is needed
++ * before an import-like operation
++ * @uvcb: the Ultravisor control block of the UVC to be performed
++ * @mm: the mm of the process
++ *
++ * Returns whether an export is needed before every import-like operation.
++ * This is needed for shared pages, which don't trigger a secure storage
++ * exception when accessed from a different guest.
++ *
++ * Although considered as one, the Unpin Page UVC is not an actual import,
++ * so it is not affected.
++ *
++ * No export is needed also when there is only one protected VM, because the
++ * page cannot belong to the wrong VM in that case (there is no "other VM"
++ * it can belong to).
++ *
++ * Return: true if an export is needed before every import, otherwise false.
++ */
++static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
++{
++ if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
++ return false;
++ return atomic_read(&mm->context.protected_count) > 1;
++}
++
+ /*
+ * Requests the Ultravisor to make a page accessible to a guest.
+ * If it's brought in the first time, it will be cleared. If
+@@ -245,6 +271,8 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+
+ lock_page(page);
+ ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
++ if (should_export_before_import(uvcb, gmap->mm))
++ uv_convert_from_secure(page_to_phys(page));
+ rc = make_secure_pte(ptep, uaddr, page, uvcb);
+ pte_unmap_unlock(ptep, ptelock);
+ unlock_page(page);
+--
+2.39.2
+
--- /dev/null
+From cc906bbbb096c62a9801917856b3691fe89fba35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Sep 2021 15:24:52 +0200
+Subject: KVM: s390: pv: avoid stalls when making pages secure
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit f0a1a0615a6ff6d38af2c65a522698fb4bb85df6 ]
+
+Improve make_secure_pte to avoid stalls when the system is heavily
+overcommitted. This was especially problematic in kvm_s390_pv_unpack,
+because of the loop over all pages that needed unpacking.
+
+Due to the locks being held, it was not possible to simply replace
+uv_call with uv_call_sched. A more complex approach was
+needed, in which uv_call is replaced with __uv_call, which does not
+loop. When the UVC needs to be executed again, -EAGAIN is returned, and
+the caller (or its caller) will try again.
+
+When -EAGAIN is returned, the path is the same as when the page is in
+writeback (and the writeback check is also performed, which is
+harmless).
+
+Fixes: 214d9bbcd3a672 ("s390/mm: provide memory management functions for protected KVM guests")
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Link: https://lore.kernel.org/r/20210920132502.36111-5-imbrenda@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Stable-dep-of: c148dc8e2fa4 ("KVM: s390: fix race in gmap_make_secure()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/uv.c | 29 +++++++++++++++++++++++------
+ arch/s390/kvm/intercept.c | 5 +++++
+ 2 files changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index f95ccbd396925..09b80d371409b 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -165,7 +165,7 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
+ {
+ pte_t entry = READ_ONCE(*ptep);
+ struct page *page;
+- int expected, rc = 0;
++ int expected, cc = 0;
+
+ if (!pte_present(entry))
+ return -ENXIO;
+@@ -181,12 +181,25 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
+ if (!page_ref_freeze(page, expected))
+ return -EBUSY;
+ set_bit(PG_arch_1, &page->flags);
+- rc = uv_call(0, (u64)uvcb);
++ /*
++ * If the UVC does not succeed or fail immediately, we don't want to
++ * loop for long, or we might get stall notifications.
++ * On the other hand, this is a complex scenario and we are holding a lot of
++ * locks, so we can't easily sleep and reschedule. We try only once,
++ * and if the UVC returned busy or partial completion, we return
++ * -EAGAIN and we let the callers deal with it.
++ */
++ cc = __uv_call(0, (u64)uvcb);
+ page_ref_unfreeze(page, expected);
+- /* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
+- if (rc)
+- rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
+- return rc;
++ /*
++ * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
++ * If busy or partially completed, return -EAGAIN.
++ */
++ if (cc == UVC_CC_OK)
++ return 0;
++ else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
++ return -EAGAIN;
++ return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
+ }
+
+ /*
+@@ -239,6 +252,10 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ mmap_read_unlock(gmap->mm);
+
+ if (rc == -EAGAIN) {
++ /*
++ * If we are here because the UVC returned busy or partial
++ * completion, this is just a useless check, but it is safe.
++ */
+ wait_on_page_writeback(page);
+ } else if (rc == -EBUSY) {
+ /*
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index aeb0e0865e890..0eacd29033173 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -534,6 +534,11 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
+ */
+ if (rc == -EINVAL)
+ return 0;
++ /*
++ * If we got -EAGAIN here, we simply return it. It will eventually
++ * get propagated all the way to userspace, which should then try
++ * again.
++ */
+ return rc;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 7740412cc3a41401c3759a4310192dfde61e224d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 May 2023 16:07:27 -0700
+Subject: net: bcmgenet: Remove phy_stop() from bcmgenet_netif_stop()
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 93e0401e0fc0c54b0ac05b687cd135c2ac38187c ]
+
+The call to phy_stop() races with the later call to phy_disconnect(),
+resulting in concurrent phy_suspend() calls being run from different
+CPUs. The final call to phy_disconnect() ensures that the PHY is
+stopped and suspended, too.
+
+Fixes: c96e731c93ff ("net: bcmgenet: connect and disconnect from the PHY state machine")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 92cd2916e8015..35bf840716d57 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -3416,7 +3416,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
+ umac_enable_set(priv, CMD_TX_EN, false);
+
+- phy_stop(dev->phydev);
+ bcmgenet_disable_rx_napi(priv);
+ bcmgenet_intr_disable(priv);
+
+--
+2.39.2
+
--- /dev/null
+From 1671499efb97ce4d39c7ad479c7f369fa56d9477 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 00:09:46 +0300
+Subject: net: dsa: mt7530: fix corrupt frames using trgmii on 40 MHz XTAL
+ MT7621
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arınç ÜNAL <arinc.unal@arinc9.com>
+
+[ Upstream commit 37c218d8021e36e226add4bab93d071d30fe0704 ]
+
+The multi-chip module MT7530 switch with a 40 MHz oscillator on the
+MT7621AT, MT7621DAT, and MT7621ST SoCs forwards corrupt frames using
+trgmii.
+
+This is caused by the assumption that MT7621 SoCs have got 150 MHz PLL,
+hence using the ncpo1 value, 0x0780.
+
+My testing shows this value works on the Unielec U7621-06, whilst
+Bartel's testing shows it won't work on the Hi-Link HLK-MT7621A and
+Netgear WAC104. All devices tested have got 40 MHz oscillators.
+
+Using the value for 125 MHz PLL, 0x0640, works on all boards at hand.
+The definitions for 125 MHz PLL exist in the Banana Pi BPI-R2 BSP
+source code, whilst those for 150 MHz PLL don't.
+
+Forwarding frames using trgmii on the MCM MT7530 switch with a 25 MHz
+oscillator on the said MT7621 SoCs works fine because the ncpo1 value
+defined for it is for 125 MHz PLL.
+
+Change the 150 MHz PLL comment to 125 MHz PLL, and use the 125 MHz PLL
+ncpo1 values for both oscillator frequencies.
+
+Link: https://github.com/BPI-SINOVOIP/BPI-R2-bsp/blob/81d24bbce7d99524d0771a8bdb2d6663e4eb4faa/u-boot-mt/drivers/net/rt2880_eth.c#L2195
+Fixes: 7ef6f6f8d237 ("net: dsa: mt7530: Add MT7621 TRGMII mode support")
+Tested-by: Bartel Eerdekens <bartel.eerdekens@constell8.be>
+Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mt7530.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index dfea2ab0c297f..e7a551570cf3c 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -441,9 +441,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ else
+ ssc_delta = 0x87;
+ if (priv->id == ID_MT7621) {
+- /* PLL frequency: 150MHz: 1.2GBit */
++ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+- ncpo1 = 0x0780;
++ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+--
+2.39.2
+
--- /dev/null
+From 455f5d0122fd9c9fc881ccf7a81f748a857cdbc5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 22:28:15 +0200
+Subject: net: dsa: mv88e6xxx: add mv88e6321 rsvd2cpu
+
+From: Angelo Dureghello <angelo.dureghello@timesys.com>
+
+[ Upstream commit 6686317855c6997671982d4489ccdd946f644957 ]
+
+Add rsvd2cpu capability for mv88e6321 model, to allow proper bpdu
+processing.
+
+Signed-off-by: Angelo Dureghello <angelo.dureghello@timesys.com>
+Fixes: 51c901a775621 ("net: dsa: mv88e6xxx: distinguish Global 2 Rsvd2CPU")
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index bc363fca2895f..b33aee4404de2 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -4575,6 +4575,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
++ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+--
+2.39.2
+
--- /dev/null
+From 4ac9663c9d5ac18f537eeb938c553e30de6a42cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 May 2023 16:03:59 +0800
+Subject: net: enetc: check the index of the SFI rather than the handle
+
+From: Wei Fang <wei.fang@nxp.com>
+
+[ Upstream commit 299efdc2380aac588557f4d0b2ce7bee05bd0cf2 ]
+
+We should check whether the current SFI (Stream Filter Instance) table
+is full before creating a new SFI entry. However, the previous logic
+checks the handle by mistake and might lead to unpredictable behavior.
+
+Fixes: 888ae5a3952b ("net: enetc: add tc flower psfp offload driver")
+Signed-off-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index ba51fb381f0cb..4e9cb1deaf810 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -1270,7 +1270,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ int index;
+
+ index = enetc_get_free_index(priv);
+- if (sfi->handle < 0) {
++ if (index < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ err = -ENOSPC;
+ goto free_fmi;
+--
+2.39.2
+
--- /dev/null
+From 81f8da20e4ba70311096a8a2df1d6823afeb9e99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 16:13:50 +0800
+Subject: net/ncsi: clear Tx enable mode when handling a Config required AEN
+
+From: Cosmo Chou <chou.cosmo@gmail.com>
+
+[ Upstream commit 6f75cd166a5a3c0bc50441faa8b8304f60522fdd ]
+
+ncsi_channel_is_tx() determines whether a given channel should be
+used for Tx or not. However, when reconfiguring the channel while
+handling a Configuration Required AEN, the channel is wrongly judged
+to already have Tx enabled, which results in the Enable Channel
+Network Tx command not being sent.
+
+Clear the channel Tx enable flag before reconfiguring the channel to
+avoid the misjudgment.
+
+Fixes: 8d951a75d022 ("net/ncsi: Configure multi-package, multi-channel modes with failover")
+Signed-off-by: Cosmo Chou <chou.cosmo@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ncsi/ncsi-aen.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index b635c194f0a85..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
+ nc->state = NCSI_CHANNEL_INACTIVE;
+ list_add_tail_rcu(&nc->link, &ndp->channel_queue);
+ spin_unlock_irqrestore(&ndp->lock, flags);
++ nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
+
+ return ncsi_process_next_channel(ndp);
+ }
+--
+2.39.2
+
--- /dev/null
+From 1c9b301cb0277da415b4d7d317850fe3552d918d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 15:19:40 +0000
+Subject: net/sched: act_mirred: Add carrier check
+
+From: Victor Nogueira <victor@mojatatu.com>
+
+[ Upstream commit 526f28bd0fbdc699cda31426928802650c1528e5 ]
+
+There are cases where the device is administratively UP, but
+operationally down. For example, we have a physical device (Nvidia
+ConnectX-6 Dx, 25Gbps) whose cable was pulled out; here is its ip link
+output:
+
+5: ens2f1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state DOWN mode DEFAULT group default qlen 1000
+ link/ether b8:ce:f6:4b:68:35 brd ff:ff:ff:ff:ff:ff
+ altname enp179s0f1np1
+
+As you can see, it's administratively UP but operationally down.
+In this case, sending a packet to this port caused a nasty kernel hang (so
+nasty that we were unable to capture it). Aborting a transmit based on
+operational status (in addition to administrative status) fixes the issue.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+v1->v2: Add fixes tag
+v2->v3: Remove blank line between tags + add change log, suggested by Leon
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_mirred.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 6f39789d9d14b..97cd4b2377d69 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -261,7 +261,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ goto out;
+ }
+
+- if (unlikely(!(dev->flags & IFF_UP))) {
++ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
+ goto out;
+--
+2.39.2
+
--- /dev/null
+From b3e5fbe55fbc72febfa7eabf6ba33853720615df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 14:31:11 +0200
+Subject: net/sched: cls_api: remove block_cb from driver_list before freeing
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit da94a7781fc3c92e7df7832bc2746f4d39bc624e ]
+
+The error handler of tcf_block_bind() frees the whole bo->cb_list on
+error. However, by that time the flow_block_cb instances are already in
+the driver list, because the driver's ndo_setup_tc() callback is called
+earlier up the call chain in tcf_block_offload_cmd(). This leaves
+dangling pointers to freed objects in the list and causes a
+use-after-free[0]. Fix it by also
+removing flow_block_cb instances from driver_list before deallocating them.
+
+[0]:
+[ 279.868433] ==================================================================
+[ 279.869964] BUG: KASAN: slab-use-after-free in flow_block_cb_setup_simple+0x631/0x7c0
+[ 279.871527] Read of size 8 at addr ffff888147e2bf20 by task tc/2963
+
+[ 279.873151] CPU: 6 PID: 2963 Comm: tc Not tainted 6.3.0-rc6+ #4
+[ 279.874273] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[ 279.876295] Call Trace:
+[ 279.876882] <TASK>
+[ 279.877413] dump_stack_lvl+0x33/0x50
+[ 279.878198] print_report+0xc2/0x610
+[ 279.878987] ? flow_block_cb_setup_simple+0x631/0x7c0
+[ 279.879994] kasan_report+0xae/0xe0
+[ 279.880750] ? flow_block_cb_setup_simple+0x631/0x7c0
+[ 279.881744] ? mlx5e_tc_reoffload_flows_work+0x240/0x240 [mlx5_core]
+[ 279.883047] flow_block_cb_setup_simple+0x631/0x7c0
+[ 279.884027] tcf_block_offload_cmd.isra.0+0x189/0x2d0
+[ 279.885037] ? tcf_block_setup+0x6b0/0x6b0
+[ 279.885901] ? mutex_lock+0x7d/0xd0
+[ 279.886669] ? __mutex_unlock_slowpath.constprop.0+0x2d0/0x2d0
+[ 279.887844] ? ingress_init+0x1c0/0x1c0 [sch_ingress]
+[ 279.888846] tcf_block_get_ext+0x61c/0x1200
+[ 279.889711] ingress_init+0x112/0x1c0 [sch_ingress]
+[ 279.890682] ? clsact_init+0x2b0/0x2b0 [sch_ingress]
+[ 279.891701] qdisc_create+0x401/0xea0
+[ 279.892485] ? qdisc_tree_reduce_backlog+0x470/0x470
+[ 279.893473] tc_modify_qdisc+0x6f7/0x16d0
+[ 279.894344] ? tc_get_qdisc+0xac0/0xac0
+[ 279.895213] ? mutex_lock+0x7d/0xd0
+[ 279.896005] ? __mutex_lock_slowpath+0x10/0x10
+[ 279.896910] rtnetlink_rcv_msg+0x5fe/0x9d0
+[ 279.897770] ? rtnl_calcit.isra.0+0x2b0/0x2b0
+[ 279.898672] ? __sys_sendmsg+0xb5/0x140
+[ 279.899494] ? do_syscall_64+0x3d/0x90
+[ 279.900302] ? entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[ 279.901337] ? kasan_save_stack+0x2e/0x40
+[ 279.902177] ? kasan_save_stack+0x1e/0x40
+[ 279.903058] ? kasan_set_track+0x21/0x30
+[ 279.903913] ? kasan_save_free_info+0x2a/0x40
+[ 279.904836] ? ____kasan_slab_free+0x11a/0x1b0
+[ 279.905741] ? kmem_cache_free+0x179/0x400
+[ 279.906599] netlink_rcv_skb+0x12c/0x360
+[ 279.907450] ? rtnl_calcit.isra.0+0x2b0/0x2b0
+[ 279.908360] ? netlink_ack+0x1550/0x1550
+[ 279.909192] ? rhashtable_walk_peek+0x170/0x170
+[ 279.910135] ? kmem_cache_alloc_node+0x1af/0x390
+[ 279.911086] ? _copy_from_iter+0x3d6/0xc70
+[ 279.912031] netlink_unicast+0x553/0x790
+[ 279.912864] ? netlink_attachskb+0x6a0/0x6a0
+[ 279.913763] ? netlink_recvmsg+0x416/0xb50
+[ 279.914627] netlink_sendmsg+0x7a1/0xcb0
+[ 279.915473] ? netlink_unicast+0x790/0x790
+[ 279.916334] ? iovec_from_user.part.0+0x4d/0x220
+[ 279.917293] ? netlink_unicast+0x790/0x790
+[ 279.918159] sock_sendmsg+0xc5/0x190
+[ 279.918938] ____sys_sendmsg+0x535/0x6b0
+[ 279.919813] ? import_iovec+0x7/0x10
+[ 279.920601] ? kernel_sendmsg+0x30/0x30
+[ 279.921423] ? __copy_msghdr+0x3c0/0x3c0
+[ 279.922254] ? import_iovec+0x7/0x10
+[ 279.923041] ___sys_sendmsg+0xeb/0x170
+[ 279.923854] ? copy_msghdr_from_user+0x110/0x110
+[ 279.924797] ? ___sys_recvmsg+0xd9/0x130
+[ 279.925630] ? __perf_event_task_sched_in+0x183/0x470
+[ 279.926656] ? ___sys_sendmsg+0x170/0x170
+[ 279.927529] ? ctx_sched_in+0x530/0x530
+[ 279.928369] ? update_curr+0x283/0x4f0
+[ 279.929185] ? perf_event_update_userpage+0x570/0x570
+[ 279.930201] ? __fget_light+0x57/0x520
+[ 279.931023] ? __switch_to+0x53d/0xe70
+[ 279.931846] ? sockfd_lookup_light+0x1a/0x140
+[ 279.932761] __sys_sendmsg+0xb5/0x140
+[ 279.933560] ? __sys_sendmsg_sock+0x20/0x20
+[ 279.934436] ? fpregs_assert_state_consistent+0x1d/0xa0
+[ 279.935490] do_syscall_64+0x3d/0x90
+[ 279.936300] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[ 279.937311] RIP: 0033:0x7f21c814f887
+[ 279.938085] Code: 0a 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+[ 279.941448] RSP: 002b:00007fff11efd478 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+[ 279.942964] RAX: ffffffffffffffda RBX: 0000000064401979 RCX: 00007f21c814f887
+[ 279.944337] RDX: 0000000000000000 RSI: 00007fff11efd4e0 RDI: 0000000000000003
+[ 279.945660] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
+[ 279.947003] R10: 00007f21c8008708 R11: 0000000000000246 R12: 0000000000000001
+[ 279.948345] R13: 0000000000409980 R14: 000000000047e538 R15: 0000000000485400
+[ 279.949690] </TASK>
+
+[ 279.950706] Allocated by task 2960:
+[ 279.951471] kasan_save_stack+0x1e/0x40
+[ 279.952338] kasan_set_track+0x21/0x30
+[ 279.953165] __kasan_kmalloc+0x77/0x90
+[ 279.954006] flow_block_cb_setup_simple+0x3dd/0x7c0
+[ 279.955001] tcf_block_offload_cmd.isra.0+0x189/0x2d0
+[ 279.956020] tcf_block_get_ext+0x61c/0x1200
+[ 279.956881] ingress_init+0x112/0x1c0 [sch_ingress]
+[ 279.957873] qdisc_create+0x401/0xea0
+[ 279.958656] tc_modify_qdisc+0x6f7/0x16d0
+[ 279.959506] rtnetlink_rcv_msg+0x5fe/0x9d0
+[ 279.960392] netlink_rcv_skb+0x12c/0x360
+[ 279.961216] netlink_unicast+0x553/0x790
+[ 279.962044] netlink_sendmsg+0x7a1/0xcb0
+[ 279.962906] sock_sendmsg+0xc5/0x190
+[ 279.963702] ____sys_sendmsg+0x535/0x6b0
+[ 279.964534] ___sys_sendmsg+0xeb/0x170
+[ 279.965343] __sys_sendmsg+0xb5/0x140
+[ 279.966132] do_syscall_64+0x3d/0x90
+[ 279.966908] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+[ 279.968407] Freed by task 2960:
+[ 279.969114] kasan_save_stack+0x1e/0x40
+[ 279.969929] kasan_set_track+0x21/0x30
+[ 279.970729] kasan_save_free_info+0x2a/0x40
+[ 279.971603] ____kasan_slab_free+0x11a/0x1b0
+[ 279.972483] __kmem_cache_free+0x14d/0x280
+[ 279.973337] tcf_block_setup+0x29d/0x6b0
+[ 279.974173] tcf_block_offload_cmd.isra.0+0x226/0x2d0
+[ 279.975186] tcf_block_get_ext+0x61c/0x1200
+[ 279.976080] ingress_init+0x112/0x1c0 [sch_ingress]
+[ 279.977065] qdisc_create+0x401/0xea0
+[ 279.977857] tc_modify_qdisc+0x6f7/0x16d0
+[ 279.978695] rtnetlink_rcv_msg+0x5fe/0x9d0
+[ 279.979562] netlink_rcv_skb+0x12c/0x360
+[ 279.980388] netlink_unicast+0x553/0x790
+[ 279.981214] netlink_sendmsg+0x7a1/0xcb0
+[ 279.982043] sock_sendmsg+0xc5/0x190
+[ 279.982827] ____sys_sendmsg+0x535/0x6b0
+[ 279.983703] ___sys_sendmsg+0xeb/0x170
+[ 279.984510] __sys_sendmsg+0xb5/0x140
+[ 279.985298] do_syscall_64+0x3d/0x90
+[ 279.986076] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+[ 279.987532] The buggy address belongs to the object at ffff888147e2bf00
+ which belongs to the cache kmalloc-192 of size 192
+[ 279.989747] The buggy address is located 32 bytes inside of
+ freed 192-byte region [ffff888147e2bf00, ffff888147e2bfc0)
+
+[ 279.992367] The buggy address belongs to the physical page:
+[ 279.993430] page:00000000550f405c refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x147e2a
+[ 279.995182] head:00000000550f405c order:1 entire_mapcount:0 nr_pages_mapped:0 pincount:0
+[ 279.996713] anon flags: 0x200000000010200(slab|head|node=0|zone=2)
+[ 279.997878] raw: 0200000000010200 ffff888100042a00 0000000000000000 dead000000000001
+[ 279.999384] raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000
+[ 280.000894] page dumped because: kasan: bad access detected
+
+[ 280.002386] Memory state around the buggy address:
+[ 280.003338] ffff888147e2be00: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 280.004781] ffff888147e2be80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+[ 280.006224] >ffff888147e2bf00: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 280.007700] ^
+[ 280.008592] ffff888147e2bf80: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+[ 280.010035] ffff888147e2c000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 280.011564] ==================================================================
+
+Fixes: 59094b1e5094 ("net: sched: use flow block API")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 62ce6981942b7..501e05943f02b 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1465,6 +1465,7 @@ static int tcf_block_bind(struct tcf_block *block,
+
+ err_unroll:
+ list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
++ list_del(&block_cb->driver_list);
+ if (i-- > 0) {
+ list_del(&block_cb->list);
+ tcf_block_playback_offloads(block, block_cb->cb,
+--
+2.39.2
+
--- /dev/null
+From d0f651cfb10d1146b1e59d4495814368957f28e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 12:39:34 +0530
+Subject: octeontx2-af: Secure APR table update with the lock
+
+From: Geetha sowjanya <gakula@marvell.com>
+
+[ Upstream commit 048486f81d01db4d100af021ee2ea211d19732a0 ]
+
+The APR table contains the lmtst base address of each PF/VF. These
+entries are updated by the PF/VF during device probe. The lmtst address
+is fetched from HW using the "TXN_REQ" and "ADDR_RSP_STS" registers.
+Take the rsrc_lock to protect these registers from being overwritten
+when multiple PFs invoke rvu_get_lmtaddr() simultaneously.
+
+For example, if PF1 submits a request and is preempted before it reads
+the response, and PF2 then gets scheduled and submits its own request,
+PF1's response is overwritten by PF2's response.
+
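+A minimal sketch of the serialized sequence (lock and register names as
+in the hunk below; error handling trimmed):
+
+  mutex_lock(&rvu->rsrc_lock);
+  /* submit the translation request */
+  rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+  /* wait for and read the response before another PF can overwrite it */
+  err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS,
+                     BIT_ULL(0), false);
+  if (!err)
+    val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+  mutex_unlock(&rvu->rsrc_lock);
+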
+Fixes: 893ae97214c3 ("octeontx2-af: cn10k: Support configurable LMTST regions")
+Signed-off-by: Geetha sowjanya <gakula@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+index 46a41cfff5751..25713287a288f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+@@ -60,13 +60,14 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+ {
+ u64 pa, val, pf;
+- int err;
++ int err = 0;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
++ mutex_lock(&rvu->rsrc_lock);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+@@ -76,12 +77,13 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+- return err;
++ goto exit;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+- return -EIO;
++ err = -EIO;
++ goto exit;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ * PA[11:0] = IOVA[11:0]
+@@ -89,8 +91,9 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+-
+- return 0;
++exit:
++ mutex_unlock(&rvu->rsrc_lock);
++ return err;
+ }
+
+ static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+--
+2.39.2
+
--- /dev/null
+From fd3ca952ccda7f221584d56fe9b88a27efdbe729 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 12:39:42 +0530
+Subject: octeontx2-af: Skip PFs if not enabled
+
+From: Ratheesh Kannoth <rkannoth@marvell.com>
+
+[ Upstream commit 5eb1b7220948a69298a436148a735f32ec325289 ]
+
+Firmware enables PFs and allocates mbox resources for each of them.
+Currently mbox resources are configured without checking whether a PF
+is enabled or not, which results in a crash. Fix this issue by skipping
+mbox initialization for disabled PFs.
+
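+The enable check is roughly the following (a sketch based on the hunks
+below, which read the PF enable bit from RVU_PRIV_PFX_CFG):
+
+  /* mark only the PFs that firmware has enabled */
+  for (i = 0; i < num; i++) {
+    cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+    if (cfg & BIT_ULL(20))
+      set_bit(i, pf_bmap);
+  }
+
+  /* later, skip mbox setup for any PF not marked in the bitmap */
+  if (!test_bit(devid, pf_bmap))
+    continue;
+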
+Fixes: 9bdc47a6e328 ("octeontx2-af: Mbox communication support btw AF and it's VFs")
+Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/mbox.c | 5 +-
+ .../net/ethernet/marvell/octeontx2/af/mbox.h | 3 +-
+ .../net/ethernet/marvell/octeontx2/af/rvu.c | 49 +++++++++++++++----
+ 3 files changed, 46 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+index 2898931d5260a..9690ac01f02c8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
+ */
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+- int direction, int ndevs)
++ int direction, int ndevs, unsigned long *pf_bmap)
+ {
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
++ if (!test_bit(devid, pf_bmap))
++ continue;
++
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index c6643c7db1fc4..2b6cbd5af100d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
+ int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
++
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+- int direction, int ndevs);
++ int direction, int ndevs, unsigned long *bmap);
+ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index bd33b90aaa67b..f64509b1d120c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2196,7 +2196,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+ }
+
+ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+- int num, int type)
++ int num, int type, unsigned long *pf_bmap)
+ {
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+@@ -2208,6 +2208,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
++ if (!test_bit(region, pf_bmap))
++ continue;
++
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+@@ -2229,6 +2232,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ * RVU_AF_PF_BAR4_ADDR register.
+ */
+ for (region = 0; region < num; region++) {
++ if (!test_bit(region, pf_bmap))
++ continue;
++
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+@@ -2257,20 +2263,41 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
+ struct rvu_work *mwork;
++ unsigned long *pf_bmap;
+ void **mbox_regions;
+ const char *name;
++ u64 cfg;
+
+- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+- if (!mbox_regions)
++ pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
++ if (!pf_bmap)
+ return -ENOMEM;
+
++ /* RVU VFs */
++ if (type == TYPE_AFVF)
++ bitmap_set(pf_bmap, 0, num);
++
++ if (type == TYPE_AFPF) {
++ /* Mark enabled PFs in bitmap */
++ for (i = 0; i < num; i++) {
++ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
++ if (cfg & BIT_ULL(20))
++ set_bit(i, pf_bmap);
++ }
++ }
++
++ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
++ if (!mbox_regions) {
++ err = -ENOMEM;
++ goto free_bitmap;
++ }
++
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
++ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+@@ -2279,7 +2306,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
++ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+@@ -2310,16 +2337,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ }
+
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+- reg_base, dir, num);
++ reg_base, dir, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+- reg_base, dir_up, num);
++ reg_base, dir_up, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ for (i = 0; i < num; i++) {
++ if (!test_bit(i, pf_bmap))
++ continue;
++
+ mwork = &mw->mbox_wrk[i];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_handler);
+@@ -2328,8 +2358,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_up_handler);
+ }
+- kfree(mbox_regions);
+- return 0;
++ goto free_regions;
+
+ exit:
+ destroy_workqueue(mw->mbox_wq);
+@@ -2338,6 +2367,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ iounmap((void __iomem *)mbox_regions[num]);
+ free_regions:
+ kfree(mbox_regions);
++free_bitmap:
++ bitmap_free(pf_bmap);
+ return err;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 8d2323ded32267e2dd84bc4edfef6f9fda84e16e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 12:39:43 +0530
+Subject: octeontx2-pf: Disable packet I/O for graceful exit
+
+From: Subbaraya Sundeep <sbhatta@marvell.com>
+
+[ Upstream commit c926252205c424c4842dbdbe02f8e3296f623204 ]
+
+If a mailbox timeout occurs at the stage of enabling packet I/O in
+otx2_open, the interface ends up in the down state whereas hardware
+packet I/O is left enabled. Hence also disable packet I/O before
+bailing out.
+
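+The resulting error path looks roughly like this (condensed sketch of
+the change below):
+
+  err = otx2_rxtx_enable(pf, true);
+  if (err == EIO)          /* mbox timeout: HW packet I/O is already on */
+    goto err_disable_rxtx;
+  else if (err)
+    goto err_tx_stop_queues;
+  ...
+err_disable_rxtx:
+  otx2_rxtx_enable(pf, false);   /* turn packet I/O back off */
+err_tx_stop_queues:
+  netif_tx_stop_all_queues(netdev);
+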
+Fixes: 1ea0166da050 ("octeontx2-pf: Fix the device state on error")
+Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index ab291c2c30144..a987ae9d6a285 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1761,13 +1761,22 @@ int otx2_open(struct net_device *netdev)
+ otx2_dmacflt_reinstall_flows(pf);
+
+ err = otx2_rxtx_enable(pf, true);
+- if (err)
++ /* If a mbox communication error happens at this point then interface
++ * will end up in a state such that it is in down state but hardware
++ * mcam entries are enabled to receive the packets. Hence disable the
++ * packet I/O.
++ */
++ if (err == EIO)
++ goto err_disable_rxtx;
++ else if (err)
+ goto err_tx_stop_queues;
+
+ otx2_do_set_rx_mode(pf);
+
+ return 0;
+
++err_disable_rxtx:
++ otx2_rxtx_enable(pf, false);
+ err_tx_stop_queues:
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+--
+2.39.2
+
--- /dev/null
+From 7cfe6683c999531258c2f5a2879e0a2c0caf6270 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 12:39:44 +0530
+Subject: octeontx2-vf: Detach LF resources on probe cleanup
+
+From: Subbaraya Sundeep <sbhatta@marvell.com>
+
+[ Upstream commit 99ae1260fdb5f15beab8a3adfb93a9041c87a2c1 ]
+
+When VF device probe fails due to an error in MSIX vector allocation,
+the NIX and NPA LF resources are not detached. Fix this by detaching
+the LFs when MSIX vector allocation fails.
+
+Fixes: 3184fb5ba96e ("octeontx2-vf: Virtual function driver support")
+Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 9822db362c88e..e69b0e2729cb2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -630,7 +630,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+- goto err_mbox_destroy;
++ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ if (err)
+--
+2.39.2
+
--- /dev/null
+From 8d1c545ff6de6c4f1c08beb57b30b92d93a23916 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jan 2022 22:13:37 -0800
+Subject: perf evlist: Refactor evlist__for_each_cpu()
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 472832d2c000b9611feaea66fe521055c3dbf17a ]
+
+Previously evlist__for_each_cpu() needed to iterate over the evlist in
+an inner loop and call "skip" routines. Refactor this so that the
+iterator is smarter and the next function can update both the current CPU
+and evsel.
+
+By using a cpu map index, fix an apparent off-by-one in __run_perf_stat's
+call to perf_evsel__close_cpu().
+
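+The new iterator collapses the old nested loop into a single loop,
+roughly (sketch; do_per_cpu_work() stands in for the per-CPU body):
+
+  struct evlist_cpu_iterator evlist_cpu_itr;
+  struct affinity affinity;
+
+  if (affinity__setup(&affinity) < 0)
+    return;
+
+  evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+    struct evsel *counter = evlist_cpu_itr.evsel;
+
+    /* CPUs not in the evsel's cpu map are skipped by the iterator,
+     * and the affinity is set whenever the CPU changes */
+    do_per_cpu_work(counter, evlist_cpu_itr.cpu_map_idx);
+  }
+  affinity__cleanup(&affinity);
+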
+Signed-off-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James Clark <james.clark@arm.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Mike Leach <mike.leach@linaro.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Clarke <pc@us.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Riccardo Mancini <rickyman7@gmail.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
+Cc: Vineet Singh <vineet.singh@intel.com>
+Cc: coresight@lists.linaro.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: zhengjun.xing@intel.com
+Link: https://lore.kernel.org/r/20220105061351.120843-35-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: ecc68ee216c6 ("perf stat: Separate bperf from bpf_profiler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 179 ++++++++++++++++++--------------------
+ tools/perf/util/evlist.c | 146 +++++++++++++++++--------------
+ tools/perf/util/evlist.h | 50 +++++++++--
+ tools/perf/util/evsel.h | 1 -
+ 4 files changed, 210 insertions(+), 166 deletions(-)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 0b709e3ead2ac..4ccd0c7c13ea1 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -405,36 +405,33 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
+
+ static int read_affinity_counters(struct timespec *rs)
+ {
+- struct evsel *counter;
+- struct affinity affinity;
+- int i, ncpus, cpu;
++ struct evlist_cpu_iterator evlist_cpu_itr;
++ struct affinity saved_affinity, *affinity;
+
+ if (all_counters_use_bpf)
+ return 0;
+
+- if (affinity__setup(&affinity) < 0)
++ if (!target__has_cpu(&target) || target__has_per_thread(&target))
++ affinity = NULL;
++ else if (affinity__setup(&saved_affinity) < 0)
+ return -1;
++ else
++ affinity = &saved_affinity;
+
+- ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
+- if (!target__has_cpu(&target) || target__has_per_thread(&target))
+- ncpus = 1;
+- evlist__for_each_cpu(evsel_list, i, cpu) {
+- if (i >= ncpus)
+- break;
+- affinity__set(&affinity, cpu);
++ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
++ struct evsel *counter = evlist_cpu_itr.evsel;
+
+- evlist__for_each_entry(evsel_list, counter) {
+- if (evsel__cpu_iter_skip(counter, cpu))
+- continue;
+- if (evsel__is_bpf(counter))
+- continue;
+- if (!counter->err) {
+- counter->err = read_counter_cpu(counter, rs,
+- counter->cpu_iter - 1);
+- }
++ if (evsel__is_bpf(counter))
++ continue;
++
++ if (!counter->err) {
++ counter->err = read_counter_cpu(counter, rs,
++ evlist_cpu_itr.cpu_map_idx);
+ }
+ }
+- affinity__cleanup(&affinity);
++ if (affinity)
++ affinity__cleanup(&saved_affinity);
++
+ return 0;
+ }
+
+@@ -771,8 +768,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ int status = 0;
+ const bool forks = (argc > 0);
+ bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
++ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity affinity;
+- int i, cpu, err;
++ int err;
+ bool second_pass = false;
+
+ if (forks) {
+@@ -797,102 +795,97 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ all_counters_use_bpf = false;
+ }
+
+- evlist__for_each_cpu (evsel_list, i, cpu) {
++ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
++ counter = evlist_cpu_itr.evsel;
++
+ /*
+ * bperf calls evsel__open_per_cpu() in bperf__load(), so
+ * no need to call it again here.
+ */
+ if (target.use_bpf)
+ break;
+- affinity__set(&affinity, cpu);
+
+- evlist__for_each_entry(evsel_list, counter) {
+- if (evsel__cpu_iter_skip(counter, cpu))
++ if (counter->reset_group || counter->errored)
++ continue;
++ if (evsel__is_bpf(counter))
++ continue;
++try_again:
++ if (create_perf_stat_counter(counter, &stat_config, &target,
++ evlist_cpu_itr.cpu_map_idx) < 0) {
++
++ /*
++ * Weak group failed. We cannot just undo this here
++ * because earlier CPUs might be in group mode, and the kernel
++ * doesn't support mixing group and non group reads. Defer
++ * it to later.
++ * Don't close here because we're in the wrong affinity.
++ */
++ if ((errno == EINVAL || errno == EBADF) &&
++ evsel__leader(counter) != counter &&
++ counter->weak_group) {
++ evlist__reset_weak_group(evsel_list, counter, false);
++ assert(counter->reset_group);
++ second_pass = true;
+ continue;
+- if (counter->reset_group || counter->errored)
++ }
++
++ switch (stat_handle_error(counter)) {
++ case COUNTER_FATAL:
++ return -1;
++ case COUNTER_RETRY:
++ goto try_again;
++ case COUNTER_SKIP:
+ continue;
+- if (evsel__is_bpf(counter))
++ default:
++ break;
++ }
++
++ }
++ counter->supported = true;
++ }
++
++ if (second_pass) {
++ /*
++ * Now redo all the weak group after closing them,
++ * and also close errored counters.
++ */
++
++ /* First close errored or weak retry */
++ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
++ counter = evlist_cpu_itr.evsel;
++
++ if (!counter->reset_group && !counter->errored)
+ continue;
+-try_again:
++
++ perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
++ }
++ /* Now reopen weak */
++ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
++ counter = evlist_cpu_itr.evsel;
++
++ if (!counter->reset_group && !counter->errored)
++ continue;
++ if (!counter->reset_group)
++ continue;
++try_again_reset:
++ pr_debug2("reopening weak %s\n", evsel__name(counter));
+ if (create_perf_stat_counter(counter, &stat_config, &target,
+- counter->cpu_iter - 1) < 0) {
+-
+- /*
+- * Weak group failed. We cannot just undo this here
+- * because earlier CPUs might be in group mode, and the kernel
+- * doesn't support mixing group and non group reads. Defer
+- * it to later.
+- * Don't close here because we're in the wrong affinity.
+- */
+- if ((errno == EINVAL || errno == EBADF) &&
+- evsel__leader(counter) != counter &&
+- counter->weak_group) {
+- evlist__reset_weak_group(evsel_list, counter, false);
+- assert(counter->reset_group);
+- second_pass = true;
+- continue;
+- }
++ evlist_cpu_itr.cpu_map_idx) < 0) {
+
+ switch (stat_handle_error(counter)) {
+ case COUNTER_FATAL:
+ return -1;
+ case COUNTER_RETRY:
+- goto try_again;
++ goto try_again_reset;
+ case COUNTER_SKIP:
+ continue;
+ default:
+ break;
+ }
+-
+ }
+ counter->supported = true;
+ }
+ }
+-
+- if (second_pass) {
+- /*
+- * Now redo all the weak group after closing them,
+- * and also close errored counters.
+- */
+-
+- evlist__for_each_cpu(evsel_list, i, cpu) {
+- affinity__set(&affinity, cpu);
+- /* First close errored or weak retry */
+- evlist__for_each_entry(evsel_list, counter) {
+- if (!counter->reset_group && !counter->errored)
+- continue;
+- if (evsel__cpu_iter_skip_no_inc(counter, cpu))
+- continue;
+- perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
+- }
+- /* Now reopen weak */
+- evlist__for_each_entry(evsel_list, counter) {
+- if (!counter->reset_group && !counter->errored)
+- continue;
+- if (evsel__cpu_iter_skip(counter, cpu))
+- continue;
+- if (!counter->reset_group)
+- continue;
+-try_again_reset:
+- pr_debug2("reopening weak %s\n", evsel__name(counter));
+- if (create_perf_stat_counter(counter, &stat_config, &target,
+- counter->cpu_iter - 1) < 0) {
+-
+- switch (stat_handle_error(counter)) {
+- case COUNTER_FATAL:
+- return -1;
+- case COUNTER_RETRY:
+- goto try_again_reset;
+- case COUNTER_SKIP:
+- continue;
+- default:
+- break;
+- }
+- }
+- counter->supported = true;
+- }
+- }
+- }
+ affinity__cleanup(&affinity);
+
+ evlist__for_each_entry(evsel_list, counter) {
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 5f92319ce258d..39d294f6c3218 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -342,36 +342,65 @@ static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
+ return perf_thread_map__nr(evlist->core.threads);
+ }
+
+-void evlist__cpu_iter_start(struct evlist *evlist)
+-{
+- struct evsel *pos;
+-
+- /*
+- * Reset the per evsel cpu_iter. This is needed because
+- * each evsel's cpumap may have a different index space,
+- * and some operations need the index to modify
+- * the FD xyarray (e.g. open, close)
+- */
+- evlist__for_each_entry(evlist, pos)
+- pos->cpu_iter = 0;
+-}
++struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
++{
++ struct evlist_cpu_iterator itr = {
++ .container = evlist,
++ .evsel = evlist__first(evlist),
++ .cpu_map_idx = 0,
++ .evlist_cpu_map_idx = 0,
++ .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
++ .cpu = -1,
++ .affinity = affinity,
++ };
+
+-bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
+-{
+- if (ev->cpu_iter >= ev->core.cpus->nr)
+- return true;
+- if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
+- return true;
+- return false;
++ if (itr.affinity) {
++ itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
++ affinity__set(itr.affinity, itr.cpu);
++ itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
++ /*
++ * If this CPU isn't in the evsel's cpu map then advance through
++ * the list.
++ */
++ if (itr.cpu_map_idx == -1)
++ evlist_cpu_iterator__next(&itr);
++ }
++ return itr;
++}
++
++void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
++{
++ while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
++ evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
++ evlist_cpu_itr->cpu_map_idx =
++ perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
++ evlist_cpu_itr->cpu);
++ if (evlist_cpu_itr->cpu_map_idx != -1)
++ return;
++ }
++ evlist_cpu_itr->evlist_cpu_map_idx++;
++ if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
++ evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
++ evlist_cpu_itr->cpu =
++ perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
++ evlist_cpu_itr->evlist_cpu_map_idx);
++ if (evlist_cpu_itr->affinity)
++ affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu);
++ evlist_cpu_itr->cpu_map_idx =
++ perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
++ evlist_cpu_itr->cpu);
++ /*
++ * If this CPU isn't in the evsel's cpu map then advance through
++ * the list.
++ */
++ if (evlist_cpu_itr->cpu_map_idx == -1)
++ evlist_cpu_iterator__next(evlist_cpu_itr);
++ }
+ }
+
+-bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
++bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
+ {
+- if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
+- ev->cpu_iter++;
+- return false;
+- }
+- return true;
++ return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
+ }
+
+ static int evsel__strcmp(struct evsel *pos, char *evsel_name)
+@@ -400,31 +429,26 @@ static int evlist__is_enabled(struct evlist *evlist)
+ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
+ {
+ struct evsel *pos;
++ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity affinity;
+- int cpu, i, imm = 0;
+ bool has_imm = false;
+
+ if (affinity__setup(&affinity) < 0)
+ return;
+
+ /* Disable 'immediate' events last */
+- for (imm = 0; imm <= 1; imm++) {
+- evlist__for_each_cpu(evlist, i, cpu) {
+- affinity__set(&affinity, cpu);
+-
+- evlist__for_each_entry(evlist, pos) {
+- if (evsel__strcmp(pos, evsel_name))
+- continue;
+- if (evsel__cpu_iter_skip(pos, cpu))
+- continue;
+- if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
+- continue;
+- if (pos->immediate)
+- has_imm = true;
+- if (pos->immediate != imm)
+- continue;
+- evsel__disable_cpu(pos, pos->cpu_iter - 1);
+- }
++ for (int imm = 0; imm <= 1; imm++) {
++ evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
++ pos = evlist_cpu_itr.evsel;
++ if (evsel__strcmp(pos, evsel_name))
++ continue;
++ if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
++ continue;
++ if (pos->immediate)
++ has_imm = true;
++ if (pos->immediate != imm)
++ continue;
++ evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
+ }
+ if (!has_imm)
+ break;
+@@ -462,24 +486,19 @@ void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
+ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
+ {
+ struct evsel *pos;
++ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity affinity;
+- int cpu, i;
+
+ if (affinity__setup(&affinity) < 0)
+ return;
+
+- evlist__for_each_cpu(evlist, i, cpu) {
+- affinity__set(&affinity, cpu);
+-
+- evlist__for_each_entry(evlist, pos) {
+- if (evsel__strcmp(pos, evsel_name))
+- continue;
+- if (evsel__cpu_iter_skip(pos, cpu))
+- continue;
+- if (!evsel__is_group_leader(pos) || !pos->core.fd)
+- continue;
+- evsel__enable_cpu(pos, pos->cpu_iter - 1);
+- }
++ evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
++ pos = evlist_cpu_itr.evsel;
++ if (evsel__strcmp(pos, evsel_name))
++ continue;
++ if (!evsel__is_group_leader(pos) || !pos->core.fd)
++ continue;
++ evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
+ }
+ affinity__cleanup(&affinity);
+ evlist__for_each_entry(evlist, pos) {
+@@ -1264,8 +1283,8 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
+ void evlist__close(struct evlist *evlist)
+ {
+ struct evsel *evsel;
++ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity affinity;
+- int cpu, i;
+
+ /*
+ * With perf record core.cpus is usually NULL.
+@@ -1279,15 +1298,12 @@ void evlist__close(struct evlist *evlist)
+
+ if (affinity__setup(&affinity) < 0)
+ return;
+- evlist__for_each_cpu(evlist, i, cpu) {
+- affinity__set(&affinity, cpu);
+
+- evlist__for_each_entry_reverse(evlist, evsel) {
+- if (evsel__cpu_iter_skip(evsel, cpu))
+- continue;
+- perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
+- }
++ evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
++ perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
++ evlist_cpu_itr.cpu_map_idx);
+ }
++
+ affinity__cleanup(&affinity);
+ evlist__for_each_entry_reverse(evlist, evsel) {
+ perf_evsel__free_fd(&evsel->core);
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index 97bfb8d0be4f0..ec177f783ee67 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -325,17 +325,53 @@ void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel);
+ #define evlist__for_each_entry_safe(evlist, tmp, evsel) \
+ __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
+
+-#define evlist__for_each_cpu(evlist, index, cpu) \
+- evlist__cpu_iter_start(evlist); \
+- perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
++/** Iterator state for evlist__for_each_cpu */
++struct evlist_cpu_iterator {
++ /** The list being iterated through. */
++ struct evlist *container;
++ /** The current evsel of the iterator. */
++ struct evsel *evsel;
++ /** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
++ int cpu_map_idx;
++ /**
++ * The CPU map index corresponding to evlist->core.all_cpus for the
++ * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
++ * contain fewer entries.
++ */
++ int evlist_cpu_map_idx;
++ /** The number of CPU map entries in evlist->core.all_cpus. */
++ int evlist_cpu_map_nr;
++ /** The current CPU of the iterator. */
++ int cpu;
++ /** If present, used to set the affinity when switching between CPUs. */
++ struct affinity *affinity;
++};
++
++/**
++ * evlist__for_each_cpu - without affinity, iterate over the evlist. With
++ * affinity, iterate over all CPUs and then the evlist
++ * for each evsel on that CPU. When switching between
++ * CPUs the affinity is set to the CPU to avoid IPIs
++ * during syscalls.
++ * @evlist_cpu_itr: the iterator instance.
++ * @evlist: evlist instance to iterate.
++ * @affinity: NULL or used to set the affinity to the current CPU.
++ */
++#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \
++ for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
++ !evlist_cpu_iterator__end(&evlist_cpu_itr); \
++ evlist_cpu_iterator__next(&evlist_cpu_itr))
++
++/** Returns an iterator set to the first CPU/evsel of evlist. */
++struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
++/** Move to next element in iterator, updating CPU, evsel and the affinity. */
++void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
++/** Returns true when iterator is at the end of the CPUs and evlist. */
++bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
+
+ struct evsel *evlist__get_tracking_event(struct evlist *evlist);
+ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
+
+-void evlist__cpu_iter_start(struct evlist *evlist);
+-bool evsel__cpu_iter_skip(struct evsel *ev, int cpu);
+-bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu);
+-
+ struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
+
+ struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event);
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 1f7edfa8568a6..9372ddd369ef4 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -119,7 +119,6 @@ struct evsel {
+ bool errored;
+ struct hashmap *per_pkg_mask;
+ int err;
+- int cpu_iter;
+ struct {
+ evsel__sb_cb_t *cb;
+ void *data;
+--
+2.39.2
+
--- /dev/null
+From 5a9792dfb7af7b786b1fc585baaa7e144b1ba722 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Apr 2023 14:46:39 +0200
+Subject: perf map: Delete two variable initialisations before null pointer
+ checks in sort__sym_from_cmp()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Markus Elfring <Markus.Elfring@web.de>
+
+[ Upstream commit c160118a90d4acf335993d8d59b02ae2147a524e ]
+
+The addresses of two data structure members were taken before the
+corresponding null pointer checks in the implementation of the function
+“sort__sym_from_cmp”.
+
+Thus avoid the risk of undefined behaviour by removing the extra
+initialisations of the local variables “from_l” and “from_r” (they are
+reassigned with the same values after the pointer check anyway).
+
+This issue was detected by using the Coccinelle software.
+
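+The corrected shape of the comparison is simply (sketch):
+
+  struct addr_map_symbol *from_l, *from_r;
+
+  if (!left->branch_info || !right->branch_info)
+    return cmp_null(left->branch_info, right->branch_info);
+
+  from_l = &left->branch_info->from;
+  from_r = &right->branch_info->from;
+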
+Fixes: 1b9e97a2a95e4941 ("perf tools: Fix report -F symbol_from for data without branch info")
+Signed-off-by: <elfring@users.sourceforge.net>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: German Gomez <german.gomez@arm.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: https://lore.kernel.org/cocci/54a21fea-64e3-de67-82ef-d61b90ffad05@web.de/
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/sort.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index a111065b484ef..a4f2ffe2bdb6d 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -876,8 +876,7 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
+ static int64_t
+ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
+ {
+- struct addr_map_symbol *from_l = &left->branch_info->from;
+- struct addr_map_symbol *from_r = &right->branch_info->from;
++ struct addr_map_symbol *from_l, *from_r;
+
+ if (!left->branch_info || !right->branch_info)
+ return cmp_null(left->branch_info, right->branch_info);
+--
+2.39.2
+
--- /dev/null
+From 146096a480fad9aef4cb2926493d42447241c0dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Apr 2023 10:23:35 -0300
+Subject: perf pmu: zfree() expects a pointer to a pointer to zero it after
+ freeing its contents
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit 57f14b5ae1a97537f2abd2828ee7212cada7036e ]
+
+An audit showed just this one problem with zfree(), fix it.
+
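+For context, zfree() frees what the pointer points to and then sets the
+pointer itself to NULL, so it must be passed the address of the pointer.
+A simplified sketch of the idiom (not the exact tools/perf definition):
+
+  #define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)
+
+  zfree(&caps->name);   /* correct: frees caps->name and clears it */
+  zfree(caps->name);    /* wrong: treats the char pointer as a pointer to a pointer */
+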
+Fixes: 9fbc61f832ebf432 ("perf pmu: Add support for PMU capabilities")
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/pmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 26c0b88cef4c8..eafd80be66076 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -1858,7 +1858,7 @@ static int perf_pmu__new_caps(struct list_head *list, char *name, char *value)
+ return 0;
+
+ free_name:
+- zfree(caps->name);
++ zfree(&caps->name);
+ free_caps:
+ free(caps);
+
+--
+2.39.2
+
--- /dev/null
+From 387ca8a12997e96632ca2b4d3cf3fc37205e1a22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 15:04:45 +0000
+Subject: perf scripts intel-pt-events.py: Fix IPC output for Python 2
+
+From: Roman Lozko <lozko.roma@gmail.com>
+
+[ Upstream commit 1f64cfdebfe0494264271e8d7a3a47faf5f58ec7 ]
+
+Integers are not converted to floats during division in Python 2, which
+results in incorrect IPC values. Fix this by switching to the new
+division behavior.
+
+Fixes: a483e64c0b62e93a ("perf scripting python: intel-pt-events.py: Add --insn-trace and --src-trace")
+Signed-off-by: Roman Lozko <lozko.roma@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20230310150445.2925841-1-lozko.roma@gmail.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/scripts/python/intel-pt-events.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
+index 66452a8ec3586..ed6f614f2724d 100644
+--- a/tools/perf/scripts/python/intel-pt-events.py
++++ b/tools/perf/scripts/python/intel-pt-events.py
+@@ -11,7 +11,7 @@
+ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ # more details.
+
+-from __future__ import print_function
++from __future__ import division, print_function
+
+ import os
+ import sys
+--
+2.39.2
+
--- /dev/null
+From cc56ef3b9dde3f42072bae0ec020eaa7be91442b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Apr 2023 20:23:16 +0200
+Subject: perf stat: Separate bperf from bpf_profiler
+
+From: Dmitrii Dolgov <9erthalion6@gmail.com>
+
+[ Upstream commit ecc68ee216c6c5b2f84915e1441adf436f1b019b ]
+
+It seems that perf stat -b <prog id> doesn't produce any results:
+
+ $ perf stat -e cycles -b 4 -I 10000 -vvv
+ Control descriptor is not initialized
+ cycles: 0 0 0
+ time counts unit events
+ 10.007641640 <not supported> cycles
+
+Looks like this happens because fentry/fexit progs are getting loaded, but the
+corresponding perf event is not enabled and not added into the events bpf map.
+I think there is some mix-up between the two types of bpf support, one for bperf
+and one for bpf_profiler. Both are identified via evsel__is_bpf, based on which
+perf events are enabled, but for the latter (bpf_profiler) a perf event is
+required. Using evsel__is_bperf to check only bperf produces expected results:
+
+ $ perf stat -e cycles -b 4 -I 10000 -vvv
+ Control descriptor is not initialized
+ ------------------------------------------------------------
+ perf_event_attr:
+ size 136
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ exclude_guest 1
+ ------------------------------------------------------------
+ sys_perf_event_open: pid -1 cpu 0 group_fd -1 flags 0x8 = 3
+ ------------------------------------------------------------
+ [...perf_event_attr for other CPUs...]
+ ------------------------------------------------------------
+ cycles: 309426 169009 169009
+ time counts unit events
+ 10.010091271 309426 cycles
+
+The final numbers correspond (at least in order of magnitude) to the
+same metric obtained via bpftool.
+
+Fixes: 112cb56164bc2108 ("perf stat: Introduce config stat.bpf-counter-events")
+Reviewed-by: Song Liu <song@kernel.org>
+Signed-off-by: Dmitrii Dolgov <9erthalion6@gmail.com>
+Tested-by: Song Liu <song@kernel.org>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230412182316.11628-1-9erthalion6@gmail.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 4 ++--
+ tools/perf/util/evsel.h | 5 +++++
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 4ccd0c7c13ea1..efae2998a472f 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -791,7 +791,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ counter->reset_group = false;
+ if (bpf_counter__load(counter, &target))
+ return -1;
+- if (!evsel__is_bpf(counter))
++ if (!(evsel__is_bperf(counter)))
+ all_counters_use_bpf = false;
+ }
+
+@@ -807,7 +807,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+
+ if (counter->reset_group || counter->errored)
+ continue;
+- if (evsel__is_bpf(counter))
++ if (evsel__is_bperf(counter))
+ continue;
+ try_again:
+ if (create_perf_stat_counter(counter, &stat_config, &target,
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 9372ddd369ef4..0492cafac4430 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -248,6 +248,11 @@ static inline bool evsel__is_bpf(struct evsel *evsel)
+ return evsel->bpf_counter_ops != NULL;
+ }
+
++static inline bool evsel__is_bperf(struct evsel *evsel)
++{
++ return evsel->bpf_counter_ops != NULL && list_empty(&evsel->bpf_counter_list);
++}
++
+ #define EVSEL__MAX_ALIASES 8
+
+ extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+--
+2.39.2
+
--- /dev/null
+From d3e8674f2980c5c300ac8860d3ab79c8ac26ec55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Apr 2023 01:28:41 +0000
+Subject: perf symbols: Fix return incorrect build_id size in
+ elf_read_build_id()
+
+From: Yang Jihong <yangjihong1@huawei.com>
+
+[ Upstream commit 1511e4696acb715a4fe48be89e1e691daec91c0e ]
+
+In elf_read_build_id(), if a GNU build_id is found, the function should
+return the size of the data actually copied. If descsz is greater than
+BUILD_ID_SIZE, an out-of-bounds data access may occur in write_buildid().
+
+Fixes: be96ea8ffa788dcc ("perf symbols: Fix issue with binaries using 16-bytes buildids (v2)")
+Reported-by: Will Ochowicz <Will.Ochowicz@genusplc.com>
+Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
+Tested-by: Will Ochowicz <Will.Ochowicz@genusplc.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Link: https://lore.kernel.org/lkml/CWLP265MB49702F7BA3D6D8F13E4B1A719C649@CWLP265MB4970.GBRP265.PROD.OUTLOOK.COM/T/
+Link: https://lore.kernel.org/r/20230427012841.231729-1-yangjihong1@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/symbol-elf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index fd42f768e5848..bbc3a150597a4 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -553,7 +553,7 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+- err = descsz;
++ err = sz;
+ break;
+ }
+ }
+--
+2.39.2
+
--- /dev/null
+From 394719eb6ef944760bae3858a7b256afc61d7fbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Mar 2023 16:59:08 +0530
+Subject: perf vendor events power9: Remove UTF-8 characters from JSON files
+
+From: Kajol Jain <kjain@linux.ibm.com>
+
+[ Upstream commit 5d9df8731c0941f3add30f96745a62586a0c9d52 ]
+
+Commit 3c22ba5243040c13 ("perf vendor events powerpc: Update POWER9
+events") added and updated power9 PMU JSON events. However some of the
+JSON events which are part of other.json and pipeline.json files,
+contains UTF-8 characters in their brief description. Having UTF-8
+character could breaks the perf build on some distros.
+
+Fix this issue by removing the UTF-8 characters from other.json and
+pipeline.json files.
+
+Result without the fix:
+
+ [command]# file -i pmu-events/arch/powerpc/power9/*
+ pmu-events/arch/powerpc/power9/cache.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/floating-point.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/frontend.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/marked.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/memory.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/metrics.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/nest_metrics.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/other.json: application/json; charset=utf-8
+ pmu-events/arch/powerpc/power9/pipeline.json: application/json; charset=utf-8
+ pmu-events/arch/powerpc/power9/pmc.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/translation.json: application/json; charset=us-ascii
+ [command]#
+
+Result with the fix:
+
+ [command]# file -i pmu-events/arch/powerpc/power9/*
+ pmu-events/arch/powerpc/power9/cache.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/floating-point.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/frontend.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/marked.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/memory.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/metrics.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/nest_metrics.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/other.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/pipeline.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/pmc.json: application/json; charset=us-ascii
+ pmu-events/arch/powerpc/power9/translation.json: application/json; charset=us-ascii
+ [command]#
+
+Fixes: 3c22ba5243040c13 ("perf vendor events powerpc: Update POWER9 events")
+Reported-by: Arnaldo Carvalho de Melo <acme@kernel.com>
+Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+Cc: Disha Goel <disgoel@linux.ibm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
+Cc: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: https://lore.kernel.org/lkml/ZBxP77deq7ikTxwG@kernel.org/
+Link: https://lore.kernel.org/r/20230328112908.113158-1-kjain@linux.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/pmu-events/arch/powerpc/power9/other.json | 4 ++--
+ tools/perf/pmu-events/arch/powerpc/power9/pipeline.json | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+index 3f69422c21f99..f10bd554521a0 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+@@ -1417,7 +1417,7 @@
+ {
+ "EventCode": "0x45054",
+ "EventName": "PM_FMA_CMPL",
+- "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
++ "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
+ },
+ {
+ "EventCode": "0x201E8",
+@@ -2017,7 +2017,7 @@
+ {
+ "EventCode": "0xC0BC",
+ "EventName": "PM_LSU_FLUSH_OTHER",
+- "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
++ "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
+ },
+ {
+ "EventCode": "0x5094",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+index d0265f255de2b..723bffa41c448 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+@@ -442,7 +442,7 @@
+ {
+ "EventCode": "0x4D052",
+ "EventName": "PM_2FLOP_CMPL",
+- "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
++ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
+ },
+ {
+ "EventCode": "0x1F142",
+--
+2.39.2
+
--- /dev/null
+From 555ba405235e330a7761b2204bdcc42cc975e851 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 16:53:29 +0800
+Subject: r8152: fix flow control issue of RTL8156A
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit 8ceda6d5a1e5402fd852e6cc59a286ce3dc545ee ]
+
+Flow control becomes abnormal if the device sends a pause frame and
+tx/rx is disabled before a release frame is sent, which causes packet
+loss.
+
+Set PLA_RX_FIFO_FULL and PLA_RX_FIFO_EMPTY to zero before disabling
+tx/rx, and toggle FC_PATCH_TASK before enabling tx/rx to reset the flow
+control patch and timer. Then the hardware can clear its state, and flow
+control becomes normal again after tx/rx is enabled.
+
+Besides, remove the inline keyword from fc_pause_on_auto() and
+fc_pause_off_auto().
+
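+The reset of the flow control patch before enabling tx/rx looks roughly
+like this (sketch of the hunk below):
+
+  ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+  ocp_data &= ~FC_PATCH_TASK;       /* stop the flow control patch/timer */
+  ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+  usleep_range(1000, 2000);
+  ocp_data |= FC_PATCH_TASK;        /* restart it from a clean state */
+  ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+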
+Fixes: 195aae321c82 ("r8152: support new chips")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 56 ++++++++++++++++++++++++++---------------
+ 1 file changed, 36 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index cf6941b1d2800..1c45f5dd0be4c 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5979,6 +5979,25 @@ static void rtl8153_disable(struct r8152 *tp)
+ r8153_aldps_en(tp, true);
+ }
+
++static u32 fc_pause_on_auto(struct r8152 *tp)
++{
++ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
++}
++
++static u32 fc_pause_off_auto(struct r8152 *tp)
++{
++ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
++}
++
++static void r8156_fc_parameter(struct r8152 *tp)
++{
++ u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
++ u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
++
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
++}
++
+ static int rtl8156_enable(struct r8152 *tp)
+ {
+ u32 ocp_data;
+@@ -5987,6 +6006,7 @@ static int rtl8156_enable(struct r8152 *tp)
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
++ r8156_fc_parameter(tp);
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+ r8153_set_rx_early_timeout(tp);
+@@ -6018,9 +6038,24 @@ static int rtl8156_enable(struct r8152 *tp)
+ ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data);
+ }
+
++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
++ ocp_data &= ~FC_PATCH_TASK;
++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++ usleep_range(1000, 2000);
++ ocp_data |= FC_PATCH_TASK;
++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++
+ return rtl_enable(tp);
+ }
+
++static void rtl8156_disable(struct r8152 *tp)
++{
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0);
++
++ rtl8153_disable(tp);
++}
++
+ static int rtl8156b_enable(struct r8152 *tp)
+ {
+ u32 ocp_data;
+@@ -6422,25 +6457,6 @@ static void rtl8153c_up(struct r8152 *tp)
+ r8153b_u1u2en(tp, true);
+ }
+
+-static inline u32 fc_pause_on_auto(struct r8152 *tp)
+-{
+- return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
+-}
+-
+-static inline u32 fc_pause_off_auto(struct r8152 *tp)
+-{
+- return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
+-}
+-
+-static void r8156_fc_parameter(struct r8152 *tp)
+-{
+- u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
+- u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
+-
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+-}
+-
+ static void rtl8156_change_mtu(struct r8152 *tp)
+ {
+ u32 rx_max_size = mtu_to_size(tp->netdev->mtu);
+@@ -9366,7 +9382,7 @@ static int rtl_ops_init(struct r8152 *tp)
+ case RTL_VER_10:
+ ops->init = r8156_init;
+ ops->enable = rtl8156_enable;
+- ops->disable = rtl8153_disable;
++ ops->disable = rtl8156_disable;
+ ops->up = rtl8156_up;
+ ops->down = rtl8156_down;
+ ops->unload = rtl8153_unload;
+--
+2.39.2
+
--- /dev/null
+From 2f09078ceb355ea3388403bdd2cf8e94bbeca5ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 16:53:30 +0800
+Subject: r8152: fix the poor throughput for 2.5G devices
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit 61b0ad6f58e2066e054c6d4839d67974d2861a7d ]
+
+Fix the poor throughput of 2.5G devices when changing the speed from
+auto mode to force mode, by notifying the MAC when the mode is changed.
+
+Fixes: 195aae321c82 ("r8152: support new chips")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 1c45f5dd0be4c..b0e1ef97c4951 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -199,6 +199,7 @@
+ #define OCP_EEE_AR 0xa41a
+ #define OCP_EEE_DATA 0xa41c
+ #define OCP_PHY_STATUS 0xa420
++#define OCP_INTR_EN 0xa424
+ #define OCP_NCTL_CFG 0xa42c
+ #define OCP_POWER_CFG 0xa430
+ #define OCP_EEE_CFG 0xa432
+@@ -620,6 +621,9 @@ enum spd_duplex {
+ #define PHY_STAT_LAN_ON 3
+ #define PHY_STAT_PWRDN 5
+
++/* OCP_INTR_EN */
++#define INTR_SPEED_FORCE BIT(3)
++
+ /* OCP_NCTL_CFG */
+ #define PGA_RETURN_EN BIT(1)
+
+@@ -7547,6 +7551,11 @@ static void r8156_hw_phy_cfg(struct r8152 *tp)
+ ((swap_a & 0x1f) << 8) |
+ ((swap_a >> 8) & 0x1f));
+ }
++
++ /* Notify the MAC when the speed is changed to force mode. */
++ data = ocp_reg_read(tp, OCP_INTR_EN);
++ data |= INTR_SPEED_FORCE;
++ ocp_reg_write(tp, OCP_INTR_EN, data);
+ break;
+ default:
+ break;
+@@ -7942,6 +7951,11 @@ static void r8156b_hw_phy_cfg(struct r8152 *tp)
+ break;
+ }
+
++ /* Notify the MAC when the speed is changed to force mode. */
++ data = ocp_reg_read(tp, OCP_INTR_EN);
++ data |= INTR_SPEED_FORCE;
++ ocp_reg_write(tp, OCP_INTR_EN, data);
++
+ if (rtl_phy_patch_request(tp, true, true))
+ return;
+
+--
+2.39.2
+
--- /dev/null
+From b745b4b81ca173847e116c797cd40429ee3ae919 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 16:53:31 +0800
+Subject: r8152: move setting r8153b_rx_agg_chg_indicate()
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit cce8334f4aacd9936309a002d4a4de92a07cd2c2 ]
+
+Move the call to r8153b_rx_agg_chg_indicate() for 2.5G devices.
+r8153b_rx_agg_chg_indicate() has to be called after tx/rx are enabled;
+otherwise, the coalescing settings are useless.
+
+Fixes: 195aae321c82 ("r8152: support new chips")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index b0e1ef97c4951..579524cb5d9b2 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3020,12 +3020,16 @@ static int rtl_enable(struct r8152 *tp)
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+
+ switch (tp->version) {
+- case RTL_VER_08:
+- case RTL_VER_09:
+- case RTL_VER_14:
+- r8153b_rx_agg_chg_indicate(tp);
++ case RTL_VER_01:
++ case RTL_VER_02:
++ case RTL_VER_03:
++ case RTL_VER_04:
++ case RTL_VER_05:
++ case RTL_VER_06:
++ case RTL_VER_07:
+ break;
+ default:
++ r8153b_rx_agg_chg_indicate(tp);
+ break;
+ }
+
+@@ -3079,7 +3083,6 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
+ 640 / 8);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
+ ocp_data);
+- r8153b_rx_agg_chg_indicate(tp);
+ break;
+
+ default:
+@@ -3113,7 +3116,6 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
+ case RTL_VER_15:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ ocp_data / 8);
+- r8153b_rx_agg_chg_indicate(tp);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+--
+2.39.2
+
--- /dev/null
+From f79e0573ddb01b3139d3e923881d25828e3ffa5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Mar 2023 14:43:20 +0800
+Subject: RISC-V: mm: Enable huge page support to kernel_page_present()
+ function
+
+From: Sia Jee Heng <jeeheng.sia@starfivetech.com>
+
+[ Upstream commit a15c90b67a662c75f469822a7f95c7aaa049e28f ]
+
+Currently the kernel_page_present() function doesn't support huge page
+detection, which causes it to mistakenly return false to the
+hibernation core.
+
+Add huge page detection to the function to solve the problem.
+
+Fixes: 9e953cda5cdf ("riscv: Introduce huge page support for 32/64bit kernel")
+Signed-off-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
+Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
+Reviewed-by: Mason Huo <mason.huo@starfivetech.com>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20230330064321.1008373-4-jeeheng.sia@starfivetech.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/pageattr.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 86c56616e5dea..ea3d61de065b3 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
+ pgd = pgd_offset_k(addr);
+ if (!pgd_present(*pgd))
+ return false;
++ if (pgd_leaf(*pgd))
++ return true;
+
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_present(*p4d))
+ return false;
++ if (p4d_leaf(*p4d))
++ return true;
+
+ pud = pud_offset(p4d, addr);
+ if (!pud_present(*pud))
+ return false;
++ if (pud_leaf(*pud))
++ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return false;
++ if (pmd_leaf(*pmd))
++ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(*pte);
+--
+2.39.2
+
--- /dev/null
+From 7e9adba4e5c0e2478cb94c37fe0a84eb8c0e69bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 21:27:54 +0100
+Subject: rxrpc: Fix hard call timeout units
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0d098d83c5d9e107b2df7f5e11f81492f56d2fe7 ]
+
+The hard call timeout is specified in the RXRPC_SET_CALL_TIMEOUT cmsg in
+seconds, so fix the point at which sendmsg() applies it to the call so
+that the value is converted to jiffies from seconds, not milliseconds.
+
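+A minimal userspace sketch of the unit mismatch (the HZ value and the
+simplified conversion below are illustrative assumptions, not kernel
+code):
+
+  #include <stdio.h>
+
+  #define HZ 250  /* assumed tick rate, for illustration only */
+
+  /* rough model of msecs_to_jiffies(); rounding is ignored */
+  static unsigned long model_msecs_to_jiffies(unsigned long msecs)
+  {
+          return msecs * HZ / 1000;
+  }
+
+  int main(void)
+  {
+          unsigned long hard = 30;  /* cmsg value: a 30 second timeout */
+
+          /* before the fix: the seconds value was fed in as milliseconds */
+          printf("old: %lu jiffies\n", model_msecs_to_jiffies(hard));  /* 7 */
+
+          /* after the fix: seconds are scaled by HZ */
+          printf("new: %lu jiffies\n", hard * HZ);                     /* 7500 */
+          return 0;
+  }
+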
+Fixes: a158bdd3247b ("rxrpc: Fix timeout of a call that hasn't yet been granted a channel")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: "David S. Miller" <davem@davemloft.net>
+cc: Eric Dumazet <edumazet@google.com>
+cc: Jakub Kicinski <kuba@kernel.org>
+cc: Paolo Abeni <pabeni@redhat.com>
+cc: linux-afs@lists.infradead.org
+cc: netdev@vger.kernel.org
+cc: linux-kernel@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/sendmsg.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index d4e4e94f4f987..71e40f91dd398 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -736,7 +736,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ fallthrough;
+ case 1:
+ if (p.call.timeouts.hard > 0) {
+- j = msecs_to_jiffies(p.call.timeouts.hard);
++ j = p.call.timeouts.hard * HZ;
+ now = jiffies;
+ j += now;
+ WRITE_ONCE(call->expect_term_by, j);
+--
+2.39.2
+
--- /dev/null
+From f839d25d61010b8eeea4976867fc6672010d61d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Apr 2023 11:34:22 +0800
+Subject: scsi: qedi: Fix use after free bug in qedi_remove()
+
+From: Zheng Wang <zyytlz.wz@163.com>
+
+[ Upstream commit c5749639f2d0a1f6cbe187d05f70c2e7c544d748 ]
+
+In qedi_probe() we call __qedi_probe() which initializes
+&qedi->recovery_work with qedi_recovery_handler() and
+&qedi->board_disable_work with qedi_board_disable_work().
+
+When qedi_schedule_recovery_handler() is called, schedule_delayed_work()
+will finally start the work.
+
+In qedi_remove(), which is called to remove the driver, the following
+sequence may be observed:
+
+CPU0                     CPU1
+
+                         |qedi_recovery_handler
+qedi_remove              |
+  __qedi_remove          |
+  iscsi_host_free        |
+  scsi_host_put          |
+  //free shost           |
+                         |iscsi_host_for_each_session
+                         |//use qedi->shost
+
+Fix this by finishing the work before cleanup in qedi_remove().
+Cancel recovery_work and board_disable_work in __qedi_remove().
+
+Fixes: 4b1068f5d74b ("scsi: qedi: Add MFW error recovery process")
+Signed-off-by: Zheng Wang <zyytlz.wz@163.com>
+Link: https://lore.kernel.org/r/20230413033422.28003-1-zyytlz.wz@163.com
+Acked-by: Manish Rangankar <mrangankar@marvell.com>
+Reviewed-by: Mike Christie <michael.christie@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qedi/qedi_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index a117d11f2b078..e0096fc5927e7 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -2455,6 +2455,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
+ qedi_ops->ll2->stop(qedi->cdev);
+ }
+
++ cancel_delayed_work_sync(&qedi->recovery_work);
++ cancel_delayed_work_sync(&qedi->board_disable_work);
++
+ qedi_free_iscsi_pf_param(qedi);
+
+ rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+--
+2.39.2
+
--- /dev/null
+From b33dce1ee584630ab5eca059ce865ff5d6f5f681 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Apr 2023 11:49:23 +0200
+Subject: selftests: srv6: make srv6_end_dt46_l3vpn_test more robust
+
+From: Andrea Mayer <andrea.mayer@uniroma2.it>
+
+[ Upstream commit 46ef24c60f8ee70662968ac55325297ed4624d61 ]
+
+On some distributions, the rp_filter is automatically set (=1) by
+default on a netdev basis (also on VRFs).
+In an SRv6 End.DT46 behavior, decapsulated IPv4 packets are routed using
+the table associated with the VRF bound to that tunnel. During lookup
+operations, the rp_filter can lead to packet loss when activated on the
+VRF.
+Therefore, we chose to make this selftest more robust by explicitly
+disabling the rp_filter during tests (as it is automatically set by some
+Linux distributions).
+
+Fixes: 03a0b567a03d ("selftests: seg6: add selftest for SRv6 End.DT46 Behavior")
+Reported-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Andrea Mayer <andrea.mayer@uniroma2.it>
+Tested-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/net/srv6_end_dt46_l3vpn_test.sh | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+index aebaab8ce44cb..441eededa0312 100755
+--- a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
++++ b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+@@ -292,6 +292,11 @@ setup_hs()
+ ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
++ # disable the rp_filter otherwise the kernel gets confused about how
++ # to route decap ipv4 packets.
++ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
++ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
++
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+@@ -316,11 +321,6 @@ setup_hs()
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+
+- # disable the rp_filter otherwise the kernel gets confused about how
+- # to route decap ipv4 packets.
+- ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+- ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+-
+ ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+ }
+
+--
+2.39.2
+
asoc-soc-pcm-fix-be-handling-of-pause_release.patch
fs-ntfs3-fix-null-ptr-deref-on-inode-i_op-in-ntfs_lo.patch
drm-hyperv-don-t-overwrite-dirt_needed-value-set-by-.patch
+scsi-qedi-fix-use-after-free-bug-in-qedi_remove.patch
+net-ncsi-clear-tx-enable-mode-when-handling-a-config.patch
+net-sched-cls_api-remove-block_cb-from-driver_list-b.patch
+sit-update-dev-needed_headroom-in-ipip6_tunnel_bind_.patch
+selftests-srv6-make-srv6_end_dt46_l3vpn_test-more-ro.patch
+net-dsa-mv88e6xxx-add-mv88e6321-rsvd2cpu.patch
+writeback-fix-call-of-incorrect-macro.patch
+watchdog-dw_wdt-fix-the-error-handling-path-of-dw_wd.patch
+risc-v-mm-enable-huge-page-support-to-kernel_page_pr.patch
+net-sched-act_mirred-add-carrier-check.patch
+r8152-fix-flow-control-issue-of-rtl8156a.patch
+r8152-fix-the-poor-throughput-for-2.5g-devices.patch
+r8152-move-setting-r8153b_rx_agg_chg_indicate.patch
+sfc-fix-module-eeprom-reporting-for-qsfp-modules.patch
+rxrpc-fix-hard-call-timeout-units.patch
+octeontx2-af-secure-apr-table-update-with-the-lock.patch
+octeontx2-af-skip-pfs-if-not-enabled.patch
+octeontx2-pf-disable-packet-i-o-for-graceful-exit.patch
+octeontx2-vf-detach-lf-resources-on-probe-cleanup.patch
+ionic-remove-noise-from-ethtool-rxnfc-error-msg.patch
+ethtool-fix-uninitialized-number-of-lanes.patch
+ionic-catch-failure-from-devlink_alloc.patch
+af_packet-don-t-send-zero-byte-data-in-packet_sendms.patch
+drm-amdgpu-add-a-missing-lock-for-amdgpu_sched.patch
+alsa-caiaq-input-add-error-handling-for-unsupported-.patch
+kvm-s390-pv-avoid-stalls-when-making-pages-secure.patch
+kvm-s390-pv-add-export-before-import.patch
+kvm-s390-fix-race-in-gmap_make_secure.patch
+net-dsa-mt7530-fix-corrupt-frames-using-trgmii-on-40.patch
+virtio_net-split-free_unused_bufs.patch
+virtio_net-suppress-cpu-stall-when-free_unused_bufs.patch
+net-enetc-check-the-index-of-the-sfi-rather-than-the.patch
+net-bcmgenet-remove-phy_stop-from-bcmgenet_netif_sto.patch
+perf-scripts-intel-pt-events.py-fix-ipc-output-for-p.patch
+perf-vendor-events-power9-remove-utf-8-characters-fr.patch
+perf-pmu-zfree-expects-a-pointer-to-a-pointer-to-zer.patch
+perf-map-delete-two-variable-initialisations-before-.patch
+crypto-sun8i-ss-fix-a-test-in-sun8i_ss_setup_ivs.patch
+crypto-engine-check-if-bh-is-disabled-during-complet.patch
+crypto-api-add-scaffolding-to-change-completion-func.patch
+crypto-engine-use-crypto_request_complete.patch
+crypto-engine-fix-crypto_queue-backlog-handling.patch
+perf-symbols-fix-return-incorrect-build_id-size-in-e.patch
+perf-evlist-refactor-evlist__for_each_cpu.patch
+perf-stat-separate-bperf-from-bpf_profiler.patch
--- /dev/null
+From 1e44b46524a70c5960bb0a60d21f066f4670207a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Apr 2023 12:33:33 +0100
+Subject: sfc: Fix module EEPROM reporting for QSFP modules
+
+From: Andy Moreton <andy.moreton@amd.com>
+
+[ Upstream commit 281900a923d4c50df109b52a22ae3cdac150159b ]
+
+The sfc driver does not report QSFP module EEPROM contents correctly
+as only the first page is fetched from hardware.
+
+Commit 0e1a2a3e6e7d ("ethtool: Add SFF-8436 and SFF-8636 max EEPROM
+length definitions") added ETH_MODULE_SFF_8436_MAX_LEN for the overall
+size of the EEPROM info, so use that to report the full EEPROM contents.
+
+Fixes: 9b17010da57a ("sfc: Add ethtool -m support for QSFP modules")
+Signed-off-by: Andy Moreton <andy.moreton@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/mcdi_port_common.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
+index c4fe3c48ac46a..eccb97a5d9387 100644
+--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
++++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
+@@ -974,12 +974,15 @@ static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+
+ /* A QSFP+ NIC may actually have an SFP+ module attached.
+ * The ID is page 0, byte 0.
++ * QSFP28 is of type SFF_8636, however, this is treated
++ * the same by ethtool, so we can also treat them the same.
+ */
+ switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+- case 0x3:
++ case 0x3: /* SFP */
+ return MC_CMD_MEDIA_SFP_PLUS;
+- case 0xc:
+- case 0xd:
++ case 0xc: /* QSFP */
++ case 0xd: /* QSFP+ */
++ case 0x11: /* QSFP28 */
+ return MC_CMD_MEDIA_QSFP_PLUS;
+ default:
+ return 0;
+@@ -1077,7 +1080,7 @@ int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *mo
+
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
++ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+
+ default:
+--
+2.39.2
+
--- /dev/null
+From 1311184360d71efdc9a0affd2898e0d54d5494de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 23:00:06 -0700
+Subject: sit: update dev->needed_headroom in ipip6_tunnel_bind_dev()
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit c88f8d5cd95fd039cff95d682b8e71100c001df0 ]
+
+When a tunnel device is bound with the underlying device, its
+dev->needed_headroom needs to be updated properly. IPv4 tunnels
+already do the same in ip_tunnel_bind_dev(). Otherwise we may
+not have enough header room for skb, especially after commit
+b17f709a2401 ("gue: TX support for using remote checksum offload option").
+
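+A small userspace sketch of the headroom arithmetic this change
+introduces (the underlay numbers are assumptions for illustration, not
+values taken from the patch):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* assumed plain sit tunnel: no extra encapsulation headers */
+          int tunnel_hlen = 0;
+          int iphdr_len = 20;             /* sizeof(struct iphdr) */
+          int t_hlen = tunnel_hlen + iphdr_len;
+
+          /* assumed Ethernet underlay device */
+          int tdev_hard_header_len = 14;
+          int tdev_needed_headroom = 0;
+          int hlen = tdev_hard_header_len + tdev_needed_headroom;
+
+          /* mirrors dev->needed_headroom = t_hlen + hlen in the patch */
+          printf("needed_headroom = %d bytes\n", t_hlen + hlen);  /* 34 */
+          return 0;
+  }
+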
+Fixes: 32b8a8e59c9c ("sit: add IPv4 over IPv4 support")
+Reported-by: Palash Oswal <oswalpalash@gmail.com>
+Link: https://lore.kernel.org/netdev/CAGyP=7fDcSPKu6nttbGwt7RXzE3uyYxLjCSE97J64pRxJP8jPA@mail.gmail.com/
+Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/sit.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index d4cdc2b1b4689..3bc02ab9ceaca 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1101,12 +1101,13 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
+
+ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ {
++ struct ip_tunnel *tunnel = netdev_priv(dev);
++ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ struct net_device *tdev = NULL;
+- struct ip_tunnel *tunnel;
++ int hlen = LL_MAX_HEADER;
+ const struct iphdr *iph;
+ struct flowi4 fl4;
+
+- tunnel = netdev_priv(dev);
+ iph = &tunnel->parms.iph;
+
+ if (iph->daddr) {
+@@ -1129,14 +1130,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
+
+ if (tdev && !netif_is_l3_master(tdev)) {
+- int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ int mtu;
+
+ mtu = tdev->mtu - t_hlen;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
++ hlen = tdev->hard_header_len + tdev->needed_headroom;
+ }
++ dev->needed_headroom = t_hlen + hlen;
+ }
+
+ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
+--
+2.39.2
+
--- /dev/null
+From 84e0fcaa52d757cc926ce94c46b694af4e9d39c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Aug 2022 14:38:59 +0800
+Subject: virtio_net: split free_unused_bufs()
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+[ Upstream commit 6e345f8c7cd029ad3aaece15ad4425ac26e4eb63 ]
+
+This patch splits free_unused_bufs() into two helpers, one for freeing
+unused sq bufs and one for unused rq bufs.
+
+When enabling/disabling of a tx or rx queue is supported in the future,
+it will be necessary to reclaim the bufs of a single sq or rq
+separately.
+
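+A sketch of the per-queue reclaim this split makes possible (the helper
+name and call site below are hypothetical, not part of this patch):
+
+  /* free the unused buffers of a single rx queue, e.g. before
+   * disabling only that queue
+   */
+  static void virtnet_rq_reclaim_one(struct virtnet_info *vi, int qid)
+  {
+          struct virtqueue *vq = vi->rq[qid].vq;
+          void *buf;
+
+          while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+                  virtnet_rq_free_unused_buf(vq, buf);
+  }
+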
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Message-Id: <20220801063902.129329-40-xuanzhuo@linux.alibaba.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Stable-dep-of: f8bb51043945 ("virtio_net: suppress cpu stall when free_unused_bufs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 41 ++++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8a380086ac257..cff3e2a7ce7fc 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2814,6 +2814,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
+ put_page(vi->rq[i].alloc_frag.page);
+ }
+
++static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
++{
++ if (!is_xdp_frame(buf))
++ dev_kfree_skb(buf);
++ else
++ xdp_return_frame(ptr_to_xdp(buf));
++}
++
++static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
++{
++ struct virtnet_info *vi = vq->vdev->priv;
++ int i = vq2rxq(vq);
++
++ if (vi->mergeable_rx_bufs)
++ put_page(virt_to_head_page(buf));
++ else if (vi->big_packets)
++ give_pages(&vi->rq[i], buf);
++ else
++ put_page(virt_to_head_page(buf));
++}
++
+ static void free_unused_bufs(struct virtnet_info *vi)
+ {
+ void *buf;
+@@ -2821,26 +2842,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->sq[i].vq;
+- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+- if (!is_xdp_frame(buf))
+- dev_kfree_skb(buf);
+- else
+- xdp_return_frame(ptr_to_xdp(buf));
+- }
++ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
++ virtnet_sq_free_unused_buf(vq, buf);
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->rq[i].vq;
+-
+- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+- if (vi->mergeable_rx_bufs) {
+- put_page(virt_to_head_page(buf));
+- } else if (vi->big_packets) {
+- give_pages(&vi->rq[i], buf);
+- } else {
+- put_page(virt_to_head_page(buf));
+- }
+- }
++ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
++ virtnet_rq_free_unused_buf(vq, buf);
+ }
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 65196c493c3fa0cb49119a3c7d7caf4b213acaa0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 May 2023 10:27:06 +0800
+Subject: virtio_net: suppress cpu stall when free_unused_bufs
+
+From: Wenliang Wang <wangwenliang.1995@bytedance.com>
+
+[ Upstream commit f8bb5104394560e29017c25bcade4c6b7aabd108 ]
+
+For multi-queue, large-ring-size use cases, the following error
+occurred in free_unused_bufs():
+rcu: INFO: rcu_sched self-detected stall on CPU.
+
+Fixes: 986a4f4d452d ("virtio_net: multiqueue support")
+Signed-off-by: Wenliang Wang <wangwenliang.1995@bytedance.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index cff3e2a7ce7fc..9f2d691908b42 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2844,12 +2844,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
+ struct virtqueue *vq = vi->sq[i].vq;
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
++ cond_resched();
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->rq[i].vq;
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
++ cond_resched();
+ }
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 878e14fd2daf32534e94c6e4f164a93e9559f21a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 08:52:48 +0200
+Subject: watchdog: dw_wdt: Fix the error handling path of dw_wdt_drv_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 7f5390750645756bd5da2b24fac285f2654dd922 ]
+
+The commit in Fixes has only updated the remove function and missed the
+error handling path of the probe.
+
+Add the missing reset_control_assert() call.
+
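+A self-contained sketch of the unwind-in-reverse pattern the new label
+restores (the names and resources below are made up for illustration,
+not the dw_wdt code):
+
+  #include <stdio.h>
+
+  static void assert_reset(void) { printf("assert reset\n"); }
+  static void disable_pclk(void) { printf("disable pclk\n"); }
+  static void disable_clk(void)  { printf("disable clk\n"); }
+
+  /* Setup order is clk -> pclk -> reset deassert, so any error after
+   * the reset has been deasserted must jump to the topmost label; the
+   * labels then fall through and undo everything in reverse order.
+   */
+  static int probe_like(int fail_point)
+  {
+          /* ... enable clk and pclk ... */
+          if (fail_point == 1)
+                  goto out_disable_clks;  /* reset not deasserted yet */
+
+          /* ... deassert reset, init timeouts, register watchdog ... */
+          if (fail_point == 2)
+                  goto out_assert_rst;    /* reset must be re-asserted too */
+
+          return 0;
+
+  out_assert_rst:
+          assert_reset();
+  out_disable_clks:
+          disable_pclk();
+          disable_clk();
+          return -1;
+  }
+
+  int main(void)
+  {
+          probe_like(2);          /* demonstrates the full unwind */
+          return 0;
+  }
+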
+Fixes: 65a3b6935d92 ("watchdog: dw_wdt: get reset lines from dt")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/fbb650650bbb33a8fa2fd028c23157bedeed50e1.1682491863.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@linux-watchdog.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/watchdog/dw_wdt.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index cd578843277e5..498c1c403fc92 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -637,7 +637,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+
+ ret = dw_wdt_init_timeouts(dw_wdt, dev);
+ if (ret)
+- goto out_disable_clk;
++ goto out_assert_rst;
+
+ wdd = &dw_wdt->wdd;
+ wdd->ops = &dw_wdt_ops;
+@@ -668,12 +668,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+
+ ret = watchdog_register_device(wdd);
+ if (ret)
+- goto out_disable_pclk;
++ goto out_assert_rst;
+
+ dw_wdt_dbgfs_init(dw_wdt);
+
+ return 0;
+
++out_assert_rst:
++ reset_control_assert(dw_wdt->rst);
++
+ out_disable_pclk:
+ clk_disable_unprepare(dw_wdt->pclk);
+
+--
+2.39.2
+
--- /dev/null
+From fd479acdbd856a3008c0bc2b28728cb06a78f4b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 13:44:43 +0300
+Subject: writeback: fix call of incorrect macro
+
+From: Maxim Korotkov <korotkov.maxim.s@gmail.com>
+
+[ Upstream commit 3e46c89c74f2c38e5337d2cf44b0b551adff1cb4 ]
+
+The variable 'history' is of type u16, so using the hweight32 macro on
+it looks like a mistake; the hweight16 macro should be used instead.
+
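+A tiny userspace sketch of the width question (the popcount stand-ins
+below are illustrative assumptions, not the kernel macros):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* illustrative stand-ins for the kernel's hweight16()/hweight32() */
+  #define hweight16(x) __builtin_popcount((uint16_t)(x))
+  #define hweight32(x) __builtin_popcount((uint32_t)(x))
+
+  int main(void)
+  {
+          uint16_t history = 0xA5A5;      /* example foreign-slot history */
+
+          /* The u16 zero-extends, so both count 8 set bits for this
+           * value; hweight16() is simply the helper matching the type.
+           */
+          printf("hweight16=%d hweight32=%d\n",
+                 hweight16(history), hweight32(history));
+          return 0;
+  }
+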
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 2a81490811d0 ("writeback: implement foreign cgroup inode detection")
+Signed-off-by: Maxim Korotkov <korotkov.maxim.s@gmail.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230119104443.3002-1-korotkov.maxim.s@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index fbc3f0ef38c02..c76537a6826a7 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -825,7 +825,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
+ * is okay. The main goal is avoiding keeping an inode on
+ * the wrong wb for an extended period of time.
+ */
+- if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
++ if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
+ inode_switch_wbs(inode, max_id);
+ }
+
+--
+2.39.2
+