--- /dev/null
+From 42a9b5f649124761a4ffd260d267295056eea113 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 23 May 2023 08:23:32 -0600
+Subject: io_uring: always grab lock in io_cancel_async_work()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+No upstream commit exists for this patch.
+
+It's not necessarily safe to check the task_list locklessly; remove
+this micro-optimization and always grab task_lock before deeming it
+empty.
+
+Reported-and-tested-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
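+A minimal sketch of the resulting pattern. The demo_* names and
+pared-down types are stand-ins invented for illustration, not the
+kernel's actual io_uring structures:
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct demo_ctx {
+	spinlock_t		task_lock;
+	struct list_head	task_list;
+};
+
+struct demo_req {
+	struct list_head	task_list;
+};
+
+static void demo_cancel_async_work(struct demo_ctx *ctx)
+{
+	struct demo_req *req;
+
+	/*
+	 * No lockless list_empty() short-circuit: a peek outside
+	 * task_lock is not ordered against a concurrent list_add()
+	 * made under the lock, so it can race with a request being
+	 * queued. Taking the lock first is always safe; an empty
+	 * list simply makes the loop a no-op.
+	 */
+	spin_lock_irq(&ctx->task_lock);
+	list_for_each_entry(req, &ctx->task_list, task_list) {
+		/* cancel matching work here */
+	}
+	spin_unlock_irq(&ctx->task_lock);
+}
+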
+ fs/io_uring.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -3738,9 +3738,6 @@ static void io_cancel_async_work(struct
+ {
+ struct io_kiocb *req;
+
+- if (list_empty(&ctx->task_list))
+- return;
+-
+ spin_lock_irq(&ctx->task_lock);
+
+ list_for_each_entry(req, &ctx->task_list, task_list) {
--- /dev/null
+From 66512e9596044057fe2cc173ac5c32e4fb8aed5c Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 23 May 2023 08:24:31 -0600
+Subject: io_uring: don't drop completion lock before timer is fully initialized
+
+From: Jens Axboe <axboe@kernel.dk>
+
+No upstream commit exists for this patch.
+
+If we drop the lock right after adding the request to the timeout
+list, then someone attempting to kill timeouts can find the request
+in an indeterminate state. That means cancelation could cancel and
+remove a timeout, only for io_timeout() to then initialize and arm
+the timer afterwards.
+
+Ensure the timeout request is fully set up before we drop the
+completion lock, which guards cancelation as well.
+
+Reported-and-tested-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
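+A sketch of the fixed ordering, using hypothetical demo_* stand-ins
+rather than the real io_uring types: the request only becomes
+visible on the timeout list while completion_lock is held, and the
+lock is not dropped until the timer is fully initialized and armed.
+Since cancelation takes the same lock before walking the list, it
+can never observe a half-constructed timeout:
+
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct demo_ctx {
+	spinlock_t		completion_lock;
+	struct list_head	timeout_list;
+};
+
+struct demo_req {
+	struct list_head	list;
+	struct hrtimer		timer;
+};
+
+static enum hrtimer_restart demo_timeout_fn(struct hrtimer *timer)
+{
+	/* expire the request here */
+	return HRTIMER_NORESTART;
+}
+
+static int demo_timeout(struct demo_ctx *ctx, struct demo_req *req,
+			struct timespec64 *ts)
+{
+	spin_lock_irq(&ctx->completion_lock);
+	list_add(&req->list, &ctx->timeout_list);
+	/*
+	 * Do not unlock here: the request is now discoverable by
+	 * cancelation, so keep holding completion_lock until the
+	 * timer below is fully set up and started.
+	 */
+	hrtimer_init(&req->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	req->timer.function = demo_timeout_fn;
+	hrtimer_start(&req->timer, timespec64_to_ktime(*ts),
+		      HRTIMER_MODE_REL);
+	spin_unlock_irq(&ctx->completion_lock);
+	return 0;
+}
+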
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2079,12 +2079,12 @@ static int io_timeout(struct io_kiocb *r
+ req->sequence -= span;
+ add:
+ list_add(&req->list, entry);
+- spin_unlock_irq(&ctx->completion_lock);
+
+ hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ req->timeout.timer.function = io_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
+ HRTIMER_MODE_REL);
++ spin_unlock_irq(&ctx->completion_lock);
+ return 0;
+ }
+
--- /dev/null
+From c835053c99074197d55857c6db5576a3f0ac1c08 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 23 May 2023 08:26:06 -0600
+Subject: io_uring: have io_kill_timeout() honor the request references
+
+From: Jens Axboe <axboe@kernel.dk>
+
+No upstream commit exists for this patch.
+
+Don't free the request unconditionally; if the request was issued
+async, someone else may still be holding a submit reference to it.
+
+Reported-and-tested-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
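+A sketch of the reference rule being enforced, with hypothetical
+demo_* names standing in for the real io_uring code: the cancel
+path owns just one reference, so it must drop it with
+refcount_dec_and_test() instead of freeing outright, because an
+async-issued request still has a submit reference held elsewhere:
+
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+struct demo_req {
+	refcount_t	refs;	/* completion ref + async submit ref */
+};
+
+static void demo_free_req(struct demo_req *req)
+{
+	kfree(req);
+}
+
+static void demo_kill_timeout(struct demo_req *req)
+{
+	/*
+	 * Only free on the final put: if the request was punted to
+	 * async context, the submission side still holds its own
+	 * reference and drops it when it is done with the request.
+	 */
+	if (refcount_dec_and_test(&req->refs))
+		demo_free_req(req);
+}
+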
+ fs/io_uring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -551,7 +551,8 @@ static void io_kill_timeout(struct io_ki
+ atomic_inc(&req->ctx->cq_timeouts);
+ list_del(&req->list);
+ io_cqring_fill_event(req->ctx, req->user_data, 0);
+- __io_free_req(req);
++ if (refcount_dec_and_test(&req->refs))
++ __io_free_req(req);
+ }
+ }
+
net-mlx5-devcom-only-supports-2-ports.patch
net-mlx5-devcom-serialize-devcom-registration.patch
cdc_ncm-fix-the-build-warning.patch
+io_uring-always-grab-lock-in-io_cancel_async_work.patch
+io_uring-don-t-drop-completion-lock-before-timer-is-fully-initialized.patch
+io_uring-have-io_kill_timeout-honor-the-request-references.patch