--- /dev/null
+From foo@baz Wed Oct 7 07:03:42 PM CEST 2020
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Wed, 07 Oct 2020 11:16:35 +0800
+Subject: io_uring: Fix double list add in io_queue_async_work()
+To: axboe@kernel.dk, viro@zeniv.linux.org.uk
+Cc: linux-fsdevel@vger.kernel.org, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, zhuyinyin@bytedance.com, Muchun Song <songmuchun@bytedance.com>, Jiachen Zhang <zhangjiachen.jaycee@bytedance.com>
+Message-ID: <20201007031635.65295-5-songmuchun@bytedance.com>
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+If we queue work in io_poll_wake(), it leads to a double list add.
+So we should only add the request to the task_list when the
+callback function is io_sq_wq_submit_work.
+
+The following oops was seen:
+
+ list_add double add: new=ffff9ca6a8f1b0e0, prev=ffff9ca62001cee8,
+ next=ffff9ca6a8f1b0e0.
+ ------------[ cut here ]------------
+ kernel BUG at lib/list_debug.c:31!
+ Call Trace:
+ <IRQ>
+ io_poll_wake+0xf3/0x230
+ __wake_up_common+0x91/0x170
+ __wake_up_common_lock+0x7a/0xc0
+ io_commit_cqring+0xea/0x280
+ ? blkcg_iolatency_done_bio+0x2b/0x610
+ io_cqring_add_event+0x3e/0x60
+ io_complete_rw+0x58/0x80
+ dio_complete+0x106/0x250
+ blk_update_request+0xa0/0x3b0
+ blk_mq_end_request+0x1a/0x110
+ blk_mq_complete_request+0xd0/0xe0
+ nvme_irq+0x129/0x270 [nvme]
+ __handle_irq_event_percpu+0x7b/0x190
+ handle_irq_event_percpu+0x30/0x80
+ handle_irq_event+0x3c/0x60
+ handle_edge_irq+0x91/0x1e0
+ do_IRQ+0x4d/0xd0
+ common_interrupt+0xf/0xf
+
+Fixes: 1c4404efcf2c ("io_uring: make sure async workqueue is canceled on exit")
+Reported-by: Jiachen Zhang <zhangjiachen.jaycee@bytedance.com>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -514,12 +514,14 @@ static inline void io_queue_async_work(s
+ }
+ }
+
+- req->files = current->files;
++ if (req->work.func == io_sq_wq_submit_work) {
++ req->files = current->files;
+
+- spin_lock_irqsave(&ctx->task_lock, flags);
+- list_add(&req->task_list, &ctx->task_list);
+- req->work_task = NULL;
+- spin_unlock_irqrestore(&ctx->task_lock, flags);
++ spin_lock_irqsave(&ctx->task_lock, flags);
++ list_add(&req->task_list, &ctx->task_list);
++ req->work_task = NULL;
++ spin_unlock_irqrestore(&ctx->task_lock, flags);
++ }
+
+ queue_work(ctx->sqo_wq[rw], &req->work);
+ }
+@@ -668,6 +670,7 @@ static struct io_kiocb *io_get_req(struc
+ state->cur_req++;
+ }
+
++ INIT_LIST_HEAD(&req->task_list);
+ req->file = NULL;
+ req->ctx = ctx;
+ req->flags = 0;
--- /dev/null
+From foo@baz Wed Oct 7 07:03:42 PM CEST 2020
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Wed, 07 Oct 2020 11:16:33 +0800
+Subject: io_uring: Fix missing smp_mb() in io_cancel_async_work()
+To: axboe@kernel.dk, viro@zeniv.linux.org.uk
+Cc: linux-fsdevel@vger.kernel.org, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, zhuyinyin@bytedance.com, Muchun Song <songmuchun@bytedance.com>
+Message-ID: <20201007031635.65295-3-songmuchun@bytedance.com>
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+The store to req->flags and the load of req->work_task must not be
+reordered in io_cancel_async_work(). We should make sure that
+either we store the REQ_F_CANCEL flag to req->flags or we see
+req->work_task set in io_sq_wq_submit_work().
+
+Fixes: 1c4404efcf2c ("io_uring: make sure async workqueue is canceled on exit")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2247,6 +2247,12 @@ restart:
+
+ if (!ret) {
+ req->work_task = current;
++
++ /*
++ * Pairs with the smp_store_mb() (B) in
++ * io_cancel_async_work().
++ */
++ smp_mb(); /* A */
+ if (req->flags & REQ_F_CANCEL) {
+ ret = -ECANCELED;
+ goto end_req;
+@@ -3725,7 +3731,15 @@ static void io_cancel_async_work(struct
+
+ req = list_first_entry(&ctx->task_list, struct io_kiocb, task_list);
+ list_del_init(&req->task_list);
+- req->flags |= REQ_F_CANCEL;
++
++ /*
++ * The below executes an smp_mb(), which matches with the
++ * smp_mb() (A) in io_sq_wq_submit_work() such that either
++ * we store REQ_F_CANCEL flag to req->flags or we see the
++ * req->work_task setted in io_sq_wq_submit_work().
++ */
++ smp_store_mb(req->flags, req->flags | REQ_F_CANCEL); /* B */
++
+ if (req->work_task && (!files || req->files == files))
+ send_sig(SIGINT, req->work_task, 1);
+ }
--- /dev/null
+From foo@baz Wed Oct 7 07:03:42 PM CEST 2020
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Wed, 07 Oct 2020 11:16:34 +0800
+Subject: io_uring: Fix remove irrelevant req from the task_list
+To: axboe@kernel.dk, viro@zeniv.linux.org.uk
+Cc: linux-fsdevel@vger.kernel.org, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, zhuyinyin@bytedance.com, Muchun Song <songmuchun@bytedance.com>
+Message-ID: <20201007031635.65295-4-songmuchun@bytedance.com>
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+Suppose process 0 has finished initializing io_uring and then
+forks process 1. If process 1 exits, it deletes all reqs from the
+task_list. If we then kill process 0, we will not send a SIGINT
+signal to the kworker, so we cannot remove the req from the
+task_list. The io_sq_wq_submit_work() can do that for us.
+
+Fixes: 1c4404efcf2c ("io_uring: make sure async workqueue is canceled on exit")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2272,13 +2272,11 @@ restart:
+ break;
+ cond_resched();
+ } while (1);
+-end_req:
+- if (!list_empty(&req->task_list)) {
+- spin_lock_irq(&ctx->task_lock);
+- list_del_init(&req->task_list);
+- spin_unlock_irq(&ctx->task_lock);
+- }
+ }
++end_req:
++ spin_lock_irq(&ctx->task_lock);
++ list_del_init(&req->task_list);
++ spin_unlock_irq(&ctx->task_lock);
+
+ /* drop submission reference */
+ io_put_req(req);
+@@ -3722,15 +3720,16 @@ static int io_uring_fasync(int fd, struc
+ static void io_cancel_async_work(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+ {
++ struct io_kiocb *req;
++
+ if (list_empty(&ctx->task_list))
+ return;
+
+ spin_lock_irq(&ctx->task_lock);
+- while (!list_empty(&ctx->task_list)) {
+- struct io_kiocb *req;
+
+- req = list_first_entry(&ctx->task_list, struct io_kiocb, task_list);
+- list_del_init(&req->task_list);
++ list_for_each_entry(req, &ctx->task_list, task_list) {
++ if (files && req->files != files)
++ continue;
+
+ /*
+ * The below executes an smp_mb(), which matches with the
+@@ -3740,7 +3739,7 @@ static void io_cancel_async_work(struct
+ */
+ smp_store_mb(req->flags, req->flags | REQ_F_CANCEL); /* B */
+
+- if (req->work_task && (!files || req->files == files))
++ if (req->work_task)
+ send_sig(SIGINT, req->work_task, 1);
+ }
+ spin_unlock_irq(&ctx->task_lock);
--- /dev/null
+From foo@baz Wed Oct 7 07:03:42 PM CEST 2020
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Wed, 07 Oct 2020 11:16:32 +0800
+Subject: io_uring: Fix resource leaking when kill the process
+To: axboe@kernel.dk, viro@zeniv.linux.org.uk
+Cc: linux-fsdevel@vger.kernel.org, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, zhuyinyin@bytedance.com
+Message-ID: <20201007031635.65295-2-songmuchun@bytedance.com>
+
+From: Yinyin Zhu <zhuyinyin@bytedance.com>
+
+The commit
+
+  1c4404efcf2c0 ("io_uring: make sure async workqueue is canceled on exit")
+
+doesn't solve the resource leak problem completely. While a kworker
+is doing an io task for io_uring, the process which submitted the
+io task may receive a SIGKILL signal from the user. Then the
+io_cancel_async_work function could have sent a SIGINT signal to
+the kworker, but the judging condition is wrong, so it doesn't send
+a SIGINT signal to the kworker, which causes the resource leak.
+
+Why is the judging condition wrong? The process is a multi-threaded
+process; call the thread of the process which submitted the io task
+Thread1. So req->task is the current macro of Thread1. When all the
+threads of the process have run the exit procedure, the last thread
+calls io_cancel_async_work(), but the last thread may not be
+Thread1, so the task comparison fails and the SIGINT signal is not
+sent. To fix this bug, we change the task attribute of the req to a
+struct files_struct and check the files instead.
+
+Fixes: 1c4404efcf2c0 ("io_uring: make sure async workqueue is canceled on exit")
+Signed-off-by: Yinyin Zhu <zhuyinyin@bytedance.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -340,7 +340,7 @@ struct io_kiocb {
+ u64 user_data;
+ u32 result;
+ u32 sequence;
+- struct task_struct *task;
++ struct files_struct *files;
+
+ struct fs_struct *fs;
+
+@@ -514,7 +514,7 @@ static inline void io_queue_async_work(s
+ }
+ }
+
+- req->task = current;
++ req->files = current->files;
+
+ spin_lock_irqsave(&ctx->task_lock, flags);
+ list_add(&req->task_list, &ctx->task_list);
+@@ -2382,6 +2382,8 @@ static bool io_add_to_prev_work(struct a
+ if (ret) {
+ struct io_ring_ctx *ctx = req->ctx;
+
++ req->files = current->files;
++
+ spin_lock_irq(&ctx->task_lock);
+ list_add(&req->task_list, &ctx->task_list);
+ req->work_task = NULL;
+@@ -3712,7 +3714,7 @@ static int io_uring_fasync(int fd, struc
+ }
+
+ static void io_cancel_async_work(struct io_ring_ctx *ctx,
+- struct task_struct *task)
++ struct files_struct *files)
+ {
+ if (list_empty(&ctx->task_list))
+ return;
+@@ -3724,7 +3726,7 @@ static void io_cancel_async_work(struct
+ req = list_first_entry(&ctx->task_list, struct io_kiocb, task_list);
+ list_del_init(&req->task_list);
+ req->flags |= REQ_F_CANCEL;
+- if (req->work_task && (!task || req->task == task))
++ if (req->work_task && (!files || req->files == files))
+ send_sig(SIGINT, req->work_task, 1);
+ }
+ spin_unlock_irq(&ctx->task_lock);
+@@ -3749,7 +3751,7 @@ static int io_uring_flush(struct file *f
+ struct io_ring_ctx *ctx = file->private_data;
+
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+- io_cancel_async_work(ctx, current);
++ io_cancel_async_work(ctx, data);
+
+ return 0;
+ }
fonts-support-font_extra_words-macros-for-built-in-fonts.patch
fbcon-fix-global-out-of-bounds-read-in-fbcon_get_font.patch
revert-ravb-fixed-to-be-able-to-unload-modules.patch
+io_uring-fix-resource-leaking-when-kill-the-process.patch
+io_uring-fix-missing-smp_mb-in-io_cancel_async_work.patch
+io_uring-fix-remove-irrelevant-req-from-the-task_list.patch
+io_uring-fix-double-list-add-in-io_queue_async_work.patch