--- /dev/null
+From 1e5d770bb8a23dd01e28e92f4fb0b1093c8bdbe6 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 18 Nov 2020 14:56:25 +0000
+Subject: io_uring: get an active ref_node from files_data
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 1e5d770bb8a23dd01e28e92f4fb0b1093c8bdbe6 upstream.
+
+An active ref_node can always be found in ctx->files_data, so it's much
+safer to get it this way instead of poking into files_data->ref_list.
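+
+The switch relies on the classic container_of() pattern: cur_refs points
+at the percpu_ref embedded in the currently active ref node, so the node
+can be recovered directly from that member pointer. A minimal user-space
+sketch of the same idiom; the struct and field names here are
+illustrative, not the io_uring ones:
+
+  #include <stddef.h>
+  #include <stdio.h>
+
+  struct ref_node {
+      int id;
+      int refs;    /* stands in for the embedded struct percpu_ref */
+  };
+
+  /* same recipe the kernel's container_of() macro uses */
+  #define container_of(ptr, type, member) \
+      ((type *)((char *)(ptr) - offsetof(type, member)))
+
+  int main(void)
+  {
+      struct ref_node node = { .id = 42 };
+      int *cur_refs = &node.refs;    /* only the member pointer is kept */
+
+      /* recover the enclosing node from the pointer to its member */
+      struct ref_node *n = container_of(cur_refs, struct ref_node, refs);
+
+      printf("id=%d\n", n->id);    /* prints id=42 */
+      return 0;
+  }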
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6854,9 +6854,8 @@ static int io_sqe_files_unregister(struc
+ return -ENXIO;
+
+ spin_lock(&data->lock);
+- if (!list_empty(&data->ref_list))
+- ref_node = list_first_entry(&data->ref_list,
+- struct fixed_file_ref_node, node);
++ ref_node = container_of(data->cur_refs, struct fixed_file_ref_node,
++ refs);
+ spin_unlock(&data->lock);
+ if (ref_node)
+ percpu_ref_kill(&ref_node->refs);
--- /dev/null
+From e297822b20e7fe683e107aea46e6402adcf99c70 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 18 Nov 2020 14:56:26 +0000
+Subject: io_uring: order refnode recycling
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit e297822b20e7fe683e107aea46e6402adcf99c70 upstream.
+
+Don't recycle a refnode until we're done with all requests of the nodes
+ejected before it.
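+
+The fix keeps ejected nodes on ref_list in FIFO order (hence the
+list_add_tail() changes below) and only recycles nodes from the head of
+the list once they are marked done, so a node that finished early cannot
+overtake an older one. A stripped-down, user-space sketch of that
+ordering rule; the array stands in for the linked list and all names are
+illustrative:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct node {
+      int id;
+      bool done;
+  };
+
+  /* mark entry @idx complete, then recycle strictly from the front */
+  static void node_complete(struct node *q, int len, int *head, int idx)
+  {
+      q[idx].done = true;
+
+      while (*head < len && q[*head].done) {
+          printf("recycling node %d\n", q[*head].id);
+          (*head)++;
+      }
+  }
+
+  int main(void)
+  {
+      struct node q[3] = { { 0 }, { 1 }, { 2 } };
+      int head = 0;
+
+      node_complete(q, 3, &head, 1);    /* node 1 finishes first: nothing recycled */
+      node_complete(q, 3, &head, 0);    /* node 0 done: 0 and 1 recycled in order */
+      node_complete(q, 3, &head, 2);    /* node 2 recycled last */
+      return 0;
+  }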
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 34 ++++++++++++++++++++++++----------
+ 1 file changed, 24 insertions(+), 10 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -200,6 +200,7 @@ struct fixed_file_ref_node {
+ struct list_head file_list;
+ struct fixed_file_data *file_data;
+ struct llist_node llist;
++ bool done;
+ };
+
+ struct fixed_file_data {
+@@ -7106,10 +7107,6 @@ static void __io_file_put_work(struct fi
+ kfree(pfile);
+ }
+
+- spin_lock(&file_data->lock);
+- list_del(&ref_node->node);
+- spin_unlock(&file_data->lock);
+-
+ percpu_ref_exit(&ref_node->refs);
+ kfree(ref_node);
+ percpu_ref_put(&file_data->refs);
+@@ -7136,17 +7133,33 @@ static void io_file_put_work(struct work
+ static void io_file_data_ref_zero(struct percpu_ref *ref)
+ {
+ struct fixed_file_ref_node *ref_node;
++ struct fixed_file_data *data;
+ struct io_ring_ctx *ctx;
+- bool first_add;
++ bool first_add = false;
+ int delay = HZ;
+
+ ref_node = container_of(ref, struct fixed_file_ref_node, refs);
+- ctx = ref_node->file_data->ctx;
++ data = ref_node->file_data;
++ ctx = data->ctx;
++
++ spin_lock(&data->lock);
++ ref_node->done = true;
++
++ while (!list_empty(&data->ref_list)) {
++ ref_node = list_first_entry(&data->ref_list,
++ struct fixed_file_ref_node, node);
++ /* recycle ref nodes in order */
++ if (!ref_node->done)
++ break;
++ list_del(&ref_node->node);
++ first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
++ }
++ spin_unlock(&data->lock);
++
+
+- if (percpu_ref_is_dying(&ctx->file_data->refs))
++ if (percpu_ref_is_dying(&data->refs))
+ delay = 0;
+
+- first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
+ if (!delay)
+ mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+ else if (first_add)
+@@ -7170,6 +7183,7 @@ static struct fixed_file_ref_node *alloc
+ INIT_LIST_HEAD(&ref_node->node);
+ INIT_LIST_HEAD(&ref_node->file_list);
+ ref_node->file_data = ctx->file_data;
++ ref_node->done = false;
+ return ref_node;
+ }
+
+@@ -7297,7 +7311,7 @@ static int io_sqe_files_register(struct
+
+ ctx->file_data->cur_refs = &ref_node->refs;
+ spin_lock(&ctx->file_data->lock);
+- list_add(&ref_node->node, &ctx->file_data->ref_list);
++ list_add_tail(&ref_node->node, &ctx->file_data->ref_list);
+ spin_unlock(&ctx->file_data->lock);
+ percpu_ref_get(&ctx->file_data->refs);
+ return ret;
+@@ -7442,7 +7456,7 @@ static int __io_sqe_files_update(struct
+ if (needs_switch) {
+ percpu_ref_kill(data->cur_refs);
+ spin_lock(&data->lock);
+- list_add(&ref_node->node, &data->ref_list);
++ list_add_tail(&ref_node->node, &data->ref_list);
+ data->cur_refs = &ref_node->refs;
+ spin_unlock(&data->lock);
+ percpu_ref_get(&ctx->file_data->refs);