+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+ struct fixed_file_ref_node *ref_node)
+{
-+ spin_lock(&file_data->lock);
++ spin_lock_bh(&file_data->lock);
+ file_data->node = ref_node;
+ list_add_tail(&ref_node->node, &file_data->ref_list);
-+ spin_unlock(&file_data->lock);
++ spin_unlock_bh(&file_data->lock);
+ percpu_ref_get(&file_data->refs);
+}
+
}
- file_data->node = ref_node;
-- spin_lock(&file_data->lock);
+- spin_lock_bh(&file_data->lock);
- list_add_tail(&ref_node->node, &file_data->ref_list);
-- spin_unlock(&file_data->lock);
+- spin_unlock_bh(&file_data->lock);
- percpu_ref_get(&file_data->refs);
+ io_sqe_files_set_node(file_data, ref_node);
return ret;
if (needs_switch) {
percpu_ref_kill(&data->node->refs);
-- spin_lock(&data->lock);
+- spin_lock_bh(&data->lock);
- list_add_tail(&ref_node->node, &data->ref_list);
- data->node = ref_node;
-- spin_unlock(&data->lock);
+- spin_unlock_bh(&data->lock);
- percpu_ref_get(&ctx->file_data->refs);
+ io_sqe_files_set_node(data, ref_node);
} else
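
Read together, the hunks above fold the open-coded lock/list/ref sequence
into a single helper and switch it to the bottom-half safe lock. As a
sketch (struct layouts assumed from the surrounding context), the
consolidated helper reads as follows once both changes apply:

static void io_sqe_files_set_node(struct fixed_file_data *file_data,
				  struct fixed_file_ref_node *ref_node)
{
	/* Publish the new ref node under the BH-safe lock ... */
	spin_lock_bh(&file_data->lock);
	file_data->node = ref_node;
	list_add_tail(&ref_node->node, &file_data->ref_list);
	spin_unlock_bh(&file_data->lock);
	/* ... and pin file_data until the node is flushed. */
	percpu_ref_get(&file_data->refs);
}
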
--- /dev/null
+From ac0648a56c1ff66c1cbf735075ad33a26cbc50de Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 23 Nov 2020 09:37:51 -0700
+Subject: io_uring: use bottom half safe lock for fixed file data
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit ac0648a56c1ff66c1cbf735075ad33a26cbc50de upstream.
+
+io_file_data_ref_zero() can be invoked from soft-irq context by the RCU
+core, hence we need to ensure that the file_data lock is bottom-half
+safe. Use the _bh() variants when grabbing this lock.
+
+Reported-by: syzbot+1f4ba1e5520762c523c6@syzkaller.appspotmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7000,9 +7000,9 @@ static int io_sqe_files_unregister(struc
+ if (!data)
+ return -ENXIO;
+
+- spin_lock(&data->lock);
++ spin_lock_bh(&data->lock);
+ ref_node = data->node;
+- spin_unlock(&data->lock);
++ spin_unlock_bh(&data->lock);
+ if (ref_node)
+ percpu_ref_kill(&ref_node->refs);
+
+@@ -7385,7 +7385,7 @@ static void io_file_data_ref_zero(struct
+ data = ref_node->file_data;
+ ctx = data->ctx;
+
+- spin_lock(&data->lock);
++ spin_lock_bh(&data->lock);
+ ref_node->done = true;
+
+ while (!list_empty(&data->ref_list)) {
+@@ -7397,7 +7397,7 @@ static void io_file_data_ref_zero(struct
+ list_del(&ref_node->node);
+ first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
+ }
+- spin_unlock(&data->lock);
++ spin_unlock_bh(&data->lock);
+
+ if (percpu_ref_is_dying(&data->refs))
+ delay = 0;
+@@ -7520,9 +7520,9 @@ static int io_sqe_files_register(struct
+ }
+
+ file_data->node = ref_node;
+- spin_lock(&file_data->lock);
++ spin_lock_bh(&file_data->lock);
+ list_add_tail(&ref_node->node, &file_data->ref_list);
+- spin_unlock(&file_data->lock);
++ spin_unlock_bh(&file_data->lock);
+ percpu_ref_get(&file_data->refs);
+ return ret;
+ out_fput:
+@@ -7679,10 +7679,10 @@ static int __io_sqe_files_update(struct
+
+ if (needs_switch) {
+ percpu_ref_kill(&data->node->refs);
+- spin_lock(&data->lock);
++ spin_lock_bh(&data->lock);
+ list_add_tail(&ref_node->node, &data->ref_list);
+ data->node = ref_node;
+- spin_unlock(&data->lock);
++ spin_unlock_bh(&data->lock);
+ percpu_ref_get(&ctx->file_data->refs);
+ } else
+ destroy_fixed_file_ref_node(ref_node);
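
The hazard the commit message describes is the usual process-context vs.
soft-irq lock pattern: the percpu_ref release callback,
io_file_data_ref_zero(), runs from the RCU soft-irq, so a process-context
holder of data->lock can be interrupted on the same CPU by the very
soft-irq that also wants the lock, and the callback then spins forever.
A minimal sketch of the pattern, with hypothetical demo_* names standing
in for the io_uring structures:

#include <linux/spinlock.h>
#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical stand-in for data->lock */
static struct percpu_ref demo_refs;

/* Release callback: invoked via RCU, i.e. from soft-irq, at ref-zero. */
static void demo_ref_zero(struct percpu_ref *ref)
{
	spin_lock_bh(&demo_lock);	/* nests harmlessly in soft-irq context */
	/* ... collect done ref nodes for deferred freeing ... */
	spin_unlock_bh(&demo_lock);
}

/* Process context: _bh keeps the soft-irq off this CPU while we hold it. */
static void demo_publish(void)
{
	spin_lock_bh(&demo_lock);
	/* ... splice a new node onto the list, as the hunks above do ... */
	spin_unlock_bh(&demo_lock);
}

static int demo_init(void)
{
	/* Registers demo_ref_zero() as the release callback. */
	return percpu_ref_init(&demo_refs, demo_ref_zero, 0, GFP_KERNEL);
}

With a plain spin_lock() in demo_publish(), an RCU soft-irq firing on the
same CPU while the lock is held would deadlock on demo_lock;
spin_lock_bh() closes that window by keeping bottom halves disabled for
the duration of the critical section, which is exactly the conversion the
patch applies to every taker of data->lock.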