--- /dev/null
+From 9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 1 Jun 2022 23:57:02 -0600
+Subject: io_uring: reinstate the inflight tracking
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7 upstream.
+
+After some debugging, it was realized that we really do still need the
+old inflight tracking for any file type that has io_uring_fops assigned.
+If we don't track those files, then trivial circular references will mean
+that we never get the ctx cleaned up, and hence it'll leak.
+
+Just bring back the inflight tracking, which then also means we can
+eliminate the conditional dropping of the file when task_work is queued.
+
+Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 82 +++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 56 insertions(+), 26 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -111,7 +111,8 @@
+ IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
+
+ #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
+- REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
++ REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
++ REQ_F_ASYNC_DATA)
+
+ #define IO_TCTX_REFS_CACHE_NR (1U << 10)
+
+@@ -493,6 +494,7 @@ struct io_uring_task {
+ const struct io_ring_ctx *last;
+ struct io_wq *io_wq;
+ struct percpu_counter inflight;
++ atomic_t inflight_tracked;
+ atomic_t in_idle;
+
+ spinlock_t task_lock;
+@@ -1186,8 +1188,6 @@ static void io_clean_op(struct io_kiocb
+ static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+ unsigned issue_flags);
+ static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
+-static void io_drop_inflight_file(struct io_kiocb *req);
+-static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
+ static void __io_queue_sqe(struct io_kiocb *req);
+ static void io_rsrc_put_work(struct work_struct *work);
+
+@@ -1435,9 +1435,29 @@ static bool io_match_task(struct io_kioc
+ bool cancel_all)
+ __must_hold(&req->ctx->timeout_lock)
+ {
++ struct io_kiocb *req;
++
+ if (task && head->task != task)
+ return false;
+- return cancel_all;
++ if (cancel_all)
++ return true;
++
++ io_for_each_link(req, head) {
++ if (req->flags & REQ_F_INFLIGHT)
++ return true;
++ }
++ return false;
++}
++
++static bool io_match_linked(struct io_kiocb *head)
++{
++ struct io_kiocb *req;
++
++ io_for_each_link(req, head) {
++ if (req->flags & REQ_F_INFLIGHT)
++ return true;
++ }
++ return false;
+ }
+
+ /*
+@@ -1447,9 +1467,24 @@ static bool io_match_task(struct io_kioc
+ static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all)
+ {
++ bool matched;
++
+ if (task && head->task != task)
+ return false;
+- return cancel_all;
++ if (cancel_all)
++ return true;
++
++ if (head->flags & REQ_F_LINK_TIMEOUT) {
++ struct io_ring_ctx *ctx = head->ctx;
++
++ /* protect against races with linked timeouts */
++ spin_lock_irq(&ctx->timeout_lock);
++ matched = io_match_linked(head);
++ spin_unlock_irq(&ctx->timeout_lock);
++ } else {
++ matched = io_match_linked(head);
++ }
++ return matched;
+ }
+
+ static inline bool req_has_async_data(struct io_kiocb *req)
+@@ -1608,6 +1643,14 @@ static inline bool io_req_ffs_set(struct
+ return req->flags & REQ_F_FIXED_FILE;
+ }
+
++static inline void io_req_track_inflight(struct io_kiocb *req)
++{
++ if (!(req->flags & REQ_F_INFLIGHT)) {
++ req->flags |= REQ_F_INFLIGHT;
++ atomic_inc(&current->io_uring->inflight_tracked);
++ }
++}
++
+ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
+ {
+ if (WARN_ON_ONCE(!req->link))
+@@ -2516,8 +2559,6 @@ static void io_req_task_work_add(struct
+
+ WARN_ON_ONCE(!tctx);
+
+- io_drop_inflight_file(req);
+-
+ spin_lock_irqsave(&tctx->task_lock, flags);
+ if (priority)
+ wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
+@@ -5869,10 +5910,6 @@ static int io_poll_check_events(struct i
+
+ if (!req->result) {
+ struct poll_table_struct pt = { ._key = req->apoll_events };
+- unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
+-
+- if (unlikely(!io_assign_file(req, flags)))
+- return -EBADF;
+ req->result = vfs_poll(req->file, &pt) & req->apoll_events;
+ }
+
+@@ -7097,6 +7134,11 @@ static void io_clean_op(struct io_kiocb
+ kfree(req->apoll);
+ req->apoll = NULL;
+ }
++ if (req->flags & REQ_F_INFLIGHT) {
++ struct io_uring_task *tctx = req->task->io_uring;
++
++ atomic_dec(&tctx->inflight_tracked);
++ }
+ if (req->flags & REQ_F_CREDS)
+ put_cred(req->creds);
+ if (req->flags & REQ_F_ASYNC_DATA) {
+@@ -7393,19 +7435,6 @@ out:
+ return file;
+ }
+
+-/*
+- * Drop the file for requeue operations. Only used of req->file is the
+- * io_uring descriptor itself.
+- */
+-static void io_drop_inflight_file(struct io_kiocb *req)
+-{
+- if (unlikely(req->flags & REQ_F_INFLIGHT)) {
+- fput(req->file);
+- req->file = NULL;
+- req->flags &= ~REQ_F_INFLIGHT;
+- }
+-}
+-
+ static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
+ {
+ struct file *file = fget(fd);
+@@ -7414,7 +7443,7 @@ static struct file *io_file_get_normal(s
+
+ /* we don't allow fixed io_uring files */
+ if (file && file->f_op == &io_uring_fops)
+- req->flags |= REQ_F_INFLIGHT;
++ io_req_track_inflight(req);
+ return file;
+ }
+
+@@ -9211,6 +9240,7 @@ static __cold int io_uring_alloc_task_co
+ xa_init(&tctx->xa);
+ init_waitqueue_head(&tctx->wait);
+ atomic_set(&tctx->in_idle, 0);
++ atomic_set(&tctx->inflight_tracked, 0);
+ task->io_uring = tctx;
+ spin_lock_init(&tctx->task_lock);
+ INIT_WQ_LIST(&tctx->task_list);
+@@ -10402,7 +10432,7 @@ static __cold void io_uring_clean_tctx(s
+ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+ {
+ if (tracked)
+- return 0;
++ return atomic_read(&tctx->inflight_tracked);
+ return percpu_counter_sum(&tctx->inflight);
+ }
+