--- /dev/null
+From axboe@kernel.dk Mon Jun 27 02:21:06 2022
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 26 Jun 2022 18:21:03 -0600
+Subject: io_uring: use separate list entry for iopoll requests
+To: Greg Thelen <gthelen@google.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, linux-kernel@vger.kernel.org, akpm@linux-foundation.org, torvalds@linux-foundation.org, stable@vger.kernel.org
+Cc: lwn@lwn.net, jslaby@suse.cz
+Message-ID: <6bc6ae48-b569-2002-118a-d3468b0278cd@kernel.dk>
+
+From: Jens Axboe <axboe@kernel.dk>
+
+A previous commit ended up enabling file tracking for iopoll requests,
+which conflicts with iopoll: both now use the same list entry
+(inflight_entry) in struct io_kiocb for tracking. Add a separate list
+entry just for iopoll requests to avoid this issue.
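+
+For illustration only (not part of the patch): a minimal userspace
+sketch of why a single embedded list_head cannot safely put a request
+on two lists at once. The list_head/list_add stand-ins below mirror
+the semantics of the kernel's <linux/list.h>, and struct req is a
+hypothetical stand-in for io_kiocb.
+
+  #include <stdio.h>
+
+  /* Minimal userspace stand-ins for the kernel's list helpers. */
+  struct list_head {
+          struct list_head *next, *prev;
+  };
+
+  #define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+  static void list_add(struct list_head *new, struct list_head *head)
+  {
+          new->next = head->next;
+          new->prev = head;
+          head->next->prev = new;
+          head->next = new;
+  }
+
+  /* Hypothetical request with one entry per list, as in the fix. */
+  struct req {
+          struct list_head inflight_entry;  /* file/inflight tracking */
+          struct list_head iopoll_entry;    /* iopoll completion list */
+  };
+
+  int main(void)
+  {
+          struct list_head inflight = LIST_HEAD_INIT(inflight);
+          struct list_head iopoll = LIST_HEAD_INIT(iopoll);
+          struct req r;
+
+          /*
+           * With a single shared entry, the second list_add() would
+           * rewrite the node's next/prev pointers and silently corrupt
+           * the first list.  Separate entries keep both memberships
+           * intact.
+           */
+          list_add(&r.inflight_entry, &inflight);
+          list_add(&r.iopoll_entry, &iopoll);
+
+          printf("linked on both lists: %d\n",
+                 inflight.next == &r.inflight_entry &&
+                 iopoll.next == &r.iopoll_entry);
+          return 0;
+  }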
+
+No upstream commit exists for this issue.
+
+Reported-by: Greg Thelen <gthelen@google.com>
+Fixes: df3f3bb5059d ("io_uring: add missing item types for various requests")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -696,6 +696,8 @@ struct io_kiocb {
+ */
+ struct list_head inflight_entry;
+
++ struct list_head iopoll_entry;
++
+ struct percpu_ref *fixed_file_refs;
+ struct callback_head task_work;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+@@ -2350,8 +2352,8 @@ static void io_iopoll_queue(struct list_
+ struct io_kiocb *req;
+
+ do {
+- req = list_first_entry(again, struct io_kiocb, inflight_entry);
+- list_del(&req->inflight_entry);
++ req = list_first_entry(again, struct io_kiocb, iopoll_entry);
++ list_del(&req->iopoll_entry);
+ __io_complete_rw(req, -EAGAIN, 0, NULL);
+ } while (!list_empty(again));
+ }
+@@ -2373,14 +2375,14 @@ static void io_iopoll_complete(struct io
+ while (!list_empty(done)) {
+ int cflags = 0;
+
+- req = list_first_entry(done, struct io_kiocb, inflight_entry);
++ req = list_first_entry(done, struct io_kiocb, iopoll_entry);
+ if (READ_ONCE(req->result) == -EAGAIN) {
+ req->result = 0;
+ req->iopoll_completed = 0;
+- list_move_tail(&req->inflight_entry, &again);
++ list_move_tail(&req->iopoll_entry, &again);
+ continue;
+ }
+- list_del(&req->inflight_entry);
++ list_del(&req->iopoll_entry);
+
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_rw_kbuf(req);
+@@ -2416,7 +2418,7 @@ static int io_do_iopoll(struct io_ring_c
+ spin = !ctx->poll_multi_file && *nr_events < min;
+
+ ret = 0;
+- list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
++ list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) {
+ struct kiocb *kiocb = &req->rw.kiocb;
+
+ /*
+@@ -2425,7 +2427,7 @@ static int io_do_iopoll(struct io_ring_c
+ * and complete those lists first, if we have entries there.
+ */
+ if (READ_ONCE(req->iopoll_completed)) {
+- list_move_tail(&req->inflight_entry, &done);
++ list_move_tail(&req->iopoll_entry, &done);
+ continue;
+ }
+ if (!list_empty(&done))
+@@ -2437,7 +2439,7 @@ static int io_do_iopoll(struct io_ring_c
+
+ /* iopoll may have completed current req */
+ if (READ_ONCE(req->iopoll_completed))
+- list_move_tail(&req->inflight_entry, &done);
++ list_move_tail(&req->iopoll_entry, &done);
+
+ if (ret && spin)
+ spin = false;
+@@ -2670,7 +2672,7 @@ static void io_iopoll_req_issued(struct
+ struct io_kiocb *list_req;
+
+ list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
+- inflight_entry);
++ iopoll_entry);
+ if (list_req->file != req->file)
+ ctx->poll_multi_file = true;
+ }
+@@ -2680,9 +2682,9 @@ static void io_iopoll_req_issued(struct
+ * it to the front so we find it first.
+ */
+ if (READ_ONCE(req->iopoll_completed))
+- list_add(&req->inflight_entry, &ctx->iopoll_list);
++ list_add(&req->iopoll_entry, &ctx->iopoll_list);
+ else
+- list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
++ list_add_tail(&req->iopoll_entry, &ctx->iopoll_list);
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+ wq_has_sleeper(&ctx->sq_data->wait))