--- /dev/null
+From df730ec21f7ba395b1b22e7f93a3a85b1d1b7882 Mon Sep 17 00:00:00 2001
+From: Xinghui Li <korantli@tencent.com>
+Date: Wed, 2 Nov 2022 16:25:03 +0800
+Subject: io_uring: fix two assignments in if conditions
+
+From: Xinghui Li <korantli@tencent.com>
+
+commit df730ec21f7ba395b1b22e7f93a3a85b1d1b7882 upstream.
+
+Fixes two errors:
+
+"ERROR: do not use assignment in if condition
+130: FILE: io_uring/net.c:130:
++ if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+
+ERROR: do not use assignment in if condition
+599: FILE: io_uring/poll.c:599:
++ } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&"
+reported by checkpatch.pl in net.c and poll.c.
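+
+For reference, here is a standalone userspace sketch of the pattern that
+checkpatch.pl objects to and its usual rewrite. It is illustrative only
+(plain libc, hypothetical helper names), not the io_uring code itself:
+
+  #include <string.h>
+
+  /* Flagged form: the assignment is buried inside the if condition. */
+  static int eq_offset_old(const char *s)
+  {
+      const char *p;
+
+      if (s && (p = strchr(s, '=')) != NULL)
+          return (int)(p - s);
+      return -1;
+  }
+
+  /* Preferred form: hoist the assignment out, then test the result. */
+  static int eq_offset_new(const char *s)
+  {
+      const char *p;
+
+      if (s) {
+          p = strchr(s, '=');
+          if (p)
+              return (int)(p - s);
+      }
+      return -1;
+  }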
+
+Signed-off-by: Xinghui Li <korantli@tencent.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Link: https://lore.kernel.org/r/20221102082503.32236-1-korantwork@gmail.com
+[axboe: style tweaks]
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c | 16 +++++++++-------
+ io_uring/poll.c | 7 +++++--
+ 2 files changed, 14 insertions(+), 9 deletions(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -126,13 +126,15 @@ static struct io_async_msghdr *io_msg_al
+ struct io_cache_entry *entry;
+ struct io_async_msghdr *hdr;
+
+- if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+- (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
+- hdr = container_of(entry, struct io_async_msghdr, cache);
+- hdr->free_iov = NULL;
+- req->flags |= REQ_F_ASYNC_DATA;
+- req->async_data = hdr;
+- return hdr;
++ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
++ entry = io_alloc_cache_get(&ctx->netmsg_cache);
++ if (entry) {
++ hdr = container_of(entry, struct io_async_msghdr, cache);
++ hdr->free_iov = NULL;
++ req->flags |= REQ_F_ASYNC_DATA;
++ req->async_data = hdr;
++ return hdr;
++ }
+ }
+
+ if (!io_alloc_async_data(req)) {
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -678,10 +678,13 @@ static struct async_poll *io_req_alloc_a
+ if (req->flags & REQ_F_POLLED) {
+ apoll = req->apoll;
+ kfree(apoll->double_poll);
+- } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+- (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
++ } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
++ entry = io_alloc_cache_get(&ctx->apoll_cache);
++ if (entry == NULL)
++ goto alloc_apoll;
+ apoll = container_of(entry, struct async_poll, cache);
+ } else {
++alloc_apoll:
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ if (unlikely(!apoll))
+ return NULL;
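+
+A note on the poll.c hunk above: rather than duplicating the kmalloc()
+call for the cache-miss case, the rewrite jumps into the final else
+branch with a goto, so the "ring is unlocked" path and the "cache came
+up empty" path share a single allocation site. A minimal userspace
+sketch of that shape, with hypothetical names rather than the kernel
+code:
+
+  #include <stdlib.h>
+
+  struct item {
+      int value;
+  };
+
+  /* Stand-in for the locked-cache lookup; pretend the cache is empty. */
+  static struct item *cache_pop(void)
+  {
+      return NULL;
+  }
+
+  static struct item *get_item(int have_lock)
+  {
+      struct item *it;
+
+      if (have_lock) {
+          it = cache_pop();
+          if (it == NULL)
+              goto alloc;
+      } else {
+  alloc:
+          it = malloc(sizeof(*it));
+          if (!it)
+              return NULL;
+      }
+      it->value = 0;
+      return it;
+  }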
--- /dev/null
+From c16bda37594f83147b167d381d54c010024efecf Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sat, 25 Feb 2023 12:53:53 -0700
+Subject: io_uring/poll: allow some retries for poll triggering spuriously
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit c16bda37594f83147b167d381d54c010024efecf upstream.
+
+If we get woken spuriously when polling and fail the operation with
+-EAGAIN again, then we generally only allow polling again if data
+had been transferred at some point. This is indicated with
+REQ_F_PARTIAL_IO. However, if the spurious poll triggers when the socket
+was originally empty, then we haven't transferred data yet and we will
+fail the poll re-arm. This either punts the socket to io-wq if it's
+blocking, or it fails the request with -EAGAIN if not. Neither condition
+is desirable, as the former will slow things down, while the latter
+will confuse the application.
+
+We want to ensure that a repeated poll trigger doesn't lead to infinite
+work making no progress; that's what the REQ_F_PARTIAL_IO check was
+for. But it doesn't protect against a loop after the first receive, and
+it's unnecessarily strict if we started out with an empty socket.
+
+Add a somewhat arbitrary retry count, just to put an upper limit on the
+number of retries that will be done. This should be high enough
+that we won't really hit it in practice, unless something needs to be
+aborted anyway.
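+
+In effect, each request gets a finite re-arm budget: the counter is set
+to APOLL_MAX_RETRY whenever a fresh async_poll entry is obtained (from
+the cache or from kmalloc), it is decremented on every arm attempt, and
+it is deliberately not reset when a previously polled request re-arms,
+so repeated spurious triggers eventually exhaust it and the arm attempt
+is abandoned. A standalone userspace sketch of that idiom, with
+hypothetical names rather than the io_uring types:
+
+  #include <stdbool.h>
+
+  #define MAX_RETRY 128   /* arbitrary cap, in the spirit of APOLL_MAX_RETRY */
+
+  struct waiter {
+      int retries;        /* remaining re-arm budget */
+  };
+
+  /* Refill the budget when a waiter is freshly set up. */
+  static void waiter_init(struct waiter *w)
+  {
+      w->retries = MAX_RETRY;
+  }
+
+  /*
+   * Called before every re-arm of the same waiter. Returns false once
+   * the budget is exhausted, so the caller can abort instead of looping.
+   */
+  static bool waiter_may_retry(struct waiter *w)
+  {
+      return --w->retries > 0;
+  }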
+
+Cc: stable@vger.kernel.org # v5.10+
+Link: https://github.com/axboe/liburing/issues/364
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/poll.c | 14 ++++++++++++--
+ io_uring/poll.h | 1 +
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -668,6 +668,14 @@ static void io_async_queue_proc(struct f
+ __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
+ }
+
++/*
++ * We can't reliably detect loops in repeated poll triggers and issue
++ * subsequently failing. But rather than fail these immediately, allow a
++ * certain amount of retries before we give up. Given that this condition
++ * should _rarely_ trigger even once, we should be fine with a larger value.
++ */
++#define APOLL_MAX_RETRY 128
++
+ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+ unsigned issue_flags)
+ {
+@@ -683,14 +691,18 @@ static struct async_poll *io_req_alloc_a
+ if (entry == NULL)
+ goto alloc_apoll;
+ apoll = container_of(entry, struct async_poll, cache);
++ apoll->poll.retries = APOLL_MAX_RETRY;
+ } else {
+ alloc_apoll:
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ if (unlikely(!apoll))
+ return NULL;
++ apoll->poll.retries = APOLL_MAX_RETRY;
+ }
+ apoll->double_poll = NULL;
+ req->apoll = apoll;
++ if (unlikely(!--apoll->poll.retries))
++ return NULL;
+ return apoll;
+ }
+
+@@ -712,8 +724,6 @@ int io_arm_poll_handler(struct io_kiocb
+ return IO_APOLL_ABORTED;
+ if (!file_can_poll(req->file))
+ return IO_APOLL_ABORTED;
+- if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
+- return IO_APOLL_ABORTED;
+ if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+ mask |= EPOLLONESHOT;
+
+--- a/io_uring/poll.h
++++ b/io_uring/poll.h
+@@ -12,6 +12,7 @@ struct io_poll {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
++ int retries;
+ struct wait_queue_entry wait;
+ };
+