--- /dev/null
+From a2306a4da000941915063d9f5eb2fff33bafa1fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 Aug 2020 11:00:37 -0600
+Subject: io_uring: don't recurse on tsk->sighand->siglock with signalfd
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit fd7d6de2241453fc7d042336d366a939a25bc5a9 ]
+
+If an application is doing reads on signalfd, and we arm the poll handler
+because there's no data available, then the wakeup can recurse on the
+task's sighand->siglock, as the signal delivery from task_work_add() will
+use TWA_SIGNAL and that attempts to lock it again.
+
+We can detect the signalfd case pretty easily by comparing the poll->head
+wait_queue_head_t with the target task's signalfd wait queue. Just use
+normal task wakeup for this case.
+
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index b966e2b8a77da..c384caad64665 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4114,7 +4114,8 @@ struct io_poll_table {
+ int error;
+ };
+
+-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
++static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
++ bool twa_signal_ok)
+ {
+ struct task_struct *tsk = req->task;
+ struct io_ring_ctx *ctx = req->ctx;
+@@ -4127,7 +4128,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+ * will do the job.
+ */
+ notify = 0;
+- if (!(ctx->flags & IORING_SETUP_SQPOLL))
++ if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
+ notify = TWA_SIGNAL;
+
+ ret = task_work_add(tsk, cb, notify);
+@@ -4141,6 +4142,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ __poll_t mask, task_work_func_t func)
+ {
+ struct task_struct *tsk;
++ bool twa_signal_ok;
+ int ret;
+
+ /* for instances that support it check for an event match first: */
+@@ -4156,13 +4158,21 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ init_task_work(&req->task_work, func);
+ percpu_ref_get(&req->ctx->refs);
+
++ /*
++ * If we're using the signalfd wait_queue_head for this wakeup, then
++ * it's not safe to use TWA_SIGNAL as we could be recursing on the
++ * tsk->sighand->siglock when doing the wakeup. It should not be needed
++ * either, as the normal wakeup will suffice.
++ */
++ twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
++
+ /*
+ * If this fails, then the task is exiting. When a task exits, the
+ * work gets canceled, so just cancel this request as well instead
+ * of executing it. We can't safely execute it anyway, as we may not
+ * have the state needed for it.
+ */
+- ret = io_req_task_work_add(req, &req->task_work);
++ ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
+ if (unlikely(ret)) {
+ WRITE_ONCE(poll->canceled, true);
+ tsk = io_wq_get_task(req->ctx->io_wq);
+--
+2.25.1
+
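
For context, here is a minimal userspace sketch of the pattern the commit
message describes: an io_uring read on a signalfd with no signal pending,
so the poll handler gets armed, followed by a signal delivery that
triggers the wakeup. This is a hedged reproducer outline, not part of the
patch; it assumes liburing is available, and the file name in the build
line is just a placeholder.

/*
 * Hedged reproducer sketch (not from the patch): an io_uring read on an
 * empty signalfd arms the poll handler; the later signal delivery is the
 * wakeup that could recurse on sighand->siglock via TWA_SIGNAL task_work.
 * Assumes liburing; build with something like: gcc repro.c -luring
 */
#include <liburing.h>
#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct signalfd_siginfo si;
	sigset_t mask;
	int sfd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* let the signalfd own SIGINT */

	sfd = signalfd(-1, &mask, SFD_NONBLOCK);
	io_uring_queue_init(8, &ring, 0);

	/* No SIGINT pending yet, so this read arms the poll handler. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, sfd, &si, sizeof(si), 0);
	io_uring_submit(&ring);

	kill(getpid(), SIGINT);		/* the wakeup that used to recurse */

	io_uring_wait_cqe(&ring, &cqe);
	printf("read res=%d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	close(sfd);
	return 0;
}

On a fixed kernel this should complete with res == sizeof(si); on an
affected one the TWA_SIGNAL task_work path could recurse on siglock.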
--- /dev/null
+From 2650072db7468068752d8060ac2f72b4d4832405 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 12:27:50 -0600
+Subject: io_uring: don't use poll handler if file can't be nonblocking
+ read/written
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit 9dab14b81807a40dab8e464ec87043935c562c2c ]
+
+There's no point in using the poll handler if we can't do a nonblocking
+IO attempt of the operation, since we'll need to go async anyway. In
+fact, this is actively harmful, as reading from e.g. pipes won't return
+0 to indicate EOF.
+
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: Benedikt Ames <wisp3rwind@posteo.eu>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index c384caad64665..2b7018456091c 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4503,12 +4503,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
+ struct async_poll *apoll;
+ struct io_poll_table ipt;
+ __poll_t mask, ret;
++ int rw;
+
+ if (!req->file || !file_can_poll(req->file))
+ return false;
+ if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
+ return false;
+- if (!def->pollin && !def->pollout)
++ if (def->pollin)
++ rw = READ;
++ else if (def->pollout)
++ rw = WRITE;
++ else
++ return false;
++ /* no point arming a poll handler if we can't do a nonblocking try */
++ if (!io_file_supports_async(req->file, rw))
+ return false;
+
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+--
+2.25.1
+
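
To illustrate the harm described above, a hedged sketch along the same
lines (again assuming liburing; not taken from the patch): a read on an
empty pipe has to wait, and once the write end closes, the completion
should post with res == 0 to signal EOF. That is the behaviour this fix
restores by refusing to arm the poll handler for files that can't service
a nonblocking attempt.

/*
 * Hedged demonstration sketch (not from the patch): read from an empty
 * pipe, then close the write end.  The expected completion is res == 0
 * (EOF); on kernels with the bug, arming the poll handler for a file
 * that can't do a nonblocking attempt could leave the EOF unreported.
 */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64];
	int p[2];

	pipe(p);
	io_uring_queue_init(8, &ring, 0);

	/* Empty pipe: no data yet, so the request must go async. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, p[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	close(p[1]);			/* no writer left: this is EOF */

	io_uring_wait_cqe(&ring, &cqe);
	printf("res=%d (0 means EOF)\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	close(p[0]);
	return 0;
}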
--- /dev/null
+From f2db9368ec9ac2eea03860ec812f2e7fa0fed265 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Aug 2020 10:36:20 -0600
+Subject: io_uring: make offset == -1 consistent with preadv2/pwritev2
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit 0fef948363f62494d779cf9dc3c0a86ea1e5f7cd ]
+
+The man page for io_uring generally claims we're consistent with what
+preadv2 and pwritev2 accept, but it turns out there's a slight discrepancy
+in how offset == -1 is handled for pipes/streams. preadv doesn't allow
+it, but preadv2 does. This currently causes io_uring to return -EINVAL
+if that is attempted, but we should allow it as documented.
+
+This change makes us consistent with preadv2/pwritev2 by just passing
+in a NULL ppos for streams if the offset is -1.
+
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: Benedikt Ames <wisp3rwind@posteo.eu>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 2b7018456091c..4115bfedf15dc 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2518,6 +2518,11 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
+ }
+
++static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
++{
++ return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
++}
++
+ /*
+ * For files that don't have ->read_iter() and ->write_iter(), handle them
+ * by looping over ->read() or ->write() manually.
+@@ -2553,10 +2558,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+
+ if (rw == READ) {
+ nr = file->f_op->read(file, iovec.iov_base,
+- iovec.iov_len, &kiocb->ki_pos);
++ iovec.iov_len, io_kiocb_ppos(kiocb));
+ } else {
+ nr = file->f_op->write(file, iovec.iov_base,
+- iovec.iov_len, &kiocb->ki_pos);
++ iovec.iov_len, io_kiocb_ppos(kiocb));
+ }
+
+ if (iov_iter_is_bvec(iter))
+@@ -2681,7 +2686,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
+ goto copy_iov;
+
+ iov_count = iov_iter_count(&iter);
+- ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
++ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
+ if (!ret) {
+ ssize_t ret2;
+
+@@ -2780,7 +2785,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
+ goto copy_iov;
+
+ iov_count = iov_iter_count(&iter);
+- ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
++ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
+ if (!ret) {
+ ssize_t ret2;
+
+--
+2.25.1
+
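
As a userspace reference for the semantics being matched, a hedged
comparison sketch (glibc wrappers with _GNU_SOURCE; not from the patch):
pwritev() rejects a negative offset with EINVAL, while pwritev2() accepts
offset == -1 to mean "use the current file position" on a stream, which
is what io_uring now mirrors by passing a NULL ppos.

/*
 * Hedged comparison sketch (not from the patch): the same write to a
 * pipe through pwritev() and pwritev2() with offset == -1.  Expected
 * output: pwritev fails with EINVAL, pwritev2 writes the bytes.
 */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char msg[] = "hello";
	struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) };
	int p[2];
	ssize_t n;

	pipe(p);

	n = pwritev(p[1], &iov, 1, -1);		/* expect -1, errno EINVAL */
	printf("pwritev:  %zd (%s)\n", n, n < 0 ? strerror(errno) : "ok");

	n = pwritev2(p[1], &iov, 1, -1, 0);	/* expect sizeof(msg) */
	printf("pwritev2: %zd (%s)\n", n, n < 0 ? strerror(errno) : "ok");

	return 0;
}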
usb-typec-ucsi-rework-ppm_lock-handling.patch
usb-typec-ucsi-hold-con-lock-for-the-entire-duration-of-ucsi_register_port.patch
usb-typec-tcpm-fix-fix-source-hard-reset-response-for-tda-2.3.1.1-and-tda-2.3.1.2-failures.patch
+io_uring-don-t-recurse-on-tsk-sighand-siglock-with-s.patch
+io_uring-don-t-use-poll-handler-if-file-can-t-be-non.patch
+io_uring-make-offset-1-consistent-with-preadv2-pwrit.patch