From: Greg Kroah-Hartman
Date: Sun, 12 Dec 2021 13:55:00 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v4.4.295~22
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4378feca62ff99d69a1a3de77f9174853ca35ea2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	binder-use-wake_up_pollfree.patch
	signalfd-use-wake_up_pollfree.patch
	wait-add-wake_up_pollfree.patch
---

diff --git a/queue-4.14/binder-use-wake_up_pollfree.patch b/queue-4.14/binder-use-wake_up_pollfree.patch
new file mode 100644
index 00000000000..4cf69be9be8
--- /dev/null
+++ b/queue-4.14/binder-use-wake_up_pollfree.patch
@@ -0,0 +1,64 @@
+From foo@baz Sun Dec 12 02:47:18 PM CET 2021
+From: Eric Biggers
+Date: Fri, 10 Dec 2021 16:19:25 -0800
+Subject: binder: use wake_up_pollfree()
+To: stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Linus Torvalds
+Message-ID: <20211211001926.100856-3-ebiggers@kernel.org>
+
+From: Eric Biggers
+
+commit a880b28a71e39013e357fd3adccd1d8a31bc69a8 upstream.
+
+wake_up_poll() uses nr_exclusive=1, so it's not guaranteed to wake up
+all exclusive waiters.  Yet, POLLFREE *must* wake up all waiters.  epoll
+and aio poll are fortunately not affected by this, but it's very
+fragile.  Thus, the new function wake_up_pollfree() has been introduced.
+
+Convert binder to use wake_up_pollfree().
+
+Reported-by: Linus Torvalds
+Fixes: f5cb779ba163 ("ANDROID: binder: remove waitqueue when thread exits.")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211209010455.42744-3-ebiggers@kernel.org
+Signed-off-by: Eric Biggers
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/android/binder.c |   21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4336,23 +4336,20 @@ static int binder_thread_release(struct
+ 	}
+ 
+ 	/*
+-	 * If this thread used poll, make sure we remove the waitqueue
+-	 * from any epoll data structures holding it with POLLFREE.
+-	 * waitqueue_active() is safe to use here because we're holding
+-	 * the inner lock.
++	 * If this thread used poll, make sure we remove the waitqueue from any
++	 * poll data structures holding it.
+ 	 */
+-	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+-	    waitqueue_active(&thread->wait)) {
+-		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+-	}
++	if (thread->looper & BINDER_LOOPER_STATE_POLL)
++		wake_up_pollfree(&thread->wait);
+ 
+ 	binder_inner_proc_unlock(thread->proc);
+ 
+ 	/*
+-	 * This is needed to avoid races between wake_up_poll() above and
+-	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+-	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+-	 * lock, so we can be sure it's done after calling synchronize_rcu().
++	 * This is needed to avoid races between wake_up_pollfree() above and
++	 * someone else removing the last entry from the queue for other reasons
++	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
++	 * descriptor being closed).  Such other users hold an RCU read lock, so
++	 * we can be sure they're done after we call synchronize_rcu().
+ 	 */
+ 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ 		synchronize_rcu();
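
The conversion follows the contract documented for the new helper in
wait-add-wake_up_pollfree.patch: wake every waiter with POLLHUP | POLLFREE,
then RCU-delay the freeing of the wait_queue_head.  A condensed sketch of
that teardown pattern (struct obj and obj_release() are illustrative names,
not binder's actual code):

#include <linux/wait.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object embedding a waitqueue whose lifetime is a task's. */
struct obj {
	wait_queue_head_t wait;		/* handed out by a ->poll() hook */
};

static void obj_release(struct obj *obj)
{
	/* Wake *all* waiters, exclusive or not, with POLLHUP | POLLFREE. */
	wake_up_pollfree(&obj->wait);

	/*
	 * Anyone concurrently removing the last queue entry does so under
	 * rcu_read_lock(), so after a grace period nobody can touch the
	 * queue and the object may be freed.
	 */
	synchronize_rcu();
	kfree(obj);
}

binder takes the explicit synchronize_rcu() route because binder_thread is
freed with kfree(); signalfd (below) instead relies on SLAB_TYPESAFE_BY_RCU.
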
diff --git a/queue-4.14/series b/queue-4.14/series
index 7767c6cd6a6..2068188c4c2 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -20,3 +20,6 @@ can-pch_can-pch_can_rx_normal-fix-use-after-free.patch
 can-m_can-disable-and-ignore-elo-interrupt.patch
 x86-sme-explicitly-map-new-efi-memmap-table-as-encrypted.patch
 libata-add-horkage-for-asmedia-1092.patch
+wait-add-wake_up_pollfree.patch
+binder-use-wake_up_pollfree.patch
+signalfd-use-wake_up_pollfree.patch
diff --git a/queue-4.14/signalfd-use-wake_up_pollfree.patch b/queue-4.14/signalfd-use-wake_up_pollfree.patch
new file mode 100644
index 00000000000..fa31d4694ae
--- /dev/null
+++ b/queue-4.14/signalfd-use-wake_up_pollfree.patch
@@ -0,0 +1,50 @@
+From foo@baz Sun Dec 12 02:47:18 PM CET 2021
+From: Eric Biggers
+Date: Fri, 10 Dec 2021 16:19:26 -0800
+Subject: signalfd: use wake_up_pollfree()
+To: stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Linus Torvalds
+Message-ID: <20211211001926.100856-4-ebiggers@kernel.org>
+
+From: Eric Biggers
+
+commit 9537bae0da1f8d1e2361ab6d0479e8af7824e160 upstream.
+
+wake_up_poll() uses nr_exclusive=1, so it's not guaranteed to wake up
+all exclusive waiters.  Yet, POLLFREE *must* wake up all waiters.  epoll
+and aio poll are fortunately not affected by this, but it's very
+fragile.  Thus, the new function wake_up_pollfree() has been introduced.
+
+Convert signalfd to use wake_up_pollfree().
+
+Reported-by: Linus Torvalds
+Fixes: d80e731ecab4 ("epoll: introduce POLLFREE to flush ->signalfd_wqh before kfree()")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211209010455.42744-4-ebiggers@kernel.org
+Signed-off-by: Eric Biggers
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/signalfd.c |   12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -35,17 +35,7 @@
+ 
+ void signalfd_cleanup(struct sighand_struct *sighand)
+ {
+-	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
+-	/*
+-	 * The lockless check can race with remove_wait_queue() in progress,
+-	 * but in this case its caller should run under rcu_read_lock() and
+-	 * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
+-	 */
+-	if (likely(!waitqueue_active(wqh)))
+-		return;
+-
+-	/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
+-	wake_up_poll(wqh, POLLHUP | POLLFREE);
++	wake_up_pollfree(&sighand->signalfd_wqh);
+ }
+ 
+ struct signalfd_ctx {
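
No synchronize_rcu() is added on the signalfd side: as the removed comment
noted, sighand_cachep is SLAB_TYPESAFE_BY_RCU, so freed sighand_struct
memory already stays valid as that type across an RCU grace period.  A
minimal sketch of that alternative, with a hypothetical obj_cachep (not
code from this series):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct obj {
	wait_queue_head_t wait;
};

static struct kmem_cache *obj_cachep;

static int __init obj_cache_init(void)
{
	/*
	 * SLAB_TYPESAFE_BY_RCU keeps freed objects' memory type-stable
	 * across an RCU grace period, which satisfies wake_up_pollfree()'s
	 * requirement to RCU-delay freeing of the wait_queue_head, so the
	 * teardown path needs no explicit synchronize_rcu().
	 */
	obj_cachep = kmem_cache_create("obj_cache", sizeof(struct obj),
				       0, SLAB_TYPESAFE_BY_RCU, NULL);
	return obj_cachep ? 0 : -ENOMEM;
}
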
diff --git a/queue-4.14/wait-add-wake_up_pollfree.patch b/queue-4.14/wait-add-wake_up_pollfree.patch
new file mode 100644
index 00000000000..a0710e45d72
--- /dev/null
+++ b/queue-4.14/wait-add-wake_up_pollfree.patch
@@ -0,0 +1,116 @@
+From foo@baz Sun Dec 12 02:47:18 PM CET 2021
+From: Eric Biggers
+Date: Fri, 10 Dec 2021 16:19:24 -0800
+Subject: wait: add wake_up_pollfree()
+To: stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Linus Torvalds
+Message-ID: <20211211001926.100856-2-ebiggers@kernel.org>
+
+From: Eric Biggers
+
+commit 42288cb44c4b5fff7653bc392b583a2b8bd6a8c0 upstream.
+
+Several ->poll() implementations are special in that they use a
+waitqueue whose lifetime is the current task, rather than the struct
+file as is normally the case.  This is okay for blocking polls, since a
+blocking poll occurs within one task; however, non-blocking polls
+require another solution.  This solution is for the queue to be cleared
+before it is freed, using 'wake_up_poll(wq, EPOLLHUP | POLLFREE);'.
+
+However, that has a bug: wake_up_poll() calls __wake_up() with
+nr_exclusive=1.  Therefore, if there are multiple "exclusive" waiters,
+and the wakeup function for the first one returns a positive value,
+only that one will be called.  That's *not* what's needed for POLLFREE;
+POLLFREE is special in that it really needs to wake up everyone.
+
+Considering the three non-blocking poll systems:
+
+- io_uring poll doesn't handle POLLFREE at all, so it is broken anyway.
+
+- aio poll is unaffected, since it doesn't support exclusive waits.
+  However, that's fragile, as someone could add this feature later.
+
+- epoll doesn't appear to be broken by this, since its wakeup function
+  returns 0 when it sees POLLFREE.  But this is fragile.
+
+Although there is a workaround (see epoll), it's better to define a
+function which always sends POLLFREE to all waiters.  Add such a
+function.  Also make it verify that the queue really becomes empty
+after all waiters have been woken up.
+
+Reported-by: Linus Torvalds
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211209010455.42744-2-ebiggers@kernel.org
+Signed-off-by: Eric Biggers
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/wait.h |   26 ++++++++++++++++++++++++++
+ kernel/sched/wait.c  |    8 ++++++++
+ 2 files changed, 34 insertions(+)
+
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -191,6 +191,7 @@ void __wake_up_locked_key_bookmark(struc
+ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+ void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
++void __wake_up_pollfree(struct wait_queue_head *wq_head);
+ 
+ #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+ #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+@@ -215,6 +216,31 @@ void __wake_up_sync(struct wait_queue_he
+ #define wake_up_interruptible_sync_poll(x, m)				\
+ 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+ 
++/**
++ * wake_up_pollfree - signal that a polled waitqueue is going away
++ * @wq_head: the wait queue head
++ *
++ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
++ * lifetime is tied to a task rather than to the 'struct file' being polled,
++ * this function must be called before the waitqueue is freed so that
++ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
++ *
++ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
++ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
++ */
++static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++	/*
++	 * For performance reasons, we don't always take the queue lock here.
++	 * Therefore, we might race with someone removing the last entry from
++	 * the queue, and proceed while they still hold the queue lock.
++	 * However, rcu_read_lock() is required to be held in such cases, so we
++	 * can safely proceed with an RCU-delayed free.
++	 */
++	if (waitqueue_active(wq_head))
++		__wake_up_pollfree(wq_head);
++}
++
+ #define ___wait_cond_timeout(condition)					\
+ ({									\
+ 	bool __cond = (condition);					\
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -11,6 +11,7 @@
+ #include <linux/wait.h>
+ #include <linux/hash.h>
+ #include <linux/kthread.h>
++#include <linux/poll.h>
+ 
+ void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
+ {
+@@ -213,6 +214,13 @@ void __wake_up_sync(struct wait_queue_he
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
+ 
++void __wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++	__wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE));
++	/* POLLFREE must have cleared the queue. */
++	WARN_ON_ONCE(waitqueue_active(wq_head));
++}
++
+ /*
+  * Note: we use "set_current_state()" _after_ the wait-queue add,
+  * because we need a memory barrier there on SMP, so that any
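
The nr_exclusive=0 passed by __wake_up_pollfree() above is the heart of the
fix: wake_up_poll() passes nr_exclusive=1, so __wake_up() stops scanning
after the first exclusive waiter whose wakeup function returns nonzero.  A
sketch of that failure mode with two exclusive waiters (the demo_* names
are illustrative only, not from the patch):

#include <linux/poll.h>
#include <linux/wait.h>

static int demo_wake_func(struct wait_queue_entry *wq_entry, unsigned mode,
			  int sync, void *key)
{
	/* Dequeue ourselves; returning nonzero counts against nr_exclusive. */
	list_del_init(&wq_entry->entry);
	return 1;
}

static void demo(struct wait_queue_head *wq)
{
	struct wait_queue_entry a, b;

	init_waitqueue_func_entry(&a, demo_wake_func);
	init_waitqueue_func_entry(&b, demo_wake_func);
	add_wait_queue_exclusive(wq, &a);
	add_wait_queue_exclusive(wq, &b);

	wake_up_poll(wq, POLLHUP | POLLFREE);	/* wakes only 'a', then stops */
	wake_up_pollfree(wq);			/* wakes 'b' too; queue now empty */
}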