--- /dev/null
+Subject: futex: Ensure the correct return value from futex_lock_pi()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed Jan 20 16:00:24 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 12bb3f7f1b03d5913b3f9d4236a488aa7774dfe9 upstream
+
+In case that futex_lock_pi() was aborted by a signal or a timeout and the
+task returned without acquiring the rtmutex, but is the designated owner of
+the futex due to a concurrent futex_unlock_pi() fixup_owner() is invoked to
+establish consistent state. In that case it invokes fixup_pi_state_owner()
+which in turn tries to acquire the rtmutex again. If that succeeds then it
+does not propagate this success to fixup_owner() and futex_lock_pi()
+returns -EINTR or -ETIMEDOUT despite having the futex locked.
+
+Return success from fixup_pi_state_owner() in all cases where the current
+task owns the rtmutex and therefore the futex and propagate it correctly
+through fixup_owner(). Fixup the other callsite which does not expect a
+positive return value.
+
+Fixes: c1e2f0eaf015 ("futex: Avoid violating the 10th rule of futex")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2375,8 +2375,8 @@ retry:
+ }
+
+ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+- /* We got the lock after all, nothing to fix. */
+- ret = 0;
++ /* We got the lock. pi_state is correct. Tell caller. */
++ ret = 1;
+ goto out_unlock;
+ }
+
+@@ -2404,7 +2404,7 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
++ ret = 1;
+ goto out_unlock;
+ }
+ newowner = argowner;
+@@ -2450,7 +2450,7 @@ retry:
+ raw_spin_unlock(&newowner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+- return 0;
++ return argowner == current;
+
+ /*
+ * In order to reschedule or handle a page fault, we need to drop the
+@@ -2492,7 +2492,7 @@ handle_err:
+ * Check if someone else fixed it for us:
+ */
+ if (pi_state->owner != oldowner) {
+- ret = 0;
++ ret = argowner == current;
+ goto out_unlock;
+ }
+
+@@ -2525,8 +2525,6 @@ static long futex_wait_restart(struct re
+ */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+- int ret = 0;
+-
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+@@ -2537,8 +2535,8 @@ static int fixup_owner(u32 __user *uaddr
+ * stable state, anything else needs more attention.
+ */
+ if (q->pi_state->owner != current)
+- ret = fixup_pi_state_owner(uaddr, q, current);
+- return ret ? ret : locked;
++ return fixup_pi_state_owner(uaddr, q, current);
++ return 1;
+ }
+
+ /*
+@@ -2549,10 +2547,8 @@ static int fixup_owner(u32 __user *uaddr
+ * Another speculative read; pi_state->owner == current is unstable
+ * but needs our attention.
+ */
+- if (q->pi_state->owner == current) {
+- ret = fixup_pi_state_owner(uaddr, q, NULL);
+- return ret;
+- }
++ if (q->pi_state->owner == current)
++ return fixup_pi_state_owner(uaddr, q, NULL);
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+@@ -2565,7 +2561,7 @@ static int fixup_owner(u32 __user *uaddr
+ q->pi_state->owner);
+ }
+
+- return ret;
++ return 0;
+ }
+
+ /**
+@@ -3263,7 +3259,7 @@ static int futex_wait_requeue_pi(u32 __u
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
++ if (ret < 0 && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+ pi_state = q.pi_state;
+ get_pi_state(pi_state);
+ }
+@@ -3273,6 +3269,11 @@ static int futex_wait_requeue_pi(u32 __u
+ */
+ put_pi_state(q.pi_state);
+ spin_unlock(q.lock_ptr);
++ /*
++ * Adjust the return value. It's either -EFAULT or
++ * success (1) but the caller expects 0 for success.
++ */
++ ret = ret < 0 ? ret : 0;
+ }
+ } else {
+ struct rt_mutex *pi_mutex;
--- /dev/null
+Subject: futex: Handle faults correctly for PI futexes
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon Jan 18 19:01:21 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 34b1a1ce1458f50ef27c54e28eb9b1947012907a upstream
+
+fixup_pi_state_owner() tries to ensure that the state of the rtmutex,
+pi_state and the user space value related to the PI futex are consistent
+before returning to user space. In case that the user space value update
+faults and the fault cannot be resolved by faulting the page in via
+fault_in_user_writeable() the function returns with -EFAULT and leaves
+the rtmutex and pi_state owner state inconsistent.
+
+A subsequent futex_unlock_pi() operates on the inconsistent pi_state and
+releases the rtmutex despite not owning it which can corrupt the RB tree of
+the rtmutex and cause a subsequent kernel stack use after free.
+
+It was suggested to loop forever in fixup_pi_state_owner() if the fault
+cannot be resolved, but that results in runaway tasks which is especially
+undesired when the problem happens due to a programming error and not due
+to malice.
+
+As the user space value cannot be fixed up, the proper solution is to make
+the rtmutex and the pi_state consistent so both have the same owner. This
+leaves the user space value out of sync. Any subsequent operation on the
+futex will fail because the 10th rule of PI futexes (pi_state owner and
+user space value are consistent) has been violated.
+
+As a consequence this removes the inept attempts of 'fixing' the situation
+in case that the current task owns the rtmutex when returning with an
+unresolvable fault by unlocking the rtmutex which left pi_state::owner and
+rtmutex::owner out of sync in a different and only slightly less dangerous
+way.
+
+Fixes: 1b7558e457ed ("futexes: fix fault handling in futex_lock_pi")
+Reported-by: gzobqq@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 57 ++++++++++++++++++++-------------------------------------
+ 1 file changed, 20 insertions(+), 37 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -960,7 +960,8 @@ static inline void exit_pi_state_list(st
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+- * TID out of sync.
++ * TID out of sync. Except one error case where the kernel is denied
++ * write access to the user address, see fixup_pi_state_owner().
+ *
+ *
+ * Serialization and lifetime rules:
+@@ -2482,6 +2483,24 @@ handle_err:
+ if (!err)
+ goto retry;
+
++ /*
++ * fault_in_user_writeable() failed so user state is immutable. At
++ * best we can make the kernel state consistent but user state will
++ * be most likely hosed and any subsequent unlock operation will be
++ * rejected due to PI futex rule [10].
++ *
++ * Ensure that the rtmutex owner is also the pi_state owner despite
++ * the user space value claiming something different. There is no
++ * point in unlocking the rtmutex if current is the owner as it
++ * would need to wait until the next waiter has taken the rtmutex
++ * to guarantee consistent state. Keep it simple. Userspace asked
++ * for this wreckaged state.
++ *
++ * The rtmutex has an owner - either current or some other
++ * task. See the EAGAIN loop above.
++ */
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
++
+ return err;
+ }
+
+@@ -2758,7 +2777,6 @@ static int futex_lock_pi(u32 __user *uad
+ ktime_t *time, int trylock)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct task_struct *exiting = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+@@ -2894,23 +2912,8 @@ no_block:
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_owner() faulted and was unable to handle the fault, unlock
+- * it and return the fault to userspace.
+- */
+- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q);
+-
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ goto out;
+
+ out_unlock_put_key:
+@@ -3170,7 +3173,6 @@ static int futex_wait_requeue_pi(u32 __u
+ u32 __user *uaddr2)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+ union futex_key key2 = FUTEX_KEY_INIT;
+@@ -3248,10 +3250,6 @@ static int futex_wait_requeue_pi(u32 __u
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret < 0 && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+@@ -3293,25 +3291,10 @@ static int futex_wait_requeue_pi(u32 __u
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle
+- * the fault, unlock the rt_mutex and return the fault to
+- * userspace.
+- */
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ if (ret == -EINTR) {
+ /*
+ * We've already been requeued, but cannot restart by calling
--- /dev/null
+Subject: futex: Provide and use pi_state_update_owner()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue Jan 19 15:21:35 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit c5cade200ab9a2a3be9e7f32a752c8d86b502ec7 upstream
+
+Updating pi_state::owner is done at several places with the same
+code. Provide a function for it and use that at the obvious places.
+
+This is also a preparation for a bug fix to avoid yet another copy of the
+same code or alternatively introducing a completely unpenetratable mess of
+gotos.
+
+Originally-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 66 ++++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 33 insertions(+), 33 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -765,6 +765,29 @@ static struct futex_pi_state *alloc_pi_s
+ return pi_state;
+ }
+
++static void pi_state_update_owner(struct futex_pi_state *pi_state,
++ struct task_struct *new_owner)
++{
++ struct task_struct *old_owner = pi_state->owner;
++
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
++
++ if (old_owner) {
++ raw_spin_lock(&old_owner->pi_lock);
++ WARN_ON(list_empty(&pi_state->list));
++ list_del_init(&pi_state->list);
++ raw_spin_unlock(&old_owner->pi_lock);
++ }
++
++ if (new_owner) {
++ raw_spin_lock(&new_owner->pi_lock);
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &new_owner->pi_state_list);
++ pi_state->owner = new_owner;
++ raw_spin_unlock(&new_owner->pi_lock);
++ }
++}
++
+ static void get_pi_state(struct futex_pi_state *pi_state)
+ {
+ WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
+@@ -1523,26 +1546,15 @@ static int wake_futex_pi(u32 __user *uad
+ ret = -EINVAL;
+ }
+
+- if (ret)
+- goto out_unlock;
+-
+- /*
+- * This is a point of no return; once we modify the uval there is no
+- * going back and subsequent operations must not fail.
+- */
+-
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+-
+- raw_spin_lock(&new_owner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &new_owner->pi_state_list);
+- pi_state->owner = new_owner;
+- raw_spin_unlock(&new_owner->pi_lock);
+-
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ if (!ret) {
++ /*
++ * This is a point of no return; once we modified the uval
++ * there is no going back and subsequent operations must
++ * not fail.
++ */
++ pi_state_update_owner(pi_state, new_owner);
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ }
+
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+@@ -2435,19 +2447,7 @@ retry:
+ * We fixed up user space. Now we need to fix the pi_state
+ * itself.
+ */
+- if (pi_state->owner != NULL) {
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+- }
+-
+- pi_state->owner = newowner;
+-
+- raw_spin_lock(&newowner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &newowner->pi_state_list);
+- raw_spin_unlock(&newowner->pi_lock);
++ pi_state_update_owner(pi_state, newowner);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+ return argowner == current;
--- /dev/null
+Subject: futex: Replace pointless printk in fixup_owner()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue Jan 19 16:06:10 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 04b79c55201f02ffd675e1231d731365e335c307 upstream
+
+If that unexpected case of inconsistent arguments ever happens then the
+futex state is left completely inconsistent and the printk is not really
+helpful. Replace it with a warning and make the state consistent.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2552,14 +2552,10 @@ static int fixup_owner(u32 __user *uaddr
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+- * the owner of the rt_mutex.
++ * the owner of the rt_mutex. Warn and establish consistent state.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
+- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+- "pi-state %p\n", ret,
+- q->pi_state->pi_mutex.owner,
+- q->pi_state->owner);
+- }
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
++ return fixup_pi_state_owner(uaddr, q, current);
+
+ return 0;
+ }
--- /dev/null
+Subject: futex: Simplify fixup_pi_state_owner()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue Jan 19 16:26:38 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit f2dac39d93987f7de1e20b3988c8685523247ae2 upstream
+
+Too many gotos already and an upcoming fix would make it even more
+unreadable.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 53 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 26 insertions(+), 27 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2331,18 +2331,13 @@ static void unqueue_me_pi(struct futex_q
+ spin_unlock(q->lock_ptr);
+ }
+
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+- struct task_struct *argowner)
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
+ {
+ struct futex_pi_state *pi_state = q->pi_state;
+- u32 uval, curval, newval;
+ struct task_struct *oldowner, *newowner;
+- u32 newtid;
+- int ret, err = 0;
+-
+- lockdep_assert_held(q->lock_ptr);
+-
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ u32 uval, curval, newval, newtid;
++ int err = 0;
+
+ oldowner = pi_state->owner;
+
+@@ -2376,14 +2371,12 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
+- goto out_unlock;
++ return 0;
+ }
+
+ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+ /* We got the lock. pi_state is correct. Tell caller. */
+- ret = 1;
+- goto out_unlock;
++ return 1;
+ }
+
+ /*
+@@ -2410,8 +2403,7 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 1;
+- goto out_unlock;
++ return 1;
+ }
+ newowner = argowner;
+ }
+@@ -2442,7 +2434,6 @@ retry:
+ * itself.
+ */
+ pi_state_update_owner(pi_state, newowner);
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+ return argowner == current;
+
+@@ -2465,17 +2456,16 @@ handle_err:
+
+ switch (err) {
+ case -EFAULT:
+- ret = fault_in_user_writeable(uaddr);
++ err = fault_in_user_writeable(uaddr);
+ break;
+
+ case -EAGAIN:
+ cond_resched();
+- ret = 0;
++ err = 0;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+- ret = err;
+ break;
+ }
+
+@@ -2485,17 +2475,26 @@ handle_err:
+ /*
+ * Check if someone else fixed it for us:
+ */
+- if (pi_state->owner != oldowner) {
+- ret = argowner == current;
+- goto out_unlock;
+- }
++ if (pi_state->owner != oldowner)
++ return argowner == current;
+
+- if (ret)
+- goto out_unlock;
++ /* Retry if err was -EAGAIN or the fault in succeeded */
++ if (!err)
++ goto retry;
+
+- goto retry;
++ return err;
++}
+
+-out_unlock:
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
++{
++ struct futex_pi_state *pi_state = q->pi_state;
++ int ret;
++
++ lockdep_assert_held(q->lock_ptr);
++
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ return ret;
+ }
--- /dev/null
+Subject: futex: Use pi_state_update_owner() in put_pi_state()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed Jan 20 11:35:19 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6ccc84f917d33312eb2846bd7b567639f585ad6d upstream
+
+No point in open coding it. This way it gains the extra sanity checks.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -810,16 +810,10 @@ static void put_pi_state(struct futex_pi
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+- struct task_struct *owner;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+- owner = pi_state->owner;
+- if (owner) {
+- raw_spin_lock(&owner->pi_lock);
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&owner->pi_lock);
+- }
++ pi_state_update_owner(pi_state, NULL);
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
+ }
Reviewed-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Baruch Siach <baruch@tkos.co.il>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
---
drivers/gpio/gpio-mvebu.c | 25 ++++++++++---------------
1 file changed, 10 insertions(+), 15 deletions(-)
--- /dev/null
+From 794c613383433ffc4fceec8eaa081b9f1962e287 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Mon, 18 Jan 2021 21:45:23 +0800
+Subject: HID: multitouch: Apply MT_QUIRK_CONFIDENCE quirk for multi-input devices
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit 794c613383433ffc4fceec8eaa081b9f1962e287 upstream.
+
+Palm ejection stops working on some Elan and Synaptics touchpad after
+commit 40d5bb87377a ("HID: multitouch: enable multi-input as a quirk for
+some devices").
+
+The commit changes the mt_class from MT_CLS_WIN_8 to
+MT_CLS_WIN_8_FORCE_MULTI_INPUT, so MT_QUIRK_CONFIDENCE isn't applied
+anymore.
+
+So also apply the quirk since MT_CLS_WIN_8_FORCE_MULTI_INPUT is
+essentially MT_CLS_WIN_8.
+
+Fixes: 40d5bb87377a ("HID: multitouch: enable multi-input as a quirk for some devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-multitouch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct
+ MT_STORE_FIELD(inrange_state);
+ return 1;
+ case HID_DG_CONFIDENCE:
+- if (cls->name == MT_CLS_WIN_8 &&
++ if ((cls->name == MT_CLS_WIN_8 ||
++ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+ (field->application == HID_DG_TOUCHPAD ||
+ field->application == HID_DG_TOUCHSCREEN))
+ app->quirks |= MT_QUIRK_CONFIDENCE;
--- /dev/null
+From 179e8e47c02a1950f1c556f2b854bdb2259078fb Mon Sep 17 00:00:00 2001
+From: Jason Gerecke <killertofu@gmail.com>
+Date: Thu, 21 Jan 2021 10:46:49 -0800
+Subject: HID: wacom: Correct NULL dereference on AES pen proximity
+
+From: Jason Gerecke <killertofu@gmail.com>
+
+commit 179e8e47c02a1950f1c556f2b854bdb2259078fb upstream.
+
+The recent commit to fix a memory leak introduced an inadvertent NULL
+pointer dereference. The `wacom_wac->pen_fifo` variable was never
+initialized, resulting in a crash whenever functions tried to use it.
+Since the FIFO is only used by AES pens (to buffer events from pen
+proximity until the hardware reports the pen serial number) this would
+have been easily overlooked without testing an AES device.
+
+This patch converts `wacom_wac->pen_fifo` over to a pointer (since the
+call to `devres_alloc` allocates memory for us) and ensures that we assign
+it to point to the allocated and initialized `pen_fifo` before the function
+returns.
+
+Link: https://github.com/linuxwacom/input-wacom/issues/230
+Fixes: 37309f47e2f5 ("HID: wacom: Fix memory leakage caused by kfifo_alloc")
+CC: stable@vger.kernel.org # v4.19+
+Signed-off-by: Jason Gerecke <jason.gerecke@wacom.com>
+Tested-by: Ping Cheng <ping.cheng@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/wacom_sys.c | 7 ++++---
+ drivers/hid/wacom_wac.h | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(
+ }
+
+ if (flush)
+- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
++ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
+ else if (insert)
+- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
++ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
+ raw_data, report_size);
+
+ return insert && !flush;
+@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(str
+ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+ {
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
++ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+ }
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -342,7 +342,7 @@ struct wacom_wac {
+ struct input_dev *pen_input;
+ struct input_dev *touch_input;
+ struct input_dev *pad_input;
+- struct kfifo_rec_ptr_2 pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int pid;
+ int num_contacts_left;
+ u8 bt_features;
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:02 +0000
+Subject: io_uring: add warn_once for io_uring_flush()
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Message-ID: <1abdd0e576ae991c6ab04bebd20360ea2b3a175b.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 6b5733eb638b7068ab7cb34e663b55a1d1892d85 ]
+
+files_cancel() should cancel all relevant requests and drop file notes,
+so we should never have file notes after that, including on-exit fput
+and flush. Add a WARN_ONCE to be sure.
+
+Cc: stable@vger.kernel.org # 5.5+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8926,17 +8926,23 @@ void __io_uring_task_cancel(void)
+
+ static int io_uring_flush(struct file *file, void *data)
+ {
+- if (!current->io_uring)
++ struct io_uring_task *tctx = current->io_uring;
++
++ if (!tctx)
+ return 0;
+
++ /* we should have cancelled and erased it before PF_EXITING */
++ WARN_ON_ONCE((current->flags & PF_EXITING) &&
++ xa_load(&tctx->xa, (unsigned long)file));
++
+ /*
+ * fput() is pending, will be 2 if the only other ref is our potential
+ * task file note. If the task is exiting, drop regardless of count.
+ */
+- if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
+- atomic_long_read(&file->f_count) == 2)
+- io_uring_del_task_file(file);
++ if (atomic_long_read(&file->f_count) != 2)
++ return 0;
+
++ io_uring_del_task_file(file);
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:05 +0000
+Subject: io_uring: do sqo disable on install_fd error
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, syzbot+9c9c35374c0ecac06516@syzkaller.appspotmail.com
+Message-ID: <bdc76b5d2e1bd58d4deb9ad1011e86f5a2689dfb.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 06585c497b55045ec21aa8128e340f6a6587351c ]
+
+WARNING: CPU: 0 PID: 8494 at fs/io_uring.c:8717
+ io_ring_ctx_wait_and_kill+0x4f2/0x600 fs/io_uring.c:8717
+Call Trace:
+ io_uring_release+0x3e/0x50 fs/io_uring.c:8759
+ __fput+0x283/0x920 fs/file_table.c:280
+ task_work_run+0xdd/0x190 kernel/task_work.c:140
+ tracehook_notify_resume include/linux/tracehook.h:189 [inline]
+ exit_to_user_mode_loop kernel/entry/common.c:174 [inline]
+ exit_to_user_mode_prepare+0x249/0x250 kernel/entry/common.c:201
+ __syscall_exit_to_user_mode_work kernel/entry/common.c:291 [inline]
+ syscall_exit_to_user_mode+0x19/0x50 kernel/entry/common.c:302
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+failed io_uring_install_fd() is a special case, we don't do
+io_ring_ctx_wait_and_kill() directly but defer it to fput, though still
+need to io_disable_sqo_submit() before.
+
+note: it doesn't fix any real problem, just a warning. That's because
+sqring won't be available to the userspace in this case and so SQPOLL
+won't submit anything.
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: syzbot+9c9c35374c0ecac06516@syzkaller.appspotmail.com
+Fixes: d9d05217cb69 ("io_uring: stop SQPOLL submit on creator's death")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -9532,6 +9532,7 @@ static int io_uring_create(unsigned entr
+ */
+ ret = io_uring_install_fd(ctx, file);
+ if (ret < 0) {
++ io_disable_sqo_submit(ctx);
+ /* fput will clean it up */
+ fput(file);
+ return ret;
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:09 +0000
+Subject: io_uring: dont kill fasync under completion_lock
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, syzbot+91ca3f25bd7f795f019c@syzkaller.appspotmail.com
+Message-ID: <41777a0d41ba61011c7ef96d44de36f37ec5e8ea.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 4aa84f2ffa81f71e15e5cffc2cc6090dbee78f8e ]
+
+ CPU0 CPU1
+ ---- ----
+ lock(&new->fa_lock);
+ local_irq_disable();
+ lock(&ctx->completion_lock);
+ lock(&new->fa_lock);
+ <Interrupt>
+ lock(&ctx->completion_lock);
+
+ *** DEADLOCK ***
+
+Move kill_fasync() out of io_commit_cqring() to io_cqring_ev_posted(),
+so it doesn't hold completion_lock while doing it. That saves from the
+reported deadlock, and it's just nice to shorten the locking time and
+untangle nested locks (compl_lock -> wq_head::lock).
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: syzbot+91ca3f25bd7f795f019c@syzkaller.appspotmail.com
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1214,11 +1214,6 @@ static void __io_commit_cqring(struct io
+
+ /* order cqe stores with ring update */
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+-
+- if (wq_has_sleeper(&ctx->cq_wait)) {
+- wake_up_interruptible(&ctx->cq_wait);
+- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+- }
+ }
+
+ static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
+@@ -1604,6 +1599,10 @@ static inline bool io_should_trigger_evf
+
+ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+ {
++ if (wq_has_sleeper(&ctx->cq_wait)) {
++ wake_up_interruptible(&ctx->cq_wait);
++ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
++ }
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:06 +0000
+Subject: io_uring: fix false positive sqo warning on flush
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, syzbot+2f5d1785dc624932da78@syzkaller.appspotmail.com
+Message-ID: <d880d405c12705056febe34cd7ab82dc1acb539b.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 6b393a1ff1746a1c91bd95cbb2d79b104d8f15ac ]
+
+WARNING: CPU: 1 PID: 9094 at fs/io_uring.c:8884
+ io_disable_sqo_submit+0x106/0x130 fs/io_uring.c:8884
+Call Trace:
+ io_uring_flush+0x28b/0x3a0 fs/io_uring.c:9099
+ filp_close+0xb4/0x170 fs/open.c:1280
+ close_fd+0x5c/0x80 fs/file.c:626
+ __do_sys_close fs/open.c:1299 [inline]
+ __se_sys_close fs/open.c:1297 [inline]
+ __x64_sys_close+0x2f/0xa0 fs/open.c:1297
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+io_uring's final close() may be triggered by any task not only the
+creator. It's well handled by io_uring_flush() including SQPOLL case,
+though a warning in io_disable_sqo_submit() will fallaciously fire in
+that case. Fix it by moving this warning out to the only call site
+that matters.
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: syzbot+2f5d1785dc624932da78@syzkaller.appspotmail.com
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8750,8 +8750,6 @@ static bool __io_uring_cancel_task_reque
+
+ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+ {
+- WARN_ON_ONCE(ctx->sqo_task != current);
+-
+ mutex_lock(&ctx->uring_lock);
+ ctx->sqo_dead = 1;
+ mutex_unlock(&ctx->uring_lock);
+@@ -8773,6 +8771,7 @@ static void io_uring_cancel_task_request
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+ /* for SQPOLL only sqo_task has task notes */
++ WARN_ON_ONCE(ctx->sqo_task != current);
+ io_disable_sqo_submit(ctx);
+ task = ctx->sq_data->thread;
+ atomic_inc(&task->io_uring->in_idle);
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:04 +0000
+Subject: io_uring: fix null-deref in io_disable_sqo_submit
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, syzbot+ab412638aeb652ded540@syzkaller.appspotmail.com
+Message-ID: <cdec1207337932aaa7433a4abfd5f38fa4cb2de0.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit b4411616c26f26c4017b8fa4d3538b1a02028733 ]
+
+general protection fault, probably for non-canonical address
+ 0xdffffc0000000022: 0000 [#1] KASAN: null-ptr-deref
+ in range [0x0000000000000110-0x0000000000000117]
+RIP: 0010:io_ring_set_wakeup_flag fs/io_uring.c:6929 [inline]
+RIP: 0010:io_disable_sqo_submit+0xdb/0x130 fs/io_uring.c:8891
+Call Trace:
+ io_uring_create fs/io_uring.c:9711 [inline]
+ io_uring_setup+0x12b1/0x38e0 fs/io_uring.c:9739
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+io_disable_sqo_submit() might be called before user rings were
+allocated, don't do io_ring_set_wakeup_flag() in those cases.
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: syzbot+ab412638aeb652ded540@syzkaller.appspotmail.com
+Fixes: d9d05217cb69 ("io_uring: stop SQPOLL submit on creator's death")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8757,7 +8757,8 @@ static void io_disable_sqo_submit(struct
+ mutex_unlock(&ctx->uring_lock);
+
+ /* make sure callers enter the ring to get error */
+- io_ring_set_wakeup_flag(ctx);
++ if (ctx->rings)
++ io_ring_set_wakeup_flag(ctx);
+ }
+
+ /*
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:08 +0000
+Subject: io_uring: fix skipping disabling sqo on exec
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Message-ID: <4bb1c422df133f0e883fefe221ffc866bfce7aa9.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 0b5cd6c32b14413bf87e10ee62be3162588dcbe6 ]
+
+If there are no requests at the time __io_uring_task_cancel() is called,
+tctx_inflight() returns zero and it terminates not getting a chance
+to go through __io_uring_files_cancel() and do
+io_disable_sqo_submit(). And we absolutely want them disabled by the
+time cancellation ends.
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Fixes: d9d05217cb69 ("io_uring: stop SQPOLL submit on creator's death")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8917,6 +8917,10 @@ void __io_uring_task_cancel(void)
+ /* make sure overflow events are dropped */
+ atomic_inc(&tctx->in_idle);
+
++ /* trigger io_disable_sqo_submit() */
++ if (tctx->sqpoll)
++ __io_uring_files_cancel(NULL);
++
+ do {
+ /* read completions before cancelations */
+ inflight = tctx_inflight(tctx);
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:10 +0000
+Subject: io_uring: fix sleeping under spin in __io_clean_op
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, Abaci <abaci@linux.alibaba.com>, Joseph Qi <joseph.qi@linux.alibaba.com>, Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
+Message-ID: <61e93a6403ea6cc28764e7508cd877ca30345371.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 9d5c8190683a462dbc787658467a0da17011ea5f ]
+
+[ 27.629441] BUG: sleeping function called from invalid context
+ at fs/file.c:402
+[ 27.631317] in_atomic(): 1, irqs_disabled(): 1, non_block: 0,
+ pid: 1012, name: io_wqe_worker-0
+[ 27.633220] 1 lock held by io_wqe_worker-0/1012:
+[ 27.634286] #0: ffff888105e26c98 (&ctx->completion_lock)
+ {....}-{2:2}, at: __io_req_complete.part.102+0x30/0x70
+[ 27.649249] Call Trace:
+[ 27.649874] dump_stack+0xac/0xe3
+[ 27.650666] ___might_sleep+0x284/0x2c0
+[ 27.651566] put_files_struct+0xb8/0x120
+[ 27.652481] __io_clean_op+0x10c/0x2a0
+[ 27.653362] __io_cqring_fill_event+0x2c1/0x350
+[ 27.654399] __io_req_complete.part.102+0x41/0x70
+[ 27.655464] io_openat2+0x151/0x300
+[ 27.656297] io_issue_sqe+0x6c/0x14e0
+[ 27.660991] io_wq_submit_work+0x7f/0x240
+[ 27.662890] io_worker_handle_work+0x501/0x8a0
+[ 27.664836] io_wqe_worker+0x158/0x520
+[ 27.667726] kthread+0x134/0x180
+[ 27.669641] ret_from_fork+0x1f/0x30
+
+Instead of cleaning files on overflow, return back overflow cancellation
+into io_uring_cancel_files(). Previously it was racy to clean
+REQ_F_OVERFLOW flag, but we got rid of it, and can do it through
+repetitive attempts targeting all matching requests.
+
+Cc: stable@vger.kernel.org # 5.9+
+Reported-by: Abaci <abaci@linux.alibaba.com>
+Reported-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -971,6 +971,7 @@ static ssize_t io_import_iovec(int rw, s
+ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+ const struct iovec *fast_iov,
+ struct iov_iter *iter, bool force);
++static void io_req_drop_files(struct io_kiocb *req);
+
+ static struct kmem_cache *req_cachep;
+
+@@ -991,8 +992,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
+
+ static inline void io_clean_op(struct io_kiocb *req)
+ {
+- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+- REQ_F_INFLIGHT))
++ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ __io_clean_op(req);
+ }
+
+@@ -1256,6 +1256,8 @@ static void io_req_clean_work(struct io_
+ free_fs_struct(fs);
+ req->work.flags &= ~IO_WQ_WORK_FS;
+ }
++ if (req->flags & REQ_F_INFLIGHT)
++ io_req_drop_files(req);
+
+ io_put_identity(req->task->io_uring, req);
+ }
+@@ -5960,9 +5962,6 @@ static void __io_clean_op(struct io_kioc
+ }
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ }
+-
+- if (req->flags & REQ_F_INFLIGHT)
+- io_req_drop_files(req);
+ }
+
+ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+@@ -8700,6 +8699,8 @@ static bool io_uring_cancel_files(struct
+ break;
+ /* cancel this request, or head link requests */
+ io_attempt_cancel(ctx, cancel_req);
++ io_cqring_overflow_flush(ctx, true, task, files);
++
+ io_put_req(cancel_req);
+ /* cancellations _may_ trigger task work */
+ io_run_task_work();
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:07 +0000
+Subject: io_uring: fix uring_flush in exit_files() warning
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, syzbot+a32b546d58dde07875a1@syzkaller.appspotmail.com
+Message-ID: <0e32dce528dd20f3539b624e52b2f60d47e067fa.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 4325cb498cb743dacaa3edbec398c5255f476ef6 ]
+
+WARNING: CPU: 1 PID: 11100 at fs/io_uring.c:9096
+ io_uring_flush+0x326/0x3a0 fs/io_uring.c:9096
+RIP: 0010:io_uring_flush+0x326/0x3a0 fs/io_uring.c:9096
+Call Trace:
+ filp_close+0xb4/0x170 fs/open.c:1280
+ close_files fs/file.c:401 [inline]
+ put_files_struct fs/file.c:416 [inline]
+ put_files_struct+0x1cc/0x350 fs/file.c:413
+ exit_files+0x7e/0xa0 fs/file.c:433
+ do_exit+0xc22/0x2ae0 kernel/exit.c:820
+ do_group_exit+0x125/0x310 kernel/exit.c:922
+ get_signal+0x3e9/0x20a0 kernel/signal.c:2770
+ arch_do_signal_or_restart+0x2a8/0x1eb0 arch/x86/kernel/signal.c:811
+ handle_signal_work kernel/entry/common.c:147 [inline]
+ exit_to_user_mode_loop kernel/entry/common.c:171 [inline]
+ exit_to_user_mode_prepare+0x148/0x250 kernel/entry/common.c:201
+ __syscall_exit_to_user_mode_work kernel/entry/common.c:291 [inline]
+ syscall_exit_to_user_mode+0x19/0x50 kernel/entry/common.c:302
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+An SQPOLL ring creator task may have gotten rid of its file note during
+exit and called io_disable_sqo_submit(), but the io_uring is still left
+referenced through fdtable, which will be put during close_files() and
+cause a false positive warning.
+
+First, split the warning into two for more clarity when it is hit, and
+then add a sqo_dead check to handle the described case.
+
+Cc: stable@vger.kernel.org # 5.5+
+Reported-by: syzbot+a32b546d58dde07875a1@syzkaller.appspotmail.com
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8962,7 +8962,10 @@ static int io_uring_flush(struct file *f
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ /* there is only one file note, which is owned by sqo_task */
+- WARN_ON_ONCE((ctx->sqo_task == current) ==
++ WARN_ON_ONCE(ctx->sqo_task != current &&
++ xa_load(&tctx->xa, (unsigned long)file));
++ /* sqo_dead check is for when this happens after cancellation */
++ WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
+ !xa_load(&tctx->xa, (unsigned long)file));
+
+ io_disable_sqo_submit(ctx);
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:01 +0000
+Subject: io_uring: inline io_uring_attempt_task_drop()
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Message-ID: <706c41d54e42eff8de3f2d1741cead614c9b454b.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 4f793dc40bc605b97624fd36baf085b3c35e8bfd ]
+
+A simple preparation change inlining io_uring_attempt_task_drop() into
+io_uring_flush().
+
+Cc: stable@vger.kernel.org # 5.5+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 29 +++++++++++------------------
+ 1 file changed, 11 insertions(+), 18 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8835,23 +8835,6 @@ static void io_uring_del_task_file(struc
+ fput(file);
+ }
+
+-/*
+- * Drop task note for this file if we're the only ones that hold it after
+- * pending fput()
+- */
+-static void io_uring_attempt_task_drop(struct file *file)
+-{
+- if (!current->io_uring)
+- return;
+- /*
+- * fput() is pending, will be 2 if the only other ref is our potential
+- * task file note. If the task is exiting, drop regardless of count.
+- */
+- if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
+- atomic_long_read(&file->f_count) == 2)
+- io_uring_del_task_file(file);
+-}
+-
+ static void io_uring_remove_task_files(struct io_uring_task *tctx)
+ {
+ struct file *file;
+@@ -8943,7 +8926,17 @@ void __io_uring_task_cancel(void)
+
+ static int io_uring_flush(struct file *file, void *data)
+ {
+- io_uring_attempt_task_drop(file);
++ if (!current->io_uring)
++ return 0;
++
++ /*
++ * fput() is pending, will be 2 if the only other ref is our potential
++ * task file note. If the task is exiting, drop regardless of count.
++ */
++ if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
++ atomic_long_read(&file->f_count) == 2)
++ io_uring_del_task_file(file);
++
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:03 +0000
+Subject: io_uring: stop SQPOLL submit on creator's death
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Message-ID: <ae5ab03b2e1c4e7ffde7dea41b5e5849e62ebafb.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit d9d05217cb6990b9a56e13b56e7a1b71e2551f6c ]
+
+When the creator of SQPOLL io_uring dies (i.e. sqo_task), we don't want
+its internals like ->files and ->mm to be poked by the SQPOLL task, it
+has never been nice and recently got racy.
+owner undergoes destruction and SQPOLL tasks tries to submit new
+requests in parallel, and so calls io_sq_thread_acquire*().
+
+That patch halts SQPOLL submissions when sqo_task dies by introducing
+sqo_dead flag. Once set, the SQPOLL task must not do any submission,
+which is synchronised by uring_lock as well as the new flag.
+
+The tricky part is to make sure that disabling always happens, that
+means either the ring is discovered by creator's do_exit() -> cancel,
+or if the final close() happens before it's done by the creator. The
+last is guaranteed by the fact that for SQPOLL the creator task and only
+it holds exactly one file note, so either it pins up to do_exit() or
+removed by the creator on the final put in flush. (see comments in
+uring_flush() around file->f_count == 2).
+
+One more place that can trigger io_sq_thread_acquire_*() is
+__io_req_task_submit(). Shoot off requests on sqo_dead there, even
+though actually we don't need to. That's because cancellation of
+sqo_task should wait for the request before going any further.
+
+note 1: io_disable_sqo_submit() does io_ring_set_wakeup_flag() so the
+caller would enter the ring to get an error, but it still doesn't
+guarantee that the flag won't be cleared.
+
+note 2: if final __userspace__ close happens not from the creator
+task, the file note will pin the ring until the task dies.
+
+Cc: stable@vger.kernel.org # 5.5+
+Fixes: b1b6b5a30dce8 ("kernel/io_uring: cancel io_uring before task works")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 50 insertions(+), 8 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -260,6 +260,7 @@ struct io_ring_ctx {
+ unsigned int drain_next: 1;
+ unsigned int eventfd_async: 1;
+ unsigned int restricted: 1;
++ unsigned int sqo_dead: 1;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+@@ -2083,11 +2084,9 @@ static void io_req_task_cancel(struct ca
+ static void __io_req_task_submit(struct io_kiocb *req)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+- bool fail;
+
+- fail = __io_sq_thread_acquire_mm(ctx);
+ mutex_lock(&ctx->uring_lock);
+- if (!fail)
++ if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
+ __io_queue_sqe(req, NULL);
+ else
+ __io_req_task_cancel(req, -EFAULT);
+@@ -6796,7 +6795,7 @@ again:
+ to_submit = 8;
+
+ mutex_lock(&ctx->uring_lock);
+- if (likely(!percpu_ref_is_dying(&ctx->refs)))
++ if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
+ ret = io_submit_sqes(ctx, to_submit);
+ mutex_unlock(&ctx->uring_lock);
+
+@@ -8487,6 +8486,10 @@ static void io_ring_ctx_wait_and_kill(st
+ mutex_lock(&ctx->uring_lock);
+ percpu_ref_kill(&ctx->refs);
+ /* if force is set, the ring is going away. always drop after that */
++
++ if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
++ ctx->sqo_dead = 1;
++
+ ctx->cq_overflow_flushed = 1;
+ if (ctx->rings)
+ __io_cqring_overflow_flush(ctx, true, NULL, NULL);
+@@ -8745,6 +8748,18 @@ static bool __io_uring_cancel_task_reque
+ return ret;
+ }
+
++static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
++{
++ WARN_ON_ONCE(ctx->sqo_task != current);
++
++ mutex_lock(&ctx->uring_lock);
++ ctx->sqo_dead = 1;
++ mutex_unlock(&ctx->uring_lock);
++
++ /* make sure callers enter the ring to get error */
++ io_ring_set_wakeup_flag(ctx);
++}
++
+ /*
+ * We need to iteratively cancel requests, in case a request has dependent
+ * hard links. These persist even for failure of cancelations, hence keep
+@@ -8756,6 +8771,8 @@ static void io_uring_cancel_task_request
+ struct task_struct *task = current;
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
++ /* for SQPOLL only sqo_task has task notes */
++ io_disable_sqo_submit(ctx);
+ task = ctx->sq_data->thread;
+ atomic_inc(&task->io_uring->in_idle);
+ io_sq_thread_park(ctx->sq_data);
+@@ -8927,6 +8944,7 @@ void __io_uring_task_cancel(void)
+ static int io_uring_flush(struct file *file, void *data)
+ {
+ struct io_uring_task *tctx = current->io_uring;
++ struct io_ring_ctx *ctx = file->private_data;
+
+ if (!tctx)
+ return 0;
+@@ -8942,7 +8960,16 @@ static int io_uring_flush(struct file *f
+ if (atomic_long_read(&file->f_count) != 2)
+ return 0;
+
+- io_uring_del_task_file(file);
++ if (ctx->flags & IORING_SETUP_SQPOLL) {
++ /* there is only one file note, which is owned by sqo_task */
++ WARN_ON_ONCE((ctx->sqo_task == current) ==
++ !xa_load(&tctx->xa, (unsigned long)file));
++
++ io_disable_sqo_submit(ctx);
++ }
++
++ if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
++ io_uring_del_task_file(file);
+ return 0;
+ }
+
+@@ -9016,8 +9043,9 @@ static unsigned long io_uring_nommu_get_
+
+ #endif /* !CONFIG_MMU */
+
+-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
++static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+ {
++ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ do {
+@@ -9026,6 +9054,11 @@ static void io_sqpoll_wait_sq(struct io_
+
+ prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
+
++ if (unlikely(ctx->sqo_dead)) {
++ ret = -EOWNERDEAD;
++ goto out;
++ }
++
+ if (!io_sqring_full(ctx))
+ break;
+
+@@ -9033,6 +9066,8 @@ static void io_sqpoll_wait_sq(struct io_
+ } while (!signal_pending(current));
+
+ finish_wait(&ctx->sqo_sq_wait, &wait);
++out:
++ return ret;
+ }
+
+ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+@@ -9076,10 +9111,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+
++ ret = -EOWNERDEAD;
++ if (unlikely(ctx->sqo_dead))
++ goto out;
+ if (flags & IORING_ENTER_SQ_WAKEUP)
+ wake_up(&ctx->sq_data->wait);
+- if (flags & IORING_ENTER_SQ_WAIT)
+- io_sqpoll_wait_sq(ctx);
++ if (flags & IORING_ENTER_SQ_WAIT) {
++ ret = io_sqpoll_wait_sq(ctx);
++ if (ret)
++ goto out;
++ }
+ submitted = to_submit;
+ } else if (to_submit) {
+ ret = io_uring_add_task_file(ctx, f.file);
+@@ -9498,6 +9539,7 @@ static int io_uring_create(unsigned entr
+ trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
+ return ret;
+ err:
++ io_disable_sqo_submit(ctx);
+ io_ring_ctx_wait_and_kill(ctx);
+ return ret;
+ }
--- /dev/null
+From a6616bc9a0af7c65c0b0856a7508870a4a40c4ac Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 12 Jan 2021 14:24:48 +0100
+Subject: iwlwifi: dbg: Don't touch the tlv data
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit a6616bc9a0af7c65c0b0856a7508870a4a40c4ac upstream.
+
+The commit ba8f6f4ae254 ("iwlwifi: dbg: add dumping special device
+memory") added a termination of name string just to be sure, and this
+seems causing a regression, a GPF triggered at firmware loading.
+Basically we shouldn't modify the firmware data that may be provided
+as read-only.
+
+This patch drops the code that caused the regression and keep the tlv
+data as is.
+
+Fixes: ba8f6f4ae254 ("iwlwifi: dbg: add dumping special device memory")
+BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1180344
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=210733
+Cc: stable@vger.kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Acked-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20210112132449.22243-2-tiwai@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -237,13 +237,6 @@ static int iwl_dbg_tlv_alloc_region(stru
+ if (le32_to_cpu(tlv->length) < sizeof(*reg))
+ return -EINVAL;
+
+- /* For safe using a string from FW make sure we have a
+- * null terminator
+- */
+- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
+-
+- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
+-
+ if (id >= IWL_FW_INI_MAX_REGION_ID) {
+ IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
+ return -EINVAL;
--- /dev/null
+From foo@baz Fri Jan 29 11:06:03 AM CET 2021
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 Jan 2021 11:17:00 +0000
+Subject: kernel/io_uring: cancel io_uring before task works
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Message-ID: <96a68f8f062a7bc6e267fef65e01a665ab232a29.1611659564.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit b1b6b5a30dce872f500dc43f067cba8e7f86fc7d ]
+
+For cancelling io_uring requests it needs either to be able to run
+currently enqueued task_works or having it shut down by that moment.
+Otherwise io_uring_cancel_files() may be waiting for requests that won't
+ever complete.
+
+Go with the first way and do cancellations before setting PF_EXITING and
+so before putting the task_work infrastructure into a transition state
+where task_work_run() would better not be called.
+
+Cc: stable@vger.kernel.org # 5.5+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/file.c | 2 --
+ kernel/exit.c | 2 ++
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -21,7 +21,6 @@
+ #include <linux/rcupdate.h>
+ #include <linux/close_range.h>
+ #include <net/sock.h>
+-#include <linux/io_uring.h>
+
+ unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
+@@ -453,7 +452,6 @@ void exit_files(struct task_struct *tsk)
+ struct files_struct * files = tsk->files;
+
+ if (files) {
+- io_uring_files_cancel(files);
+ task_lock(tsk);
+ tsk->files = NULL;
+ task_unlock(tsk);
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -63,6 +63,7 @@
+ #include <linux/random.h>
+ #include <linux/rcuwait.h>
+ #include <linux/compat.h>
++#include <linux/io_uring.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -762,6 +763,7 @@ void __noreturn do_exit(long code)
+ schedule();
+ }
+
++ io_uring_files_cancel(tsk->files);
+ exit_signals(tsk); /* sets PF_EXITING */
+
+ /* sync mm's RSS info before statistics gathering */
--- /dev/null
+From 95e9295daa849095d8be05fb6e26b2ba9be1594f Mon Sep 17 00:00:00 2001
+From: Naushir Patuck <naush@raspberrypi.com>
+Date: Wed, 6 Jan 2021 16:16:57 +0100
+Subject: media: Revert "media: videobuf2: Fix length check for single plane dmabuf queueing"
+
+From: Naushir Patuck <naush@raspberrypi.com>
+
+commit 95e9295daa849095d8be05fb6e26b2ba9be1594f upstream.
+
+The updated length check for dmabuf types broke existing usage in v4l2
+userland clients.
+
+Fixes: 961d3b27 ("media: videobuf2: Fix length check for single plane dmabuf queueing")
+Cc: stable@vger.kernel.org
+Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
+Tested-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+Reviewed-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/common/videobuf2/videobuf2-v4l2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
++++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
+@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_bu
+ return -EINVAL;
+ }
+ } else {
+- length = (b->memory == VB2_MEMORY_USERPTR ||
+- b->memory == VB2_MEMORY_DMABUF)
++ length = (b->memory == VB2_MEMORY_USERPTR)
+ ? b->length : vb->planes[0].length;
+
+ if (b->bytesused > length)
--- /dev/null
+From a53e3c189cc6460b60e152af3fc24edf8e0ea9d2 Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Mon, 18 Jan 2021 16:37:00 +0100
+Subject: media: v4l2-subdev.h: BIT() is not available in userspace
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+commit a53e3c189cc6460b60e152af3fc24edf8e0ea9d2 upstream.
+
+The BIT macro is not available in userspace, so replace BIT(0) by
+0x00000001.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: 6446ec6cbf46 ("media: v4l2-subdev: add VIDIOC_SUBDEV_QUERYCAP ioctl")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/uapi/linux/v4l2-subdev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/uapi/linux/v4l2-subdev.h
++++ b/include/uapi/linux/v4l2-subdev.h
+@@ -176,7 +176,7 @@ struct v4l2_subdev_capability {
+ };
+
+ /* The v4l2 sub-device video device node is registered in read-only mode. */
+-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
++#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+
+ /* Backwards compatibility define --- to be removed */
+ #define v4l2_subdev_edid v4l2_edid
--- /dev/null
+From 9f206f7398f6f6ec7dd0198c045c2459b4f720b6 Mon Sep 17 00:00:00 2001
+From: Bryan Tan <bryantan@vmware.com>
+Date: Mon, 18 Jan 2021 19:16:29 -0800
+Subject: RDMA/vmw_pvrdma: Fix network_hdr_type reported in WC
+
+From: Bryan Tan <bryantan@vmware.com>
+
+commit 9f206f7398f6f6ec7dd0198c045c2459b4f720b6 upstream.
+
+The PVRDMA device HW interface defines network_hdr_type according to an
+old definition of the internal kernel rdma_network_type enum that has
+since changed, resulting in the wrong rdma_network_type being reported.
+
+Fix this by explicitly defining the enum used by the PVRDMA device and
+adding a function to convert the pvrdma_network_type to rdma_network_type
+enum.
+
+Cc: stable@vger.kernel.org # 5.10+
+Fixes: 1c15b4f2a42f ("RDMA/core: Modify enum ib_gid_type and enum rdma_network_type")
+Link: https://lore.kernel.org/r/1611026189-17943-1-git-send-email-bryantan@vmware.com
+Reviewed-by: Adit Ranadive <aditr@vmware.com>
+Signed-off-by: Bryan Tan <bryantan@vmware.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 14 ++++++++++++++
+ drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 2 +-
+ include/uapi/rdma/vmw_pvrdma-abi.h | 7 +++++++
+ 3 files changed, 22 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdm
+ return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+ }
+
++static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
++{
++ switch (type) {
++ case PVRDMA_NETWORK_ROCE_V1:
++ return RDMA_NETWORK_ROCE_V1;
++ case PVRDMA_NETWORK_IPV4:
++ return RDMA_NETWORK_IPV4;
++ case PVRDMA_NETWORK_IPV6:
++ return RDMA_NETWORK_IPV6;
++ default:
++ return RDMA_NETWORK_IPV6;
++ }
++}
++
+ void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+ const struct pvrdma_qp_cap *src);
+ void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+@@ -364,7 +364,7 @@ retry:
+ wc->dlid_path_bits = cqe->dlid_path_bits;
+ wc->port_num = cqe->port_num;
+ wc->vendor_err = cqe->vendor_err;
+- wc->network_hdr_type = cqe->network_hdr_type;
++ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
+
+ /* Update shared ring state */
+ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+--- a/include/uapi/rdma/vmw_pvrdma-abi.h
++++ b/include/uapi/rdma/vmw_pvrdma-abi.h
+@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
+ PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
+ };
+
++enum pvrdma_network_type {
++ PVRDMA_NETWORK_IB,
++ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
++ PVRDMA_NETWORK_IPV4,
++ PVRDMA_NETWORK_IPV6
++};
++
+ struct pvrdma_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 reserved;
Cc: <stable@vger.kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
---
mm/slub.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
--- /dev/null
+Subject: rtmutex: Remove unused argument from rt_mutex_proxy_unlock()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed Jan 20 11:32:07 2021 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2156ac1934166d6deb6cd0f6ffc4c1076ec63697 upstream
+
+Nothing uses the argument. Remove it as preparation to use
+pi_state_update_owner().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex.c | 2 +-
+ kernel/locking/rtmutex.c | 3 +--
+ kernel/locking/rtmutex_common.h | 3 +--
+ 3 files changed, 3 insertions(+), 5 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -820,7 +820,7 @@ static void put_pi_state(struct futex_pi
+ list_del_init(&pi_state->list);
+ raw_spin_unlock(&owner->pi_lock);
+ }
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
+ }
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1716,8 +1716,7 @@ void rt_mutex_init_proxy_locked(struct r
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is not longer visible to other tasks.
+ */
+-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner)
++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL);
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner);
++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
+ extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
gpio-mvebu-fix-pwm-.get_state-period-calculation.patch
revert-mm-slub-fix-a-memory-leak-in-sysfs_slab_add.patch
+futex_Ensure_the_correct_return_value_from_futex_lock_pi_.patch
+futex_Replace_pointless_printk_in_fixup_owner_.patch
+futex_Provide_and_use_pi_state_update_owner_.patch
+rtmutex_Remove_unused_argument_from_rt_mutex_proxy_unlock_.patch
+futex_Use_pi_state_update_owner__in_put_pi_state_.patch
+futex_Simplify_fixup_pi_state_owner_.patch
+futex_Handle_faults_correctly_for_PI_futexes.patch
+hid-wacom-correct-null-dereference-on-aes-pen-proximity.patch
+hid-multitouch-apply-mt_quirk_confidence-quirk-for-multi-input-devices.patch
+media-revert-media-videobuf2-fix-length-check-for-single-plane-dmabuf-queueing.patch
+media-v4l2-subdev.h-bit-is-not-available-in-userspace.patch
+rdma-vmw_pvrdma-fix-network_hdr_type-reported-in-wc.patch
+iwlwifi-dbg-don-t-touch-the-tlv-data.patch
+kernel-io_uring-cancel-io_uring-before-task-works.patch
+io_uring-inline-io_uring_attempt_task_drop.patch
+io_uring-add-warn_once-for-io_uring_flush.patch
+io_uring-stop-sqpoll-submit-on-creator-s-death.patch
+io_uring-fix-null-deref-in-io_disable_sqo_submit.patch
+io_uring-do-sqo-disable-on-install_fd-error.patch
+io_uring-fix-false-positive-sqo-warning-on-flush.patch
+io_uring-fix-uring_flush-in-exit_files-warning.patch
+io_uring-fix-skipping-disabling-sqo-on-exec.patch
+io_uring-dont-kill-fasync-under-completion_lock.patch
+io_uring-fix-sleeping-under-spin-in-__io_clean_op.patch