git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
       Tue, 3 Dec 2019 13:41:19 +0000 (14:41 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
       Tue, 3 Dec 2019 13:41:19 +0000 (14:41 +0100)
added patches:
futex-prevent-robust-futex-exit-race.patch
y2038-futex-move-compat-implementation-into-futex.c.patch

queue-4.14/futex-prevent-robust-futex-exit-race.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/y2038-futex-move-compat-implementation-into-futex.c.patch [new file with mode: 0644]

diff --git a/queue-4.14/futex-prevent-robust-futex-exit-race.patch b/queue-4.14/futex-prevent-robust-futex-exit-race.patch
new file mode 100644 (file)
index 0000000..c9f6e14
--- /dev/null
@@ -0,0 +1,261 @@
+From ca16d5bee59807bf04deaab0a8eccecd5061528c Mon Sep 17 00:00:00 2001
+From: Yang Tao <yang.tao172@zte.com.cn>
+Date: Wed, 6 Nov 2019 22:55:35 +0100
+Subject: futex: Prevent robust futex exit race
+
+From: Yang Tao <yang.tao172@zte.com.cn>
+
+commit ca16d5bee59807bf04deaab0a8eccecd5061528c upstream.
+
+Robust futexes utilize the robust_list mechanism to allow the kernel to
+release futexes which are held when a task exits. The exit can be voluntary
+or caused by a signal or fault. This prevents waiters from blocking forever.
+
+The futex operations in user space store a pointer to the futex they are
+either locking or unlocking in the op_pending member of the per task robust
+list.
+
+After a lock operation has succeeded the futex is queued in the robust list
+linked list and the op_pending pointer is cleared.
+
+After an unlock operation has succeeded the futex is removed from the
+robust list linked list and the op_pending pointer is cleared.
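+
+As an illustration (not part of this patch), a minimal user space sketch
+of this protocol, built on the UAPI structures from <linux/futex.h>; the
+my_mutex type and the helpers are hypothetical, and contention handling
+is elided:
+
+    #include <linux/futex.h>
+    #include <stdatomic.h>
+    #include <stddef.h>
+    #include <stdint.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    struct my_mutex {
+            struct robust_list list;   /* link in the robust list */
+            _Atomic uint32_t lock;     /* futex word: 0 or owner TID */
+    };
+
+    /* Per thread in real code; a single list head for this sketch */
+    static struct robust_list_head head = {
+            .list            = { .next = &head.list },
+            .futex_offset    = offsetof(struct my_mutex, lock) -
+                               offsetof(struct my_mutex, list),
+            .list_op_pending = NULL,
+    };
+
+    static void robust_init(void)
+    {
+            /* Register the list which exit_robust_list() walks */
+            syscall(SYS_set_robust_list, &head, sizeof(head));
+    }
+
+    static void robust_lock(struct my_mutex *m, uint32_t tid)
+    {
+            uint32_t zero = 0;
+
+            /* Publish the pending operation before touching the futex */
+            head.list_op_pending = &m->list;
+            while (!atomic_compare_exchange_weak(&m->lock, &zero, tid))
+                    zero = 0;          /* real code would futex_wait() */
+            /* Queue the acquired futex, then clear the pending pointer */
+            m->list.next = head.list.next;
+            head.list.next = &m->list;
+            head.list_op_pending = NULL;
+    }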
+
+The robust list exit code checks for the pending operation and any futex
+which is queued in the linked list. It carefully checks whether the futex
+value is the TID of the exiting task. If so, it sets the OWNER_DIED bit and
+tries to wake up a potential waiter.
+
+This is race free for the lock operation but unlock has two race scenarios
+where waiters might not be woken up. These issues can be observed with
+regular robust pthread mutexes. PI aware pthread mutexes are not affected.
+
+(1) Unlocking task is killed after unlocking the futex value in user space
+    before being able to wake a waiter.
+
+        pthread_mutex_unlock()
+                |
+                V
+        atomic_exchange_rel (&mutex->__data.__lock, 0)
+                        <------------------------killed
+            lll_futex_wake ()                   |
+                                                |
+                                                |(__lock = 0)
+                                                |(enter kernel)
+                                                |
+                                                V
+                                            do_exit()
+                                            exit_mm()
+                                          mm_release()
+                                        exit_robust_list()
+                                        handle_futex_death()
+                                                |
+                                                |(__lock = 0)
+                                                |(uval = 0)
+                                                |
+                                                V
+        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+                return 0;
+
+    The sanity check which ensures that the user space futex is owned by
+    the exiting task prevents the wakeup of waiters, which consequently
+    block forever.
+
+(2) Waiting task is killed after a wakeup and before it can acquire the
+    futex in user space.
+
+        OWNER                         WAITER
+                               futex_wait()
+   pthread_mutex_unlock()               |
+                |                       |
+                |(__lock = 0)           |
+                |                       |
+                V                       |
+         futex_wake() ------------>  wakeup()
+                                        |
+                                        |(return to userspace)
+                                        |(__lock = 0)
+                                        |
+                                        V
+                        oldval = mutex->__data.__lock
+                                          <-----------------killed
+    atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,  |
+                        id | assume_other_futex_waiters, 0)      |
+                                                                 |
+                                                                 |
+                                                   (enter kernel)|
+                                                                 |
+                                                                 V
+                                                         do_exit()
+                                                        |
+                                                        |
+                                                        V
+                                        handle_futex_death()
+                                        |
+                                        |(__lock = 0)
+                                        |(uval = 0)
+                                        |
+                                        V
+        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+                return 0;
+
+    The sanity check which ensures that the user space futex is owned
+    by the exiting task prevents the wakeup of waiters, which seems to
+    be correct as the exiting task does not own the futex value, but
+    the consequence is that other waiters won't be woken up and block
+    forever.
+
+In both scenarios the following conditions are true:
+
+   - task->robust_list->list_op_pending != NULL
+   - user space futex value == 0
+   - Regular futex (not PI)
+
+If these conditions are met then it is reasonably safe to wake up a
+potential waiter in order to prevent the above problems.
+
+As this might be a false positive, it can cause spurious wakeups, but the
+waiter side has to handle other types of unrelated wakeups, e.g. signals,
+gracefully anyway. Such a spurious wakeup therefore does not affect the
+correctness of these operations.
+
+This workaround must not touch the user space futex value and cannot set
+the OWNER_DIED bit because the lock value is 0, i.e. uncontended. Setting
+OWNER_DIED in this case would result in inconsistent state and subsequently
+in malfunction of the owner died handling in user space.
+
+The rest of the user space state is still consistent as no other task can
+observe the list_op_pending entry in the exiting task's robust list.
+
+The eventually woken up waiter will observe the uncontended lock value and
+take it over.
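+
+Continuing the illustrative sketch from above (hypothetical helpers, not
+part of this patch), the waiter side shows why the spurious wakeup is
+harmless: any return from FUTEX_WAIT leads back to the acquisition retry,
+where a waiter woken by this workaround sees the futex word at 0 and
+takes the lock over:
+
+    static void robust_lock_slowpath(struct my_mutex *m, uint32_t tid)
+    {
+            uint32_t val = atomic_load(&m->lock);
+
+            for (;;) {
+                    if (val == 0) {
+                            if (atomic_compare_exchange_weak(&m->lock,
+                                                             &val, tid))
+                                    return; /* took over the lock */
+                            continue;       /* val reloaded, retry */
+                    }
+                    /* Block until the futex word changes; signals and
+                       spurious wakeups fall through to the retry above */
+                    syscall(SYS_futex, &m->lock, FUTEX_WAIT, val,
+                            NULL, NULL, 0);
+                    val = atomic_load(&m->lock);
+            }
+    }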
+
+[ tglx: Massaged changelog and comment. Made the return explicit and not
+       depend on the subsequent check and added constants to hand into
+       handle_futex_death() instead of plain numbers. Fixed a few coding
+       style issues. ]
+
+Fixes: 0771dfefc9e5 ("[PATCH] lightweight robust futexes: core")
+Signed-off-by: Yang Tao <yang.tao172@zte.com.cn>
+Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1573010582-35297-1-git-send-email-wang.yi59@zte.com.cn
+Link: https://lkml.kernel.org/r/20191106224555.943191378@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/futex.c |   58 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 51 insertions(+), 7 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3475,11 +3475,16 @@ err_unlock:
+       return ret;
+ }
++/* Constants for the pending_op argument of handle_futex_death */
++#define HANDLE_DEATH_PENDING  true
++#define HANDLE_DEATH_LIST     false
++
+ /*
+  * Process a futex-list entry, check whether it's owned by the
+  * dying task, and do notification if so:
+  */
+-static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
++                            bool pi, bool pending_op)
+ {
+       u32 uval, uninitialized_var(nval), mval;
+       int err;
+@@ -3492,6 +3497,42 @@ retry:
+       if (get_user(uval, uaddr))
+               return -1;
++      /*
++       * Special case for regular (non PI) futexes. The unlock path in
++       * user space has two race scenarios:
++       *
++       * 1. The unlock path releases the user space futex value and
++       *    before it can execute the futex() syscall to wake up
++       *    waiters it is killed.
++       *
++       * 2. A woken up waiter is killed before it can acquire the
++       *    futex in user space.
++       *
++       * In both cases the TID validation below prevents a wakeup of
++       * potential waiters which can cause these waiters to block
++       * forever.
++       *
++       * In both cases the following conditions are met:
++       *
++       *      1) task->robust_list->list_op_pending != NULL
++       *         @pending_op == true
++       *      2) User space futex value == 0
++       *      3) Regular futex: @pi == false
++       *
++       * If these conditions are met, it is safe to attempt waking up a
++       * potential waiter without touching the user space futex value and
++       * trying to set the OWNER_DIED bit. The user space futex value is
++       * uncontended and the rest of the user space mutex state is
++       * consistent, so a woken waiter will just take over the
++       * uncontended futex. Setting the OWNER_DIED bit would create
++       * inconsistent state and malfunction of the user space owner died
++       * handling.
++       */
++      if (pending_op && !pi && !uval) {
++              futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
++              return 0;
++      }
++
+       if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+               return 0;
+@@ -3611,10 +3652,11 @@ void exit_robust_list(struct task_struct
+                * A pending lock might already be on the list, so
+                * don't process it twice:
+                */
+-              if (entry != pending)
++              if (entry != pending) {
+                       if (handle_futex_death((void __user *)entry + futex_offset,
+-                                              curr, pi))
++                                              curr, pi, HANDLE_DEATH_LIST))
+                               return;
++              }
+               if (rc)
+                       return;
+               entry = next_entry;
+@@ -3628,9 +3670,10 @@ void exit_robust_list(struct task_struct
+               cond_resched();
+       }
+-      if (pending)
++      if (pending) {
+               handle_futex_death((void __user *)pending + futex_offset,
+-                                 curr, pip);
++                                 curr, pip, HANDLE_DEATH_PENDING);
++      }
+ }
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+@@ -3805,7 +3848,8 @@ void compat_exit_robust_list(struct task
+               if (entry != pending) {
+                       void __user *uaddr = futex_uaddr(entry, futex_offset);
+-                      if (handle_futex_death(uaddr, curr, pi))
++                      if (handle_futex_death(uaddr, curr, pi,
++                                             HANDLE_DEATH_LIST))
+                               return;
+               }
+               if (rc)
+@@ -3824,7 +3868,7 @@ void compat_exit_robust_list(struct task
+       if (pending) {
+               void __user *uaddr = futex_uaddr(pending, futex_offset);
+-              handle_futex_death(uaddr, curr, pip);
++              handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
+       }
+ }
diff --git a/queue-4.14/series b/queue-4.14/series
index 616e350d2df0d27816365279f1407b36958c3051..56a54c775113bd623737be1ad6639597a820ee10 100644 (file)
--- a/queue-4.14/series
@@ -183,3 +183,5 @@ net-macb-driver-check-for-skbtx_hw_tstamp.patch
 mtd-rawnand-atmel-fix-spelling-mistake-in-error-message.patch
 mtd-rawnand-atmel-fix-possible-object-reference-leak.patch
 mtd-spi-nor-cast-to-u64-to-avoid-uint-overflows.patch
+y2038-futex-move-compat-implementation-into-futex.c.patch
+futex-prevent-robust-futex-exit-race.patch
diff --git a/queue-4.14/y2038-futex-move-compat-implementation-into-futex.c.patch b/queue-4.14/y2038-futex-move-compat-implementation-into-futex.c.patch
new file mode 100644 (file)
index 0000000..68dcd0e
--- /dev/null
@@ -0,0 +1,501 @@
+From 04e7712f4460585e5eed5b853fd8b82a9943958f Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 17 Apr 2018 16:31:07 +0200
+Subject: y2038: futex: Move compat implementation into futex.c
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 04e7712f4460585e5eed5b853fd8b82a9943958f upstream.
+
+We are going to share the compat_sys_futex() handler between 64-bit
+architectures and 32-bit architectures that need to deal with both 32-bit
+and 64-bit time_t, and this is easier if both entry points are in the
+same file.
+
+In fact, most other system call handlers do the same thing these days, so
+let's follow the trend here and merge all of futex_compat.c into futex.c.
+
+In the process, a few minor changes have to be done to make sure everything
+still makes sense: handle_futex_death() and futex_cmpxchg_enabled() become
+local symbols, and the compat version of the fetch_robust_entry() function
+gets renamed to compat_fetch_robust_entry() to avoid a symbol clash.
+
+This is intended as a purely cosmetic patch; no behavior should change.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/futex.h |    8 -
+ kernel/Makefile       |    3 
+ kernel/futex.c        |  195 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/futex_compat.c |  202 --------------------------------------------------
+ 4 files changed, 192 insertions(+), 216 deletions(-)
+
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -12,9 +12,6 @@ struct task_struct;
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+             u32 __user *uaddr2, u32 val2, u32 val3);
+-extern int
+-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
+-
+ /*
+  * Futexes are matched on equal values of this key.
+  * The key type depends on whether it's a shared or private mapping.
+@@ -55,11 +52,6 @@ union futex_key {
+ #ifdef CONFIG_FUTEX
+ extern void exit_robust_list(struct task_struct *curr);
+-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+-#define futex_cmpxchg_enabled 1
+-#else
+-extern int futex_cmpxchg_enabled;
+-#endif
+ #else
+ static inline void exit_robust_list(struct task_struct *curr)
+ {
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -49,9 +49,6 @@ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += time/
+ obj-$(CONFIG_FUTEX) += futex.o
+-ifeq ($(CONFIG_COMPAT),y)
+-obj-$(CONFIG_FUTEX) += futex_compat.o
+-endif
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
+ obj-$(CONFIG_SMP) += smp.o
+ ifneq ($(CONFIG_SMP),y)
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -44,6 +44,7 @@
+  *  along with this program; if not, write to the Free Software
+  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  */
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
+ #include <linux/fs.h>
+@@ -173,8 +174,10 @@
+  * double_lock_hb() and double_unlock_hb(), respectively.
+  */
+-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+-int __read_mostly futex_cmpxchg_enabled;
++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
++#define futex_cmpxchg_enabled 1
++#else
++static int  __read_mostly futex_cmpxchg_enabled;
+ #endif
+ /*
+@@ -3476,7 +3479,7 @@ err_unlock:
+  * Process a futex-list entry, check whether it's owned by the
+  * dying task, and do notification if so:
+  */
+-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+ {
+       u32 uval, uninitialized_var(nval), mval;
+       int err;
+@@ -3723,6 +3726,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uad
+       return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+ }
++#ifdef CONFIG_COMPAT
++/*
++ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
++ */
++static inline int
++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
++                 compat_uptr_t __user *head, unsigned int *pi)
++{
++      if (get_user(*uentry, head))
++              return -EFAULT;
++
++      *entry = compat_ptr((*uentry) & ~1);
++      *pi = (unsigned int)(*uentry) & 1;
++
++      return 0;
++}
++
++static void __user *futex_uaddr(struct robust_list __user *entry,
++                              compat_long_t futex_offset)
++{
++      compat_uptr_t base = ptr_to_compat(entry);
++      void __user *uaddr = compat_ptr(base + futex_offset);
++
++      return uaddr;
++}
++
++/*
++ * Walk curr->robust_list (very carefully, it's a userspace list!)
++ * and mark any locks found there dead, and notify any waiters.
++ *
++ * We silently return on any sign of list-walking problem.
++ */
++void compat_exit_robust_list(struct task_struct *curr)
++{
++      struct compat_robust_list_head __user *head = curr->compat_robust_list;
++      struct robust_list __user *entry, *next_entry, *pending;
++      unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
++      unsigned int uninitialized_var(next_pi);
++      compat_uptr_t uentry, next_uentry, upending;
++      compat_long_t futex_offset;
++      int rc;
++
++      if (!futex_cmpxchg_enabled)
++              return;
++
++      /*
++       * Fetch the list head (which was registered earlier, via
++       * sys_set_robust_list()):
++       */
++      if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
++              return;
++      /*
++       * Fetch the relative futex offset:
++       */
++      if (get_user(futex_offset, &head->futex_offset))
++              return;
++      /*
++       * Fetch any possibly pending lock-add first, and handle it
++       * if it exists:
++       */
++      if (compat_fetch_robust_entry(&upending, &pending,
++                             &head->list_op_pending, &pip))
++              return;
++
++      next_entry = NULL;      /* avoid warning with gcc */
++      while (entry != (struct robust_list __user *) &head->list) {
++              /*
++               * Fetch the next entry in the list before calling
++               * handle_futex_death:
++               */
++              rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
++                      (compat_uptr_t __user *)&entry->next, &next_pi);
++              /*
++               * A pending lock might already be on the list, so
++               * dont process it twice:
++               */
++              if (entry != pending) {
++                      void __user *uaddr = futex_uaddr(entry, futex_offset);
++
++                      if (handle_futex_death(uaddr, curr, pi))
++                              return;
++              }
++              if (rc)
++                      return;
++              uentry = next_uentry;
++              entry = next_entry;
++              pi = next_pi;
++              /*
++               * Avoid excessively long or circular lists:
++               */
++              if (!--limit)
++                      break;
++
++              cond_resched();
++      }
++      if (pending) {
++              void __user *uaddr = futex_uaddr(pending, futex_offset);
++
++              handle_futex_death(uaddr, curr, pip);
++      }
++}
++
++COMPAT_SYSCALL_DEFINE2(set_robust_list,
++              struct compat_robust_list_head __user *, head,
++              compat_size_t, len)
++{
++      if (!futex_cmpxchg_enabled)
++              return -ENOSYS;
++
++      if (unlikely(len != sizeof(*head)))
++              return -EINVAL;
++
++      current->compat_robust_list = head;
++
++      return 0;
++}
++
++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
++                      compat_uptr_t __user *, head_ptr,
++                      compat_size_t __user *, len_ptr)
++{
++      struct compat_robust_list_head __user *head;
++      unsigned long ret;
++      struct task_struct *p;
++
++      if (!futex_cmpxchg_enabled)
++              return -ENOSYS;
++
++      rcu_read_lock();
++
++      ret = -ESRCH;
++      if (!pid)
++              p = current;
++      else {
++              p = find_task_by_vpid(pid);
++              if (!p)
++                      goto err_unlock;
++      }
++
++      ret = -EPERM;
++      if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
++              goto err_unlock;
++
++      head = p->compat_robust_list;
++      rcu_read_unlock();
++
++      if (put_user(sizeof(*head), len_ptr))
++              return -EFAULT;
++      return put_user(ptr_to_compat(head), head_ptr);
++
++err_unlock:
++      rcu_read_unlock();
++
++      return ret;
++}
++
++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
++              struct compat_timespec __user *, utime, u32 __user *, uaddr2,
++              u32, val3)
++{
++      struct timespec ts;
++      ktime_t t, *tp = NULL;
++      int val2 = 0;
++      int cmd = op & FUTEX_CMD_MASK;
++
++      if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
++                    cmd == FUTEX_WAIT_BITSET ||
++                    cmd == FUTEX_WAIT_REQUEUE_PI)) {
++              if (compat_get_timespec(&ts, utime))
++                      return -EFAULT;
++              if (!timespec_valid(&ts))
++                      return -EINVAL;
++
++              t = timespec_to_ktime(ts);
++              if (cmd == FUTEX_WAIT)
++                      t = ktime_add_safe(ktime_get(), t);
++              tp = &t;
++      }
++      if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
++          cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
++              val2 = (int) (unsigned long) utime;
++
++      return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
++}
++#endif /* CONFIG_COMPAT */
++
+ static void __init futex_detect_cmpxchg(void)
+ {
+ #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+--- a/kernel/futex_compat.c
++++ /dev/null
+@@ -1,202 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * linux/kernel/futex_compat.c
+- *
+- * Futex compatibililty routines.
+- *
+- * Copyright 2006, Red Hat, Inc., Ingo Molnar
+- */
+-
+-#include <linux/linkage.h>
+-#include <linux/compat.h>
+-#include <linux/nsproxy.h>
+-#include <linux/futex.h>
+-#include <linux/ptrace.h>
+-#include <linux/syscalls.h>
+-
+-#include <linux/uaccess.h>
+-
+-
+-/*
+- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+- */
+-static inline int
+-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+-                 compat_uptr_t __user *head, unsigned int *pi)
+-{
+-      if (get_user(*uentry, head))
+-              return -EFAULT;
+-
+-      *entry = compat_ptr((*uentry) & ~1);
+-      *pi = (unsigned int)(*uentry) & 1;
+-
+-      return 0;
+-}
+-
+-static void __user *futex_uaddr(struct robust_list __user *entry,
+-                              compat_long_t futex_offset)
+-{
+-      compat_uptr_t base = ptr_to_compat(entry);
+-      void __user *uaddr = compat_ptr(base + futex_offset);
+-
+-      return uaddr;
+-}
+-
+-/*
+- * Walk curr->robust_list (very carefully, it's a userspace list!)
+- * and mark any locks found there dead, and notify any waiters.
+- *
+- * We silently return on any sign of list-walking problem.
+- */
+-void compat_exit_robust_list(struct task_struct *curr)
+-{
+-      struct compat_robust_list_head __user *head = curr->compat_robust_list;
+-      struct robust_list __user *entry, *next_entry, *pending;
+-      unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+-      unsigned int uninitialized_var(next_pi);
+-      compat_uptr_t uentry, next_uentry, upending;
+-      compat_long_t futex_offset;
+-      int rc;
+-
+-      if (!futex_cmpxchg_enabled)
+-              return;
+-
+-      /*
+-       * Fetch the list head (which was registered earlier, via
+-       * sys_set_robust_list()):
+-       */
+-      if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+-              return;
+-      /*
+-       * Fetch the relative futex offset:
+-       */
+-      if (get_user(futex_offset, &head->futex_offset))
+-              return;
+-      /*
+-       * Fetch any possibly pending lock-add first, and handle it
+-       * if it exists:
+-       */
+-      if (fetch_robust_entry(&upending, &pending,
+-                             &head->list_op_pending, &pip))
+-              return;
+-
+-      next_entry = NULL;      /* avoid warning with gcc */
+-      while (entry != (struct robust_list __user *) &head->list) {
+-              /*
+-               * Fetch the next entry in the list before calling
+-               * handle_futex_death:
+-               */
+-              rc = fetch_robust_entry(&next_uentry, &next_entry,
+-                      (compat_uptr_t __user *)&entry->next, &next_pi);
+-              /*
+-               * A pending lock might already be on the list, so
+-               * dont process it twice:
+-               */
+-              if (entry != pending) {
+-                      void __user *uaddr = futex_uaddr(entry, futex_offset);
+-
+-                      if (handle_futex_death(uaddr, curr, pi))
+-                              return;
+-              }
+-              if (rc)
+-                      return;
+-              uentry = next_uentry;
+-              entry = next_entry;
+-              pi = next_pi;
+-              /*
+-               * Avoid excessively long or circular lists:
+-               */
+-              if (!--limit)
+-                      break;
+-
+-              cond_resched();
+-      }
+-      if (pending) {
+-              void __user *uaddr = futex_uaddr(pending, futex_offset);
+-
+-              handle_futex_death(uaddr, curr, pip);
+-      }
+-}
+-
+-COMPAT_SYSCALL_DEFINE2(set_robust_list,
+-              struct compat_robust_list_head __user *, head,
+-              compat_size_t, len)
+-{
+-      if (!futex_cmpxchg_enabled)
+-              return -ENOSYS;
+-
+-      if (unlikely(len != sizeof(*head)))
+-              return -EINVAL;
+-
+-      current->compat_robust_list = head;
+-
+-      return 0;
+-}
+-
+-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+-                      compat_uptr_t __user *, head_ptr,
+-                      compat_size_t __user *, len_ptr)
+-{
+-      struct compat_robust_list_head __user *head;
+-      unsigned long ret;
+-      struct task_struct *p;
+-
+-      if (!futex_cmpxchg_enabled)
+-              return -ENOSYS;
+-
+-      rcu_read_lock();
+-
+-      ret = -ESRCH;
+-      if (!pid)
+-              p = current;
+-      else {
+-              p = find_task_by_vpid(pid);
+-              if (!p)
+-                      goto err_unlock;
+-      }
+-
+-      ret = -EPERM;
+-      if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+-              goto err_unlock;
+-
+-      head = p->compat_robust_list;
+-      rcu_read_unlock();
+-
+-      if (put_user(sizeof(*head), len_ptr))
+-              return -EFAULT;
+-      return put_user(ptr_to_compat(head), head_ptr);
+-
+-err_unlock:
+-      rcu_read_unlock();
+-
+-      return ret;
+-}
+-
+-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+-              struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+-              u32, val3)
+-{
+-      struct timespec ts;
+-      ktime_t t, *tp = NULL;
+-      int val2 = 0;
+-      int cmd = op & FUTEX_CMD_MASK;
+-
+-      if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+-                    cmd == FUTEX_WAIT_BITSET ||
+-                    cmd == FUTEX_WAIT_REQUEUE_PI)) {
+-              if (compat_get_timespec(&ts, utime))
+-                      return -EFAULT;
+-              if (!timespec_valid(&ts))
+-                      return -EINVAL;
+-
+-              t = timespec_to_ktime(ts);
+-              if (cmd == FUTEX_WAIT)
+-                      t = ktime_add_safe(ktime_get(), t);
+-              tp = &t;
+-      }
+-      if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+-          cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
+-              val2 = (int) (unsigned long) utime;
+-
+-      return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+-}