--- /dev/null
+From f65e0d299807d8a11812845c972493c3f9a18e10 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 10 Feb 2016 12:47:03 +0100
+Subject: ALSA: timer: Call notifier in the same spinlock
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit f65e0d299807d8a11812845c972493c3f9a18e10 upstream.
+
+snd_timer_notify1() is called outside the spinlock and it retakes the
+lock after the unlock. This is rather racy, and it's safer to move
+the snd_timer_notify1() call inside the main spinlock.
+
+The patch also contains a slight refactoring / cleanup of the code.
+Now the start/stop/continue/pause paths look more symmetric and a bit
+more readable.
+
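+Condensed from the hunks below (illustrative only, not a literal copy
+of the new code), the start path now issues the notification while
+still holding timer->lock instead of dropping and retaking it:
+
+	spin_lock_irqsave(&timer->lock, flags);
+	/* ... shutdown / busy checks, tick setup, (re)arm the timer ... */
+	snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
+			  SNDRV_TIMER_EVENT_CONTINUE);
+ unlock:
+	spin_unlock_irqrestore(&timer->lock, flags);
+	return result;
+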
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/timer.c | 220 ++++++++++++++++++++++++-----------------------------
+ 1 file changed, 102 insertions(+), 118 deletions(-)
+
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_inst
+ return 0;
+ }
+
+-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
+-
+ /*
+ * close a timer instance
+ */
+@@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struc
+ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ {
+ struct snd_timer *timer;
+- unsigned long flags;
+ unsigned long resolution = 0;
+ struct snd_timer_instance *ts;
+ struct timespec tstamp;
+@@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd
+ return;
+ if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ return;
+- spin_lock_irqsave(&timer->lock, flags);
+ list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ if (ts->ccallback)
+ ts->ccallback(ts, event + 100, &tstamp, resolution);
+- spin_unlock_irqrestore(&timer->lock, flags);
+ }
+
+-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
+- unsigned long sticks)
++/* start/continue a master timer */
++static int snd_timer_start1(struct snd_timer_instance *timeri,
++ bool start, unsigned long ticks)
+ {
++ struct snd_timer *timer;
++ int result;
++ unsigned long flags;
++
++ timer = timeri->timer;
++ if (!timer)
++ return -EINVAL;
++
++ spin_lock_irqsave(&timer->lock, flags);
++ if (timer->card && timer->card->shutdown) {
++ result = -ENODEV;
++ goto unlock;
++ }
++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++ SNDRV_TIMER_IFLG_START)) {
++ result = -EBUSY;
++ goto unlock;
++ }
++
++ if (start)
++ timeri->ticks = timeri->cticks = ticks;
++ else if (!timeri->cticks)
++ timeri->cticks = 1;
++ timeri->pticks = 0;
++
+ list_move_tail(&timeri->active_list, &timer->active_list_head);
+ if (timer->running) {
+ if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ goto __start_now;
+ timer->flags |= SNDRV_TIMER_FLG_RESCHED;
+ timeri->flags |= SNDRV_TIMER_IFLG_START;
+- return 1; /* delayed start */
++ result = 1; /* delayed start */
+ } else {
+- timer->sticks = sticks;
++ if (start)
++ timer->sticks = ticks;
+ timer->hw.start(timer);
+ __start_now:
+ timer->running++;
+ timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+- return 0;
++ result = 0;
+ }
++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
++ SNDRV_TIMER_EVENT_CONTINUE);
++ unlock:
++ spin_unlock_irqrestore(&timer->lock, flags);
++ return result;
+ }
+
+-static int snd_timer_start_slave(struct snd_timer_instance *timeri)
++/* start/continue a slave timer */
++static int snd_timer_start_slave(struct snd_timer_instance *timeri,
++ bool start)
+ {
+ unsigned long flags;
+
+@@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct
+ spin_lock(&timeri->timer->lock);
+ list_add_tail(&timeri->active_list,
+ &timeri->master->slave_active_head);
++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
++ SNDRV_TIMER_EVENT_CONTINUE);
+ spin_unlock(&timeri->timer->lock);
+ }
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return 1; /* delayed start */
+ }
+
+-/*
+- * start the timer instance
+- */
+-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+-{
+- struct snd_timer *timer;
+- int result = -EINVAL;
+- unsigned long flags;
+-
+- if (timeri == NULL || ticks < 1)
+- return -EINVAL;
+- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+- result = snd_timer_start_slave(timeri);
+- if (result >= 0)
+- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+- return result;
+- }
+- timer = timeri->timer;
+- if (timer == NULL)
+- return -EINVAL;
+- if (timer->card && timer->card->shutdown)
+- return -ENODEV;
+- spin_lock_irqsave(&timer->lock, flags);
+- if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+- SNDRV_TIMER_IFLG_START)) {
+- result = -EBUSY;
+- goto unlock;
+- }
+- timeri->ticks = timeri->cticks = ticks;
+- timeri->pticks = 0;
+- result = snd_timer_start1(timer, timeri, ticks);
+- unlock:
+- spin_unlock_irqrestore(&timer->lock, flags);
+- if (result >= 0)
+- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+- return result;
+-}
+-
+-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
++/* stop/pause a master timer */
++static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
+ {
+ struct snd_timer *timer;
++ int result = 0;
+ unsigned long flags;
+
+- if (snd_BUG_ON(!timeri))
+- return -ENXIO;
+-
+- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+- spin_lock_irqsave(&slave_active_lock, flags);
+- if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+- spin_unlock_irqrestore(&slave_active_lock, flags);
+- return -EBUSY;
+- }
+- if (timeri->timer)
+- spin_lock(&timeri->timer->lock);
+- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+- list_del_init(&timeri->ack_list);
+- list_del_init(&timeri->active_list);
+- if (timeri->timer)
+- spin_unlock(&timeri->timer->lock);
+- spin_unlock_irqrestore(&slave_active_lock, flags);
+- goto __end;
+- }
+ timer = timeri->timer;
+ if (!timer)
+ return -EINVAL;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START))) {
+- spin_unlock_irqrestore(&timer->lock, flags);
+- return -EBUSY;
++ result = -EBUSY;
++ goto unlock;
+ }
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+- if (timer->card && timer->card->shutdown) {
+- spin_unlock_irqrestore(&timer->lock, flags);
+- return 0;
++ if (timer->card && timer->card->shutdown)
++ goto unlock;
++ if (stop) {
++ timeri->cticks = timeri->ticks;
++ timeri->pticks = 0;
+ }
+ if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+ !(--timer->running)) {
+@@ -569,35 +547,60 @@ static int _snd_timer_stop(struct snd_ti
+ }
+ }
+ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
++ SNDRV_TIMER_EVENT_CONTINUE);
++ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+- __end:
+- if (event != SNDRV_TIMER_EVENT_RESOLUTION)
+- snd_timer_notify1(timeri, event);
++ return result;
++}
++
++/* stop/pause a slave timer */
++static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&slave_active_lock, flags);
++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
++ spin_unlock_irqrestore(&slave_active_lock, flags);
++ return -EBUSY;
++ }
++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++ if (timeri->timer) {
++ spin_lock(&timeri->timer->lock);
++ list_del_init(&timeri->ack_list);
++ list_del_init(&timeri->active_list);
++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
++ SNDRV_TIMER_EVENT_CONTINUE);
++ spin_unlock(&timeri->timer->lock);
++ }
++ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return 0;
+ }
+
+ /*
++ * start the timer instance
++ */
++int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
++{
++ if (timeri == NULL || ticks < 1)
++ return -EINVAL;
++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++ return snd_timer_start_slave(timeri, true);
++ else
++ return snd_timer_start1(timeri, true, ticks);
++}
++
++/*
+ * stop the timer instance.
+ *
+ * do not call this from the timer callback!
+ */
+ int snd_timer_stop(struct snd_timer_instance *timeri)
+ {
+- struct snd_timer *timer;
+- unsigned long flags;
+- int err;
+-
+- err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
+- if (err < 0)
+- return err;
+- timer = timeri->timer;
+- if (!timer)
+- return -EINVAL;
+- spin_lock_irqsave(&timer->lock, flags);
+- timeri->cticks = timeri->ticks;
+- timeri->pticks = 0;
+- spin_unlock_irqrestore(&timer->lock, flags);
+- return 0;
++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++ return snd_timer_stop_slave(timeri, true);
++ else
++ return snd_timer_stop1(timeri, true);
+ }
+
+ /*
+@@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_inst
+ */
+ int snd_timer_continue(struct snd_timer_instance *timeri)
+ {
+- struct snd_timer *timer;
+- int result = -EINVAL;
+- unsigned long flags;
+-
+- if (timeri == NULL)
+- return result;
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+- return snd_timer_start_slave(timeri);
+- timer = timeri->timer;
+- if (! timer)
+- return -EINVAL;
+- if (timer->card && timer->card->shutdown)
+- return -ENODEV;
+- spin_lock_irqsave(&timer->lock, flags);
+- if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+- result = -EBUSY;
+- goto unlock;
+- }
+- if (!timeri->cticks)
+- timeri->cticks = 1;
+- timeri->pticks = 0;
+- result = snd_timer_start1(timer, timeri, timer->sticks);
+- unlock:
+- spin_unlock_irqrestore(&timer->lock, flags);
+- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
+- return result;
++ return snd_timer_start_slave(timeri, false);
++ else
++ return snd_timer_start1(timeri, false, 0);
+ }
+
+ /*
+@@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_
+ */
+ int snd_timer_pause(struct snd_timer_instance * timeri)
+ {
+- return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++ return snd_timer_stop_slave(timeri, false);
++ else
++ return snd_timer_stop1(timeri, false);
+ }
+
+ /*
--- /dev/null
+From ece1397cbc89c51914fae1aec729539cfd8bd62b Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Mon, 26 Mar 2018 15:12:49 +0100
+Subject: arm64: Add work around for Arm Cortex-A55 Erratum 1024718
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit ece1397cbc89c51914fae1aec729539cfd8bd62b upstream.
+
+Some variants of the Arm Cortex-A55 cores (r0p0, r0p1, r1p0) suffer
+from erratum 1024718, which causes incorrect updates when the DBM/AP
+bits in a page table entry are modified without a break-before-make
+sequence. The workaround is to skip enabling the hardware DBM feature
+on the affected cores. The hardware Access Flag management feature
+is not affected. There are some other cores suffering from this
+erratum, which could be added to the midr_list to trigger the
+workaround.
+
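+As a rough C rendering of what the cpu_midr_match assembler macro
+added below checks (the helper name here is made up for illustration;
+the MIDR_* macros are the ones introduced by this patch):
+
+	static inline bool midr_is_a55_r0p0_to_r1p0(u32 midr)
+	{
+		u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+		if ((midr & MIDR_CPU_PART_MASK) != MIDR_CORTEX_A55)
+			return false;
+		return rv >= MIDR_CPU_VAR_REV(0, 0) &&
+		       rv <= MIDR_CPU_VAR_REV(1, 0);
+	}
+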
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: ckadabi@codeaurora.org
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/Kconfig | 14 ++++++++++++
+ arch/arm64/include/asm/assembler.h | 40 +++++++++++++++++++++++++++++++++++++
+ arch/arm64/include/asm/cputype.h | 11 ++++++++++
+ arch/arm64/mm/proc.S | 5 ++++
+ 4 files changed, 70 insertions(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -375,6 +375,20 @@ config ARM64_ERRATUM_843419
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1024718
++ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
++ default y
++ help
++ This option adds work around for Arm Cortex-A55 Erratum 1024718.
++
++ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++ update of the hardware dirty bit when the DBM/AP bits are updated
++ without a break-before-make. The work around is to disable the usage
++ of hardware DBM locally on the affected cores. CPUs not affected by
++ erratum will continue to use the feature.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -23,6 +23,7 @@
+ #ifndef __ASM_ASSEMBLER_H
+ #define __ASM_ASSEMBLER_H
+
++#include <asm/cputype.h>
+ #include <asm/ptrace.h>
+ #include <asm/thread_info.h>
+
+@@ -224,4 +225,43 @@ lr .req x30 // link register
+ movk \reg, :abs_g0_nc:\val
+ .endm
+
++/*
++ * Check the MIDR_EL1 of the current CPU for a given model and a range of
++ * variant/revision. See asm/cputype.h for the macros used below.
++ *
++ * model: MIDR_CPU_PART of CPU
++ * rv_min: Minimum of MIDR_CPU_VAR_REV()
++ * rv_max: Maximum of MIDR_CPU_VAR_REV()
++ * res: Result register.
++ * tmp1, tmp2, tmp3: Temporary registers
++ *
++ * Corrupts: res, tmp1, tmp2, tmp3
++ * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
++ */
++ .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
++ mrs \res, midr_el1
++ mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
++ mov_q \tmp2, MIDR_CPU_PART_MASK
++ and \tmp3, \res, \tmp2 // Extract model
++ and \tmp1, \res, \tmp1 // rev & variant
++ mov_q \tmp2, \model
++ cmp \tmp3, \tmp2
++ cset \res, eq
++ cbz \res, .Ldone\@ // Model matches ?
++
++ .if (\rv_min != 0) // Skip min check if rv_min == 0
++ mov_q \tmp3, \rv_min
++ cmp \tmp1, \tmp3
++ cset \res, ge
++ .endif // \rv_min != 0
++ /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
++ .if ((\rv_min != \rv_max) || \rv_min == 0)
++ mov_q \tmp2, \rv_max
++ cmp \tmp1, \tmp2
++ cset \tmp2, le
++ and \res, \res, \tmp2
++ .endif
++.Ldone\@:
++ .endm
++
+ #endif /* __ASM_ASSEMBLER_H */
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -57,6 +57,14 @@
+ #define MIDR_IMPLEMENTOR(midr) \
+ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
++#define MIDR_CPU_VAR_REV(var, rev) \
++ (((var) << MIDR_VARIANT_SHIFT) | (rev))
++
++#define MIDR_CPU_PART_MASK \
++ (MIDR_IMPLEMENTOR_MASK | \
++ MIDR_ARCHITECTURE_MASK | \
++ MIDR_PARTNUM_MASK)
++
+ #define MIDR_CPU_PART(imp, partnum) \
+ (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ (0xf << MIDR_ARCHITECTURE_SHIFT) | \
+@@ -70,11 +78,14 @@
+ #define ARM_CPU_PART_FOUNDATION 0xD00
+ #define ARM_CPU_PART_CORTEX_A57 0xD07
+ #define ARM_CPU_PART_CORTEX_A53 0xD03
++#define ARM_CPU_PART_CORTEX_A55 0xD05
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+ #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+
++#define MIDR_CORTEX_A55 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
++
+ #ifndef __ASSEMBLY__
+
+ /*
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -221,6 +221,11 @@ ENTRY(__cpu_setup)
+ cbz x9, 2f
+ cmp x9, #2
+ b.lt 1f
++#ifdef CONFIG_ARM64_ERRATUM_1024718
++ /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
++ cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
++ cbnz x1, 1f
++#endif
+ orr x10, x10, #TCR_HD // hardware Dirty flag update
+ 1: orr x10, x10, #TCR_HA // hardware Access flag update
+ 2:
--- /dev/null
+From 30d6e0a4190d37740e9447e4e4815f06992dd8c3 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Thu, 24 Aug 2017 09:31:05 +0200
+Subject: futex: Remove duplicated code and fix undefined behaviour
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+commit 30d6e0a4190d37740e9447e4e4815f06992dd8c3 upstream.
+
+There is code duplicated across all architectures' headers for
+futex_atomic_op_inuser, namely the op decoding, the access_ok check
+for uaddr, and the comparison of the result.
+
+Remove this duplication and leave to the arches only the needed
+assembly, which now lives in arch_futex_atomic_op_inuser.
+
+This effectively distributes Will Deacon's arm64 fix for the undefined
+behaviour reported by UBSAN to all architectures. The fix was done in
+commit 5f16a046f8e1 (arm64: futex: Fix undefined behaviour with
+FUTEX_OP_OPARG_SHIFT usage). Look there for an example dump.
+
+And as suggested by Thomas, check for a negative oparg too, because it
+was also reported to cause an undefined behaviour report.
+
+Note that s390 removed the access_ok check in d12a29703 ("s390/uaccess:
+remove pointless access_ok() checks") as access_ok there returns true.
+We reintroduce it in the helper for the sake of simplicity (it gets
+optimized away anyway).
+
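+For reference, the field layout that the new common helper in
+kernel/futex.c decodes (see the hunk at the end of this patch):
+
+	bit  31     FUTEX_OP_OPARG_SHIFT flag (oparg is a shift count)
+	bits 30:28  op
+	bits 27:24  cmp
+	bits 23:12  oparg  (sign-extended)
+	bits 11:0   cmparg (sign-extended)
+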
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390]
+Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
+Reviewed-by: Darren Hart (VMware) <dvhart@infradead.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com> [core/arm64]
+Cc: linux-mips@linux-mips.org
+Cc: Rich Felker <dalias@libc.org>
+Cc: linux-ia64@vger.kernel.org
+Cc: linux-sh@vger.kernel.org
+Cc: peterz@infradead.org
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: sparclinux@vger.kernel.org
+Cc: Jonas Bonn <jonas@southpole.se>
+Cc: linux-s390@vger.kernel.org
+Cc: linux-arch@vger.kernel.org
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: linux-hexagon@vger.kernel.org
+Cc: Helge Deller <deller@gmx.de>
+Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Matt Turner <mattst88@gmail.com>
+Cc: linux-snps-arc@lists.infradead.org
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: linux-xtensa@linux-xtensa.org
+Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+Cc: openrisc@lists.librecores.org
+Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+Cc: Stafford Horne <shorne@gmail.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Richard Henderson <rth@twiddle.net>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Michal Simek <monstr@monstr.eu>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: linux-parisc@vger.kernel.org
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Richard Kuo <rkuo@codeaurora.org>
+Cc: linux-alpha@vger.kernel.org
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: "David S. Miller" <davem@davemloft.net>
+Link: http://lkml.kernel.org/r/20170824073105.3901-1-jslaby@suse.cz
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/alpha/include/asm/futex.h | 26 +++---------------
+ arch/arc/include/asm/futex.h | 40 +++-------------------------
+ arch/arm/include/asm/futex.h | 26 ++----------------
+ arch/arm64/include/asm/futex.h | 26 ++----------------
+ arch/frv/include/asm/futex.h | 3 +-
+ arch/frv/kernel/futex.c | 27 ++-----------------
+ arch/hexagon/include/asm/futex.h | 38 ++-------------------------
+ arch/ia64/include/asm/futex.h | 25 ++----------------
+ arch/microblaze/include/asm/futex.h | 38 ++-------------------------
+ arch/mips/include/asm/futex.h | 25 ++----------------
+ arch/parisc/include/asm/futex.h | 25 ++----------------
+ arch/powerpc/include/asm/futex.h | 26 +++---------------
+ arch/s390/include/asm/futex.h | 23 +++-------------
+ arch/sh/include/asm/futex.h | 26 ++----------------
+ arch/sparc/include/asm/futex_64.h | 26 +++---------------
+ arch/tile/include/asm/futex.h | 40 +++-------------------------
+ arch/x86/include/asm/futex.h | 40 +++-------------------------
+ arch/xtensa/include/asm/futex.h | 27 +++----------------
+ include/asm-generic/futex.h | 50 ++++++------------------------------
+ kernel/futex.c | 39 ++++++++++++++++++++++++++++
+ 20 files changed, 126 insertions(+), 470 deletions(-)
+
+--- a/arch/alpha/include/asm/futex.h
++++ b/arch/alpha/include/asm/futex.h
+@@ -29,18 +29,10 @@
+ : "r" (uaddr), "r"(oparg) \
+ : "memory")
+
+-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/arc/include/asm/futex.h
++++ b/arch/arc/include/asm/futex.h
+@@ -73,20 +73,11 @@
+
+ #endif
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+- return -EFAULT;
+-
+ #ifndef CONFIG_ARC_HAS_LLSC
+ preempt_disable(); /* to guarantee atomic r-m-w of futex op */
+ #endif
+@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser
+ preempt_enable();
+ #endif
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ:
+- ret = (oldval == cmparg);
+- break;
+- case FUTEX_OP_CMP_NE:
+- ret = (oldval != cmparg);
+- break;
+- case FUTEX_OP_CMP_LT:
+- ret = (oldval < cmparg);
+- break;
+- case FUTEX_OP_CMP_GE:
+- ret = (oldval >= cmparg);
+- break;
+- case FUTEX_OP_CMP_LE:
+- ret = (oldval <= cmparg);
+- break;
+- case FUTEX_OP_CMP_GT:
+- ret = (oldval > cmparg);
+- break;
+- default:
+- ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
+ #endif /* !SMP */
+
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tmp;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ #ifndef CONFIG_SMP
+ preempt_disable();
+ #endif
+@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op,
+ preempt_enable();
+ #endif
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -53,20 +53,10 @@
+ : "memory")
+
+ static inline int
+-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (int)(encoded_op << 8) >> 20;
+- int cmparg = (int)(encoded_op << 20) >> 20;
+ int oldval = 0, ret, tmp;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1U << (oparg & 0x1f);
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ pagefault_disable();
+
+ switch (op) {
+@@ -96,17 +86,9 @@ futex_atomic_op_inuser(unsigned int enco
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/frv/include/asm/futex.h
++++ b/arch/frv/include/asm/futex.h
+@@ -7,7 +7,8 @@
+ #include <asm/errno.h>
+ #include <asm/uaccess.h>
+
+-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
++extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr);
+
+ static inline int
+ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+--- a/arch/frv/kernel/futex.c
++++ b/arch/frv/kernel/futex.c
+@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_x
+ /*
+ * do the futex operations
+ */
+-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ pagefault_disable();
+
+ switch (op) {
+@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_o
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS; break;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
+
+ return ret;
+
+-} /* end futex_atomic_op_inuser() */
++} /* end arch_futex_atomic_op_inuser() */
+--- a/arch/hexagon/include/asm/futex.h
++++ b/arch/hexagon/include/asm/futex.h
+@@ -31,18 +31,9 @@
+
+
+ static inline int
+-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, i
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ:
+- ret = (oldval == cmparg);
+- break;
+- case FUTEX_OP_CMP_NE:
+- ret = (oldval != cmparg);
+- break;
+- case FUTEX_OP_CMP_LT:
+- ret = (oldval < cmparg);
+- break;
+- case FUTEX_OP_CMP_GE:
+- ret = (oldval >= cmparg);
+- break;
+- case FUTEX_OP_CMP_LE:
+- ret = (oldval <= cmparg);
+- break;
+- case FUTEX_OP_CMP_GT:
+- ret = (oldval > cmparg);
+- break;
+- default:
+- ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/ia64/include/asm/futex.h
++++ b/arch/ia64/include/asm/futex.h
+@@ -45,18 +45,9 @@ do { \
+ } while (0)
+
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op,
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/microblaze/include/asm/futex.h
++++ b/arch/microblaze/include/asm/futex.h
+@@ -29,18 +29,9 @@
+ })
+
+ static inline int
+-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ:
+- ret = (oldval == cmparg);
+- break;
+- case FUTEX_OP_CMP_NE:
+- ret = (oldval != cmparg);
+- break;
+- case FUTEX_OP_CMP_LT:
+- ret = (oldval < cmparg);
+- break;
+- case FUTEX_OP_CMP_GE:
+- ret = (oldval >= cmparg);
+- break;
+- case FUTEX_OP_CMP_LE:
+- ret = (oldval <= cmparg);
+- break;
+- case FUTEX_OP_CMP_GT:
+- ret = (oldval > cmparg);
+- break;
+- default:
+- ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/mips/include/asm/futex.h
++++ b/arch/mips/include/asm/futex.h
+@@ -83,18 +83,9 @@
+ }
+
+ static inline int
+-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/parisc/include/asm/futex.h
++++ b/arch/parisc/include/asm/futex.h
+@@ -32,20 +32,11 @@ _futex_spin_unlock_irqrestore(u32 __user
+ }
+
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+ unsigned long int flags;
+ u32 val;
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -98,17 +89,9 @@ futex_atomic_op_inuser (int encoded_op,
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -31,18 +31,10 @@
+ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ : "cr0", "memory")
+
+-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/s390/include/asm/futex.h
++++ b/arch/s390/include/asm/futex.h
+@@ -21,17 +21,12 @@
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, newval, ret;
+
+ load_kernel_asce();
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+
+ pagefault_disable();
+ switch (op) {
+@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser
+ }
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/sh/include/asm/futex.h
++++ b/arch/sh/include/asm/futex.h
+@@ -10,20 +10,11 @@
+ /* XXX: UP variants, fix for SH-4A and SMP.. */
+ #include <asm/futex-irq.h>
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ pagefault_disable();
+
+ switch (op) {
+@@ -49,17 +40,8 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
+
+ return ret;
+ }
+--- a/arch/sparc/include/asm/futex_64.h
++++ b/arch/sparc/include/asm/futex_64.h
+@@ -29,22 +29,14 @@
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+ : "memory")
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+
+- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+- return -EFAULT;
+ if (unlikely((((unsigned long) uaddr) & 0x3UL)))
+ return -EINVAL;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+ pagefault_disable();
+
+ switch (op) {
+@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/tile/include/asm/futex.h
++++ b/arch/tile/include/asm/futex.h
+@@ -106,12 +106,9 @@
+ lock = __atomic_hashed_lock((int __force *)uaddr)
+ #endif
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int uninitialized_var(val), ret;
+
+ __futex_prolog();
+@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser
+ /* The 32-bit futex code makes this assumption, so validate it here. */
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ pagefault_disable();
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser
+ }
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ:
+- ret = (val == cmparg);
+- break;
+- case FUTEX_OP_CMP_NE:
+- ret = (val != cmparg);
+- break;
+- case FUTEX_OP_CMP_LT:
+- ret = (val < cmparg);
+- break;
+- case FUTEX_OP_CMP_GE:
+- ret = (val >= cmparg);
+- break;
+- case FUTEX_OP_CMP_LE:
+- ret = (val <= cmparg);
+- break;
+- case FUTEX_OP_CMP_GT:
+- ret = (val > cmparg);
+- break;
+- default:
+- ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = val;
++
+ return ret;
+ }
+
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -41,20 +41,11 @@
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+-
+ pagefault_disable();
+
+ switch (op) {
+@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ:
+- ret = (oldval == cmparg);
+- break;
+- case FUTEX_OP_CMP_NE:
+- ret = (oldval != cmparg);
+- break;
+- case FUTEX_OP_CMP_LT:
+- ret = (oldval < cmparg);
+- break;
+- case FUTEX_OP_CMP_GE:
+- ret = (oldval >= cmparg);
+- break;
+- case FUTEX_OP_CMP_LE:
+- ret = (oldval <= cmparg);
+- break;
+- case FUTEX_OP_CMP_GT:
+- ret = (oldval > cmparg);
+- break;
+- default:
+- ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/arch/xtensa/include/asm/futex.h
++++ b/arch/xtensa/include/asm/futex.h
+@@ -44,18 +44,10 @@
+ : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
+ : "memory")
+
+-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
++ u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ #if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser
+
+ pagefault_enable();
+
+- if (ret)
+- return ret;
+-
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
+- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
+- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
+- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
+- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
+- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
+- }
++ if (!ret)
++ *oval = oldval;
+
+- return -ENOSYS;
++ return ret;
+ }
+
+ static inline int
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -13,7 +13,7 @@
+ */
+
+ /**
+- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
++ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * argument and comparison of the previous
+ * futex value with another constant.
+ *
+@@ -25,18 +25,11 @@
+ * <0 - On error
+ */
+ static inline int
+-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval, ret;
+ u32 tmp;
+
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+ preempt_disable();
+ pagefault_disable();
+
+@@ -74,17 +67,9 @@ out_pagefault_enable:
+ pagefault_enable();
+ preempt_enable();
+
+- if (ret == 0) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (ret == 0)
++ *oval = oldval;
++
+ return ret;
+ }
+
+@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
+
+ #else
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+ {
+- int op = (encoded_op >> 28) & 7;
+- int cmp = (encoded_op >> 24) & 15;
+- int oparg = (encoded_op << 8) >> 20;
+- int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+- oparg = 1 << oparg;
+-
+- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+- return -EFAULT;
+
+ pagefault_disable();
+
+@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op,
+
+ pagefault_enable();
+
+- if (!ret) {
+- switch (cmp) {
+- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+- default: ret = -ENOSYS;
+- }
+- }
++ if (!ret)
++ *oval = oldval;
++
+ return ret;
+ }
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1453,6 +1453,45 @@ out:
+ return ret;
+ }
+
++static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
++{
++ unsigned int op = (encoded_op & 0x70000000) >> 28;
++ unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
++ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12);
++ int cmparg = sign_extend32(encoded_op & 0x00000fff, 12);
++ int oldval, ret;
++
++ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
++ if (oparg < 0 || oparg > 31)
++ return -EINVAL;
++ oparg = 1 << oparg;
++ }
++
++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
++ return -EFAULT;
++
++ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
++ if (ret)
++ return ret;
++
++ switch (cmp) {
++ case FUTEX_OP_CMP_EQ:
++ return oldval == cmparg;
++ case FUTEX_OP_CMP_NE:
++ return oldval != cmparg;
++ case FUTEX_OP_CMP_LT:
++ return oldval < cmparg;
++ case FUTEX_OP_CMP_GE:
++ return oldval >= cmparg;
++ case FUTEX_OP_CMP_LE:
++ return oldval <= cmparg;
++ case FUTEX_OP_CMP_GT:
++ return oldval > cmparg;
++ default:
++ return -ENOSYS;
++ }
++}
++
+ /*
+ * Wake up all waiters hashed on the physical page that is mapped
+ * to this virtual address: