--- /dev/null
+From 31b35f6b4d5285a311e10753f4eb17304326b211 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Fri, 26 May 2017 19:29:00 +0200
+Subject: locking/x86: Remove the unused atomic_inc_short() method
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit 31b35f6b4d5285a311e10753f4eb17304326b211 upstream.
+
+It is completely unused and implemented only on x86.
+Remove it.
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170526172900.91058-1-dvyukov@google.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/lib/atomic_asm_32.S | 3 +--
+ arch/x86/include/asm/atomic.h | 13 -------------
+ 2 files changed, 1 insertion(+), 15 deletions(-)
+
+--- a/arch/tile/lib/atomic_asm_32.S
++++ b/arch/tile/lib/atomic_asm_32.S
+@@ -24,8 +24,7 @@
+ * has an opportunity to return -EFAULT to the user if needed.
+ * The 64-bit routines just return a "long long" with the value,
+ * since they are only used from kernel space and don't expect to fault.
+- * Support for 16-bit ops is included in the framework but we don't provide
+- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
++ * Support for 16-bit ops is included in the framework but we don't provide any.
+ *
+ * Note that the caller is advised to issue a suitable L1 or L2
+ * prefetch on the address being manipulated to avoid extra stalls.
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -220,19 +220,6 @@ static __always_inline int __atomic_add_
+ return c;
+ }
+
+-/**
+- * atomic_inc_short - increment of a short integer
+- * @v: pointer to type int
+- *
+- * Atomically adds 1 to @v
+- * Returns the new value of @u
+- */
+-static __always_inline short int atomic_inc_short(short int *v)
+-{
+- asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+- return *v;
+-}
+-
+ #ifdef CONFIG_X86_32
+ # include <asm/atomic64_32.h>
+ #else
ath9k_htc-discard-undersized-packets.patch
net-add-annotations-on-hh-hh_len-lockless-accesses.patch
s390-smp-fix-physical-to-logical-cpu-map-for-smt.patch
+locking-x86-remove-the-unused-atomic_inc_short-method.patch
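
For reference, the semantics being removed above can be reproduced in
portable user-space C11 with <stdatomic.h>. This is a minimal sketch,
not kernel code: the names inc_short_demo and counter are illustrative
only. Note one behavioral difference: the removed helper returned *v
via a separate plain load after the locked add, whereas
atomic_fetch_add() returns the pre-increment value atomically, so the
post-increment result computed below is exact.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic short counter;

/* Atomically add 1 to *v and return the new value. */
static short inc_short_demo(_Atomic short *v)
{
	/* atomic_fetch_add() returns the old value; add 1 for the new one. */
	return (short)(atomic_fetch_add(v, 1) + 1);
}

int main(void)
{
	printf("%d\n", inc_short_demo(&counter));	/* prints 1 */
	printf("%d\n", inc_short_demo(&counter));	/* prints 2 */
	return 0;
}
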
--- /dev/null
+From 31b35f6b4d5285a311e10753f4eb17304326b211 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Fri, 26 May 2017 19:29:00 +0200
+Subject: locking/x86: Remove the unused atomic_inc_short() method
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit 31b35f6b4d5285a311e10753f4eb17304326b211 upstream.
+
+It is completely unused and implemented only on x86.
+Remove it.
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170526172900.91058-1-dvyukov@google.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/lib/atomic_asm_32.S | 3 +--
+ arch/x86/include/asm/atomic.h | 13 -------------
+ 2 files changed, 1 insertion(+), 15 deletions(-)
+
+--- a/arch/tile/lib/atomic_asm_32.S
++++ b/arch/tile/lib/atomic_asm_32.S
+@@ -24,8 +24,7 @@
+ * has an opportunity to return -EFAULT to the user if needed.
+ * The 64-bit routines just return a "long long" with the value,
+ * since they are only used from kernel space and don't expect to fault.
+- * Support for 16-bit ops is included in the framework but we don't provide
+- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
++ * Support for 16-bit ops is included in the framework but we don't provide any.
+ *
+ * Note that the caller is advised to issue a suitable L1 or L2
+ * prefetch on the address being manipulated to avoid extra stalls.
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -249,19 +249,6 @@ static __always_inline int __atomic_add_
+ return c;
+ }
+
+-/**
+- * atomic_inc_short - increment of a short integer
+- * @v: pointer to type int
+- *
+- * Atomically adds 1 to @v
+- * Returns the new value of @u
+- */
+-static __always_inline short int atomic_inc_short(short int *v)
+-{
+- asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+- return *v;
+-}
+-
+ #ifdef CONFIG_X86_32
+ # include <asm/atomic64_32.h>
+ #else
net-add-annotations-on-hh-hh_len-lockless-accesses.patch
s390-smp-fix-physical-to-logical-cpu-map-for-smt.patch
xen-blkback-avoid-unmapping-unmapped-grant-pages.patch
+locking-x86-remove-the-unused-atomic_inc_short-method.patch