--- /dev/null
+From 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sun, 12 Aug 2018 16:31:17 -0400
+Subject: parisc: Remove unnecessary barriers from spinlock.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 upstream.
+
+Now that mb() is an instruction barrier, issuing unnecessary barriers will
+slow performance.
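+
+For reference, a sketch of the definitions that made mb() an instruction
+barrier (the exact code lives in arch/parisc/include/asm/barrier.h; treat
+this as illustrative rather than authoritative):
+
+  /* mb() is no longer just a compiler barrier: on SMP it emits a
+   * "sync" instruction, which stalls until prior memory accesses
+   * have completed, so every redundant mb() has a real cost. */
+  #define synchronize_caches()	\
+  	__asm__ __volatile__ ("sync" : : : "memory")
+
+  #if defined(CONFIG_SMP)
+  #define mb()	do { synchronize_caches(); } while (0)
+  #else
+  #define mb()	barrier()
+  #endif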
+
+The spinlock defines contain a number of unnecessary barriers. The __ldcw()
+define is both a hardware and a compiler barrier, so the mb() barriers in
+the routines using __ldcw() serve no purpose.
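+
+For reference, __ldcw() wraps the PA-RISC ldcw (load-and-clear word)
+instruction; a simplified sketch of the define in
+arch/parisc/include/asm/ldcw.h (the __LDCW mnemonic string is
+configuration dependent):
+
+  /* Atomically load the word at "a" and clear it to zero.  The
+   * "memory" clobber makes this a compiler barrier, and the ldcw
+   * instruction is itself ordered by the hardware, so no mb() is
+   * needed around it. */
+  #define __ldcw(a) ({					\
+  	unsigned __ret;					\
+  	__asm__ __volatile__(__LDCW " 0(%1),%0"	\
+  		: "=r" (__ret) : "r" (a) : "memory");	\
+  	__ret;						\
+  })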
+
+The only barrier needed is the one in arch_spin_unlock(). We need to ensure
+that all accesses made inside the critical section are complete before the
+lock is released.
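+
+Concretely, arch_spin_unlock() after this change orders the release as
+follows (this mirrors the resulting code in the diff below):
+
+  static inline void arch_spin_unlock(arch_spinlock_t *x)
+  {
+  	volatile unsigned int *a;
+
+  	a = __ldcw_align(x);
+  	mb();	/* make critical-section accesses visible ... */
+  	*a = 1;	/* ... before the store that releases the lock */
+  }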
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/spinlock.h | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/arch/parisc/include/asm/spinlock.h
++++ b/arch/parisc/include/asm/spinlock.h
+@@ -26,7 +26,6 @@ static inline void arch_spin_lock_flags(
+ {
+ volatile unsigned int *a;
+
+- mb();
+ a = __ldcw_align(x);
+ while (__ldcw(a) == 0)
+ while (*a == 0)
+@@ -36,16 +35,15 @@ static inline void arch_spin_lock_flags(
+ local_irq_disable();
+ } else
+ cpu_relax();
+- mb();
+ }
+
+ static inline void arch_spin_unlock(arch_spinlock_t *x)
+ {
+ volatile unsigned int *a;
+- mb();
++
+ a = __ldcw_align(x);
+- *a = 1;
+ mb();
++ *a = 1;
+ }
+
+ static inline int arch_spin_trylock(arch_spinlock_t *x)
+@@ -53,10 +51,8 @@ static inline int arch_spin_trylock(arch
+ volatile unsigned int *a;
+ int ret;
+
+- mb();
+ a = __ldcw_align(x);
+ ret = __ldcw(a) != 0;
+- mb();
+
+ return ret;
+ }