From 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 Mon Sep 17 00:00:00 2001
From: John David Anglin <dave.anglin@bell.net>
Date: Sun, 12 Aug 2018 16:31:17 -0400
Subject: parisc: Remove unnecessary barriers from spinlock.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: John David Anglin <dave.anglin@bell.net>

commit 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 upstream.

Now that mb() is an instruction barrier, it will slow performance if we issue
unnecessary barriers.

The spinlock defines have a number of unnecessary barriers.  The __ldcw()
define is both a hardware and compiler barrier.  The mb() barriers in the
routines using __ldcw() serve no purpose.

The only barrier needed is the one in arch_spin_unlock().  We need to ensure
all accesses are complete prior to releasing the lock.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/parisc/include/asm/spinlock.h |    8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -21,7 +21,6 @@ static inline void arch_spin_lock_flags(
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -31,16 +30,15 @@ static inline void arch_spin_lock_flags(
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }