From: Petr Tesarik <ptesarik@suse.cz>
Subject: [ia64] re-enable interrupts when waiting for a rwlock
References: bnc#387784
Mainline: no

Re-enable interrupts for _read_lock_irqsave() and _write_lock_irqsave()
while waiting for the lock if interrupts were enabled in the caller.

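The idea, as a rough C-level sketch (illustrative only: the actual
change below is hand-written ia64 assembly, and try_acquire() /
lock_is_held() are made-up stand-ins for the fetchadd4/ld4 sequences
in the patch):

	while (!try_acquire(rw)) {			/* lock is contended */
		if (flags & (1UL << IA64_PSR_I_BIT))	/* caller had irqs on */
			local_irq_enable();		/* ssm psr.i */
		while (lock_is_held(rw))		/* wait with irqs enabled */
			cpu_relax();			/* hint @pause */
		if (flags & (1UL << IA64_PSR_I_BIT))
			local_irq_disable();		/* rsm psr.i before retry */
	}

This way interrupts can be serviced during the wait loop, while the
actual acquire still happens with interrupts disabled, so the usual
irqsave semantics hold once the lock is taken.
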
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>

---
 arch/ia64/include/asm/spinlock.h |   49 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 43 insertions(+), 6 deletions(-)

--- linux-2.6.26.orig/arch/ia64/include/asm/spinlock.h	2008-09-26 13:02:50.000000000 +0200
+++ linux-2.6.26/arch/ia64/include/asm/spinlock.h	2008-09-26 15:54:11.000000000 +0200
@@ -120,6 +120,35 @@ do {	\
 #define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
+#ifdef ASM_SUPPORTED
+#define __raw_read_lock_flags(rw, flags)	\
+do {	\
+	__asm__ __volatile__ (	\
+		"tbit.nz p6,p0 = %1,%2\n"	\
+		"br.few 3f\n"	\
+		"1:\n"	\
+		"fetchadd4.rel r2 = [%0],-1;;\n"	\
+		"(p6) ssm psr.i\n"	\
+		"2:\n"	\
+		"hint @pause\n"	\
+		"ld4 r2 = [%0];;\n"	\
+		"cmp4.lt p7,p0 = r2,r0\n"	\
+		"(p7) br.cond.spnt.few 2b\n"	\
+		"(p6) rsm psr.i;;\n"	\
+		"3:\n"	\
+		"fetchadd4.acq r2 = [%0],1;;\n"	\
+		"cmp4.lt p7,p0 = r2,r0\n"	\
+		"(p7) br.cond.spnt.few 1b\n"	\
+		:: "r"(rw), "r"(flags), "i"(IA64_PSR_I_BIT)	\
+		: "p6", "p7", "r2", "memory");	\
+} while(0)
+
+#define __raw_read_lock(lock)	__raw_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags)	__raw_read_lock(rw)
+
 #define __raw_read_lock(rw)	\
 do {	\
 	raw_rwlock_t *__read_lock_ptr = (rw);	\
@@ -131,6 +160,8 @@ do {	\
 	}	\
 } while (0)
 
+#endif /* !ASM_SUPPORTED */
+
 #define __raw_read_unlock(rw)	\
 do {	\
 	raw_rwlock_t *__read_lock_ptr = (rw);	\
@@ -138,21 +169,28 @@ do {	\
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw)	\
+#define __raw_write_lock_flags(rw, flags)	\
 do {	\
 	__asm__ __volatile__ (	\
 		"mov ar.ccv = r0\n"	\
+		"tbit.nz p6,p0 = %1,%2\n"	\
 		"dep r29 = -1, r0, 31, 1;;\n"	\
 		"1:\n"	\
+		"(p6) ssm psr.i\n"	\
+		"2:\n"	\
 		"ld4 r2 = [%0];;\n"	\
 		"cmp4.eq p0,p7 = r0,r2\n"	\
-		"(p7) br.cond.spnt.few 1b \n"	\
+		"(p7) br.cond.spnt.few 2b \n"	\
+		"(p6) rsm psr.i;;\n"	\
 		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"	\
 		"cmp4.eq p0,p7 = r0, r2\n"	\
 		"(p7) br.cond.spnt.few 1b;;\n"	\
-		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
+		:: "r"(rw), "r"(flags), "i"(IA64_PSR_I_BIT)	\
+		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");	\
 } while(0)
 
+#define __raw_write_lock(rw)	__raw_write_lock_flags(rw, 0)
+
 #define __raw_write_trylock(rw)	\
 ({	\
 	register long result;	\
@@ -174,6 +212,8 @@ static inline void __raw_write_unlock(ra
 
 #else /* !ASM_SUPPORTED */
 
+#define __raw_write_lock_flags(l, flags)	__raw_write_lock(l)
+
 #define __raw_write_lock(l)	\
 ({	\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
@@ -213,9 +253,6 @@ static inline int __raw_read_trylock(raw
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define __raw_read_lock_flags(lock, flags)	__raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags)	__raw_write_lock(lock)
-
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
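
For reference, the writer-side loop added above corresponds roughly to
the following annotated C (a paraphrase of the asm, with invented
helper names; cmpxchg4.acq compares [%0] against ar.ccv == 0 and
returns the old value, and r29 = 0x80000000 is the writer bit):

	for (;;) {
		if (flags & (1UL << IA64_PSR_I_BIT))
			local_irq_enable();		/* (p6) ssm psr.i */
		while (*(volatile int *)rw != 0)	/* readers/writer hold it */
			cpu_relax();			/* spin at label 2 */
		if (flags & (1UL << IA64_PSR_I_BIT))
			local_irq_disable();		/* (p6) rsm psr.i */
		if (cmpxchg_acq(rw, 0, 0x80000000) == 0)	/* grab writer bit */
			break;
		/* lost the race: back to label 1 and wait again */
	}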