/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint to the hardware to expect additional updates adjacent
   to the lock word or not.  If we are acquiring a Mutex, the hint
   should be true.  Otherwise we are releasing a Mutex or doing a simple
   atomic operation.  In that case we don't expect additional updates
   adjacent to the lock word after the Store Conditional and the hint
   should be false.  */

/* On POWER6 and later the hint is appended to the lwarx operand list
   as ",1" (acquire: expect more stores nearby) or ",0" (release/plain).
   On older processors the extra operand is not accepted, so the macros
   expand to nothing.  */
#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ	",1"
# define MUTEX_HINT_REL	",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif
/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
 * (a load word and zero (high 32) form).  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
/* Atomically compare *MEM with OLDVAL and, if equal, store NEWVAL,
   with acquire semantics (__ARCH_ACQ_INSTR after the loop).  The
   statement expression evaluates to 0 when the store happened and to a
   nonzero value when the comparison failed (__tmp holds *MEM - OLDVAL
   at label 2).  A failed stwcx. (lost reservation) branches back and
   retries.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)	      \
({									      \
  unsigned int __tmp;							      \
  __asm __volatile (							      \
		    "1:	lwarx	%0,0,%1" MUTEX_HINT_ACQ "\n"		      \
		    "	subf.	%0,%2,%0\n"				      \
		    "	bne	2f\n"					      \
		    "	stwcx.	%3,0,%1\n"				      \
		    "	bne-	1b\n"					      \
		    "2:	" __ARCH_ACQ_INSTR				      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})
/* Same compare-and-exchange as the _acq variant, but with release
   semantics: __ARCH_REL_INSTR is emitted BEFORE the lwarx/stwcx. loop
   and no barrier follows it.  Evaluates to 0 on success, nonzero when
   *MEM did not equal OLDVAL.  */
#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)	      \
({									      \
  unsigned int __tmp;							      \
  __asm __volatile (__ARCH_REL_INSTR "\n"				      \
		    "1:	lwarx	%0,0,%1" MUTEX_HINT_REL "\n"		      \
		    "	subf.	%0,%2,%0\n"				      \
		    "	bne	2f\n"					      \
		    "	stwcx.	%3,0,%1\n"				      \
		    "	bne-	1b\n"					      \
		    "2:	"						      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})
/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  Each stub calls
   abort () at runtime; the trailing expression only exists to give the
   macro the type callers expect and is never reached.  */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

#define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

#define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })
#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync)
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR	"lwsync"
# endif
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()	__asm ("sync" ::: "memory")
#endif
/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <bits/atomic.h>