/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
/* POWER6 introduced a "Mutex Hint" operand on the Load and Reserve
   instruction.  The hint tells the hardware whether to expect further
   updates adjacent to the lock word.  When acquiring a mutex the hint
   should be true; when releasing a mutex, or performing a plain atomic
   operation, no adjacent updates follow the Store Conditional and the
   hint should be false.  On older processors the hint operand is
   omitted entirely, so these expand to nothing.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif
35 | ||
/* powerpc32 has no native doubleword atomics.  */
#define __HAVE_64B_ATOMICS 0
/* Use the hand-written lwarx/stwcx. sequences, not compiler builtins.  */
#define USE_ATOMIC_COMPILER_BUILTINS 0
/* atomic_exchange is built from a compare-and-swap style loop.  */
#define ATOMIC_EXCHANGE_USES_CAS 1
39 | ||
/* The 32-bit exchange_bool differs on powerpc64: there, subf performs
   signed 64-bit arithmetic while lwarx loads a word zero-extended
   (load word and zero high 32), so powerpc64 carries its own variant
   in sysdeps/powerpc/powerpc64/atomic-machine.h.

   Returns nonzero when *MEM did not equal OLDVAL (i.e. the exchange
   failed), zero on success; the acquire fence is supplied by
   __ARCH_ACQ_INSTR after the loop.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)	\
  ({									\
    unsigned int __result;						\
    __asm __volatile (							\
      "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n"				\
      " subf. %0,%2,%0\n"						\
      " bne 2f\n"							\
      " stwcx. %3,0,%1\n"						\
      " bne- 1b\n"							\
      "2: " __ARCH_ACQ_INSTR						\
      : "=&r" (__result)						\
      : "b" (mem), "r" (oldval), "r" (newval)				\
      : "cr0", "memory");						\
    __result != 0;							\
  })
61 | ||
/* powerpc32 processors do not implement the doubleword forms of load
   and reserve (ldarx) and store conditional (stdcx.), so every 64-bit
   primitive is stubbed out to abort at runtime.  None of these should
   ever be reached: __HAVE_64B_ATOMICS is 0 above, which steers the
   generic code away from the 64-bit paths.  */

#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })

#define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })
97 | ||
#ifdef _ARCH_PWR4
/* POWER4 and newer (power4, power5, power5+, 970) implement the
   "light weight" sync (lwsync), so builds targeting those CPUs can
   safely use it for the read barrier, and also for the release
   barrier on multiprocessor configurations.  */
# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
# ifndef UP
#  define __ARCH_REL_INSTR	"lwsync"
# endif
# define atomic_write_barrier()	__asm ("lwsync" ::: "memory")
#else
/* Older powerpc32 processors do not implement lwsync; the heavyweight
   sync is the only safe choice for generic powerpc32 builds.  */
# define atomic_read_barrier()	__asm ("sync" ::: "memory")
# define atomic_write_barrier()	__asm ("sync" ::: "memory")
#endif
121 | ||
122 | /* | |
123 | * Include the rest of the atomic ops macros which are common to both | |
124 | * powerpc32 and powerpc64. | |
125 | */ | |
126 | #include_next <atomic-machine.h> |