]> git.ipfire.org Git - thirdparty/glibc.git/blame - sysdeps/powerpc/powerpc32/bits/atomic.h
Update copyright dates with scripts/update-copyrights.
[thirdparty/glibc.git] / sysdeps / powerpc / powerpc32 / bits / atomic.h
CommitLineData
94c24227 1/* Atomic operations. PowerPC32 version.
b168057a 2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
94c24227
UD
3 This file is part of the GNU C Library.
4 Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
59ba27a6
PE
17 License along with the GNU C Library; if not, see
18 <http://www.gnu.org/licenses/>. */
94c24227 19
fa6e3bc3
UD
/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint to the hardware to expect additional updates adjacent
   to the lock word or not.  If we are acquiring a Mutex, the hint
   should be true.  Otherwise we are releasing a Mutex or doing a simple
   atomic operation.  In that case we don't expect additional updates
   adjacent to the lock word after the Store Conditional and the hint
   should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
/* Building for POWER6 or later: emit the hint as an extra lwarx
   operand, pasted directly after "%0,0,%1" in the asm templates
   below ("...,1" = acquiring, "...,0" = releasing/simple op).  */
# define MUTEX_HINT_ACQ	",1"
# define MUTEX_HINT_REL	",0"
#else
/* Pre-POWER6 target: expand to nothing so the lwarx is emitted
   without the hint operand.  */
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif
35
1ea339b6
TR
/* powerpc32 has no doubleword load-and-reserve/store-conditional, so
   64-bit atomics are unavailable (the 64-bit macros below are abort
   stubs), and this port implements atomics with inline asm rather
   than the compiler's atomic builtins.  */
#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0
38
94c24227
UD
/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
 * (a load word and zero (high 32) form).  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
/* Atomic compare-and-exchange with acquire semantics.  Loops on
   lwarx/stwcx. until the reservation succeeds or the comparison fails:
   subf. computes *MEM - OLDVAL (setting cr0); on mismatch we branch
   out without storing, otherwise stwcx. attempts to store NEWVAL and
   bne- retries if the reservation was lost.  __ARCH_ACQ_INSTR (defined
   outside this block) is the trailing acquire barrier.  Evaluates to
   zero iff NEWVAL was stored.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({									      \
  unsigned int __tmp;							      \
  __asm __volatile (							      \
		    "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n"		      \
		    " subf. %0,%2,%0\n"					      \
		    " bne 2f\n"						      \
		    " stwcx. %3,0,%1\n"					      \
		    " bne- 1b\n"					      \
		    "2: " __ARCH_ACQ_INSTR				      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})
60
/* Atomic compare-and-exchange with release semantics.  Same
   lwarx/subf./stwcx. retry loop as the _acq variant, but the barrier
   (__ARCH_REL_INSTR, defined below / in the common header) is emitted
   BEFORE the exchange and no barrier follows it.  Evaluates to zero
   iff NEWVAL was stored.  */
#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({									      \
  unsigned int __tmp;							      \
  __asm __volatile (__ARCH_REL_INSTR "\n"				      \
		    "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n"		      \
		    " subf. %0,%2,%0\n"					      \
		    " bne 2f\n"						      \
		    " stwcx. %3,0,%1\n"					      \
		    " bne- 1b\n"					      \
		    "2: "						      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})
76
7ba0e52c
UD
/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.

   Each stub calls abort () at runtime; the expression after the comma
   is never reached and exists only so the macro expansion has the
   result type the generic code expects (note (*mem) is still evaluated
   by the compiler for its type, matching __HAVE_64B_ATOMICS == 0
   above, under which these should never be invoked).  */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

#define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

#define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })
f9d35bb9
RM
#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync)
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
   /* NOTE(review): UP presumably marks uniprocessor builds, where no
      release barrier instruction is needed — confirm against the glibc
      build configuration.  */
#  define __ARCH_REL_INSTR	"lwsync"
# endif
# define atomic_write_barrier()	__asm ("lwsync" ::: "memory")
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()	__asm ("sync" ::: "memory")
# define atomic_write_barrier()	__asm ("sync" ::: "memory")
#endif
94c24227
UD
139
/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.  (That shared header supplies the generic
 * atomic_* entry points built on the __arch_* macros defined above;
 * presumably it also defines __ARCH_ACQ_INSTR and the default
 * __ARCH_REL_INSTR — verify in the powerpc-common bits/atomic.h.)
 */
#include_next <bits/atomic.h>