/* Macros for atomic functionality for tile.
   Copyright (C) 2011-2021 Free Software Foundation, Inc.
   Contributed by Walter Lee (walt@tilera.com)

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* Provides macros for common atomic functionality.  */

#ifndef _ATOMIC_H_
#define _ATOMIC_H_
#ifdef __tilegx__

/* Atomic instruction macros

   The macros provided by atomic.h simplify access to the TILE-Gx
   architecture's atomic instructions.  The architecture provides a
   variety of atomic instructions, including "exchange", "compare and
   exchange", "fetch and ADD", "fetch and AND", "fetch and OR", and
   "fetch and ADD if greater than or equal to zero".

   No barrier or fence semantics are implied by any of the atomic
   instructions for manipulating memory; you must specify the barriers
   that you wish explicitly, using the provided macros.

   Any integral 32- or 64-bit value can be used as the argument
   to these macros, such as "int", "long long", "unsigned long", etc.
   The pointers must be aligned to 4 or 8 bytes for 32- or 64-bit data.
   The "exchange" and "compare and exchange" macros may also take
   pointer values.  We use the pseudo-type "VAL" in the documentation
   to indicate the use of an appropriate type.  */
#else

/* Atomic instruction macros

   The macros provided by atomic.h simplify access to the Tile
   architecture's atomic instructions.  Since the architecture
   supports test-and-set as its only in-silicon atomic operation, many
   of the operations provided by this header are implemented as
   fast-path calls to Linux emulation routines.

   Using the kernel for atomic operations allows userspace to take
   advantage of the kernel's existing atomic-integer support (managed
   by a distributed array of locks).  The kernel provides proper
   ordering among simultaneous atomic operations on different cores,
   and guarantees a process cannot be context-switched part way
   through an atomic operation.  By virtue of sharing the kernel
   atomic implementation, the userspace atomic operations
   are compatible with the atomic methods provided by the kernel's
   futex() syscall API.  Note that these operations never cause Linux
   kernel scheduling, and are in fact invisible to the kernel; they
   simply act as regular function calls but with an elevated privilege
   level.  Note that the kernel's distributed lock array is hashed by
   using only VA bits from the atomic value's address (to avoid the
   performance hit of page table locking and multiple page-table
   lookups to get the PA) and only the VA bits that are below page
   granularity (to properly lock simultaneous accesses to the same
   page mapped at different VAs).  As a result, simultaneous atomic
   operations on values whose addresses are at the same offset on a
   page will contend in the kernel for the same lock array element.

   No barrier or fence semantics are implied by any of the atomic
   instructions for manipulating memory; you must specify the barriers
   that you wish explicitly, using the provided macros.

   Any integral 32- or 64-bit value can be used as the argument
   to these macros, such as "int", "long long", "unsigned long", etc.
   The pointers must be aligned to 4 or 8 bytes for 32- or 64-bit data.
   The "exchange" and "compare and exchange" macros may also take
   pointer values.  We use the pseudo-type "VAL" in the documentation
   to indicate the use of an appropriate type.

   The 32-bit routines are implemented using a single kernel fast
   syscall, as is the 64-bit compare-and-exchange.  The other 64-bit
   routines are implemented by looping over the 64-bit
   compare-and-exchange routine, so may be potentially less efficient.  */

#endif
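
/* A minimal usage sketch (not part of the API this header defines): an
   atomic counter built on the macros declared further below.  The function
   name is hypothetical, and the block is not compiled.  */
#if 0 /* example only */
static __inline int
example_counter_bump (volatile int *counter)
{
  /* Atomically add 1; the macro returns the value held before the add.  */
  int old = arch_atomic_add (counter, 1);
  /* No ordering is implied; callers needing ordering must add a barrier
     such as arch_atomic_full_barrier () explicitly.  */
  return old;
}
#endif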
#ifdef __tilegx__
#define SPR_CMPEXCH_VALUE 0x2780
#else
#define __NR_FAST_cmpxchg -1
#define __NR_FAST_atomic_update -2
#define __NR_FAST_cmpxchg64 -3
#endif
/* 32-bit integer compare-and-exchange.  */
static __inline __attribute__ ((always_inline))
     int arch_atomic_val_compare_and_exchange_4 (volatile int *mem,
                                                 int oldval, int newval)
{
#ifdef __tilegx__
  __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
  return __insn_cmpexch4 (mem, newval);
#else
  int result;
  __asm__ __volatile__ ("swint1":"=R00" (result),
                        "=m" (*mem):"R10" (__NR_FAST_cmpxchg), "R00" (mem),
                        "R01" (oldval), "R02" (newval), "m" (*mem):"r20",
                        "r21", "r22", "r23", "r24", "r25", "r26", "r27",
                        "r28", "r29", "memory");
  return result;
#endif
}
/* 64-bit integer compare-and-exchange.  */
static __inline __attribute__ ((always_inline))
     long long arch_atomic_val_compare_and_exchange_8 (volatile long long
                                                       *mem, long long oldval,
                                                       long long newval)
{
#ifdef __tilegx__
  __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
  return __insn_cmpexch (mem, newval);
#else
  unsigned int result_lo, result_hi;
  unsigned int oldval_lo = oldval & 0xffffffffu, oldval_hi = oldval >> 32;
  unsigned int newval_lo = newval & 0xffffffffu, newval_hi = newval >> 32;
  __asm__ __volatile__ ("swint1":"=R00" (result_lo), "=R01" (result_hi),
                        "=m" (*mem):"R10" (__NR_FAST_cmpxchg64), "R00" (mem),
                        "R02" (oldval_lo), "R03" (oldval_hi),
                        "R04" (newval_lo), "R05" (newval_hi),
                        "m" (*mem):"r20", "r21", "r22", "r23", "r24", "r25",
                        "r26", "r27", "r28", "r29", "memory");
  return ((long long) result_hi) << 32 | result_lo;
#endif
}
/* This non-existent symbol is called for sizes other than "4" and "8",
   indicating a bug in the caller.  */
extern int __arch_atomic_error_bad_argument_size (void)
  __attribute__ ((warning ("sizeof atomic argument not 4 or 8")));
#define arch_atomic_val_compare_and_exchange(mem, o, n)                \
  __extension__ ({                                                     \
    (__typeof(*(mem)))(__typeof(*(mem)-*(mem)))                        \
      ((sizeof(*(mem)) == 8) ?                                         \
       arch_atomic_val_compare_and_exchange_8(                         \
         (volatile long long*)(mem), (__typeof((o)-(o)))(o),           \
         (__typeof((n)-(n)))(n)) :                                     \
       (sizeof(*(mem)) == 4) ?                                         \
       arch_atomic_val_compare_and_exchange_4(                         \
         (volatile int*)(mem), (__typeof((o)-(o)))(o),                 \
         (__typeof((n)-(n)))(n)) :                                     \
       __arch_atomic_error_bad_argument_size());                       \
  })
#define arch_atomic_bool_compare_and_exchange(mem, o, n)               \
  __extension__ ({                                                     \
    __typeof(o) __o = (o);                                             \
    __builtin_expect(                                                  \
      __o == arch_atomic_val_compare_and_exchange((mem), __o, (n)), 1); \
  })
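
/* A minimal usage sketch (not part of the API this header defines):
   claiming a slot with the boolean compare-and-exchange.  The function name
   is hypothetical, and the block is not compiled.  */
#if 0 /* example only */
static __inline int
example_try_claim (volatile int *slot, int owner_id)
{
  /* Nonzero iff the slot held 0 and now holds owner_id.  */
  return arch_atomic_bool_compare_and_exchange (slot, 0, owner_id);
}
#endif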
/* Loop with compare_and_exchange until we guess the correct value.
   Normally "expr" will be an expression using __old and __value.  */
#define __arch_atomic_update_cmpxchg(mem, value, expr)                 \
  __extension__ ({                                                     \
    __typeof(value) __value = (value);                                 \
    __typeof(*(mem)) *__mem = (mem), __old = *__mem, __guess;          \
    do {                                                               \
      __guess = __old;                                                 \
      __old = arch_atomic_val_compare_and_exchange(__mem, __old, (expr)); \
    } while (__builtin_expect(__old != __guess, 0));                   \
    __old;                                                             \
  })
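
/* A minimal usage sketch (not part of the API this header defines): the
   cmpxchg loop can express read-modify-write operations that have no
   dedicated primitive, e.g. an atomic maximum.  The macro name is
   hypothetical, and the block is not compiled.  */
#if 0 /* example only */
#define example_atomic_max(mem, value)                                 \
  __arch_atomic_update_cmpxchg(mem, value,                             \
                               __old > __value ? __old : __value)
#endif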
#ifdef __tilegx__

/* Generic atomic op with 8- or 4-byte variant.
   The _mask, _addend, and _expr arguments are ignored on tilegx.  */
#define __arch_atomic_update(mem, value, op, _mask, _addend, _expr)    \
  __extension__ ({                                                     \
    ((__typeof(*(mem)))                                                \
     ((sizeof(*(mem)) == 8) ? (__typeof(*(mem)-*(mem)))__insn_##op(    \
        (volatile void *)(mem),                                        \
        (long long)(__typeof((value)-(value)))(value)) :               \
      (sizeof(*(mem)) == 4) ? (int)__insn_##op##4(                     \
        (volatile void *)(mem),                                        \
        (int)(__typeof((value)-(value)))(value)) :                     \
      __arch_atomic_error_bad_argument_size()));                       \
  })
#else

/* This uses TILEPro's fast syscall support to atomically compute:

   int old = *ptr;
   *ptr = (old & mask) + addend;
   return old;

   This primitive can be used for atomic exchange, add, or, and.
   Only 32-bit support is provided.  */
static __inline __attribute__ ((always_inline))
     int
     __arch_atomic_update_4 (volatile int *mem, int mask, int addend)
{
  int result;
  __asm__ __volatile__ ("swint1":"=R00" (result),
                        "=m" (*mem):"R10" (__NR_FAST_atomic_update),
                        "R00" (mem), "R01" (mask), "R02" (addend),
                        "m" (*mem):"r20", "r21", "r22", "r23", "r24", "r25",
                        "r26", "r27", "r28", "r29", "memory");
  return result;
}
/* Generic atomic op with 8- or 4-byte variant.
   The _op argument is ignored on tilepro.  */
#define __arch_atomic_update(mem, value, _op, mask, addend, expr)      \
  __extension__ ({                                                     \
    (__typeof(*(mem)))(__typeof(*(mem)-*(mem)))                        \
      ((sizeof(*(mem)) == 8) ?                                         \
       __arch_atomic_update_cmpxchg((mem), (value), (expr)) :          \
       (sizeof(*(mem)) == 4) ?                                         \
       __arch_atomic_update_4((volatile int*)(mem),                    \
                              (__typeof((mask)-(mask)))(mask),         \
                              (__typeof((addend)-(addend)))(addend)) : \
       __arch_atomic_error_bad_argument_size());                       \
  })

#endif /* __tilegx__ */
#define arch_atomic_exchange(mem, newvalue) \
  __arch_atomic_update(mem, newvalue, exch, 0, newvalue, __value)

#define arch_atomic_add(mem, value) \
  __arch_atomic_update(mem, value, fetchadd, -1, value, __old + __value)

#define arch_atomic_sub(mem, value) arch_atomic_add((mem), -(value))

#define arch_atomic_increment(mem) arch_atomic_add((mem), 1)

#define arch_atomic_decrement(mem) arch_atomic_add((mem), -1)

#define arch_atomic_and(mem, mask) \
  __arch_atomic_update(mem, mask, fetchand, mask, 0, __old & __value)

#define arch_atomic_or(mem, mask) \
  __arch_atomic_update(mem, mask, fetchor, ~mask, mask, __old | __value)

#define arch_atomic_xor(mem, mask) \
  __arch_atomic_update_cmpxchg(mem, mask, __old ^ __value)

#define arch_atomic_nand(mem, mask) \
  __arch_atomic_update_cmpxchg(mem, mask, ~(__old & __value))
#define arch_atomic_bit_set(mem, bit)                                  \
  __extension__ ({                                                     \
    __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit);            \
    __mask & arch_atomic_or((mem), __mask);                            \
  })

#define arch_atomic_bit_clear(mem, bit)                                \
  __extension__ ({                                                     \
    __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit);            \
    __mask & arch_atomic_and((mem), ~__mask);                          \
  })
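
/* A minimal usage sketch (not part of the API this header defines): the bit
   macros return the previous state of the bit, so they act as test-and-set
   and test-and-clear.  The function name is hypothetical, and the block is
   not compiled.  */
#if 0 /* example only */
static __inline int
example_test_and_set_flag (volatile unsigned int *flags, int bit)
{
  /* Nonzero iff the bit was already set before this call.  */
  return arch_atomic_bit_set (flags, bit) != 0;
}
#endif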
#ifdef __tilegx__
/* Atomically store a new value to memory.
   Note that you can freely use types of any size here, unlike the
   other atomic routines, which require 32- or 64-bit types.
   This accessor is provided for compatibility with TILEPro, which
   required an explicit atomic operation for stores that needed
   to be atomic with respect to other atomic methods in this header.  */
#define arch_atomic_write(mem, value) ((void) (*(mem) = (value)))
#else
#define arch_atomic_write(mem, value)                                  \
  do {                                                                 \
    __typeof(mem) __aw_mem = (mem);                                    \
    __typeof(value) __aw_val = (value);                                \
    unsigned int *__aw_mem32, __aw_intval, __aw_val32, __aw_off, __aw_mask; \
    __aw_intval = (__typeof((value) - (value)))__aw_val;               \
    switch (sizeof(*__aw_mem)) {                                       \
    case 8:                                                            \
      __arch_atomic_update_cmpxchg(__aw_mem, __aw_val, __value);       \
      break;                                                           \
    case 4:                                                            \
      __arch_atomic_update_4((int *)__aw_mem, 0, __aw_intval);         \
      break;                                                           \
    case 2:                                                            \
      __aw_off = 8 * ((long)__aw_mem & 0x2);                           \
      __aw_mask = 0xffffU << __aw_off;                                 \
      __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x2);            \
      __aw_val32 = (__aw_intval << __aw_off) & __aw_mask;              \
      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32,             \
                                   (__old & ~__aw_mask) | __value);    \
      break;                                                           \
    case 1:                                                            \
      __aw_off = 8 * ((long)__aw_mem & 0x3);                           \
      __aw_mask = 0xffU << __aw_off;                                   \
      __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x3);            \
      __aw_val32 = (__aw_intval << __aw_off) & __aw_mask;              \
      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32,             \
                                   (__old & ~__aw_mask) | __value);    \
      break;                                                           \
    }                                                                  \
  } while (0)
#endif
/* Compiler barrier.

   This macro prevents loads or stores from being moved by the compiler
   across the macro.  Any loaded value that was loaded before this
   macro must then be reloaded by the compiler.  */
#define arch_atomic_compiler_barrier() __asm__ __volatile__("" ::: "memory")
/* Full memory barrier.

   This macro has the semantics of arch_atomic_compiler_barrier(), but also
   ensures that previous stores are visible to other cores, and that
   all previous loaded values have been placed into their target
   register on this core.  */
#define arch_atomic_full_barrier() __insn_mf()
/* Read memory barrier.

   Ensure that all reads by this processor that occurred prior to the
   read memory barrier have completed, and that no reads that occur
   after the read memory barrier on this processor are initiated
   before the barrier.

   On current TILE chips a read barrier is implemented as a full barrier,
   but this may not be true in later versions of the architecture.

   See also arch_atomic_acquire_barrier() for the appropriate idiom to use
   to ensure no reads are lifted above an atomic lock instruction.  */
#define arch_atomic_read_barrier() arch_atomic_full_barrier()
/* Write memory barrier.

   Ensure that all writes by this processor that occurred prior to the
   write memory barrier have completed, and that no writes that occur
   after the write memory barrier on this processor are initiated
   before the barrier.

   On current TILE chips a write barrier is implemented as a full barrier,
   but this may not be true in later versions of the architecture.

   See also arch_atomic_release_barrier() for the appropriate idiom to use
   to ensure all writes are complete prior to an atomic unlock instruction.  */
#define arch_atomic_write_barrier() arch_atomic_full_barrier()
/* Lock acquisition barrier.

   Ensure that no load operations that follow this macro in the
   program can issue prior to the barrier.  Without such a barrier,
   the compiler can reorder them to issue earlier, or the hardware can
   issue them speculatively.  The latter is not currently done in the
   Tile microarchitecture, but using this operation improves
   portability to future implementations.

   This operation is intended to be used as part of the "acquire"
   path for locking, that is, when entering a critical section.
   This should be done after the atomic operation that actually
   acquires the lock, and in conjunction with a "control dependency"
   that checks the atomic operation result to see if the lock was
   in fact acquired.  See the arch_atomic_read_barrier() macro
   for a heavier-weight barrier to use in certain unusual constructs,
   or arch_atomic_acquire_barrier_value() if no control dependency exists.  */
#define arch_atomic_acquire_barrier() arch_atomic_compiler_barrier()
/* Lock release barrier.

   Ensure that no store operations that precede this macro in the
   program complete subsequent to the barrier.  Without such a
   barrier, the compiler can reorder stores to issue later, or stores
   can be still outstanding in the memory network.

   This operation is intended to be used as part of the "release" path
   for locking, that is, when leaving a critical section.  This should
   be done before the operation (such as a store of zero) that
   actually releases the lock.  */
#define arch_atomic_release_barrier() arch_atomic_write_barrier()
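
/* A minimal usage sketch (not part of the API this header defines): the
   acquire/release idiom described above, written out as a simple spinlock.
   The function names are hypothetical, and the block is not compiled.  */
#if 0 /* example only */
static __inline void
example_spin_lock (volatile int *lock)
{
  /* Control dependency: proceed only once the exchange reports success.  */
  while (!arch_atomic_bool_compare_and_exchange (lock, 0, 1))
    ;
  arch_atomic_acquire_barrier ();
}

static __inline void
example_spin_unlock (volatile int *lock)
{
  /* Complete prior stores before the releasing store.  */
  arch_atomic_release_barrier ();
  arch_atomic_write (lock, 0);
}
#endif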
/* Barrier until the read of a particular value is complete.

   This is occasionally useful when constructing certain locking
   scenarios.  For example, you might write a routine that issues an
   atomic instruction to enter a critical section, then reads one or
   more values within the critical section without checking to see if
   the critical section was in fact acquired, and only later checks
   the atomic instruction result to see if the lock was acquired.  If
   so the routine could properly release the lock and know that the
   values that were read were valid.

   In this scenario, it is required to wait for the result of the
   atomic instruction, even if the value itself is not checked.  This
   guarantees that if the atomic instruction succeeded in taking the lock,
   the lock was held before any reads in the critical section issued.  */
#define arch_atomic_acquire_barrier_value(val) \
  __asm__ __volatile__("move %0, %0" :: "r"(val))
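
/* A minimal usage sketch (not part of the API this header defines): the
   optimistic-read pattern described above.  The lock layout and function
   name are hypothetical, and the block is not compiled.  */
#if 0 /* example only */
static __inline int
example_optimistic_read (volatile int *lock, volatile int *data, int *out)
{
  int got_lock, value;
  got_lock = arch_atomic_bool_compare_and_exchange (lock, 0, 1);
  /* Wait for the atomic result before the read issues, without yet
     branching on it.  */
  arch_atomic_acquire_barrier_value (got_lock);
  value = *data;
  if (!got_lock)
    return 0;  /* Lock not acquired; discard the value read.  */
  *out = value;
  arch_atomic_release_barrier ();
  arch_atomic_write (lock, 0);
  return 1;
}
#endif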
/* Access the given variable in memory exactly once.

   In some contexts, an algorithm may need to force access to memory,
   since otherwise the compiler may think it can optimize away a
   memory load or store; for example, in a loop when polling memory to
   see if another cpu has updated it yet.  Generally this is only
   required for certain very carefully hand-tuned algorithms; using it
   unnecessarily may result in performance losses.

   A related use of this macro is to ensure that the compiler does not
   rematerialize the value of "x" by reloading it from memory
   unexpectedly; the "volatile" marking will prevent the compiler from
   being able to rematerialize.  This is helpful if an algorithm needs
   to read a variable without locking, but needs it to have the same
   value if it ends up being used several times within the algorithm.

   Note that multiple uses of this macro are guaranteed to be ordered,
   i.e. the compiler will not reorder stores or loads that are wrapped
   in arch_atomic_access_once().  */
#define arch_atomic_access_once(x) (*(volatile __typeof(x) *)&(x))
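
/* A minimal usage sketch (not part of the API this header defines): polling
   a flag written by another cpu, as described above.  The function name is
   hypothetical, and the block is not compiled.  */
#if 0 /* example only */
static __inline void
example_wait_for_flag (int *flag)
{
  /* Force a fresh load on every iteration.  */
  while (arch_atomic_access_once (*flag) == 0)
    ;
  /* Order the flag read before reads of data published with it.  */
  arch_atomic_read_barrier ();
}
#endif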

#endif /* !_ATOMIC_H_ */