 * The atomic module provides basic support for lock-free
 * concurrent programming.
 *
 * Copyright: Copyright Sean Kelly 2005 - 2016.
 * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
 * Authors: Sean Kelly, Alex Rønne Petersen
 * Source: $(DRUNTIMESRC core/_atomic.d)
/* NOTE: This file has been patched from the original DMD distribution to
 * work with the GDC compiler.
 */
module core.atomic;
version (D_InlineAsm_X86)
{
    version = AsmX86_32;
    enum has64BitCAS = true;
    enum has128BitCAS = false;
}
else version (D_InlineAsm_X86_64)
{
    version = AsmX86_64;
    enum has64BitCAS = true;
    enum has128BitCAS = true;
}
else version (GNU)
{
    enum has64BitCAS = GNU_Have_64Bit_Atomics;
    enum has128BitCAS = GNU_Have_LibAtomic;
}
else
{
    enum has64BitCAS = false;
    enum has128BitCAS = false;
}
template HeadUnshared(T)
{
    static if ( is( T U : shared(U*) ) )
        alias shared(U)* HeadUnshared;
    else
        alias T HeadUnshared;
}
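
// Illustrative check of the head-unshared rule above: a shared pointer loses
// shared on its head (the pointer itself) but keeps it on the pointee; any
// non-pointer type passes through unchanged.
static assert( is( HeadUnshared!(shared(int*)) == shared(int)* ) );
static assert( is( HeadUnshared!(int) == int ) );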
// NOTE: Strictly speaking, the x86 supports atomic operations on
//       unaligned values. However, this is far slower than the
//       common case, so such behavior should be prohibited.
private bool atomicValueIsProperlyAligned(T)( ref T val ) pure nothrow @nogc @trusted
{
    return atomicPtrIsProperlyAligned(&val);
}
private bool atomicPtrIsProperlyAligned(T)( T* ptr ) pure nothrow @nogc @safe
{
    // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
    //       4 byte alignment, so use size_t as the align type here.
    static if ( T.sizeof > size_t.sizeof )
        return cast(size_t)ptr % size_t.sizeof == 0;
    else
        return cast(size_t)ptr % T.sizeof == 0;
}
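
// A minimal sanity sketch for the checks above (illustrative only): ordinary
// locals are always at least size_t-aligned, so both checks accept them.
pure nothrow @nogc @safe unittest
{
    int  i;
    long l;
    assert( atomicValueIsProperlyAligned( i ) );
    assert( atomicValueIsProperlyAligned( l ) ); // needs only size_t alignment on 32-bit
}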
version (CoreDdoc)
{
    /**
     * Performs the binary operation 'op' on val using 'mod' as the modifier.
     *
     * Params:
     *  val = The target variable.
     *  mod = The modifier to apply.
     *
     * Returns:
     *  The result of the operation.
     */
    HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @safe
        if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
    {
        return HeadUnshared!(T).init;
    }
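
    /// A minimal usage sketch for atomicOp (illustrative only; 'count' is an
    /// example variable, not part of the API).
    pure nothrow @nogc @safe unittest
    {
        shared int count = 3;
        assert( atomicOp!"+="( count, 2 ) == 5 ); // atomic read-modify-write; returns the new value
        assert( atomicOp!"&"( count, 4 ) == 4 );  // non-assignment ops do an atomic load, then compute
    }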
    /**
     * Stores 'writeThis' to the memory referenced by 'here' if the value
     * referenced by 'here' is equal to 'ifThis'.  This operation is both
     * lock-free and atomic.
     *
     * Params:
     *  here      = The address of the destination variable.
     *  writeThis = The value to store.
     *  ifThis    = The comparison value.
     *
     * Returns:
     *  true if the store occurred, false if not.
     */
    bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
        if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) );

    /// ditto
    bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
        if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) );

    /// ditto
    bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
        if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) );
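
    /// Sketch of the canonical cas retry loop (illustrative; names are examples
    /// only): reload and retry until our update wins the race.
    pure nothrow @nogc unittest
    {
        shared int x = 0;
        int expect;
        do
        {
            expect = atomicLoad!(MemoryOrder.raw)( x );
        } while ( !cas( &x, expect, expect + 1 ) );
        assert( atomicLoad( x ) == 1 );
    }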
    /**
     * Loads 'val' from memory and returns it.  The memory barrier specified
     * by 'ms' is applied to the operation, which is fully sequenced by
     * default.  Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
     * and MemoryOrder.seq.
     *
     * Params:
     *  val = The target variable.
     *
     * Returns:
     *  The value of 'val'.
     */
    HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq,T)( ref const shared T val ) pure nothrow @nogc @safe
    {
        return HeadUnshared!(T).init;
    }
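
    /// Usage sketch (illustrative): an acquire load pairs with a release store
    /// made by another thread to safely consume published data.
    pure nothrow @nogc @safe unittest
    {
        shared int flag = 1;
        int f = atomicLoad!(MemoryOrder.acq)( flag ); // raw, acq, and seq are valid here
        assert( f == 1 );
    }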
    /**
     * Writes 'newval' into 'val'.  The memory barrier specified by 'ms' is
     * applied to the operation, which is fully sequenced by default.
     * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
     * MemoryOrder.seq.
     *
     * Params:
     *  val    = The target variable.
     *  newval = The value to store.
     */
    void atomicStore(MemoryOrder ms = MemoryOrder.seq,T,V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
        if ( __traits( compiles, { val = newval; } ) )
    {
    }
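
    /// Usage sketch (illustrative): a release store makes all prior writes
    /// visible to any thread that later acquire-loads the same variable.
    pure nothrow @nogc @safe unittest
    {
        shared int flag;
        atomicStore!(MemoryOrder.rel)( flag, 1 ); // raw, rel, and seq are valid here
        assert( atomicLoad!(MemoryOrder.acq)( flag ) == 1 );
    }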
    /**
     * Specifies the memory ordering semantics of an atomic operation.
     */
    enum MemoryOrder
    {
        raw,    /// Not sequenced.
        acq,    /// Hoist-load + hoist-store barrier.
        rel,    /// Sink-load + sink-store barrier.
        seq,    /// Fully sequenced (acquire + release).
    }

    deprecated("Please use MemoryOrder instead.")
    alias MemoryOrder msync;
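
    /// Sketch of the classic publish pattern these orderings enable.
    /// Illustrative only and single-threaded here; in real use the acquire
    /// side runs on another thread.
    pure nothrow @nogc @safe unittest
    {
        shared int data;
        shared bool ready;

        atomicStore!(MemoryOrder.raw)( data, 42 );    // payload, ordered by the release below
        atomicStore!(MemoryOrder.rel)( ready, true ); // release: payload is visible before 'ready'

        if ( atomicLoad!(MemoryOrder.acq)( ready ) )  // acquire: pairs with the release store
            assert( atomicLoad!(MemoryOrder.raw)( data ) == 42 );
    }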
    /**
     * Inserts a full load/store memory fence (on platforms that need it). This ensures
     * that all loads and stores before a call to this function are executed before any
     * loads and stores after the call.
     */
    void atomicFence() nothrow @nogc;
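
    /// Sketch of a full-fence use (illustrative): unlike acquire/release, the
    /// full fence also orders a store before a later load of a different
    /// location, which is the one ordering x86 does not provide by default.
    nothrow @nogc unittest
    {
        shared int a, b;
        atomicStore!(MemoryOrder.raw)( a, 1 );
        atomicFence(); // the store to 'a' cannot be reordered past the load of 'b'
        assert( atomicLoad!(MemoryOrder.raw)( b ) == 0 );
    }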
}
else version (AsmX86_32)
{
    // Uses specialized asm for fast fetch and add operations
    private HeadUnshared!(T) atomicFetchAdd(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
    {
        asm pure nothrow @nogc @trusted

        static if (T.sizeof == 1) asm pure nothrow @nogc @trusted { lock; xadd[EDX], AL; }
        else static if (T.sizeof == 2) asm pure nothrow @nogc @trusted { lock; xadd[EDX], AX; }
        else static if (T.sizeof == 4) asm pure nothrow @nogc @trusted { lock; xadd[EDX], EAX; }

        asm pure nothrow @nogc @trusted
    }

    private HeadUnshared!(T) atomicFetchSub(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
    {
        // Subtraction is addition of the two's complement: negating the
        // unsigned 'mod' wraps, so the xadd above effectively subtracts.
        return atomicFetchAdd(val, -mod);
    }
    HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc
        if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
    {
        assert( atomicValueIsProperlyAligned(val) );

        // binary operators
        //
        // +    -   *   /   %   ^^  &
        // |    ^   <<  >>  >>> ~   in
        static if ( op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
                    op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
                    op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
                    op == "~"  || // skip "in"
                    op == "==" || op == "!=" || op == "<"  || op == "<="  ||
                    op == ">"  || op == ">=" )
        {
            HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
            mixin( "return get " ~ op ~ " mod;" );
        }
        else
        // assignment operators
        //
        // +=   -=  *=  /=  %=  ^^= &=
        // |=   ^=  <<= >>= >>>=    ~=
        static if ( op == "+=" && __traits(isIntegral, T) && T.sizeof <= 4 && V1.sizeof <= 4)
        {
            return cast(T)(atomicFetchAdd!(T)(val, mod) + mod);
        }
        else static if ( op == "-=" && __traits(isIntegral, T) && T.sizeof <= 4 && V1.sizeof <= 4)
        {
            return cast(T)(atomicFetchSub!(T)(val, mod) - mod);
        }
        else static if ( op == "+=" || op == "-="  || op == "*="  || op == "/=" ||
                         op == "%=" || op == "^^=" || op == "&="  || op == "|=" ||
                         op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
        {
            HeadUnshared!(T) get, set;

            do
            {
                get = set = atomicLoad!(MemoryOrder.raw)( val );
                mixin( "set " ~ op ~ " mod;" );
            } while ( !casByRef( val, get, set ) );
            return set;
        }
        else
        {
            static assert( false, "Operation not supported." );
        }
    }
    bool casByRef(T,V1,V2)( ref T value, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
    {
        return cas(&value, ifThis, writeThis);
    }
    bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
        if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
        if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
        if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }
    private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
    {
        assert( atomicPtrIsProperlyAligned( here ) );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == long.sizeof && has64BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte CAS on a 32-Bit Processor
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    deprecated("Please use MemoryOrder instead.")
    alias MemoryOrder msync;

    // NOTE: x86 loads implicitly have acquire semantics, so an explicit
    //       barrier is only necessary for fully sequenced (seq) loads.
    template needsLoadBarrier( MemoryOrder ms )
    {
        enum bool needsLoadBarrier = ms == MemoryOrder.seq;
    }

    // NOTE: x86 stores implicitly have release semantics, so an explicit
    //       barrier is only necessary for fully sequenced (seq) stores.
    template needsStoreBarrier( MemoryOrder ms )
    {
        enum bool needsStoreBarrier = ms == MemoryOrder.seq;
    }
    HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @safe
        if (!__traits(isFloating, T))
    {
        static assert( ms != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()" );
        static assert( __traits(isPOD, T), "argument to atomicLoad() must be POD" );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof && has64BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte Load on a 32-Bit Processor
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
        if ( __traits( compiles, { val = newval; } ) )
    {
        static assert( ms != MemoryOrder.acq, "invalid MemoryOrder for atomicStore()" );
        static assert( __traits(isPOD, T), "argument to atomicStore() must be POD" );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof && has64BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte Store on a 32-Bit Processor
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                L1: lock; // lock always needed to make this op atomic
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    void atomicFence() nothrow @nogc @safe
    {
        asm pure nothrow @nogc @trusted

            // Fast path: We have SSE2, so just use mfence.

            // Slow path: We use cpuid to serialize. This is
            // significantly slower than mfence, but is the
            // only serialization facility we have available
            // on older non-SSE2 chips.
    }
}
else version (AsmX86_64)
{
    // Uses specialized asm for fast fetch and add operations
    private HeadUnshared!(T) atomicFetchAdd(T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
        if ( __traits(isIntegral, T) )
    {
        assert( atomicValueIsProperlyAligned(val) );

        asm pure nothrow @nogc @trusted

        static if (T.sizeof == 1) asm pure nothrow @nogc @trusted { lock; xadd[RDX], AL; }
        else static if (T.sizeof == 2) asm pure nothrow @nogc @trusted { lock; xadd[RDX], AX; }
        else static if (T.sizeof == 4) asm pure nothrow @nogc @trusted { lock; xadd[RDX], EAX; }
        else static if (T.sizeof == 8) asm pure nothrow @nogc @trusted { lock; xadd[RDX], RAX; }

        asm pure nothrow @nogc @trusted
    }

    private HeadUnshared!(T) atomicFetchSub(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
        if ( __traits(isIntegral, T) )
    {
        // Subtraction is addition of the two's complement: negating the
        // unsigned 'mod' wraps, so the xadd above effectively subtracts.
        return atomicFetchAdd(val, -mod);
    }
    HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc
        if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
    {
        assert( atomicValueIsProperlyAligned(val) );

        // binary operators
        //
        // +    -   *   /   %   ^^  &
        // |    ^   <<  >>  >>> ~   in
        static if ( op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
                    op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
                    op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
                    op == "~"  || // skip "in"
                    op == "==" || op == "!=" || op == "<"  || op == "<="  ||
                    op == ">"  || op == ">=" )
        {
            HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
            mixin( "return get " ~ op ~ " mod;" );
        }
        else
        // assignment operators
        //
        // +=   -=  *=  /=  %=  ^^= &=
        // |=   ^=  <<= >>= >>>=    ~=
        static if ( op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1))
        {
            return cast(T)(atomicFetchAdd!(T)(val, mod) + mod);
        }
        else static if ( op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1))
        {
            return cast(T)(atomicFetchSub!(T)(val, mod) - mod);
        }
        else static if ( op == "+=" || op == "-="  || op == "*="  || op == "/=" ||
                         op == "%=" || op == "^^=" || op == "&="  || op == "|=" ||
                         op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
        {
            HeadUnshared!(T) get, set;

            do
            {
                get = set = atomicLoad!(MemoryOrder.raw)( val );
                mixin( "set " ~ op ~ " mod;" );
            } while ( !casByRef( val, get, set ) );
            return set;
        }
        else
        {
            static assert( false, "Operation not supported." );
        }
    }
    bool casByRef(T,V1,V2)( ref T value, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
    {
        return cas(&value, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
        if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
        if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
        if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }
    private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
    {
        assert( atomicPtrIsProperlyAligned( here ) );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte CAS
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == long.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte CAS on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            asm pure nothrow @nogc @trusted
                lock; // lock always needed to make this op atomic
        }
        else static if ( T.sizeof == long.sizeof*2 && has128BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 16 Byte CAS on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            version (Win64)
            {
                // Windows 64 calling convention uses different registers.
                // DMD appears to reverse the register order.
                asm pure nothrow @nogc @trusted
            }
            else
            {
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            }
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    deprecated("Please use MemoryOrder instead.")
    alias MemoryOrder msync;

    // NOTE: x86 loads implicitly have acquire semantics, so an explicit
    //       barrier is only necessary for fully sequenced (seq) loads.
    template needsLoadBarrier( MemoryOrder ms )
    {
        enum bool needsLoadBarrier = ms == MemoryOrder.seq;
    }

    // NOTE: x86 stores implicitly have release semantics, so an explicit
    //       barrier is only necessary for fully sequenced (seq) stores.
    template needsStoreBarrier( MemoryOrder ms )
    {
        enum bool needsStoreBarrier = ms == MemoryOrder.seq;
    }
    HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @safe
        if (!__traits(isFloating, T))
    {
        static assert( ms != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()" );
        static assert( __traits(isPOD, T), "argument to atomicLoad() must be POD" );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte Load
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte Load on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            static if ( needsLoadBarrier!(ms) )
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof*2 && has128BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 16 Byte Load on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            version (Win64)
            {
                size_t[2] retVal;
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic

                static if (is(T:U[], U))
                {
                    pragma(inline, true)
                    static typeof(return) toTrusted(size_t[2] retVal) @trusted
                    {
                        return *(cast(typeof(return)*) retVal.ptr);
                    }

                    return toTrusted(retVal);
                }
                else
                {
                    return cast(typeof(return)) retVal;
                }
            }
            else
            {
                asm pure nothrow @nogc @trusted
                    lock; // lock always needed to make this op atomic
            }
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
        if ( __traits( compiles, { val = newval; } ) )
    {
        static assert( ms != MemoryOrder.acq, "invalid MemoryOrder for atomicStore()" );
        static assert( __traits(isPOD, T), "argument to atomicStore() must be POD" );

        static if ( T.sizeof == byte.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 1 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == short.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 2 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == int.sizeof )
        {
            //////////////////////////////////////////////////////////////////
            // 4 Byte Store
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof && has64BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 8 Byte Store on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            static if ( needsStoreBarrier!(ms) )
                asm pure nothrow @nogc @trusted
            else
                asm pure nothrow @nogc @trusted
        }
        else static if ( T.sizeof == long.sizeof*2 && has128BitCAS )
        {
            //////////////////////////////////////////////////////////////////
            // 16 Byte Store on a 64-Bit Processor
            //////////////////////////////////////////////////////////////////
            version (Win64)
            {
                asm pure nothrow @nogc @trusted
                    L1: lock; // lock always needed to make this op atomic
            }
            else
            {
                asm pure nothrow @nogc @trusted
                    L1: lock; // lock always needed to make this op atomic
            }
        }
        else
        {
            static assert( false, "Invalid template type specified." );
        }
    }
    void atomicFence() nothrow @nogc @safe
    {
        // SSE2 is always present in 64-bit x86 chips, so mfence is available.
        asm nothrow @nogc @trusted
        {
            mfence;
        }
    }
}
else version (GNU)
{
    import gcc.builtins;

    HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @trusted
        if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
    {
        // binary operators
        //
        // +    -   *   /   %   ^^  &
        // |    ^   <<  >>  >>> ~   in
        static if ( op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
                    op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
                    op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
                    op == "~"  || // skip "in"
                    op == "==" || op == "!=" || op == "<"  || op == "<="  ||
                    op == ">"  || op == ">=" )
        {
            HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
            mixin( "return get " ~ op ~ " mod;" );
        }
        else
        // assignment operators
        //
        // +=   -=  *=  /=  %=  ^^= &=
        // |=   ^=  <<= >>= >>>=    ~=
        static if ( op == "+=" || op == "-="  || op == "*="  || op == "/=" ||
                    op == "%=" || op == "^^=" || op == "&="  || op == "|=" ||
                    op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
        {
            HeadUnshared!(T) get, set;

            do
            {
                get = set = atomicLoad!(MemoryOrder.raw)( val );
                mixin( "set " ~ op ~ " mod;" );
            } while ( !cas( &val, get, set ) );
            return set;
        }
        else
        {
            static assert( false, "Operation not supported." );
        }
    }

    bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
        if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
        if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
        if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }
    private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
    {
        static assert(GNU_Have_Atomics, "cas() not supported on this architecture");
        bool res;

        static if (T.sizeof == byte.sizeof)
        {
            res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
                                              false, MemoryOrder.seq, MemoryOrder.seq);
        }
        else static if (T.sizeof == short.sizeof)
        {
            res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
                                              false, MemoryOrder.seq, MemoryOrder.seq);
        }
        else static if (T.sizeof == int.sizeof)
        {
            res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
                                              false, MemoryOrder.seq, MemoryOrder.seq);
        }
        else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
        {
            res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
                                              false, MemoryOrder.seq, MemoryOrder.seq);
        }
        else static if (GNU_Have_LibAtomic)
        {
            res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
                                            MemoryOrder.seq, MemoryOrder.seq);
        }
        else
            static assert(0, "Invalid template type specified.");

        return res;
    }
    // Memory model types for the __atomic* builtins.

    deprecated("Please use MemoryOrder instead.")
    alias MemoryOrder msync;
    HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
        if (!__traits(isFloating, T))
    {
        static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
        static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
        static assert(GNU_Have_Atomics, "atomicLoad() not supported on this architecture");

        static if (T.sizeof == ubyte.sizeof)
        {
            ubyte value = __atomic_load_1(&val, ms);
            return *cast(HeadUnshared!T*) &value;
        }
        else static if (T.sizeof == ushort.sizeof)
        {
            ushort value = __atomic_load_2(&val, ms);
            return *cast(HeadUnshared!T*) &value;
        }
        else static if (T.sizeof == uint.sizeof)
        {
            uint value = __atomic_load_4(&val, ms);
            return *cast(HeadUnshared!T*) &value;
        }
        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
        {
            ulong value = __atomic_load_8(&val, ms);
            return *cast(HeadUnshared!T*) &value;
        }
        else static if (GNU_Have_LibAtomic)
        {
            T value;
            __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
            return *cast(HeadUnshared!T*) &value;
        }
        else
            static assert(0, "Invalid template type specified.");
    }
    void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @trusted
        if ( __traits( compiles, { val = newval; } ) )
    {
        static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
        static assert(__traits(isPOD, T), "argument to atomicStore() must be POD");
        static assert(GNU_Have_Atomics, "atomicStore() not supported on this architecture");

        static if (T.sizeof == ubyte.sizeof)
        {
            __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
        }
        else static if (T.sizeof == ushort.sizeof)
        {
            __atomic_store_2(&val, *cast(ushort*) &newval, ms);
        }
        else static if (T.sizeof == uint.sizeof)
        {
            __atomic_store_4(&val, *cast(uint*) &newval, ms);
        }
        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
        {
            __atomic_store_8(&val, *cast(ulong*) &newval, ms);
        }
        else static if (GNU_Have_LibAtomic)
        {
            __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
        }
        else
            static assert(0, "Invalid template type specified.");
    }
    void atomicFence() nothrow @nogc
    {
        __atomic_thread_fence(MemoryOrder.seq);
    }
}

// This is an ABI adapter that works on all architectures.  It type puns
// floats and doubles to ints and longs, atomically loads them, then puns
// them back.  This is necessary so that they get returned in floating
// point instead of integer registers.
HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
    if (__traits(isFloating, T))
{
    static if (T.sizeof == int.sizeof)
    {
        static assert(is(T : float));
        auto ptr = cast(const shared int*) &val;
        auto asInt = atomicLoad!(ms)(*ptr);
        return *(cast(typeof(return)*) &asInt);
    }
    else static if (T.sizeof == long.sizeof)
    {
        static assert(is(T : double));
        auto ptr = cast(const shared long*) &val;
        auto asLong = atomicLoad!(ms)(*ptr);
        return *(cast(typeof(return)*) &asLong);
    }
    else
    {
        static assert(0, "Cannot atomically load 80-bit reals.");
    }
}
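
/// Minimal sketch of the adapter above (illustrative; the double case assumes
/// the platform provides 64-bit atomic loads).
pure nothrow @nogc @safe unittest
{
    shared float f = 1.5f;
    assert( atomicLoad( f ) == 1.5f ); // loaded as an int, punned back to float
    static if ( has64BitCAS )
    {
        shared double d = 2.5;
        assert( atomicLoad( d ) == 2.5 ); // loaded as a long, punned back to double
    }
}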
////////////////////////////////////////////////////////////////////////////////
// Unit Tests
////////////////////////////////////////////////////////////////////////////////
version (unittest)
{
    void testCAS(T)( T val ) pure nothrow @nogc @trusted
    {
        assert(val !is T.init);

        T base = cast(T)null;
        shared(T) atom = cast(shared(T))null;

        assert( base !is val, T.stringof );
        assert( atom is base, T.stringof );

        assert( cas( &atom, base, val ), T.stringof );
        assert( atom is val, T.stringof );
        assert( !cas( &atom, base, base ), T.stringof );
        assert( atom is val, T.stringof );
    }
    void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)( T val = T.init + 1 ) pure nothrow @nogc @trusted
    {
        T base = cast(T) 0;
        shared(T) atom = cast(T) 0;

        assert( base !is val );
        assert( atom is base );
        atomicStore!(ms)( atom, val );
        base = atomicLoad!(ms)( atom );

        assert( base is val, T.stringof );
        assert( atom is val );
    }
    void testType(T)( T val = T.init + 1 ) pure nothrow @nogc @safe
    {
        testLoadStore!(MemoryOrder.seq, T)( val );
        testLoadStore!(MemoryOrder.raw, T)( val );
    }
    @safe pure nothrow unittest
    {
        testType!(ushort)();

        testType!(shared int*)();

        static class Klass {}
        testCAS!(shared Klass)( new shared(Klass) );

        testType!(float)(1.0f);

        static if ( has64BitCAS )
        {
            testType!(double)(1.0);
        }
        shared size_t i;

        atomicOp!"+="( i, cast(size_t) 1 );
        atomicOp!"-="( i, cast(size_t) 1 );

        shared float f = 0;
        atomicOp!"+="( f, 1 );

        static if ( has64BitCAS )
        {
            shared double d = 0;
            atomicOp!"+="( d, 1 );
        }
    }
    pure nothrow unittest
    {
        static if (has128BitCAS)
        {
            static struct DoubleValue
            {
                long value1;
                long value2;
            }

            align(16) shared DoubleValue a;
            atomicStore(a, DoubleValue(1, 2));
            assert(a.value1 == 1 && a.value2 == 2);

            while (!cas(&a, DoubleValue(1, 2), DoubleValue(3, 4))) {}
            assert(a.value1 == 3 && a.value2 == 4);

            align(16) DoubleValue b = atomicLoad(a);
            assert(b.value1 == 3 && b.value2 == 4);
        }
        version (D_LP64)
        {
            enum hasDWCAS = has128BitCAS;
        }
        else
        {
            enum hasDWCAS = has64BitCAS;
        }

        static if (hasDWCAS)
        {
            static struct List { size_t gen; List* next; }
            shared(List) head;
            assert(cas(&head, shared(List)(0, null), shared(List)(1, cast(List*)1)));
            assert(head.gen == 1);
            assert(cast(size_t)head.next == 1);
        }
    }
    pure nothrow unittest
    {
        static struct S { int val; }
        auto s = shared(S)(1);

        shared(S*) ptr;

        // head unshared
        shared(S)* ifThis = null;
        shared(S)* writeThis = &s;
        assert(ptr is null);
        assert(cas(&ptr, ifThis, writeThis));
        assert(ptr is writeThis);

        // head shared
        shared(S*) ifThis2 = writeThis;
        shared(S*) writeThis2 = null;
        assert(cas(&ptr, ifThis2, writeThis2));
        assert(ptr is null);

        // head unshared target doesn't want atomic CAS
        shared(S)* ptr2;
        static assert(!__traits(compiles, cas(&ptr2, ifThis, writeThis)));
        static assert(!__traits(compiles, cas(&ptr2, ifThis2, writeThis2)));
    }
    unittest
    {
        import core.thread;

        // Use heap memory to ensure an optimizing
        // compiler doesn't put things in registers.
        uint* x = new uint();
        bool* f = new bool();
        uint* r = new uint();

        auto thr = new Thread(()
    // ==== atomicFetchAdd and atomicFetchSub operations ====
    pure nothrow @nogc @safe unittest
    {
        shared ubyte u8 = 1;
        shared ushort u16 = 2;
        shared uint u32 = 3;
        shared byte i8 = 5;
        shared short i16 = 6;
        shared int i32 = 7;

        assert(atomicOp!"+="(u8, 8) == 9);
        assert(atomicOp!"+="(u16, 8) == 10);
        assert(atomicOp!"+="(u32, 8) == 11);
        assert(atomicOp!"+="(i8, 8) == 13);
        assert(atomicOp!"+="(i16, 8) == 14);
        assert(atomicOp!"+="(i32, 8) == 15);

        static if ( has64BitCAS )
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
            assert(atomicOp!"+="(u64, 8) == 12);
            assert(atomicOp!"+="(i64, 8) == 16);
        }
    }
    pure nothrow @nogc @safe unittest
    {
        shared ubyte u8 = 1;
        shared ushort u16 = 2;
        shared uint u32 = 3;
        shared byte i8 = 5;
        shared short i16 = 6;
        shared int i32 = 7;

        assert(atomicOp!"-="(u8, 1) == 0);
        assert(atomicOp!"-="(u16, 1) == 1);
        assert(atomicOp!"-="(u32, 1) == 2);
        assert(atomicOp!"-="(i8, 1) == 4);
        assert(atomicOp!"-="(i16, 1) == 5);
        assert(atomicOp!"-="(i32, 1) == 6);

        static if ( has64BitCAS )
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
            assert(atomicOp!"-="(u64, 1) == 3);
            assert(atomicOp!"-="(i64, 1) == 7);
        }
    }
    pure nothrow @nogc @safe unittest // issue 16651
    {
        shared ulong a = 2;
        uint b = 1;
        atomicOp!"-="( a, b );
        assert(a == 1);

        shared uint c = 2;
        ubyte d = 1;
        atomicOp!"-="( c, d );
        assert(c == 1);
    }
}