
1 /**
2  * The atomic module provides basic support for lock-free
3  * concurrent programming.
4  *
5  * $(NOTE Use the `-preview=nosharedaccess` compiler flag to detect
6  * unsafe individual read or write operations on shared data.)
7  *
8  * Copyright: Copyright Sean Kelly 2005 - 2016.
9  * License:   $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
10  * Authors:   Sean Kelly, Alex Rønne Petersen, Manu Evans
11  * Source:    $(DRUNTIMESRC core/_atomic.d)
12  */
13 
14 module core.atomic;
15 
16 ///
17 @safe unittest
18 {
19     int y = 2;
20     shared int x = y; // OK
21 
22     //x++; // read modify write error
23     x.atomicOp!"+="(1); // OK
24     //y = x; // read error with preview flag
25     y = x.atomicLoad(); // OK
26     assert(y == 3);
27     //x = 5; // write error with preview flag
28     x.atomicStore(5); // OK
29     assert(x.atomicLoad() == 5);
30 }
31 
32 import core.internal.atomic;
33 import core.internal.attributes : betterC;
34 import core.internal.traits : hasUnsharedIndirections;
35 
36 pragma(inline, true): // LDC
37 
38 /**
39  * Specifies the memory ordering semantics of an atomic operation.
40  *
41  * See_Also:
42  *     $(HTTP en.cppreference.com/w/cpp/atomic/memory_order)
43  */
44 enum MemoryOrder
45 {
46     /**
47      * Not sequenced.
48      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#monotonic, LLVM AtomicOrdering.Monotonic)
49      * and C++11/C11 `memory_order_relaxed`.
50      */
51     raw = 0,
52     /**
53      * Hoist-load + hoist-store barrier.
54      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquire, LLVM AtomicOrdering.Acquire)
55      * and C++11/C11 `memory_order_acquire`.
56      */
57     acq = 2,
58     /**
59      * Sink-load + sink-store barrier.
60      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#release, LLVM AtomicOrdering.Release)
61      * and C++11/C11 `memory_order_release`.
62      */
63     rel = 3,
64     /**
65      * Acquire + release barrier.
66      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquirerelease, LLVM AtomicOrdering.AcquireRelease)
67      * and C++11/C11 `memory_order_acq_rel`.
68      */
69     acq_rel = 4,
70     /**
71      * Fully sequenced (acquire + release). Corresponds to
72      * $(LINK2 https://llvm.org/docs/Atomics.html#sequentiallyconsistent, LLVM AtomicOrdering.SequentiallyConsistent)
73      * and C++11/C11 `memory_order_seq_cst`.
74      */
75     seq = 5,
76 }
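
/// Illustrative sketch (single-threaded, so it only exercises the API, not a
/// real race): the release store on `flag` pairs with the acquire load, so a
/// consumer that observes `flag == true` is guaranteed to see the store to `data`.
@safe unittest
{
    shared int data;
    shared bool flag;

    // producer side
    atomicStore!(MemoryOrder.raw)(data, 42);
    atomicStore!(MemoryOrder.rel)(flag, true);

    // consumer side
    if (atomicLoad!(MemoryOrder.acq)(flag))
        assert(atomicLoad!(MemoryOrder.raw)(data) == 42);
}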
77 
78 /**
79  * Loads 'val' from memory and returns it.  The memory barrier specified
80  * by 'ms' is applied to the operation, which is fully sequenced by
81  * default.  Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
82  * and MemoryOrder.seq.
83  *
84  * Params:
85  *  val = The target variable.
86  *
87  * Returns:
88  *  The value of 'val'.
89  */
90 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope const T val) pure nothrow @nogc @trusted
91     if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
92 {
93     static if (__traits(isFloating, T))
94     {
95         alias IntTy = IntForFloat!T;
96         IntTy r = core.internal.atomic.atomicLoad!ms(cast(IntTy*)&val);
97         return *cast(T*)&r;
98     }
99     else
100         return core.internal.atomic.atomicLoad!ms(cast(T*)&val);
101 }
102 
103 /// Ditto
104 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope shared const T val) pure nothrow @nogc @trusted
105     if (!hasUnsharedIndirections!T)
106 {
107     import core.internal.traits : hasUnsharedIndirections;
108     static assert(!hasUnsharedIndirections!T, "Copying `" ~ shared(const(T)).stringof ~ "` would violate shared.");
109 
110     return atomicLoad!ms(*cast(T*)&val);
111 }
112 
113 /// Ditto
114 TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref shared const T val) pure nothrow @nogc @trusted
115     if (hasUnsharedIndirections!T)
116 {
117     // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
118     // this is here because code exists in the wild that does this...
119 
120     return core.internal.atomic.atomicLoad!ms(cast(TailShared!T*)&val);
121 }
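
/// Minimal sketch: loading a `shared` scalar yields an unshared copy; the
/// default ordering is `MemoryOrder.seq`.
@safe unittest
{
    shared int x = 7;
    int a = atomicLoad(x);                   // sequentially consistent
    int b = atomicLoad!(MemoryOrder.acq)(x); // acquire
    assert(a == 7 && b == 7);
}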
122 
123 /**
124  * Writes 'newval' into 'val'.  The memory barrier specified by 'ms' is
125  * applied to the operation, which is fully sequenced by default.
126  * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
127  * MemoryOrder.seq.
128  *
129  * Params:
130  *  val    = The target variable.
131  *  newval = The value to store.
132  */
133 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
134     if (!is(T == shared) && !is(V == shared))
135 {
136     import core.internal.traits : hasElaborateCopyConstructor;
137     static assert (!hasElaborateCopyConstructor!T, "`T` may not have an elaborate copy: atomic operations override regular copying semantics.");
138 
139     // resolve implicit conversions
140     version (LDC)
141     {
142         import core.internal.traits : Unqual;
143         static if (is(Unqual!T == Unqual!V))
144         {
145             alias arg = newval;
146         }
147         else
148         {
149             // don't construct directly from `newval`, assign instead (`alias this` etc.)
150             T arg;
151             arg = newval;
152         }
153     }
154     else
155     {
156         T arg = newval;
157     }
158 
159     static if (__traits(isFloating, T))
160     {
161         alias IntTy = IntForFloat!T;
162         core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&arg);
163     }
164     else
165         core.internal.atomic.atomicStore!ms(&val, arg);
166 }
167 
168 /// Ditto
169 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
170     if (!is(T == class))
171 {
172     static if (is (V == shared U, U))
173         alias Thunk = U;
174     else
175     {
176         import core.internal.traits : hasUnsharedIndirections;
177         static assert(!hasUnsharedIndirections!V, "Copying argument `" ~ V.stringof ~ " newval` to `" ~ shared(T).stringof ~ " here` would violate shared.");
178         alias Thunk = V;
179     }
180     atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
181 }
182 
183 /// Ditto
184 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, auto ref shared V newval) pure nothrow @nogc @trusted
185     if (is(T == class))
186 {
187     static assert (is (V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
188 
189     core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
190 }
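
/// Minimal sketch: the default store is sequentially consistent; a release
/// store is typically paired with an acquire load in another thread.
@safe unittest
{
    shared int x;
    atomicStore(x, 1);                   // MemoryOrder.seq by default
    atomicStore!(MemoryOrder.rel)(x, 2); // release store
    assert(atomicLoad(x) == 2);
}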
191 
192 /**
193  * Atomically adds `mod` to the value referenced by `val` and returns the value `val` held previously.
194  * This operation is both lock-free and atomic.
195  *
196  * Params:
197  *  val = Reference to the value to modify.
198  *  mod = The value to add.
199  *
200  * Returns:
201  *  The value held previously by `val`.
202  */
203 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
204     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
205 in (atomicValueIsProperlyAligned(val))
206 {
207     static if (is(T == U*, U))
208         return cast(T)core.internal.atomic.atomicFetchAdd!ms(cast(size_t*)&val, mod * U.sizeof);
209     else
210         return core.internal.atomic.atomicFetchAdd!ms(&val, cast(T)mod);
211 }
212 
213 /// Ditto
214 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
215     if (__traits(isIntegral, T) || is(T == U*, U))
216 in (atomicValueIsProperlyAligned(val))
217 {
218     return atomicFetchAdd!ms(*cast(T*)&val, mod);
219 }
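
/// Sketch of both documented flavors: on integers the previous value is
/// returned; on pointers `mod` is scaled by the element size, mirroring
/// ordinary pointer arithmetic.
pure nothrow @nogc unittest
{
    shared int counter = 10;
    assert(atomicFetchAdd(counter, 5) == 10); // previous value
    assert(atomicLoad(counter) == 15);

    int[4] arr = [0, 10, 20, 30];
    int* p = &arr[0];
    assert(atomicFetchAdd(p, 2) == &arr[0]);  // previous pointer
    assert(*p == 20);                         // advanced by two elements
}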
220 
221 /**
222  * Atomically subtracts `mod` from the value referenced by `val` and returns the value `val` held previously.
223  * This operation is both lock-free and atomic.
224  *
225  * Params:
226  *  val = Reference to the value to modify.
227  *  mod = The value to subtract.
228  *
229  * Returns:
230  *  The value held previously by `val`.
231  */
232 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
233     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
234 in (atomicValueIsProperlyAligned(val))
235 {
236     static if (is(T == U*, U))
237         return cast(T)core.internal.atomic.atomicFetchSub!ms(cast(size_t*)&val, mod * U.sizeof);
238     else
239         return core.internal.atomic.atomicFetchSub!ms(&val, cast(T)mod);
240 }
241 
242 /// Ditto
243 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
244     if (__traits(isIntegral, T) || is(T == U*, U))
245 in (atomicValueIsProperlyAligned(val))
246 {
247     return atomicFetchSub!ms(*cast(T*)&val, mod);
248 }
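
/// Companion sketch: the value held before the subtraction is returned, which
/// makes "was this the last reference?" style checks a single atomic step.
pure nothrow @nogc unittest
{
    shared int refs = 1;
    if (atomicFetchSub(refs, 1) == 1)
    {
        // the previous value was 1, so this caller released the last reference
    }
    assert(atomicLoad(refs) == 0);
}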
249 
250 /**
251 * Atomically performs an AND operation between `mod` and the value referenced by `val` and returns the value `val` held previously.
252 * This operation is both lock-free and atomic.
253 *
254 * Params:
255 *  val = Reference to the value to modify.
256 *  mod = The value to AND with.
257 *
258 * Returns:
259 *  The value held previously by `val`.
260 */
261 T atomicFetchAnd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, T mod) pure nothrow @nogc @trusted
262 if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
263 in (atomicValueIsProperlyAligned(val))
264 {
265     return core.internal.atomic.atomicFetchAnd!ms(&val, mod);
266 }
267 
268 /// Ditto
269 T atomicFetchAnd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, T mod) pure nothrow @nogc @trusted
270 if (__traits(isIntegral, T) || is(T == U*, U))
271 in (atomicValueIsProperlyAligned(val))
272 {
273     return atomicFetchAnd!ms(*cast(T*)&val, mod);
274 }
275 
276 /**
277 * Atomically performs an OR operation between `mod` and the value referenced by `val` and returns the value `val` held previously.
278 * This operation is both lock-free and atomic.
279 *
280 * Params:
281 *  val = Reference to the value to modify.
282 *  mod = The value to OR with.
283 *
284 * Returns:
285 *  The value held previously by `val`.
286 */
287 T atomicFetchOr(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, T mod) pure nothrow @nogc @trusted
288 if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
289 in (atomicValueIsProperlyAligned(val))
290 {
291     return core.internal.atomic.atomicFetchOr!ms(&val, mod);
292 }
293 
294 /// Ditto
295 T atomicFetchOr(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, T mod) pure nothrow @nogc @trusted
296 if (__traits(isIntegral, T) || is(T == U*, U))
297 in (atomicValueIsProperlyAligned(val))
298 {
299     return atomicFetchOr!ms(*cast(T*)&val, mod);
300 }
301 
302 /**
303 * Atomically performs an XOR operation between `mod` and the value referenced by `val` and returns the value `val` held previously.
304 * This operation is both lock-free and atomic.
305 *
306 * Params:
307 *  val = Reference to the value to modify.
308 *  mod = The value to XOR with.
309 *
310 * Returns:
311 *  The value held previously by `val`.
312 */
313 T atomicFetchXor(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, T mod) pure nothrow @nogc @trusted
314 if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
315 in (atomicValueIsProperlyAligned(val))
316 {
317     return core.internal.atomic.atomicFetchXor!ms(&val, mod);
318 }
319 
320 /// Ditto
321 T atomicFetchXor(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, T mod) pure nothrow @nogc @trusted
322 if (__traits(isIntegral, T) || is(T == U*, U))
323 in (atomicValueIsProperlyAligned(val))
324 {
325     return atomicFetchXor!ms(*cast(T*)&val, mod);
326 }
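
/// One sketch for the three bitwise read-modify-write operations above; each
/// returns the bits held before the operation.
pure nothrow @nogc unittest
{
    shared uint flags = 0b1100;
    assert(atomicFetchOr(flags, 0b0011u) == 0b1100);  // flags is now 0b1111
    assert(atomicFetchAnd(flags, 0b1010u) == 0b1111); // flags is now 0b1010
    assert(atomicFetchXor(flags, 0b1111u) == 0b1010); // flags is now 0b0101
    assert(atomicLoad(flags) == 0b0101);
}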
327 
328 /**
329  * Exchange `exchangeWith` with the memory referenced by `here`.
330  * This operation is both lock-free and atomic.
331  *
332  * Params:
333  *  here         = The address of the destination variable.
334  *  exchangeWith = The value to exchange.
335  *
336  * Returns:
337  *  The value held previously by `here`.
338  */
339 T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(T* here, V exchangeWith) pure nothrow @nogc @trusted
340     if (!is(T == shared) && !is(V == shared))
341 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
342 {
343     // resolve implicit conversions
344     T arg = exchangeWith;
345 
346     static if (__traits(isFloating, T))
347     {
348         alias IntTy = IntForFloat!T;
349         IntTy r = core.internal.atomic.atomicExchange!ms(cast(IntTy*)here, *cast(IntTy*)&arg);
350         return *cast(T*)&r;
351     }
352     else
353         return core.internal.atomic.atomicExchange!ms(here, arg);
354 }
355 
356 /// Ditto
357 TailShared!T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, V exchangeWith) pure nothrow @nogc @trusted
358     if (!is(T == class) && !is(T == interface))
359 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
360 {
361     static if (is (V == shared U, U))
362         alias Thunk = U;
363     else
364     {
365         import core.internal.traits : hasUnsharedIndirections;
366         static assert(!hasUnsharedIndirections!V, "Copying `exchangeWith` of type `" ~ V.stringof ~ "` to `" ~ shared(T).stringof ~ "` would violate shared.");
367         alias Thunk = V;
368     }
369     return atomicExchange!ms(cast(T*)here, *cast(Thunk*)&exchangeWith);
370 }
371 
372 /// Ditto
373 shared(T) atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, shared(V) exchangeWith) pure nothrow @nogc @trusted
374     if (is(T == class) || is(T == interface))
375 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
376 {
377     static assert (is (V : T), "Can't assign `exchangeWith` of type `" ~ shared(V).stringof ~ "` to `" ~ shared(T).stringof ~ "`.");
378 
379     return cast(shared)core.internal.atomic.atomicExchange!ms(cast(T*)here, cast(V)exchangeWith);
380 }
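
/// Minimal sketch: the old value comes back in the same atomic step, which is
/// handy for one-shot handoffs such as claiming a token.
pure nothrow @nogc unittest
{
    shared int token = 1;
    assert(atomicExchange(&token, 0) == 1); // first caller claims the token
    assert(atomicExchange(&token, 0) == 0); // a second attempt finds it gone
}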
381 
382 /**
383  * Performs either compare-and-set or compare-and-swap (or exchange).
384  *
385  * There are two categories of overloads in this template:
386  * The first category does a simple compare-and-set.
387  * The comparison value (`ifThis`) is treated as an rvalue.
388  *
389  * The second category does a compare-and-swap (a.k.a. compare-and-exchange),
390  * and expects `ifThis` to be a pointer type, where the previous value
391  * of `here` will be written.
392  *
393  * This operation is both lock-free and atomic.
394  *
395  * Params:
396  *  here      = The address of the destination variable.
397  *  writeThis = The value to store.
398  *  ifThis    = The comparison value.
399  *
400  * Returns:
401  *  true if the store occurred, false if not.
402  */
403 template cas(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)
404 {
405     /// Compare-and-set for non-shared values
406     bool cas(T, V1, V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
407     if (!is(T == shared) && is(T : V1))
408     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
409     {
410         // resolve implicit conversions
411         const T arg1 = ifThis;
412         T arg2 = writeThis;
413 
414         static if (__traits(isFloating, T))
415         {
416             alias IntTy = IntForFloat!T;
417             return atomicCompareExchangeStrongNoResult!(succ, fail)(
418                 cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
419         }
420         else
421             return atomicCompareExchangeStrongNoResult!(succ, fail)(here, arg1, arg2);
422     }
423 
424     /// Compare-and-set for shared value type
425     bool cas(T, V1, V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
426     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
427     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
428     {
429         static if (is (V1 == shared U1, U1))
430             alias Thunk1 = U1;
431         else
432             alias Thunk1 = V1;
433         static if (is (V2 == shared U2, U2))
434             alias Thunk2 = U2;
435         else
436         {
437             import core.internal.traits : hasUnsharedIndirections;
438             static assert(!hasUnsharedIndirections!V2,
439                           "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
440                           shared(T).stringof ~ "* here` would violate shared.");
441             alias Thunk2 = V2;
442         }
443         return cas(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
444     }
445 
446     /// Compare-and-set for `shared` reference type (`class`)
447     bool cas(T, V1, V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis)
448     pure nothrow @nogc @trusted
449     if (is(T == class))
450     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
451     {
452         return atomicCompareExchangeStrongNoResult!(succ, fail)(
453             cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
454     }
455 
456     /// Compare-and-exchange for non-`shared` types
457     bool cas(T, V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
458     if (!is(T == shared) && !is(V == shared))
459     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
460     {
461         // resolve implicit conversions
462         T arg1 = writeThis;
463 
464         static if (__traits(isFloating, T))
465         {
466             alias IntTy = IntForFloat!T;
467             return atomicCompareExchangeStrong!(succ, fail)(
468                 cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
469         }
470         else
471             return atomicCompareExchangeStrong!(succ, fail)(here, ifThis, arg1);
472     }
473 
474     /// Compare and exchange for mixed-`shared`ness types
475     bool cas(T, V1, V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
476     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
477     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
478     {
479         static if (is (V1 == shared U1, U1))
480             alias Thunk1 = U1;
481         else
482         {
483             import core.internal.traits : hasUnsharedIndirections;
484             static assert(!hasUnsharedIndirections!V1,
485                           "Copying `" ~ shared(T).stringof ~ "* here` to `" ~
486                           V1.stringof ~ "* ifThis` would violate shared.");
487             alias Thunk1 = V1;
488         }
489         static if (is (V2 == shared U2, U2))
490             alias Thunk2 = U2;
491         else
492         {
493             import core.internal.traits : hasUnsharedIndirections;
494             static assert(!hasUnsharedIndirections!V2,
495                           "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
496                           shared(T).stringof ~ "* here` would violate shared.");
497             alias Thunk2 = V2;
498         }
499         static assert (is(T : Thunk1),
500                        "Mismatching types for `here` and `ifThis`: `" ~
501                        shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
502         return cas(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
503     }
504 
505     /// Compare-and-exchange for `class`
506     bool cas(T, V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis)
507     pure nothrow @nogc @trusted
508     if (is(T == class))
509     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
510     {
511         return atomicCompareExchangeStrong!(succ, fail)(
512             cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
513     }
514 }
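
/// Sketch of the two flavors described above.  Compare-and-set treats `ifThis`
/// as an rvalue; compare-and-exchange takes a pointer and writes the observed
/// value back through it, so a failed attempt tells you what was actually there.
pure nothrow @nogc unittest
{
    shared int x = 1;

    // compare-and-set
    assert( cas(&x, 1, 2)); // x held 1, so 2 was stored
    assert(!cas(&x, 1, 3)); // x no longer holds 1; nothing stored
    assert(atomicLoad(x) == 2);

    // compare-and-exchange
    int expected = 1;
    assert(!cas(&x, &expected, 3)); // fails and loads the current value...
    assert(expected == 2);          // ...into `expected`
    assert( cas(&x, &expected, 3)); // retry with the refreshed comparand
    assert(atomicLoad(x) == 3);
}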
515 
516 /**
517 * Stores 'writeThis' to the memory referenced by 'here' if the value
518 * referenced by 'here' is equal to 'ifThis'.
519 * The 'weak' version of cas may spuriously fail. It is recommended to
520 * use `casWeak` only when `cas` would be used in a loop.
521 * This operation is both
522 * lock-free and atomic.
523 *
524 * Params:
525 *  here      = The address of the destination variable.
526 *  writeThis = The value to store.
527 *  ifThis    = The comparison value.
528 *
529 * Returns:
530 *  true if the store occurred, false if not.
531 */
532 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
533     if (!is(T == shared) && is(T : V1))
534 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
535 {
536     // resolve implicit conversions
537     T arg1 = ifThis;
538     T arg2 = writeThis;
539 
540     static if (__traits(isFloating, T))
541     {
542         alias IntTy = IntForFloat!T;
543         return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
544     }
545     else
546         return atomicCompareExchangeWeakNoResult!(succ, fail)(here, arg1, arg2);
547 }
548 
549 /// Ditto
550 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
551     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
552 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
553 {
554     static if (is (V1 == shared U1, U1))
555         alias Thunk1 = U1;
556     else
557         alias Thunk1 = V1;
558     static if (is (V2 == shared U2, U2))
559         alias Thunk2 = U2;
560     else
561     {
562         import core.internal.traits : hasUnsharedIndirections;
563         static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
564         alias Thunk2 = V2;
565     }
566     return casWeak!(succ, fail)(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
567 }
568 
569 /// Ditto
570 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis) pure nothrow @nogc @trusted
571     if (is(T == class))
572 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
573 {
574     return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
575 }
576 
577 /**
578 * Stores 'writeThis' to the memory referenced by 'here' if the value
579 * referenced by 'here' is equal to the value referenced by 'ifThis'.
580 * The prior value referenced by 'here' is written back through `ifThis`,
581 * so on failure the caller can retry with the value actually observed.
582 * The 'weak' version of cas may spuriously fail. It is recommended to
583 * use `casWeak` only when `cas` would be used in a loop.
584 * This operation is both lock-free and atomic.
585 *
586 * Params:
587 *  here      = The address of the destination variable.
588 *  writeThis = The value to store.
589 *  ifThis    = The address of the value to compare, and receives the prior value of `here` as output.
590 *
591 * Returns:
592 *  true if the store occurred, false if not.
593 */
594 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
595     if (!is(T == shared S, S) && !is(V == shared U, U))
596 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
597 {
598     // resolve implicit conversions
599     T arg1 = writeThis;
600 
601     static if (__traits(isFloating, T))
602     {
603         alias IntTy = IntForFloat!T;
604         return atomicCompareExchangeWeak!(succ, fail)(cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
605     }
606     else
607         return atomicCompareExchangeWeak!(succ, fail)(here, ifThis, arg1);
608 }
609 
610 /// Ditto
611 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
612     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
613 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
614 {
615     static if (is (V1 == shared U1, U1))
616         alias Thunk1 = U1;
617     else
618     {
619         import core.internal.traits : hasUnsharedIndirections;
620         static assert(!hasUnsharedIndirections!V1, "Copying `" ~ shared(T).stringof ~ "* here` to `" ~ V1.stringof ~ "* ifThis` would violate shared.");
621         alias Thunk1 = V1;
622     }
623     static if (is (V2 == shared U2, U2))
624         alias Thunk2 = U2;
625     else
626     {
627         import core.internal.traits : hasUnsharedIndirections;
628         static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
629         alias Thunk2 = V2;
630     }
631     static assert (is(T : Thunk1), "Mismatching types for `here` and `ifThis`: `" ~ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
632     return casWeak!(succ, fail)(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
633 }
634 
635 /// Ditto
636 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis) pure nothrow @nogc @trusted
637     if (is(T == class))
638 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
639 {
640     return atomicCompareExchangeWeak!(succ, fail)(cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
641 }
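
/// Sketch of the intended pattern: because the weak variant may fail
/// spuriously, wrap it in a retry loop; the pointer form refreshes `seen`
/// with the observed value on every failure.
pure nothrow @nogc unittest
{
    shared int x = 41;
    int seen = atomicLoad!(MemoryOrder.raw)(x);
    while (!casWeak(&x, &seen, seen + 1)) {}
    assert(atomicLoad(x) == 42);
}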
642 
643 /**
644  * Inserts a memory fence with the ordering given by `order`, by default a full
645  * load/store fence (on platforms that need one), ensuring that all loads and
646  * stores before a call to this function are executed before any after the call.
647  */
648 void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @safe
649 {
650     core.internal.atomic.atomicFence!order();
651 }
652 
653 /**
654  * Gives a hint to the processor that the calling thread is in a 'spin-wait' loop,
655  * allowing it to allocate resources more efficiently.
656  */
657 void pause() pure nothrow @nogc @safe
658 {
659     core.internal.atomic.pause();
660 }
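
/// Illustrative spin-wait sketch combining the two primitives above.  The flag
/// is pre-set so this single-threaded test exits immediately; in real code
/// another thread would publish it with a release store.
@safe unittest
{
    shared bool ready = true;
    while (!atomicLoad!(MemoryOrder.acq)(ready))
        pause();   // hint to the CPU that we are spinning
    atomicFence(); // full two-way fence; usually unnecessary when acq/rel suffice
}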
661 
662 /**
663  * Performs the binary operation 'op' on val using 'mod' as the modifier.
664  *
665  * Params:
666  *  val = The target variable.
667  *  mod = The modifier to apply.
668  *
669  * Returns:
670  *  The result of the operation.
671  */
672 TailShared!T atomicOp(string op, T, V1)(ref shared T val, V1 mod) pure nothrow @nogc @trusted // LDC: was @safe
673     if (__traits(compiles, mixin("*cast(T*)&val" ~ op ~ "mod")))
674 in (atomicValueIsProperlyAligned(val))
675 {
676     // binary operators
677     //
678     // +    -   *   /   %   ^^  &
679     // |    ^   <<  >>  >>> ~   in
680     // ==   !=  <   <=  >   >=
681     static if (op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
682                 op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
683                 op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
684                 op == "~"  || // skip "in"
685                 op == "==" || op == "!=" || op == "<"  || op == "<="  ||
686                 op == ">"  || op == ">=")
687     {
688         T get = atomicLoad!(MemoryOrder.raw, T)(val);
689         mixin("return get " ~ op ~ " mod;");
690     }
691     else
692     // assignment operators
693     //
694     // +=   -=  *=  /=  %=  ^^= &=
695     // |=   ^=  <<= >>= >>>=    ~=
696     static if (op == "+=")
697     {
698         T m = cast(T) mod;
699         return cast(T)(atomicFetchAdd(val, m) + m);
700     }
701     else static if (op == "-=")
702     {
703         T m = cast(T) mod;
704         return cast(T)(atomicFetchSub(val, m) - m);
705     }
706     else static if (op == "&=")
707     {
708         T m = cast(T) mod;
709         return cast(T)(atomicFetchAnd(val, m) & m);
710     }
711     else static if (op == "|=")
712     {
713         T m = cast(T) mod;
714         return cast(T)(atomicFetchOr(val, m) | m);
715     }
716     else static if (op == "^=")
717     {
718         T m = cast(T) mod;
719         return cast(T)(atomicFetchXor(val, m) ^ m);
720     }
721     else static if (op == "*="  || op == "/=" || op == "%=" || op == "^^=" ||
722                     op == "<<=" || op == ">>=" || op == ">>>=") // skip "~="
723     {
724         T set, get = atomicLoad!(MemoryOrder.raw, T)(val);
725         do
726         {
727             set = get;
728             mixin("set " ~ op ~ " mod;");
729         } while (!casWeakByRef(val, get, set));
730         return set;
731     }
732     else
733     {
734         static assert(false, "Operation not supported.");
735     }
736 }
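
/// Quick sketch of the three classes of operators handled above: plain binary
/// operators read once and compute locally (nothing is written back); the
/// compound assignments either map onto the fetch-and-op primitives or fall
/// back to a CAS loop, and return the new value.
pure nothrow @nogc unittest
{
    shared int x = 6;
    assert(atomicOp!"+"(x, 1) == 7);   // pure binary op; x is unchanged
    assert(atomicOp!"+="(x, 4) == 10); // fetch-add path
    assert(atomicOp!"*="(x, 2) == 20); // compare-and-swap loop path
    assert(atomicLoad(x) == 20);
}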
737 
738 
739 version (LDC)
740 {
741     enum has64BitXCHG = true;
742     enum has64BitCAS = true;
743 
744     // Enable 128bit CAS on 64bit platforms if supported.
745     version (D_LP64)
746     {
747         version (PPC64)
748             enum has128BitCAS = false;
749         else
750             enum has128BitCAS = true;
751     }
752     else
753         enum has128BitCAS = false;
754 }
755 else version (D_InlineAsm_X86)
756 {
757     enum has64BitXCHG = false;
758     enum has64BitCAS = true;
759     enum has128BitCAS = false;
760 }
761 else version (D_InlineAsm_X86_64)
762 {
763     enum has64BitXCHG = true;
764     enum has64BitCAS = true;
765     enum has128BitCAS = true;
766 }
767 else version (GNU)
768 {
769     import gcc.config;
770     enum has64BitCAS = GNU_Have_64Bit_Atomics;
771     enum has64BitXCHG = GNU_Have_64Bit_Atomics;
772     enum has128BitCAS = GNU_Have_LibAtomic;
773 }
774 else
775 {
776     enum has64BitXCHG = false;
777     enum has64BitCAS = false;
778     enum has128BitCAS = false;
779 }
780 
781 private
782 {
783     bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
784     {
785         return atomicPtrIsProperlyAligned(&val);
786     }
787 
788     bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
789     {
790         // NOTE: Strictly speaking, the x86 supports atomic operations on
791         //       unaligned values.  However, this is far slower than the
792         //       common case, so such behavior should be prohibited.
793         static if (T.sizeof > size_t.sizeof)
794         {
795             version (X86)
796             {
797                 // cmpxchg8b only requires 4-bytes alignment
798                 return cast(size_t)ptr % size_t.sizeof == 0;
799             }
800             else
801             {
802                 // e.g., x86_64 cmpxchg16b requires 16-bytes alignment
803                 return cast(size_t)ptr % T.sizeof == 0;
804             }
805         }
806         else
807         {
808             return cast(size_t)ptr % T.sizeof == 0;
809         }
810     }
811 
812     template IntForFloat(F)
813         if (__traits(isFloating, F))
814     {
815         static if (F.sizeof == 4)
816             alias IntForFloat = uint;
817         else static if (F.sizeof == 8)
818             alias IntForFloat = ulong;
819         else
820             static assert (false, "Invalid floating point type: " ~ F.stringof ~ ", only support `float` and `double`.");
821     }
822 
823     template IntForStruct(S)
824         if (is(S == struct))
825     {
826         static if (S.sizeof == 1)
827             alias IntForStruct = ubyte;
828         else static if (S.sizeof == 2)
829             alias IntForStruct = ushort;
830         else static if (S.sizeof == 4)
831             alias IntForStruct = uint;
832         else static if (S.sizeof == 8)
833             alias IntForStruct = ulong;
834         else static if (S.sizeof == 16)
835             alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
836         else
837             static assert (ValidateStruct!S);
838     }
839 
840     template ValidateStruct(S)
841         if (is(S == struct))
842     {
843         import core.internal.traits : hasElaborateAssign;
844 
845         // `(x & (x-1)) == 0` checks that x is a power of 2.
846         static assert (S.sizeof <= size_t.sizeof * 2
847             && (S.sizeof & (S.sizeof - 1)) == 0,
848             S.stringof ~ " has invalid size for atomic operations.");
849         static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
850 
851         enum ValidateStruct = true;
852     }
853 
854     // TODO: it'd be nice if we had @trusted scopes; we could remove this...
855     bool casWeakByRef(T,V1,V2)(ref T value, ref V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
856     {
857         return casWeak(&value, &ifThis, writeThis);
858     }
859 
860     /* Construct a type with a shared tail, and if possible with an unshared
861     head. */
862     template TailShared(U) if (!is(U == shared))
863     {
864         alias TailShared = .TailShared!(shared U);
865     }
866     template TailShared(S) if (is(S == shared))
867     {
868         // Get the unshared variant of S.
869         static if (is(S U == shared U)) {}
870         else static assert(false, "Should never be triggered. The `static " ~
871             "if` declares `U` as the unshared version of the shared type " ~
872             "`S`. `S` is explicitly declared as shared, so getting `U` " ~
873             "should always work.");
874 
875         static if (is(S : U))
876             alias TailShared = U;
877         else static if (is(S == struct))
878         {
879             enum implName = () {
880                 /* Start with "_impl". If S has a field with that name, append
881                 underscores until the clash is resolved. */
882                 string name = "_impl";
883                 string[] fieldNames;
884                 static foreach (alias field; S.tupleof)
885                 {
886                     fieldNames ~= __traits(identifier, field);
887                 }
888                 static bool canFind(string[] haystack, string needle)
889                 {
890                     foreach (candidate; haystack)
891                     {
892                         if (candidate == needle) return true;
893                     }
894                     return false;
895                 }
896                 while (canFind(fieldNames, name)) name ~= "_";
897                 return name;
898             } ();
899             struct TailShared
900             {
901                 static foreach (i, alias field; S.tupleof)
902                 {
903                     /* On @trusted: This is casting the field from shared(Foo)
904                     to TailShared!Foo. The cast is safe because the field has
905                     been loaded and is not shared anymore. */
906                     mixin("
907                         @trusted @property
908                         ref " ~ __traits(identifier, field) ~ "()
909                         {
910                             alias R = TailShared!(typeof(field));
911                             return * cast(R*) &" ~ implName ~ ".tupleof[i];
912                         }
913                     ");
914                 }
915                 mixin("
916                     S " ~ implName ~ ";
917                     alias " ~ implName ~ " this;
918                 ");
919             }
920         }
921         else
922             alias TailShared = S;
923     }
924     @safe unittest
925     {
926         // No tail (no indirections) -> fully unshared.
927 
928         static assert(is(TailShared!int == int));
929         static assert(is(TailShared!(shared int) == int));
930 
931         static struct NoIndir { int i; }
932         static assert(is(TailShared!NoIndir == NoIndir));
933         static assert(is(TailShared!(shared NoIndir) == NoIndir));
934 
935         // Tail can be independently shared or is already -> tail-shared.
936 
937         static assert(is(TailShared!(int*) == shared(int)*));
938         static assert(is(TailShared!(shared int*) == shared(int)*));
939         static assert(is(TailShared!(shared(int)*) == shared(int)*));
940 
941         static assert(is(TailShared!(int[]) == shared(int)[]));
942         static assert(is(TailShared!(shared int[]) == shared(int)[]));
943         static assert(is(TailShared!(shared(int)[]) == shared(int)[]));
944 
945         static struct S1 { shared int* p; }
946         static assert(is(TailShared!S1 == S1));
947         static assert(is(TailShared!(shared S1) == S1));
948 
949         static struct S2 { shared(int)* p; }
950         static assert(is(TailShared!S2 == S2));
951         static assert(is(TailShared!(shared S2) == S2));
952 
953         // Tail follows shared-ness of head -> fully shared.
954 
955         static class C { int i; }
956         static assert(is(TailShared!C == shared C));
957         static assert(is(TailShared!(shared C) == shared C));
958 
959         /* However, structs get a wrapper that has getters which cast to
960         TailShared. */
961 
962         static struct S3 { int* p; int _impl; int _impl_; int _impl__; }
963         static assert(!is(TailShared!S3 : S3));
964         static assert(is(TailShared!S3 : shared S3));
965         static assert(is(TailShared!(shared S3) == TailShared!S3));
966 
967         static struct S4 { shared(int)** p; }
968         static assert(!is(TailShared!S4 : S4));
969         static assert(is(TailShared!S4 : shared S4));
970         static assert(is(TailShared!(shared S4) == TailShared!S4));
971     }
972 }
973 
974 
975 ////////////////////////////////////////////////////////////////////////////////
976 // Unit Tests
977 ////////////////////////////////////////////////////////////////////////////////
978 
979 
980 version (CoreUnittest)
981 {
982     version (D_LP64)
983     {
984         enum hasDWCAS = has128BitCAS;
985     }
986     else
987     {
988         enum hasDWCAS = has64BitCAS;
989     }
990 
991     void testXCHG(T)(T val) pure nothrow @nogc @trusted
992     in
993     {
994         assert(val !is T.init);
995     }
996     do
997     {
998         T         base = cast(T)null;
999         shared(T) atom = cast(shared(T))null;
1000 
1001         assert(base !is val, T.stringof);
1002         assert(atom is base, T.stringof);
1003 
1004         assert(atomicExchange(&atom, val) is base, T.stringof);
1005         assert(atom is val, T.stringof);
1006     }
1007 
1008     void testCAS(T)(T val) pure nothrow @nogc @trusted
1009     in
1010     {
1011         assert(val !is T.init);
1012     }
1013     do
1014     {
1015         T         base = cast(T)null;
1016         shared(T) atom = cast(shared(T))null;
1017 
1018         assert(base !is val, T.stringof);
1019         assert(atom is base, T.stringof);
1020 
1021         assert(cas(&atom, base, val), T.stringof);
1022         assert(atom is val, T.stringof);
1023         assert(!cas(&atom, base, base), T.stringof);
1024         assert(atom is val, T.stringof);
1025 
1026         atom = cast(shared(T))null;
1027 
1028         shared(T) arg = base;
1029         assert(cas(&atom, &arg, val), T.stringof);
1030         assert(arg is base, T.stringof);
1031         assert(atom is val, T.stringof);
1032 
1033         arg = base;
1034         assert(!cas(&atom, &arg, base), T.stringof);
1035         assert(arg is val, T.stringof);
1036         assert(atom is val, T.stringof);
1037     }
1038 
1039     void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)(T val = T.init + 1) pure nothrow @nogc @trusted
1040     {
1041         T         base = cast(T) 0;
1042         shared(T) atom = cast(T) 0;
1043 
1044         assert(base !is val);
1045         assert(atom is base);
1046         atomicStore!(ms)(atom, val);
1047         base = atomicLoad!(ms)(atom);
1048 
1049         assert(base is val, T.stringof);
1050         assert(atom is val);
1051     }
1052 
1053 
1054     void testType(T)(T val = T.init + 1) pure nothrow @nogc @safe
1055     {
1056         static if (T.sizeof < 8 || has64BitXCHG)
1057             testXCHG!(T)(val);
1058         testCAS!(T)(val);
1059         testLoadStore!(MemoryOrder.seq, T)(val);
1060         testLoadStore!(MemoryOrder.raw, T)(val);
1061     }
1062 
1063     @betterC @safe pure nothrow unittest
1064     {
1065         testType!(bool)();
1066 
1067         testType!(byte)();
1068         testType!(ubyte)();
1069 
1070         testType!(short)();
1071         testType!(ushort)();
1072 
1073         testType!(int)();
1074         testType!(uint)();
1075     }
1076 
1077     @safe pure nothrow unittest
1078     {
1079 
1080         testType!(shared int*)();
1081 
1082         static interface Inter {}
1083         static class KlassImpl : Inter {}
1084         testXCHG!(shared Inter)(new shared(KlassImpl));
1085         testCAS!(shared Inter)(new shared(KlassImpl));
1086 
1087         static class Klass {}
1088         testXCHG!(shared Klass)(new shared(Klass));
1089         testCAS!(shared Klass)(new shared(Klass));
1090 
1091         testXCHG!(shared int)(42);
1092 
1093         testType!(float)(0.1f);
1094 
1095         static if (has64BitCAS)
1096         {
1097             testType!(double)(0.1);
1098             testType!(long)();
1099             testType!(ulong)();
1100         }
1101         static if (has128BitCAS)
1102         {
1103             () @trusted
1104             {
1105                 align(16) struct Big { long a, b; }
1106 
1107                 shared(Big) atom;
1108                 shared(Big) base;
1109                 shared(Big) arg;
1110                 shared(Big) val = Big(1, 2);
1111 
1112                 assert(cas(&atom, arg, val), Big.stringof);
1113                 assert(atom is val, Big.stringof);
1114                 assert(!cas(&atom, arg, val), Big.stringof);
1115                 assert(atom is val, Big.stringof);
1116 
1117                 atom = Big();
1118                 assert(cas(&atom, &arg, val), Big.stringof);
1119                 assert(arg is base, Big.stringof);
1120                 assert(atom is val, Big.stringof);
1121 
1122                 arg = Big();
1123                 assert(!cas(&atom, &arg, base), Big.stringof);
1124                 assert(arg is val, Big.stringof);
1125                 assert(atom is val, Big.stringof);
1126             }();
1127         }
1128 
1129         shared(size_t) i;
1130 
1131         atomicOp!"+="(i, cast(size_t) 1);
1132         assert(i == 1);
1133 
1134         atomicOp!"-="(i, cast(size_t) 1);
1135         assert(i == 0);
1136 
1137         shared float f = 0.1f;
1138         atomicOp!"+="(f, 0.1f);
1139         assert(f > 0.1999f && f < 0.2001f);
1140 
1141         static if (has64BitCAS)
1142         {
1143             shared double d = 0.1;
1144             atomicOp!"+="(d, 0.1);
1145             assert(d > 0.1999 && d < 0.2001);
1146         }
1147     }
1148 
1149     @betterC pure nothrow unittest
1150     {
1151         static if (has128BitCAS)
1152         {
1153             struct DoubleValue
1154             {
1155                 long value1;
1156                 long value2;
1157             }
1158 
1159             align(16) shared DoubleValue a;
1160             atomicStore(a, DoubleValue(1, 2));
1161             assert(a.value1 == 1 && a.value2 == 2);
1162 
1163             while (!cas(&a, DoubleValue(1, 2), DoubleValue(3, 4))) {}
1164             assert(a.value1 == 3 && a.value2 == 4);
1165 
1166             align(16) DoubleValue b = atomicLoad(a);
1167             assert(b.value1 == 3 && b.value2 == 4);
1168         }
1169 
1170         static if (hasDWCAS)
1171         {
1172             static struct List { size_t gen; List* next; }
1173             shared(List) head;
1174             assert(cas(&head, shared(List)(0, null), shared(List)(1, cast(List*)1)));
1175             assert(head.gen == 1);
1176             assert(cast(size_t)head.next == 1);
1177         }
1178 
1179         // https://issues.dlang.org/show_bug.cgi?id=20629
1180         static struct Struct
1181         {
1182             uint a, b;
1183         }
1184         shared Struct s1 = Struct(1, 2);
1185         atomicStore(s1, Struct(3, 4));
1186         assert(cast(uint) s1.a == 3);
1187         assert(cast(uint) s1.b == 4);
1188     }
1189 
1190     // https://issues.dlang.org/show_bug.cgi?id=20844
1191     static if (hasDWCAS)
1192     {
1193         debug: // tests CAS in-contract
1194 
1195         pure nothrow unittest
1196         {
1197             import core.exception : AssertError;
1198 
1199             align(16) shared ubyte[2 * size_t.sizeof + 1] data;
1200             auto misalignedPointer = cast(size_t[2]*) &data[1];
1201             size_t[2] x;
1202 
1203             try
1204                 cas(misalignedPointer, x, x);
1205             catch (AssertError)
1206                 return;
1207 
1208             assert(0, "should have failed");
1209         }
1210     }
1211 
1212     @betterC pure nothrow @nogc @safe unittest
1213     {
1214         int a;
1215         if (casWeak!(MemoryOrder.acq_rel, MemoryOrder.raw)(&a, 0, 4))
1216             assert(a == 4);
1217     }
1218 
1219     @betterC pure nothrow unittest
1220     {
1221         // https://issues.dlang.org/show_bug.cgi?id=17821
1222         {
1223             shared ulong x = 0x1234_5678_8765_4321;
1224             atomicStore(x, 0);
1225             assert(x == 0);
1226         }
1227         {
1228             struct S
1229             {
1230                 ulong x;
1231                 alias x this;
1232             }
1233             shared S s;
1234             s = 0x1234_5678_8765_4321;
1235             atomicStore(s, 0);
1236             assert(s.x == 0);
1237         }
1238         {
1239             abstract class Logger {}
1240             shared Logger s1;
1241             Logger s2;
1242             atomicStore(s1, cast(shared) s2);
1243         }
1244     }
1245 
1246     @betterC pure nothrow unittest
1247     {
1248         static struct S { int val; }
1249         auto s = shared(S)(1);
1250 
1251         shared(S*) ptr;
1252 
1253         // head unshared
1254         shared(S)* ifThis = null;
1255         shared(S)* writeThis = &s;
1256         assert(ptr is null);
1257         assert(cas(&ptr, ifThis, writeThis));
1258         assert(ptr is writeThis);
1259 
1260         // head shared
1261         shared(S*) ifThis2 = writeThis;
1262         shared(S*) writeThis2 = null;
1263         assert(cas(&ptr, ifThis2, writeThis2));
1264         assert(ptr is null);
1265     }
1266 
1267     // === atomicFetchAdd and atomicFetchSub operations ===
1268     @betterC pure nothrow @nogc @safe unittest
1269     {
1270         shared ubyte u8 = 1;
1271         shared ushort u16 = 2;
1272         shared uint u32 = 3;
1273         shared byte i8 = 5;
1274         shared short i16 = 6;
1275         shared int i32 = 7;
1276 
1277         assert(atomicOp!"+="(u8, 8) == 9);
1278         assert(atomicOp!"+="(u16, 8) == 10);
1279         assert(atomicOp!"+="(u32, 8) == 11);
1280         assert(atomicOp!"+="(i8, 8) == 13);
1281         assert(atomicOp!"+="(i16, 8) == 14);
1282         assert(atomicOp!"+="(i32, 8) == 15);
1283         version (D_LP64)
1284         {
1285             shared ulong u64 = 4;
1286             shared long i64 = 8;
1287             assert(atomicOp!"+="(u64, 8) == 12);
1288             assert(atomicOp!"+="(i64, 8) == 16);
1289         }
1290     }
1291 
1292     @betterC pure nothrow @nogc unittest
1293     {
1294         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1295         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1296 
1297         {
1298             auto array = byteArray;
1299             byte* ptr = &array[0];
1300             byte* prevPtr = atomicFetchAdd(ptr, 3);
1301             assert(prevPtr == &array[0]);
1302             assert(*prevPtr == 1);
1303             assert(*ptr == 7);
1304         }
1305         {
1306             auto array = ulongArray;
1307             ulong* ptr = &array[0];
1308             ulong* prevPtr = atomicFetchAdd(ptr, 3);
1309             assert(prevPtr == &array[0]);
1310             assert(*prevPtr == 2);
1311             assert(*ptr == 8);
1312         }
1313     }
1314 
1315     @betterC pure nothrow @nogc @safe unittest
1316     {
1317         shared ubyte u8 = 1;
1318         shared ushort u16 = 2;
1319         shared uint u32 = 3;
1320         shared byte i8 = 5;
1321         shared short i16 = 6;
1322         shared int i32 = 7;
1323 
1324         assert(atomicOp!"-="(u8, 1) == 0);
1325         assert(atomicOp!"-="(u16, 1) == 1);
1326         assert(atomicOp!"-="(u32, 1) == 2);
1327         assert(atomicOp!"-="(i8, 1) == 4);
1328         assert(atomicOp!"-="(i16, 1) == 5);
1329         assert(atomicOp!"-="(i32, 1) == 6);
1330         version (D_LP64)
1331         {
1332             shared ulong u64 = 4;
1333             shared long i64 = 8;
1334             assert(atomicOp!"-="(u64, 1) == 3);
1335             assert(atomicOp!"-="(i64, 1) == 7);
1336         }
1337     }
1338 
1339     @betterC pure nothrow @nogc unittest
1340     {
1341         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1342         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1343 
1344         {
1345             auto array = byteArray;
1346             byte* ptr = &array[5];
1347             byte* prevPtr = atomicFetchSub(ptr, 4);
1348             assert(prevPtr == &array[5]);
1349             assert(*prevPtr == 11);
1350             assert(*ptr == 3); // https://issues.dlang.org/show_bug.cgi?id=21578
1351         }
1352         {
1353             auto array = ulongArray;
1354             ulong* ptr = &array[5];
1355             ulong* prevPtr = atomicFetchSub(ptr, 4);
1356             assert(prevPtr == &array[5]);
1357             assert(*prevPtr == 12);
1358             assert(*ptr == 4); // https://issues.dlang.org/show_bug.cgi?id=21578
1359         }
1360     }
1361 
1362     @betterC pure nothrow @nogc @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16651
1363     {
1364         shared ulong a = 2;
1365         uint b = 1;
1366         atomicOp!"-="(a, b);
1367         assert(a == 1);
1368 
1369         shared uint c = 2;
1370         ubyte d = 1;
1371         atomicOp!"-="(c, d);
1372         assert(c == 1);
1373     }
1374 
1375     pure nothrow @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16230
1376     {
1377         shared int i;
1378         static assert(is(typeof(atomicLoad(i)) == int));
1379 
1380         shared int* p;
1381         static assert(is(typeof(atomicLoad(p)) == shared(int)*));
1382 
1383         shared int[] a;
1384         static if (__traits(compiles, atomicLoad(a)))
1385         {
1386             static assert(is(typeof(atomicLoad(a)) == shared(int)[]));
1387         }
1388 
1389         static struct S { int* _impl; }
1390         shared S s;
1391         static assert(is(typeof(atomicLoad(s)) : shared S));
1392         static assert(is(typeof(atomicLoad(s)._impl) == shared(int)*));
1393         auto u = atomicLoad(s);
1394         assert(u._impl is null);
1395         u._impl = new shared int(42);
1396         assert(atomicLoad(*u._impl) == 42);
1397 
1398         static struct S2 { S s; }
1399         shared S2 s2;
1400         static assert(is(typeof(atomicLoad(s2).s) == TailShared!S));
1401 
1402         static struct S3 { size_t head; int* tail; }
1403         shared S3 s3;
1404         static if (__traits(compiles, atomicLoad(s3)))
1405         {
1406             static assert(is(typeof(atomicLoad(s3).head) == size_t));
1407             static assert(is(typeof(atomicLoad(s3).tail) == shared(int)*));
1408         }
1409 
1410         static class C { int i; }
1411         shared C c;
1412         static assert(is(typeof(atomicLoad(c)) == shared C));
1413 
1414         static struct NoIndirections { int i; }
1415         shared NoIndirections n;
1416         static assert(is(typeof(atomicLoad(n)) == NoIndirections));
1417     }
1418 
1419     unittest // Issue 21631
1420     {
1421         shared uint si1 = 45;
1422         shared uint si2 = 38;
1423         shared uint* psi = &si1;
1424 
1425         assert((&psi).cas(cast(const) psi, &si2));
1426     }
1427 }
1428 
1429 
1430 
1431 
1432 
1433 
1434 
1435 
1436 
1437 
1438 
1439 
1440 /**
1441  * A D implementation of a subset of the std::atomic template found in the C++ standard library.
1442  * https://en.cppreference.com/w/cpp/atomic/atomic
1443  *
1444  */
1445 struct Atomic(T)
1446 {
1447     private T m_val;
1448 
1449     /**
1450      * Initializes the underlying object with desired value. The initialization is not atomic.
1451      *
1452      * Params:
1453      *   val = desired value
1454      */
1455     this(T val) pure nothrow @nogc
1456     {
1457         m_val = val;
1458     }
1459 
1460     /** The copy constructor is disabled because an atomic cannot be passed to functions as a copy,
1461      * and copying it around would defeat the purpose of an atomic.
1462      */
1463     @disable this(ref return scope Atomic rhs);
1464 
1465     /**
1466      * Atomically replaces the current value with a desired value. Memory is affected according to the value of order.
1467      */
1468     void store(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
1469     {
1470         m_val.atomicStore!(order)(val);
1471     }
1472 
1473     /**
1474      * Atomically replaces the current value with a desired value.
1475      *
1476      * Params:
1477      *  val = desired value
1478      */
1479     void opAssign(T val) pure nothrow @nogc
1480     {
1481         store(val);
1482     }
1483     
1484     /**
1485      * Atomically loads the current value with sequentially consistent ordering.
1486      * Because of this `alias this`, the atomic can be read anywhere a plain
1487      * value of type `T` is expected.
1488      *
1489      */
1490     alias load this;
1491 
1492     /**
1493      * Atomically replaces the current value with the result of computation involving the previous value and val.
1494      * This is a read-modify-write operation.
1495      *
1496      * Operator `+=` performs atomic addition; equivalent to `return fetchAdd(val) + val;`.
1497      * Operator `-=` performs atomic subtraction; equivalent to `return fetchSub(val) - val;`.
1498      * Operator `&=` performs atomic bitwise AND; equivalent to `return fetchAnd(val) & val;`.
1499      * Operator `|=` performs atomic bitwise OR; equivalent to `return fetchOr(val) | val;`.
1500      * Operator `^=` performs atomic bitwise XOR; equivalent to `return fetchXor(val) ^ val;`.
1501      *
1502      * Params:
1503      *  val = value to perform the operation with
1504      *
1505      * Returns:
1506      *  The atomic value AFTER the operation
1507      */
1508     T opOpAssign(string op)(T val) pure nothrow @nogc
1509     {
1510         static if (op == "+")
1511         {
1512             return fetchAdd(val) + val;
1513         }
1514         else static if (op == "-")
1515         {
1516             return fetchSub(val) - val;
1517         }
1518         else static if (op == "&")
1519         {
1520             return fetchAnd(val) & val;
1521         }
1522         else static if (op == "|")
1523         {
1524             return fetchOr(val) | val;
1525         }
1526         else static if (op == "^")
1527         {
1528             return fetchXor(val) ^ val;
1529         }
1530         else static assert(false);
1531     }
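
    /// Illustrative example (sketch) of the compound-assignment operators.
    unittest
    {
        auto a = Atomic!int(5);
        assert((a += 3) == 8); // one atomic read-modify-write, returns the new value
        assert((a -= 2) == 6);
        assert(a.load() == 6);
    }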
1532 
1533     /**
1534     * Loads the atomic value from memory and returns it.  The memory barrier specified
1535     * by 'order' is applied to the operation, which is fully sequenced by
1536     * default.  Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
1537     * and MemoryOrder.seq.
1538     *
1539     * Returns:
1540     *  The atomic value.
1541     */
1542     T load(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc
1543     {
1544         return m_val.atomicLoad!(order)();
1545     }

    /**
     * Atomically replaces the current value with the result of arithmetic addition of the atomic variable and val.
     * The operation is a read-modify-write operation; for val == 1 it behaves as an atomic post-increment.
     * Memory is affected according to the value of order.
     *
     * Params:
     *  val = The other argument of arithmetic addition
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T fetchAdd(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
    {
        return m_val.atomicFetchAdd!(order)(val);
    }

    /**
     * Atomically replaces the current value with the result of arithmetic subtraction of the atomic variable and val.
     * The operation is a read-modify-write operation; for val == 1 it behaves as an atomic post-decrement.
     * Memory is affected according to the value of order.
     *
     * Params:
     *  val = The other argument of arithmetic subtraction
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T fetchSub(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
    {
        return m_val.atomicFetchSub!(order)(val);
    }

    /**
     * Atomically replaces the current value with the result of bitwise AND of the atomic value and val.
     * The operation is a read-modify-write operation. Memory is affected according to the value of order.
     *
     * Params:
     *  val = The other argument of bitwise AND
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T fetchAnd(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
    {
        return m_val.atomicFetchAnd!(order)(val);
    }

    /**
     * Atomically replaces the current value with the result of bitwise OR of the atomic value and val.
     * The operation is a read-modify-write operation. Memory is affected according to the value of order.
     *
     * Params:
     *  val = The other argument of bitwise OR
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T fetchOr(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
    {
        return m_val.atomicFetchOr!(order)(val);
    }

    /**
     * Atomically replaces the current value with the result of bitwise XOR of the atomic value and val.
     * The operation is a read-modify-write operation. Memory is affected according to the value of order.
     *
     * Params:
     *  val = The other argument of bitwise XOR
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T fetchXor(MemoryOrder order = MemoryOrder.seq)(T val) pure nothrow @nogc
    {
        return m_val.atomicFetchXor!(order)(val);
    }

    /**
     * Atomically increments or decrements the current value. The operation is a read-modify-write operation.
     *
     * operator ++ performs atomic pre-increment. Equivalent to return fetchAdd(1) + 1;.
     * operator -- performs atomic pre-decrement. Equivalent to return fetchSub(1) - 1;.
     *
     * Returns:
     *  The atomic value AFTER the operation
     */
    T opUnary(string op)() pure nothrow @nogc
    {
        static if (op == "++")
        {
            return fetchAdd(1) + 1;
        }
        else static if (op == "--")
        {
            return fetchSub(1) - 1;
        }
        else static assert(false);
    }

    /**
     * Atomically increments or decrements the current value. The operation is a read-modify-write operation.
     *
     * operator ++ performs atomic post-increment. Equivalent to return fetchAdd(1);.
     * operator -- performs atomic post-decrement. Equivalent to return fetchSub(1);.
     *
     * Returns:
     *  The atomic value BEFORE the operation
     */
    T opUnaryRight(string op)() pure nothrow @nogc
    {
        static if (op == "++")
        {
            return fetchAdd(1);
        }
        else static if (op == "--")
        {
            return fetchSub(1);
        }
        else static assert(false);
    }

    /**
     * Atomically replaces the underlying value with a desired value (a read-modify-write operation).
     * Memory is affected according to the value of order. A typical use is a
     * spin lock; see the sketch at the end of this module.
     *
     * Params:
     *  newVal = The new desired value
     *
     * Returns:
     *  The atomic value BEFORE the exchange
     */
    T exchange(MemoryOrder order = MemoryOrder.seq)(T newVal) pure nothrow @nogc
    {
        return atomicExchange!(order)(&m_val, newVal);
    }

    /**
     * Atomically compares the atomic value with expected. If they are bitwise-equal, replaces the atomic value with desired
     * (a read-modify-write operation). Otherwise, loads the actual atomic value into expected (a load operation).
     *
     * compareExchangeWeak is allowed to fail spuriously, that is, to act as if the atomic value and expected
     * were unequal even when they are equal. When a compare-and-exchange is used in a loop (see the example
     * following this struct), compareExchangeWeak will yield better performance on some platforms.
     *
     * Params:
     *  expected = The expected value
     *  desired = The new desired value
     *
     * Returns:
     *  true if the underlying atomic value was successfully changed, false otherwise.
     */
    bool compareExchangeWeak(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)(ref T expected, T desired) pure nothrow @nogc
    {
        return casWeak!(succ, fail)(&m_val, &expected, desired);
    }

    /**
     * Atomically compares the atomic value with expected. If they are bitwise-equal, replaces the atomic value with desired
     * (a read-modify-write operation). Otherwise, loads the actual atomic value into expected (a load operation).
     * Unlike compareExchangeWeak, this form never fails spuriously.
     *
     * Params:
     *  expected = The expected value
     *  desired = The new desired value
     *
     * Returns:
     *  true if the underlying atomic value was successfully changed, false otherwise.
     */
    bool compareExchangeStrong(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)(ref T expected, T desired) pure nothrow @nogc
    {
        return cas!(succ, fail)(&m_val, &expected, desired);
    }
}
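
/// A sketch of the typical retry-loop use of compareExchangeWeak: since the weak
/// form may fail spuriously, the update is simply re-attempted until it succeeds.
/// The updateMax helper is illustrative only, not part of this module.
unittest
{
    auto a = Atomic!int(5);

    // Atomically raise `atom` to `candidate` if `candidate` is larger.
    static void updateMax(ref Atomic!int atom, int candidate) pure nothrow @nogc
    {
        int current = atom.load();
        while (candidate > current)
        {
            // On failure (spurious or real), `current` is refreshed with the
            // actual stored value, so the comparison is re-checked.
            if (atom.compareExchangeWeak(current, candidate))
                break;
        }
    }

    updateMax(a, 10);
    assert(a.load() == 10);

    updateMax(a, 3); // a smaller candidate leaves the value unchanged
    assert(a.load() == 10);
}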


unittest // For the entire Atomic generic struct implementation
{
    auto a = Atomic!int(0);

    // These tests only exercise the operations and the interface, not whether the operations are truly atomic

    // Test store
    a.store(2);

    // Test regular load
    int j = a.load();
    assert(j == 2);

    // Test load/store with custom memory order
    a.store!(MemoryOrder.raw)(4);
    j = a.load!(MemoryOrder.raw)();
    assert(j == 4);

    // Test fetchAdd
    j = a.fetchAdd(4);
    assert(j == 4 && a.load() == 8);

    // Test fetchSub
    j = a.fetchSub(4);
    assert(j == 8 && a.load() == 4);

    // Test fetchAnd
    a = 0xffff;
    j = a.fetchAnd(0x00ff);
    assert(j == 0xffff && a.load() == 0x00ff);

    // Test fetchOr
    a = 0xff;
    j = a.fetchOr(0xff00);
    assert(j == 0xff && a.load() == 0xffff);

    // Test fetchXor
    a = 0xf0f0f0f0;
    j = a.fetchXor(0x0f0f0f0f);
    assert(j == 0xf0f0f0f0 && a.load() == 0xffffffff);

    // Test fetchAdd custom memory order
    a = 4;
    j = a.fetchAdd!(MemoryOrder.raw)(4);
    assert(j == 4 && a.load() == 8);

    // Test fetchSub custom memory order
    j = a.fetchSub!(MemoryOrder.raw)(4);
    assert(j == 8 && a.load() == 4);

    // Test fetchAnd custom memory order
    a = 0xffff;
    j = a.fetchAnd!(MemoryOrder.raw)(0x00ff);
    assert(j == 0xffff && a.load() == 0x00ff);

    // Test fetchOr custom memory order
    a = 0xff;
    j = a.fetchOr!(MemoryOrder.raw)(0xff00);
    assert(j == 0xff && a.load() == 0xffff);

    // Test fetchXor custom memory order
    a = 0xf0f0f0f0;
    j = a.fetchXor!(MemoryOrder.raw)(0x0f0f0f0f);
    assert(j == 0xf0f0f0f0 && a.load() == 0xffffffff);

    // Test opAssign
    a = 3;
    j = a.load();
    assert(j == 3);

    // Test implicit load via alias this
    assert(a == 3);

    // Test pre increment addition
    j = ++a;
    assert(j == 4 && a.load() == 4);

    // Test post increment addition
    j = a++;
    assert(j == 4 && a.load() == 5);

    // Test pre decrement subtraction
    j = --a;
    assert(j == 4 && a.load() == 4);

    // Test post decrement subtraction
    j = a--;
    assert(j == 4 && a.load() == 3);

    // Test operator assign add
    j = (a += 4);
    assert(j == 7 && a.load() == 7);

    // Test operator assign sub
    j = (a -= 4);
    assert(j == 3 && a.load() == 3);

    // Test operator assign and
    a = 0xffff;
    j = (a &= 0x00ff);
    assert(j == 0x00ff && a.load() == 0x00ff);

    // Test operator assign or
    a = 0xff;
    j = (a |= 0xff00);
    assert(j == 0xffff && a.load() == 0xffff);

    // Test operator assign xor
    a = 0xf0f0f0f0;
    j = (a ^= 0x0f0f0f0f);
    assert(j == 0xffffffff && a.load() == 0xffffffff);

    // Test exchange
    a = 3;
    j = a.exchange(10);
    assert(j == 3 && a.load() == 10);

    // Test exchange with custom memory order
    j = a.exchange!(MemoryOrder.raw)(3);
    assert(j == 10 && a.load() == 3);

    // Reset back to 10
    a = 10;

    // Test compareExchangeWeak with successful assignment
    int expected = 10;
    bool res = a.compareExchangeWeak(expected, 3);
    assert(res == true && a.load() == 3);

    // Test compareExchangeWeak with failed assignment as well as custom memory order
    expected = 11;
    res = a.compareExchangeWeak!(MemoryOrder.raw, MemoryOrder.raw)(expected, 10);
    assert(res == false && a.load() == 3);

    // Test compareExchangeStrong with successful assignment
    expected = 3;
    res = a.compareExchangeStrong(expected, 10);
    assert(res == true && a.load() == 10);

    // Test compareExchangeStrong with failed assignment as well as custom memory order
    expected = 3;
    res = a.compareExchangeStrong!(MemoryOrder.raw, MemoryOrder.raw)(expected, 10);
    assert(res == false && a.load() == 10);
}
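
// A minimal sketch of a test-and-set spin lock built from exchange and store
// with acquire/release ordering. SpinLock and its members are illustrative
// names, not part of this module; the test is single-threaded and only
// demonstrates the intended call pattern.
unittest
{
    static struct SpinLock
    {
        private Atomic!int locked; // 0 = free, 1 = held

        void lock() pure nothrow @nogc
        {
            // Spin until the previous value was 0, i.e. this call took the lock.
            // Acquire ordering makes the previous holder's writes visible.
            while (locked.exchange!(MemoryOrder.acq)(1) != 0) {}
        }

        void unlock() pure nothrow @nogc
        {
            // Release ordering publishes this thread's writes before freeing the lock.
            locked.store!(MemoryOrder.rel)(0);
        }
    }

    SpinLock l;
    l.lock();
    l.unlock();
}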