
/**
 * A D implementation of the C stdatomic.h header.
 *
 * $(NOTE If it compiles, it should produce assembly similar to that of the system C toolchain
 *   and should not introduce unnecessary behaviors when optimizing;
 *   if you do not care about this guarantee, use the _impl suffix.)
 *
 * $(NOTE The D shared type qualifier is the closest to the _Atomic type qualifier from C. It may be changed from shared in the future.)
 *
 * Copyright: Copyright Richard (Rikki) Andrew Cattermole 2023.
 * License:   $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
 * Authors:   Richard (Rikki) Andrew Cattermole
 * Source:    $(DRUNTIMESRC core/stdc/stdatomic.d)
 */
module core.stdc.stdatomic;
import core.atomic : MemoryOrder;
import core.internal.atomic;
import core.stdc.config;
import core.stdc.stdint;

version(LDC) {} else:

@safe nothrow @nogc:
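
/// A brief usage sketch: the _impl-suffixed functions described above can be called directly on a shared variable.
unittest
{
    shared int val;
    atomic_init(val, 2);
    atomic_fetch_add_impl(&val, 1);
    assert(atomic_load_impl(&val) == 3);
}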

///
enum memory_order
{
    /// No ordering provided
    memory_order_relaxed = MemoryOrder.raw,
    /// As per cppreference.com circa 2015, no compiler supports the consume memory order and in practice it devolves to acquire.
    memory_order_consume = MemoryOrder.acq,
    /// Prevent reordering before operation
    memory_order_acquire = MemoryOrder.acq,
    /// Prevent reordering after operation
    memory_order_release = MemoryOrder.rel,
    /// Prevent reordering before and after operation
    memory_order_acq_rel = MemoryOrder.acq_rel,
    /// Prevent reordering before the operation for reads and after it for writes, and impose a single total order on all seq_cst operations.
    memory_order_seq_cst = MemoryOrder.seq
}
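
/// The enum members map directly onto core.atomic.MemoryOrder values; note that consume devolves to acquire.
unittest
{
    static assert(memory_order.memory_order_seq_cst == MemoryOrder.seq);
    static assert(memory_order.memory_order_consume == memory_order.memory_order_acquire);
}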

///
enum
{
    ///
    __STDC_VERSION_STDATOMIC_H__ = 202311,

    ///
    ATOMIC_BOOL_LOCK_FREE = IsAtomicLockFree!bool ? 2 : 0,
    ///
    ATOMIC_CHAR_LOCK_FREE = IsAtomicLockFree!char ? 2 : 0,
    ///
    ATOMIC_CHAR16_T_LOCK_FREE = IsAtomicLockFree!wchar ? 2 : 0,
    ///
    ATOMIC_CHAR32_T_LOCK_FREE = IsAtomicLockFree!dchar ? 2 : 0,
    ///
    ATOMIC_WCHAR_T_LOCK_FREE = ATOMIC_CHAR16_T_LOCK_FREE,
    ///
    ATOMIC_SHORT_LOCK_FREE = IsAtomicLockFree!short ? 2 : 0,
    ///
    ATOMIC_INT_LOCK_FREE = IsAtomicLockFree!int ? 2 : 0,
    ///
    ATOMIC_LONG_LOCK_FREE = IsAtomicLockFree!c_long ? 2 : 0,
    ///
    ATOMIC_LLONG_LOCK_FREE = IsAtomicLockFree!ulong ? 2 : 0,
    ///
    ATOMIC_POINTER_LOCK_FREE = IsAtomicLockFree!(void*) ? 2 : 0,
    ///
    ATOMIC_CHAR8_T_LOCK_FREE = ATOMIC_CHAR_LOCK_FREE,
}
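
/// As defined above, each *_LOCK_FREE constant is either 2 (always lock-free) or 0; the "sometimes lock-free" value 1 is never produced here.
unittest
{
    static assert(ATOMIC_INT_LOCK_FREE == 0 || ATOMIC_INT_LOCK_FREE == 2);
    static assert(ATOMIC_CHAR8_T_LOCK_FREE == ATOMIC_CHAR_LOCK_FREE);
}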

version (DigitalMars)
{
    alias atomic_signal_fence = atomic_signal_fence_impl; ///

    // These all use inline assembly, so they are unlikely to produce the codegen a user would expect.
    version(none)
    {
        alias atomic_flag_clear = atomic_flag_clear_impl; ///
        alias atomic_flag_clear_explicit = atomic_flag_clear_explicit_impl; ///
        alias atomic_flag_test_and_set = atomic_flag_test_and_set_impl; ///
        alias atomic_flag_test_and_set_explicit = atomic_flag_test_and_set_explicit_impl; ///
        alias atomic_thread_fence = atomic_thread_fence_impl; ///
        alias atomic_store = atomic_store_impl; ///
        alias atomic_store_explicit = atomic_store_explicit_impl; ///
        alias atomic_load = atomic_load_impl; ///
        alias atomic_load_explicit = atomic_load_explicit_impl; ///
        alias atomic_exchange = atomic_exchange_impl; ///
        alias atomic_exchange_explicit = atomic_exchange_explicit_impl; ///
        alias atomic_compare_exchange_strong = atomic_compare_exchange_strong_impl; ///
        alias atomic_compare_exchange_weak = atomic_compare_exchange_weak_impl; ///
        alias atomic_compare_exchange_strong_explicit = atomic_compare_exchange_strong_explicit_impl; ///
        alias atomic_compare_exchange_weak_explicit = atomic_compare_exchange_weak_explicit_impl; ///
        alias atomic_fetch_add = atomic_fetch_add_impl; ///
        alias atomic_fetch_add_explicit = atomic_fetch_add_explicit_impl; ///
        alias atomic_fetch_sub = atomic_fetch_sub_impl; ///
        alias atomic_fetch_sub_explicit = atomic_fetch_sub_explicit_impl; ///
        alias atomic_fetch_or = atomic_fetch_or_impl; ///
        alias atomic_fetch_or_explicit = atomic_fetch_or_explicit_impl; ///
        alias atomic_fetch_xor = atomic_fetch_xor_impl; ///
        alias atomic_fetch_xor_explicit = atomic_fetch_xor_explicit_impl; ///
        alias atomic_fetch_and = atomic_fetch_and_impl; ///
        alias atomic_fetch_and_explicit = atomic_fetch_and_explicit_impl; ///
    }
}
else version(GNU)
{
    alias atomic_flag_clear = atomic_flag_clear_impl; ///
    alias atomic_flag_clear_explicit = atomic_flag_clear_explicit_impl; ///
    alias atomic_flag_test_and_set = atomic_flag_test_and_set_impl; ///
    alias atomic_flag_test_and_set_explicit = atomic_flag_test_and_set_explicit_impl; ///
    alias atomic_signal_fence = atomic_signal_fence_impl; ///
    alias atomic_thread_fence = atomic_thread_fence_impl; ///
    alias atomic_store = atomic_store_impl; ///
    alias atomic_store_explicit = atomic_store_explicit_impl; ///
    alias atomic_load = atomic_load_impl; ///
    alias atomic_load_explicit = atomic_load_explicit_impl; ///
    alias atomic_exchange = atomic_exchange_impl; ///
    alias atomic_exchange_explicit = atomic_exchange_explicit_impl; ///
    alias atomic_compare_exchange_strong = atomic_compare_exchange_strong_impl; ///
    alias atomic_compare_exchange_weak = atomic_compare_exchange_weak_impl; ///
    alias atomic_compare_exchange_strong_explicit = atomic_compare_exchange_strong_explicit_impl; ///
    alias atomic_compare_exchange_weak_explicit = atomic_compare_exchange_weak_explicit_impl; ///
    alias atomic_fetch_add = atomic_fetch_add_impl; ///
    alias atomic_fetch_add_explicit = atomic_fetch_add_explicit_impl; ///
    alias atomic_fetch_sub = atomic_fetch_sub_impl; ///
    alias atomic_fetch_sub_explicit = atomic_fetch_sub_explicit_impl; ///
    alias atomic_fetch_or = atomic_fetch_or_impl; ///
    alias atomic_fetch_or_explicit = atomic_fetch_or_explicit_impl; ///
    alias atomic_fetch_xor = atomic_fetch_xor_impl; ///
    alias atomic_fetch_xor_explicit = atomic_fetch_xor_explicit_impl; ///
    alias atomic_fetch_and = atomic_fetch_and_impl; ///
    alias atomic_fetch_and_explicit = atomic_fetch_and_explicit_impl; ///
}

///
pragma(inline, true)
bool atomic_is_lock_free(A)(const shared(A)* obj)
{
    return IsAtomicLockFree!A;
}
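
/// The result agrees with the corresponding ATOMIC_*_LOCK_FREE constant, since both derive from IsAtomicLockFree.
unittest
{
    shared(int) obj;
    assert(atomic_is_lock_free(&obj) == (ATOMIC_INT_LOCK_FREE == 2));
}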

/// Guaranteed to be an atomic boolean type
struct atomic_flag
{
    private bool b;
}

///
enum ATOMIC_FLAG_INIT = atomic_flag.init;
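
/// A default-initialized flag starts cleared, mirroring C's ATOMIC_FLAG_INIT.
unittest
{
    atomic_flag flag = ATOMIC_FLAG_INIT;
    assert(!atomic_flag_test_and_set_impl(&flag));
    atomic_flag_clear_impl(&flag);
}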

///
pragma(inline, true)
void atomic_flag_clear_impl()(atomic_flag* obj)
{
    assert(obj !is null);

    atomicStore(&obj.b, false);
}

///
pragma(inline, true)
void atomic_flag_clear_explicit_impl()(atomic_flag* obj, memory_order order)
{
    assert(obj !is null);

    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            atomicStore!(memory_order.memory_order_relaxed)(&obj.b, false);
            break;

        case memory_order.memory_order_acquire:
            // Ideally this would error at compile time but alas it is not an intrinsic.
            // Note: this is not a valid memory order for this operation.
            atomicStore!(memory_order.memory_order_seq_cst)(&obj.b, false);
            break;

        case memory_order.memory_order_release:
            atomicStore!(memory_order.memory_order_release)(&obj.b, false);
            break;

        case memory_order.memory_order_acq_rel:
            atomicStore!(memory_order.memory_order_acq_rel)(&obj.b, false);
            break;

        case memory_order.memory_order_seq_cst:
            atomicStore(&obj.b, false);
            break;
    }
}

///
pragma(inline, true)
bool atomic_flag_test_and_set_impl()(atomic_flag* obj)
{
    assert(obj !is null);
    return atomicExchange(&obj.b, true);
}

///
unittest
{
    atomic_flag flag;
    assert(!atomic_flag_test_and_set_impl(&flag));
    atomic_flag_clear_impl(&flag);
}

///
pragma(inline, true)
bool atomic_flag_test_and_set_explicit_impl()(atomic_flag* obj, memory_order order)
{
    assert(obj !is null);

    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            return atomicExchange!(memory_order.memory_order_relaxed)(&obj.b, true);

        case memory_order.memory_order_acquire:
            return atomicExchange!(memory_order.memory_order_acquire)(&obj.b, true);

        case memory_order.memory_order_release:
            return atomicExchange!(memory_order.memory_order_release)(&obj.b, true);

        case memory_order.memory_order_acq_rel:
            return atomicExchange!(memory_order.memory_order_acq_rel)(&obj.b, true);

        case memory_order.memory_order_seq_cst:
            return atomicExchange(&obj.b, true);
    }
}

///
unittest
{
    atomic_flag flag;
    assert(!atomic_flag_test_and_set_explicit_impl(&flag, memory_order.memory_order_seq_cst));
    atomic_flag_clear_explicit_impl(&flag, memory_order.memory_order_seq_cst);
}

/**
 * Initializes an atomic variable; the destination should not have had anything stored to it prior to this call.
 *
 * We use an out parameter instead of a pointer for the destination in an attempt to communicate to the compiler that this call initializes it.
 */
pragma(inline, true)
void atomic_init(A, C)(out shared(A) obj, C desired) @trusted
{
    obj = cast(shared) desired;
}

///
unittest
{
    shared int val;
    atomic_init(val, 2);
}

/// No-op function; dependency ordering does not apply to D.
pragma(inline, true)
A kill_dependency(A)(A y) @trusted
{
    return y;
}
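
/// kill_dependency simply returns its argument unchanged.
unittest
{
    assert(kill_dependency(42) == 42);
}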

/// Don't allow reordering; does not emit any instructions.
pragma(inline, true)
void atomic_signal_fence_impl()(memory_order order)
{
    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            atomicSignalFence!(memory_order.memory_order_relaxed);
            break;

        case memory_order.memory_order_acquire:
            atomicSignalFence!(memory_order.memory_order_acquire);
            break;

        case memory_order.memory_order_release:
            atomicSignalFence!(memory_order.memory_order_release);
            break;

        case memory_order.memory_order_acq_rel:
            atomicSignalFence!(memory_order.memory_order_acq_rel);
            break;

        case memory_order.memory_order_seq_cst:
            atomicSignalFence!(memory_order.memory_order_seq_cst);
            break;
    }
}

///
unittest
{
    atomic_signal_fence_impl(memory_order.memory_order_seq_cst);
}

/// Don't allow reordering, and emit a fence instruction.
pragma(inline, true)
void atomic_thread_fence_impl()(memory_order order)
{
    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            atomicFence!(memory_order.memory_order_relaxed);
            break;

        case memory_order.memory_order_acquire:
            atomicFence!(memory_order.memory_order_acquire);
            break;

        case memory_order.memory_order_release:
            atomicFence!(memory_order.memory_order_release);
            break;

        case memory_order.memory_order_acq_rel:
            atomicFence!(memory_order.memory_order_acq_rel);
            break;

        case memory_order.memory_order_seq_cst:
            atomicFence!(memory_order.memory_order_seq_cst);
            break;
    }
}

///
unittest
{
    atomic_thread_fence_impl(memory_order.memory_order_seq_cst);
}

///
alias atomic_bool = shared(bool);
///
alias atomic_char = shared(char);
///
alias atomic_schar = shared(byte);
///
alias atomic_uchar = shared(ubyte);
///
alias atomic_short = shared(short);
///
alias atomic_ushort = shared(ushort);
///
alias atomic_int = shared(int);
///
alias atomic_uint = shared(uint);
///
alias atomic_long = shared(c_long);
///
alias atomic_ulong = shared(c_ulong);
///
alias atomic_llong = shared(long);
///
alias atomic_ullong = shared(ulong);
///
alias atomic_char8_t = shared(char);
///
alias atomic_char16_t = shared(wchar);
///
alias atomic_char32_t = shared(dchar);
///
alias atomic_wchar_t = shared(wchar);

///
alias atomic_int_least8_t = shared(int_least8_t);
///
alias atomic_uint_least8_t = shared(uint_least8_t);
///
alias atomic_int_least16_t = shared(int_least16_t);
///
alias atomic_uint_least16_t = shared(uint_least16_t);
///
alias atomic_int_least32_t = shared(int_least32_t);
///
alias atomic_uint_least32_t = shared(uint_least32_t);
///
alias atomic_int_least64_t = shared(int_least64_t);
///
alias atomic_uint_least64_t = shared(uint_least64_t);
///
alias atomic_int_fast8_t = shared(int_fast8_t);
///
alias atomic_uint_fast8_t = shared(uint_fast8_t);
///
alias atomic_int_fast16_t = shared(int_fast16_t);
///
alias atomic_uint_fast16_t = shared(uint_fast16_t);
///
alias atomic_int_fast32_t = shared(int_fast32_t);
///
alias atomic_uint_fast32_t = shared(uint_fast32_t);
///
alias atomic_int_fast64_t = shared(int_fast64_t);
///
alias atomic_uint_fast64_t = shared(uint_fast64_t);
///
alias atomic_intptr_t = shared(intptr_t);
///
alias atomic_uintptr_t = shared(uintptr_t);
///
alias atomic_size_t = shared(size_t);
///
alias atomic_ptrdiff_t = shared(ptrdiff_t);
///
alias atomic_intmax_t = shared(intmax_t);
///
alias atomic_uintmax_t = shared(uintmax_t);
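
/// The C typedef names above are plain shared-qualified D types.
unittest
{
    static assert(is(atomic_int == shared(int)));
    static assert(is(atomic_size_t == shared(size_t)));
}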

///
pragma(inline, true)
void atomic_store_impl(A, C)(shared(A)* obj, C desired) @trusted
{
    assert(obj !is null);
    atomicStore(obj, cast(A)desired);
}

///
unittest
{
    shared(int) obj;
    atomic_store_impl(&obj, 3);
}

///
pragma(inline, true)
void atomic_store_explicit_impl(A, C)(shared(A)* obj, C desired, memory_order order) @trusted
{
    assert(obj !is null);

    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            atomicStore!(memory_order.memory_order_relaxed)(obj, cast(A)desired);
            break;

        case memory_order.memory_order_acquire:
            // Ideally this would error at compile time but alas it is not an intrinsic.
            // Note: this is not a valid memory order for this operation.
            atomicStore!(memory_order.memory_order_release)(obj, cast(A)desired);
            break;

        case memory_order.memory_order_release:
            atomicStore!(memory_order.memory_order_release)(obj, cast(A)desired);
            break;

        case memory_order.memory_order_acq_rel:
            atomicStore!(memory_order.memory_order_acq_rel)(obj, cast(A)desired);
            break;

        case memory_order.memory_order_seq_cst:
            atomicStore!(memory_order.memory_order_seq_cst)(obj, cast(A)desired);
            break;
    }
}

///
unittest
{
    shared(int) obj;
    atomic_store_explicit_impl(&obj, 3, memory_order.memory_order_seq_cst);
}

///
pragma(inline, true)
A atomic_load_impl(A)(const shared(A)* obj) @trusted
{
    assert(obj !is null);
    return atomicLoad(cast(shared(A)*)obj);
}

///
unittest
{
    shared(int) obj = 3;
    assert(atomic_load_impl(&obj) == 3);
}

///
pragma(inline, true)
A atomic_load_explicit_impl(A)(const shared(A)* obj, memory_order order) @trusted
{
    assert(obj !is null);

    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            return atomicLoad!(memory_order.memory_order_relaxed)(obj);

        case memory_order.memory_order_acquire:
            return atomicLoad!(memory_order.memory_order_acquire)(obj);

        case memory_order.memory_order_release:
            // Ideally this would error at compile time but alas it is not an intrinsic.
            // Note: this is not a valid memory order for this operation.
            return atomicLoad!(memory_order.memory_order_acquire)(obj);

        case memory_order.memory_order_acq_rel:
            return atomicLoad!(memory_order.memory_order_acq_rel)(obj);

        case memory_order.memory_order_seq_cst:
            return atomicLoad!(memory_order.memory_order_seq_cst)(obj);
    }
}

///
unittest
{
    shared(int) obj = 3;
    assert(atomic_load_explicit_impl(&obj, memory_order.memory_order_seq_cst) == 3);
}

///
pragma(inline, true)
A atomic_exchange_impl(A, C)(shared(A)* obj, C desired) @trusted
{
    assert(obj !is null);
    return atomicExchange(cast(shared(A)*)obj, cast(A)desired);
}

///
unittest
{
    shared(int) obj = 3;
    assert(atomic_exchange_impl(&obj, 2) == 3);
}

///
pragma(inline, true)
A atomic_exchange_explicit_impl(A, C)(shared(A)* obj, C desired, memory_order order) @trusted
{
    assert(obj !is null);

    final switch (order)
    {
        case memory_order.memory_order_relaxed:
            return atomicExchange!(memory_order.memory_order_relaxed)(obj, cast(A)desired);

        case memory_order.memory_order_acquire:
            return atomicExchange!(memory_order.memory_order_acquire)(obj, cast(A)desired);

        case memory_order.memory_order_release:
            return atomicExchange!(memory_order.memory_order_release)(obj, cast(A)desired);

        case memory_order.memory_order_acq_rel:
            return atomicExchange!(memory_order.memory_order_acq_rel)(obj, cast(A)desired);

        case memory_order.memory_order_seq_cst:
            return atomicExchange!(memory_order.memory_order_seq_cst)(obj, cast(A)desired);
    }
}

///
unittest
{
    shared(int) obj = 3;
    assert(atomic_exchange_explicit_impl(&obj, 2, memory_order.memory_order_seq_cst) == 3);
}

///
pragma(inline, true)
bool atomic_compare_exchange_strong_impl(A, C)(shared(A)* obj, A* expected, C desired) @trusted
{
    assert(obj !is null);
    return atomicCompareExchangeStrong(cast(A*)obj, expected, cast(A)desired);
}

///
unittest
{
    shared(int) obj = 3;
    int expected = 3;
    assert(atomic_compare_exchange_strong_impl(&obj, &expected, 2));
}

///
pragma(inline, true)
bool atomic_compare_exchange_weak_impl(A, C)(shared(A)* obj, A* expected, C desired) @trusted
{
    assert(obj !is null);
    return atomicCompareExchangeWeak(cast(A*)obj, expected, cast(A)desired);
}

///
unittest
{
    shared(int) obj = 3;
    int expected = 3;
    static assert(__traits(compiles, {atomic_compare_exchange_weak_impl(&obj, &expected, 2);}));
}

///
pragma(inline, true)
bool atomic_compare_exchange_strong_explicit_impl(A, C)(shared(A)* obj, A* expected, C desired, memory_order succ, memory_order fail) @trusted
{
    assert(obj !is null);
    // We use these giant nested switch statements
    //  because, as of 2023, gdc & ldc can for the most part inline them when the memory_order arguments are literals.

    final switch(succ)
    {
        case memory_order.memory_order_relaxed:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_acquire:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_release:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_acq_rel:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_seq_cst:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
    }
}

///
unittest
{
    shared(int) obj = 3;
    int expected = 3;
    assert(atomic_compare_exchange_strong_explicit_impl(&obj, &expected, 2, memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst));
}

///
pragma(inline, true)
bool atomic_compare_exchange_weak_explicit_impl(A, C)(shared(A)* obj, A* expected, C desired, memory_order succ, memory_order fail) @trusted
{
    assert(obj !is null);
    // We use these giant nested switch statements
    //  because, as of 2023, gdc & ldc can for the most part inline them when the memory_order arguments are literals.

    final switch(succ)
    {
        case memory_order.memory_order_relaxed:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_acquire:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_release:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_acq_rel:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
        case memory_order.memory_order_seq_cst:
            final switch(fail)
            {
                case memory_order.memory_order_relaxed:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acquire:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_release:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_acq_rel:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
                case memory_order.memory_order_seq_cst:
                    return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
            }
    }
}

///
unittest
{
    shared(int) obj = 3;
    int expected = 3;
    atomic_compare_exchange_weak_explicit_impl(&obj, &expected, 2, memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst);
}

///
pragma(inline, true)
A atomic_fetch_add_impl(A, M)(shared(A)* obj, M arg) @trusted
{
    assert(obj !is null);
    return atomicFetchAdd(cast(A*)obj, arg);
}

///
unittest
{
    shared(int) val;
    atomic_fetch_add_impl(&val, 3);
    assert(atomic_load_impl(&val) == 3);
}

///
pragma(inline, true)
A atomic_fetch_sub_impl(A, M)(shared(A)* obj, M arg) @trusted
{
    assert(obj !is null);
    return atomicFetchSub(cast(A*)obj, arg);
}

///
unittest
{
    shared(int) val = 3;
    atomic_fetch_sub_impl(&val, 3);
    assert(atomic_load_impl(&val) == 0);
}

///
pragma(inline, true)
A atomic_fetch_add_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
{
    assert(obj !is null);

    final switch(order)
    {
        case memory_order.memory_order_relaxed:
            return atomicFetchAdd!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
        case memory_order.memory_order_acquire:
            return atomicFetchAdd!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
        case memory_order.memory_order_release:
            return atomicFetchAdd!(memory_order.memory_order_release)(cast(A*)obj, arg);
        case memory_order.memory_order_acq_rel:
            return atomicFetchAdd!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
        case memory_order.memory_order_seq_cst:
            return atomicFetchAdd!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
    }
}

///
unittest
{
    shared(int) val;
    atomic_fetch_add_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
    assert(atomic_load_impl(&val) == 3);
}

///
pragma(inline, true)
A atomic_fetch_sub_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
{
    assert(obj !is null);

    final switch(order)
    {
        case memory_order.memory_order_relaxed:
            return atomicFetchSub!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
        case memory_order.memory_order_acquire:
            return atomicFetchSub!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
        case memory_order.memory_order_release:
            return atomicFetchSub!(memory_order.memory_order_release)(cast(A*)obj, arg);
        case memory_order.memory_order_acq_rel:
            return atomicFetchSub!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
        case memory_order.memory_order_seq_cst:
            return atomicFetchSub!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
    }
}

///
unittest
{
    shared(int) val = 3;
    atomic_fetch_sub_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
    assert(atomic_load_impl(&val) == 0);
}

///
pragma(inline, true)
A atomic_fetch_or_impl(A, M)(shared(A)* obj, M arg) @trusted
{
    assert(obj !is null);
    return atomicFetchOr(cast(A*)obj, arg);
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_or_impl(&val, 3);
    assert(atomic_load_impl(&val) == 7);
}

///
pragma(inline, true)
A atomic_fetch_or_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
{
    assert(obj !is null);

    final switch(order)
    {
        case memory_order.memory_order_relaxed:
            return atomicFetchOr!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
        case memory_order.memory_order_acquire:
            return atomicFetchOr!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
        case memory_order.memory_order_release:
            return atomicFetchOr!(memory_order.memory_order_release)(cast(A*)obj, arg);
        case memory_order.memory_order_acq_rel:
            return atomicFetchOr!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
        case memory_order.memory_order_seq_cst:
            return atomicFetchOr!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
    }
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_or_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
    assert(atomic_load_impl(&val) == 7);
}

///
pragma(inline, true)
A atomic_fetch_xor_impl(A, M)(shared(A)* obj, M arg) @trusted
{
    assert(obj !is null);
    return atomicFetchXor(cast(A*)obj, arg);
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_xor_impl(&val, 3);
    assert(atomic_load_impl(&val) == 6);
}

///
pragma(inline, true)
A atomic_fetch_xor_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
{
    assert(obj !is null);

    final switch(order)
    {
        case memory_order.memory_order_relaxed:
            return atomicFetchXor!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
        case memory_order.memory_order_acquire:
            return atomicFetchXor!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
        case memory_order.memory_order_release:
            return atomicFetchXor!(memory_order.memory_order_release)(cast(A*)obj, arg);
        case memory_order.memory_order_acq_rel:
            return atomicFetchXor!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
        case memory_order.memory_order_seq_cst:
            return atomicFetchXor!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
    }
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_xor_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
    assert(atomic_load_impl(&val) == 6);
}

///
pragma(inline, true)
A atomic_fetch_and_impl(A, M)(shared(A)* obj, M arg) @trusted
{
    assert(obj !is null);
    return atomicFetchAnd(cast(A*)obj, arg);
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_and_impl(&val, 3);
    assert(atomic_load_impl(&val) == 1);
}

///
pragma(inline, true)
A atomic_fetch_and_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
{
    assert(obj !is null);

    final switch(order)
    {
        case memory_order.memory_order_relaxed:
            return atomicFetchAnd!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
        case memory_order.memory_order_acquire:
            return atomicFetchAnd!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
        case memory_order.memory_order_release:
            return atomicFetchAnd!(memory_order.memory_order_release)(cast(A*)obj, arg);
        case memory_order.memory_order_acq_rel:
            return atomicFetchAnd!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
        case memory_order.memory_order_seq_cst:
            return atomicFetchAnd!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
    }
}

///
unittest
{
    shared(int) val = 5;
    atomic_fetch_and_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
    assert(atomic_load_impl(&val) == 1);
}