1 /**
2  * The atomic module provides basic support for lock-free
3  * concurrent programming.
4  *
5  * $(NOTE Use the `-preview=nosharedaccess` compiler flag to detect
6  * unsafe individual read or write operations on shared data.)
7  *
8  * Copyright: Copyright Sean Kelly 2005 - 2016.
9  * License:   $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
10  * Authors:   Sean Kelly, Alex Rønne Petersen, Manu Evans
11  * Source:    $(DRUNTIMESRC core/_atomic.d)
12  */
13 
14 module core.atomic;
15 
16 ///
17 @safe unittest
18 {
19     int y = 2;
20     shared int x = y; // OK
21 
22     //x++; // read modify write error
23     x.atomicOp!"+="(1); // OK
24     //y = x; // read error with preview flag
25     y = x.atomicLoad(); // OK
26     assert(y == 3);
27     //x = 5; // write error with preview flag
28     x.atomicStore(5); // OK
29     assert(x.atomicLoad() == 5);
30 }
31 
32 import core.internal.atomic;
33 import core.internal.attributes : betterC;
34 import core.internal.traits : hasUnsharedIndirections;
35 
36 pragma(inline, true): // LDC
37 
38 /**
39  * Specifies the memory ordering semantics of an atomic operation.
40  *
41  * See_Also:
42  *     $(HTTP en.cppreference.com/w/cpp/atomic/memory_order)
43  */
44 enum MemoryOrder
45 {
46     /**
47      * Not sequenced.
48      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#monotonic, LLVM AtomicOrdering.Monotonic)
49      * and C++11/C11 `memory_order_relaxed`.
50      */
51     raw = 0,
52     /**
53      * Hoist-load + hoist-store barrier.
54      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquire, LLVM AtomicOrdering.Acquire)
55      * and C++11/C11 `memory_order_acquire`.
56      */
57     acq = 2,
58     /**
59      * Sink-load + sink-store barrier.
60      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#release, LLVM AtomicOrdering.Release)
61      * and C++11/C11 `memory_order_release`.
62      */
63     rel = 3,
64     /**
65      * Acquire + release barrier.
66      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquirerelease, LLVM AtomicOrdering.AcquireRelease)
67      * and C++11/C11 `memory_order_acq_rel`.
68      */
69     acq_rel = 4,
70     /**
71      * Fully sequenced (acquire + release). Corresponds to
72      * $(LINK2 https://llvm.org/docs/Atomics.html#sequentiallyconsistent, LLVM AtomicOrdering.SequentiallyConsistent)
73      * and C++11/C11 `memory_order_seq_cst`.
74      */
75     seq = 5,
76 }
77 
78 /**
79  * Loads 'val' from memory and returns it.  The memory barrier specified
80  * by 'ms' is applied to the operation, which is fully sequenced by
81  * default.  Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
82  * and MemoryOrder.seq.
83  *
84  * Params:
85  *  val = The target variable.
86  *
87  * Returns:
88  *  The value of 'val'.
89  */
90 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope const T val) pure nothrow @nogc @trusted
91     if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
92 {
93     static if (__traits(isFloating, T))
94     {
95         alias IntTy = IntForFloat!T;
96         IntTy r = core.internal.atomic.atomicLoad!ms(cast(IntTy*)&val);
97         return *cast(T*)&r;
98     }
99     else
100         return core.internal.atomic.atomicLoad!ms(cast(T*)&val);
101 }
102 
103 /// Ditto
104 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope shared const T val) pure nothrow @nogc @trusted
105     if (!hasUnsharedIndirections!T)
106 {
    static assert(!hasUnsharedIndirections!T, "Copying `" ~ shared(const(T)).stringof ~ "` would violate shared.");
109 
110     return atomicLoad!ms(*cast(T*)&val);
111 }
112 
113 /// Ditto
114 TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref shared const T val) pure nothrow @nogc @trusted
115     if (hasUnsharedIndirections!T)
116 {
117     // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
118     // this is here because code exists in the wild that does this...
119 
120     return core.internal.atomic.atomicLoad!ms(cast(TailShared!T*)&val);
121 }
122 
123 /**
124  * Writes 'newval' into 'val'.  The memory barrier specified by 'ms' is
125  * applied to the operation, which is fully sequenced by default.
126  * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
127  * MemoryOrder.seq.
128  *
129  * Params:
130  *  val    = The target variable.
131  *  newval = The value to store.
132  */
133 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
134     if (!is(T == shared) && !is(V == shared))
135 {
136     import core.internal.traits : hasElaborateCopyConstructor;
137     static assert (!hasElaborateCopyConstructor!T, "`T` may not have an elaborate copy: atomic operations override regular copying semantics.");
138 
139     // resolve implicit conversions
140     version (LDC)
141     {
142         import core.internal.traits : Unqual;
143         static if (is(Unqual!T == Unqual!V))
144         {
145             alias arg = newval;
146         }
147         else
148         {
149             // don't construct directly from `newval`, assign instead (`alias this` etc.)
150             T arg;
151             arg = newval;
152         }
153     }
154     else
155     {
156         T arg = newval;
157     }
158 
159     static if (__traits(isFloating, T))
160     {
161         alias IntTy = IntForFloat!T;
162         core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&arg);
163     }
164     else
165         core.internal.atomic.atomicStore!ms(&val, arg);
166 }
167 
168 /// Ditto
169 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
170     if (!is(T == class))
171 {
172     static if (is (V == shared U, U))
173         alias Thunk = U;
174     else
175     {
176         import core.internal.traits : hasUnsharedIndirections;
177         static assert(!hasUnsharedIndirections!V, "Copying argument `" ~ V.stringof ~ " newval` to `" ~ shared(T).stringof ~ " here` would violate shared.");
178         alias Thunk = V;
179     }
180     atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
181 }
182 
183 /// Ditto
184 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, auto ref shared V newval) pure nothrow @nogc @trusted
185     if (is(T == class))
186 {
187     static assert (is (V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
188 
189     core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
190 }
191 
192 /**
193  * Atomically adds `mod` to the value referenced by `val` and returns the value `val` held previously.
194  * This operation is both lock-free and atomic.
195  *
 * Params:
 *  val = Reference to the value to modify.
 *  mod = The value to add.  If `T` is a pointer type, `mod` is scaled by
 *        the size of the pointed-to type, as with pointer arithmetic.
199  *
200  * Returns:
201  *  The value held previously by `val`.
202  */
203 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
204     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
205 in (atomicValueIsProperlyAligned(val))
206 {
207     static if (is(T == U*, U))
208         return cast(T)core.internal.atomic.atomicFetchAdd!ms(cast(size_t*)&val, mod * U.sizeof);
209     else
210         return core.internal.atomic.atomicFetchAdd!ms(&val, cast(T)mod);
211 }
212 
213 /// Ditto
214 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
215     if (__traits(isIntegral, T) || is(T == U*, U))
216 in (atomicValueIsProperlyAligned(val))
217 {
218     return atomicFetchAdd!ms(*cast(T*)&val, mod);
219 }
220 
221 /**
222  * Atomically subtracts `mod` from the value referenced by `val` and returns the value `val` held previously.
223  * This operation is both lock-free and atomic.
224  *
 * Params:
 *  val = Reference to the value to modify.
 *  mod = The value to subtract.  If `T` is a pointer type, `mod` is scaled
 *        by the size of the pointed-to type, as with pointer arithmetic.
228  *
229  * Returns:
230  *  The value held previously by `val`.
231  */
232 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
233     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
234 in (atomicValueIsProperlyAligned(val))
235 {
236     static if (is(T == U*, U))
237         return cast(T)core.internal.atomic.atomicFetchSub!ms(cast(size_t*)&val, mod * U.sizeof);
238     else
239         return core.internal.atomic.atomicFetchSub!ms(&val, cast(T)mod);
240 }
241 
242 /// Ditto
243 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
244     if (__traits(isIntegral, T) || is(T == U*, U))
245 in (atomicValueIsProperlyAligned(val))
246 {
247     return atomicFetchSub!ms(*cast(T*)&val, mod);
248 }
249 
250 /**
 * Atomically exchanges `exchangeWith` with the value referenced by `here`.
252  * This operation is both lock-free and atomic.
253  *
254  * Params:
255  *  here         = The address of the destination variable.
256  *  exchangeWith = The value to exchange.
257  *
258  * Returns:
259  *  The value held previously by `here`.
260  */
261 T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(T* here, V exchangeWith) pure nothrow @nogc @trusted
262     if (!is(T == shared) && !is(V == shared))
263 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
264 {
265     // resolve implicit conversions
266     T arg = exchangeWith;
267 
268     static if (__traits(isFloating, T))
269     {
270         alias IntTy = IntForFloat!T;
271         IntTy r = core.internal.atomic.atomicExchange!ms(cast(IntTy*)here, *cast(IntTy*)&arg);
        return *cast(T*)&r;
273     }
274     else
275         return core.internal.atomic.atomicExchange!ms(here, arg);
276 }
277 
278 /// Ditto
279 TailShared!T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, V exchangeWith) pure nothrow @nogc @trusted
280     if (!is(T == class) && !is(T == interface))
281 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
282 {
283     static if (is (V == shared U, U))
284         alias Thunk = U;
285     else
286     {
287         import core.internal.traits : hasUnsharedIndirections;
288         static assert(!hasUnsharedIndirections!V, "Copying `exchangeWith` of type `" ~ V.stringof ~ "` to `" ~ shared(T).stringof ~ "` would violate shared.");
289         alias Thunk = V;
290     }
291     return atomicExchange!ms(cast(T*)here, *cast(Thunk*)&exchangeWith);
292 }
293 
294 /// Ditto
295 shared(T) atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, shared(V) exchangeWith) pure nothrow @nogc @trusted
296     if (is(T == class) || is(T == interface))
297 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
298 {
299     static assert (is (V : T), "Can't assign `exchangeWith` of type `" ~ shared(V).stringof ~ "` to `" ~ shared(T).stringof ~ "`.");
300 
301     return cast(shared)core.internal.atomic.atomicExchange!ms(cast(T*)here, cast(V)exchangeWith);
302 }
303 
304 /**
 * Performs either a compare-and-set or a compare-and-swap operation.
 *
 * There are two categories of overloads in this template:
 * The first category does a simple compare-and-set.
 * The comparison value (`ifThis`) is treated as an rvalue.
 *
 * The second category does a compare-and-swap (a.k.a. compare-and-exchange),
 * and expects `ifThis` to be a pointer type, where the previous value
 * of `here` will be written.
314  *
315  * This operation is both lock-free and atomic.
316  *
317  * Params:
318  *  here      = The address of the destination variable.
319  *  writeThis = The value to store.
320  *  ifThis    = The comparison value.
321  *
322  * Returns:
323  *  true if the store occurred, false if not.
324  */
325 template cas(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)
326 {
327     /// Compare-and-set for non-shared values
328     bool cas(T, V1, V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
329     if (!is(T == shared) && is(T : V1))
330     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
331     {
332         // resolve implicit conversions
333         const T arg1 = ifThis;
334         T arg2 = writeThis;
335 
336         static if (__traits(isFloating, T))
337         {
338             alias IntTy = IntForFloat!T;
339             return atomicCompareExchangeStrongNoResult!(succ, fail)(
340                 cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
341         }
342         else
343             return atomicCompareExchangeStrongNoResult!(succ, fail)(here, arg1, arg2);
344     }
345 
346     /// Compare-and-set for shared value type
347     bool cas(T, V1, V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
348     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
349     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
350     {
351         static if (is (V1 == shared U1, U1))
352             alias Thunk1 = U1;
353         else
354             alias Thunk1 = V1;
355         static if (is (V2 == shared U2, U2))
356             alias Thunk2 = U2;
357         else
358         {
359             import core.internal.traits : hasUnsharedIndirections;
360             static assert(!hasUnsharedIndirections!V2,
361                           "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
362                           shared(T).stringof ~ "* here` would violate shared.");
363             alias Thunk2 = V2;
364         }
365         return cas(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
366     }
367 
368     /// Compare-and-set for `shared` reference type (`class`)
369     bool cas(T, V1, V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis)
370     pure nothrow @nogc @trusted
371     if (is(T == class))
372     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
373     {
374         return atomicCompareExchangeStrongNoResult!(succ, fail)(
375             cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
376     }
377 
378     /// Compare-and-exchange for non-`shared` types
379     bool cas(T, V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
380     if (!is(T == shared) && !is(V == shared))
381     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
382     {
        // resolve implicit conversions
        T arg = writeThis;

        static if (__traits(isFloating, T))
        {
            alias IntTy = IntForFloat!T;
            return atomicCompareExchangeStrong!(succ, fail)(
                cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg);
        }
        else
            return atomicCompareExchangeStrong!(succ, fail)(here, ifThis, arg);
394     }
395 
396     /// Compare and exchange for mixed-`shared`ness types
397     bool cas(T, V1, V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
398     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
399     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
400     {
401         static if (is (V1 == shared U1, U1))
402             alias Thunk1 = U1;
403         else
404         {
405             import core.internal.traits : hasUnsharedIndirections;
406             static assert(!hasUnsharedIndirections!V1,
407                           "Copying `" ~ shared(T).stringof ~ "* here` to `" ~
408                           V1.stringof ~ "* ifThis` would violate shared.");
409             alias Thunk1 = V1;
410         }
411         static if (is (V2 == shared U2, U2))
412             alias Thunk2 = U2;
413         else
414         {
415             import core.internal.traits : hasUnsharedIndirections;
416             static assert(!hasUnsharedIndirections!V2,
417                           "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
418                           shared(T).stringof ~ "* here` would violate shared.");
419             alias Thunk2 = V2;
420         }
421         static assert (is(T : Thunk1),
422                        "Mismatching types for `here` and `ifThis`: `" ~
423                        shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
424         return cas(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
425     }
426 
427     /// Compare-and-exchange for `class`
428     bool cas(T, V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis)
429     pure nothrow @nogc @trusted
430     if (is(T == class))
431     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
432     {
433         return atomicCompareExchangeStrong!(succ, fail)(
434             cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
435     }
436 }
437 
438 /**
439 * Stores 'writeThis' to the memory referenced by 'here' if the value
440 * referenced by 'here' is equal to 'ifThis'.
441 * The 'weak' version of cas may spuriously fail. It is recommended to
442 * use `casWeak` only when `cas` would be used in a loop.
443 * This operation is both
444 * lock-free and atomic.
445 *
446 * Params:
447 *  here      = The address of the destination variable.
448 *  writeThis = The value to store.
449 *  ifThis    = The comparison value.
450 *
451 * Returns:
452 *  true if the store occurred, false if not.
453 */
454 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
455     if (!is(T == shared) && is(T : V1))
456 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
457 {
458     // resolve implicit conversions
459     T arg1 = ifThis;
460     T arg2 = writeThis;
461 
462     static if (__traits(isFloating, T))
463     {
464         alias IntTy = IntForFloat!T;
465         return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
466     }
467     else
468         return atomicCompareExchangeWeakNoResult!(succ, fail)(here, arg1, arg2);
469 }
470 
471 /// Ditto
472 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
473     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
474 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
475 {
476     static if (is (V1 == shared U1, U1))
477         alias Thunk1 = U1;
478     else
479         alias Thunk1 = V1;
480     static if (is (V2 == shared U2, U2))
481         alias Thunk2 = U2;
482     else
483     {
484         import core.internal.traits : hasUnsharedIndirections;
485         static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
486         alias Thunk2 = V2;
487     }
488     return casWeak!(succ, fail)(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
489 }
490 
491 /// Ditto
492 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis) pure nothrow @nogc @trusted
493     if (is(T == class))
494 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
495 {
496     return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
497 }
498 
499 /**
500 * Stores 'writeThis' to the memory referenced by 'here' if the value
501 * referenced by 'here' is equal to the value referenced by 'ifThis'.
502 * The prior value referenced by 'here' is written to `ifThis` and
503 * returned to the user.
504 * The 'weak' version of cas may spuriously fail. It is recommended to
505 * use `casWeak` only when `cas` would be used in a loop.
506 * This operation is both lock-free and atomic.
507 *
508 * Params:
509 *  here      = The address of the destination variable.
510 *  writeThis = The value to store.
511 *  ifThis    = The address of the value to compare, and receives the prior value of `here` as output.
512 *
513 * Returns:
514 *  true if the store occurred, false if not.
515 */
516 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
517     if (!is(T == shared S, S) && !is(V == shared U, U))
518 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
519 {
    // resolve implicit conversions
    T arg = writeThis;

    static if (__traits(isFloating, T))
    {
        alias IntTy = IntForFloat!T;
        return atomicCompareExchangeWeak!(succ, fail)(cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg);
    }
    else
        return atomicCompareExchangeWeak!(succ, fail)(here, ifThis, arg);
530 }
531 
532 /// Ditto
533 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
534     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
535 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
536 {
537     static if (is (V1 == shared U1, U1))
538         alias Thunk1 = U1;
539     else
540     {
541         import core.internal.traits : hasUnsharedIndirections;
542         static assert(!hasUnsharedIndirections!V1, "Copying `" ~ shared(T).stringof ~ "* here` to `" ~ V1.stringof ~ "* ifThis` would violate shared.");
543         alias Thunk1 = V1;
544     }
545     static if (is (V2 == shared U2, U2))
546         alias Thunk2 = U2;
547     else
548     {
549         import core.internal.traits : hasUnsharedIndirections;
550         static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
551         alias Thunk2 = V2;
552     }
553     static assert (is(T : Thunk1), "Mismatching types for `here` and `ifThis`: `" ~ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
554     return casWeak!(succ, fail)(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
555 }
556 
557 /// Ditto
558 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis) pure nothrow @nogc @trusted
559     if (is(T == class))
560 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
561 {
562     return atomicCompareExchangeWeak!(succ, fail)(cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
563 }
564 
565 /**
566  * Inserts a full load/store memory fence (on platforms that need it). This ensures
567  * that all loads and stores before a call to this function are executed before any
568  * loads and stores after the call.
569  */
570 void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @safe
571 {
572     core.internal.atomic.atomicFence!order();
573 }
574 
575 /**
576  * Gives a hint to the processor that the calling thread is in a 'spin-wait' loop,
577  * allowing to more efficiently allocate resources.
578  */
579 void pause() pure nothrow @nogc @safe
580 {
581     core.internal.atomic.pause();
582 }
583 
584 /**
585  * Performs the binary operation 'op' on val using 'mod' as the modifier.
586  *
587  * Params:
588  *  val = The target variable.
589  *  mod = The modifier to apply.
590  *
591  * Returns:
592  *  The result of the operation.
593  */
594 TailShared!T atomicOp(string op, T, V1)(ref shared T val, V1 mod) pure nothrow @nogc @trusted // LDC: was @safe
595     if (__traits(compiles, mixin("*cast(T*)&val" ~ op ~ "mod")))
596 in (atomicValueIsProperlyAligned(val))
597 {
598     version (LDC)
599     {
600         import ldc.intrinsics;
601 
602         enum suitedForLLVMAtomicRmw = (__traits(isIntegral, T) && __traits(isIntegral, V1) &&
603                                        T.sizeof <= AtomicRmwSizeLimit && V1.sizeof <= AtomicRmwSizeLimit);
604     }
605     else
606         enum suitedForLLVMAtomicRmw = false;
607 
608     // binary operators
609     //
610     // +    -   *   /   %   ^^  &
611     // |    ^   <<  >>  >>> ~   in
612     // ==   !=  <   <=  >   >=
613     static if (op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
614                 op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
615                 op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
616                 op == "~"  || // skip "in"
617                 op == "==" || op == "!=" || op == "<"  || op == "<="  ||
618                 op == ">"  || op == ">=")
619     {
620         T get = atomicLoad!(MemoryOrder.raw, T)(val);
621         mixin("return get " ~ op ~ " mod;");
622     }
623     else
624     // assignment operators
625     //
626     // +=   -=  *=  /=  %=  ^^= &=
627     // |=   ^=  <<= >>= >>>=    ~=
628     static if (op == "+=" && suitedForLLVMAtomicRmw)
629     {
630         T m = cast(T) mod;
631         return cast(T) (llvm_atomic_rmw_add(&val, m) + m);
632     }
633     else static if (op == "-=" && suitedForLLVMAtomicRmw)
634     {
635         T m = cast(T) mod;
636         return cast(T) (llvm_atomic_rmw_sub(&val, m) - m);
637     }
638     else static if (op == "&=" && suitedForLLVMAtomicRmw)
639     {
640         T m = cast(T) mod;
641         return cast(T) (llvm_atomic_rmw_and(&val, m) & m);
642     }
643     else static if (op == "|=" && suitedForLLVMAtomicRmw)
644     {
645         T m = cast(T) mod;
646         return cast(T) (llvm_atomic_rmw_or(&val, m) | m);
647     }
648     else static if (op == "^=" && suitedForLLVMAtomicRmw)
649     {
650         T m = cast(T) mod;
651         return cast(T) (llvm_atomic_rmw_xor(&val, m) ^ m);
652     }
653     else static if (op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
654     {
655         return cast(T)(atomicFetchAdd(val, mod) + mod);
656     }
657     else static if (op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
658     {
659         return cast(T)(atomicFetchSub(val, mod) - mod);
660     }
661     else static if (op == "+=" || op == "-="  || op == "*="  || op == "/=" ||
662                 op == "%=" || op == "^^=" || op == "&="  || op == "|=" ||
663                 op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=") // skip "~="
664     {
665         T set, get = atomicLoad!(MemoryOrder.raw, T)(val);
666         do
667         {
668             set = get;
669             mixin("set " ~ op ~ " mod;");
670         } while (!casWeakByRef(val, get, set));
671         return set;
672     }
673     else
674     {
675         static assert(false, "Operation not supported.");
676     }
677 }
678 
679 
680 version (LDC)
681 {
682     enum has64BitXCHG = true;
683     enum has64BitCAS = true;
684 
685     // Enable 128bit CAS on 64bit platforms if supported.
686     version (D_LP64)
687     {
688         version (PPC64)
689             enum has128BitCAS = false;
690         else
691             enum has128BitCAS = true;
692     }
693     else
694         enum has128BitCAS = false;
695 }
696 else version (D_InlineAsm_X86)
697 {
698     enum has64BitXCHG = false;
699     enum has64BitCAS = true;
700     enum has128BitCAS = false;
701 }
702 else version (D_InlineAsm_X86_64)
703 {
704     enum has64BitXCHG = true;
705     enum has64BitCAS = true;
706     enum has128BitCAS = true;
707 }
708 else version (GNU)
709 {
710     import gcc.config;
711     enum has64BitCAS = GNU_Have_64Bit_Atomics;
712     enum has64BitXCHG = GNU_Have_64Bit_Atomics;
713     enum has128BitCAS = GNU_Have_LibAtomic;
714 }
715 else
716 {
717     enum has64BitXCHG = false;
718     enum has64BitCAS = false;
719     enum has128BitCAS = false;
720 }
721 
722 private
723 {
724     bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
725     {
726         return atomicPtrIsProperlyAligned(&val);
727     }
728 
729     bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
730     {
731         // NOTE: Strictly speaking, the x86 supports atomic operations on
732         //       unaligned values.  However, this is far slower than the
733         //       common case, so such behavior should be prohibited.
734         static if (T.sizeof > size_t.sizeof)
735         {
736             version (X86)
737             {
738                 // cmpxchg8b only requires 4-bytes alignment
739                 return cast(size_t)ptr % size_t.sizeof == 0;
740             }
741             else
742             {
743                 // e.g., x86_64 cmpxchg16b requires 16-bytes alignment
744                 return cast(size_t)ptr % T.sizeof == 0;
745             }
746         }
747         else
748         {
749             return cast(size_t)ptr % T.sizeof == 0;
750         }
751     }
752 
753     template IntForFloat(F)
754         if (__traits(isFloating, F))
755     {
756         static if (F.sizeof == 4)
757             alias IntForFloat = uint;
758         else static if (F.sizeof == 8)
759             alias IntForFloat = ulong;
760         else
761             static assert (false, "Invalid floating point type: " ~ F.stringof ~ ", only support `float` and `double`.");
762     }
763 
    template IntForStruct(S)
        if (is(S == struct))
    {
        static if (S.sizeof == 1)
            alias IntForStruct = ubyte;
        else static if (S.sizeof == 2)
            alias IntForStruct = ushort;
        else static if (S.sizeof == 4)
            alias IntForStruct = uint;
        else static if (S.sizeof == 8)
            alias IntForStruct = ulong;
        else static if (S.sizeof == 16)
            alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
        else
            static assert (ValidateStruct!S);
    }
780 
781     template ValidateStruct(S)
782         if (is(S == struct))
783     {
784         import core.internal.traits : hasElaborateAssign;
785 
786         // `(x & (x-1)) == 0` checks that x is a power of 2.
787         static assert (S.sizeof <= size_t.sizeof * 2
788             && (S.sizeof & (S.sizeof - 1)) == 0,
789             S.stringof ~ " has invalid size for atomic operations.");
790         static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
791 
792         enum ValidateStruct = true;
793     }
794 
795     // TODO: it'd be nice if we had @trusted scopes; we could remove this...
796     bool casWeakByRef(T,V1,V2)(ref T value, ref V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
797     {
798         return casWeak(&value, &ifThis, writeThis);
799     }
800 
801     /* Construct a type with a shared tail, and if possible with an unshared
802     head. */
803     template TailShared(U) if (!is(U == shared))
804     {
805         alias TailShared = .TailShared!(shared U);
806     }
807     template TailShared(S) if (is(S == shared))
808     {
809         // Get the unshared variant of S.
810         static if (is(S U == shared U)) {}
811         else static assert(false, "Should never be triggered. The `static " ~
812             "if` declares `U` as the unshared version of the shared type " ~
813             "`S`. `S` is explicitly declared as shared, so getting `U` " ~
814             "should always work.");
815 
816         static if (is(S : U))
817             alias TailShared = U;
818         else static if (is(S == struct))
819         {
820             enum implName = () {
821                 /* Start with "_impl". If S has a field with that name, append
822                 underscores until the clash is resolved. */
823                 string name = "_impl";
824                 string[] fieldNames;
825                 static foreach (alias field; S.tupleof)
826                 {
827                     fieldNames ~= __traits(identifier, field);
828                 }
829                 static bool canFind(string[] haystack, string needle)
830                 {
831                     foreach (candidate; haystack)
832                     {
833                         if (candidate == needle) return true;
834                     }
835                     return false;
836                 }
837                 while (canFind(fieldNames, name)) name ~= "_";
838                 return name;
839             } ();
840             struct TailShared
841             {
842                 static foreach (i, alias field; S.tupleof)
843                 {
844                     /* On @trusted: This is casting the field from shared(Foo)
845                     to TailShared!Foo. The cast is safe because the field has
846                     been loaded and is not shared anymore. */
847                     mixin("
848                         @trusted @property
849                         ref " ~ __traits(identifier, field) ~ "()
850                         {
851                             alias R = TailShared!(typeof(field));
852                             return * cast(R*) &" ~ implName ~ ".tupleof[i];
853                         }
854                     ");
855                 }
856                 mixin("
857                     S " ~ implName ~ ";
858                     alias " ~ implName ~ " this;
859                 ");
860             }
861         }
862         else
863             alias TailShared = S;
864     }
865     @safe unittest
866     {
867         // No tail (no indirections) -> fully unshared.
868 
869         static assert(is(TailShared!int == int));
870         static assert(is(TailShared!(shared int) == int));
871 
872         static struct NoIndir { int i; }
873         static assert(is(TailShared!NoIndir == NoIndir));
874         static assert(is(TailShared!(shared NoIndir) == NoIndir));
875 
876         // Tail can be independently shared or is already -> tail-shared.
877 
878         static assert(is(TailShared!(int*) == shared(int)*));
879         static assert(is(TailShared!(shared int*) == shared(int)*));
880         static assert(is(TailShared!(shared(int)*) == shared(int)*));
881 
882         static assert(is(TailShared!(int[]) == shared(int)[]));
883         static assert(is(TailShared!(shared int[]) == shared(int)[]));
884         static assert(is(TailShared!(shared(int)[]) == shared(int)[]));
885 
886         static struct S1 { shared int* p; }
887         static assert(is(TailShared!S1 == S1));
888         static assert(is(TailShared!(shared S1) == S1));
889 
890         static struct S2 { shared(int)* p; }
891         static assert(is(TailShared!S2 == S2));
892         static assert(is(TailShared!(shared S2) == S2));
893 
894         // Tail follows shared-ness of head -> fully shared.
895 
896         static class C { int i; }
897         static assert(is(TailShared!C == shared C));
898         static assert(is(TailShared!(shared C) == shared C));
899 
900         /* However, structs get a wrapper that has getters which cast to
901         TailShared. */
902 
903         static struct S3 { int* p; int _impl; int _impl_; int _impl__; }
904         static assert(!is(TailShared!S3 : S3));
905         static assert(is(TailShared!S3 : shared S3));
906         static assert(is(TailShared!(shared S3) == TailShared!S3));
907 
908         static struct S4 { shared(int)** p; }
909         static assert(!is(TailShared!S4 : S4));
910         static assert(is(TailShared!S4 : shared S4));
911         static assert(is(TailShared!(shared S4) == TailShared!S4));
912     }
913 }
914 
915 
916 ////////////////////////////////////////////////////////////////////////////////
917 // Unit Tests
918 ////////////////////////////////////////////////////////////////////////////////
919 
920 
921 version (CoreUnittest)
922 {
923     version (D_LP64)
924     {
925         enum hasDWCAS = has128BitCAS;
926     }
927     else
928     {
929         enum hasDWCAS = has64BitCAS;
930     }
931 
932     void testXCHG(T)(T val) pure nothrow @nogc @trusted
933     in
934     {
935         assert(val !is T.init);
936     }
937     do
938     {
939         T         base = cast(T)null;
940         shared(T) atom = cast(shared(T))null;
941 
942         assert(base !is val, T.stringof);
943         assert(atom is base, T.stringof);
944 
945         assert(atomicExchange(&atom, val) is base, T.stringof);
946         assert(atom is val, T.stringof);
947     }
948 
949     void testCAS(T)(T val) pure nothrow @nogc @trusted
950     in
951     {
952         assert(val !is T.init);
953     }
954     do
955     {
956         T         base = cast(T)null;
957         shared(T) atom = cast(shared(T))null;
958 
959         assert(base !is val, T.stringof);
960         assert(atom is base, T.stringof);
961 
962         assert(cas(&atom, base, val), T.stringof);
963         assert(atom is val, T.stringof);
964         assert(!cas(&atom, base, base), T.stringof);
965         assert(atom is val, T.stringof);
966 
967         atom = cast(shared(T))null;
968 
969         shared(T) arg = base;
970         assert(cas(&atom, &arg, val), T.stringof);
971         assert(arg is base, T.stringof);
972         assert(atom is val, T.stringof);
973 
974         arg = base;
975         assert(!cas(&atom, &arg, base), T.stringof);
976         assert(arg is val, T.stringof);
977         assert(atom is val, T.stringof);
978     }
979 
980     void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)(T val = T.init + 1) pure nothrow @nogc @trusted
981     {
982         T         base = cast(T) 0;
983         shared(T) atom = cast(T) 0;
984 
985         assert(base !is val);
986         assert(atom is base);
987         atomicStore!(ms)(atom, val);
988         base = atomicLoad!(ms)(atom);
989 
990         assert(base is val, T.stringof);
991         assert(atom is val);
992     }
993 
994 
995     void testType(T)(T val = T.init + 1) pure nothrow @nogc @safe
996     {
997         static if (T.sizeof < 8 || has64BitXCHG)
998             testXCHG!(T)(val);
999         testCAS!(T)(val);
1000         testLoadStore!(MemoryOrder.seq, T)(val);
1001         testLoadStore!(MemoryOrder.raw, T)(val);
1002     }
1003 
1004     @betterC @safe pure nothrow unittest
1005     {
1006         testType!(bool)();
1007 
1008         testType!(byte)();
1009         testType!(ubyte)();
1010 
1011         testType!(short)();
1012         testType!(ushort)();
1013 
1014         testType!(int)();
1015         testType!(uint)();
1016     }
1017 
1018     @safe pure nothrow unittest
1019     {
1020 
1021         testType!(shared int*)();
1022 
1023         static interface Inter {}
1024         static class KlassImpl : Inter {}
1025         testXCHG!(shared Inter)(new shared(KlassImpl));
1026         testCAS!(shared Inter)(new shared(KlassImpl));
1027 
1028         static class Klass {}
1029         testXCHG!(shared Klass)(new shared(Klass));
1030         testCAS!(shared Klass)(new shared(Klass));
1031 
1032         testXCHG!(shared int)(42);
1033 
1034         testType!(float)(0.1f);
1035 
1036         static if (has64BitCAS)
1037         {
1038             testType!(double)(0.1);
1039             testType!(long)();
1040             testType!(ulong)();
1041         }
1042         static if (has128BitCAS)
1043         {
1044             () @trusted
1045             {
1046                 align(16) struct Big { long a, b; }
1047 
1048                 shared(Big) atom;
1049                 shared(Big) base;
1050                 shared(Big) arg;
1051                 shared(Big) val = Big(1, 2);
1052 
1053                 assert(cas(&atom, arg, val), Big.stringof);
1054                 assert(atom is val, Big.stringof);
1055                 assert(!cas(&atom, arg, val), Big.stringof);
1056                 assert(atom is val, Big.stringof);
1057 
1058                 atom = Big();
1059                 assert(cas(&atom, &arg, val), Big.stringof);
1060                 assert(arg is base, Big.stringof);
1061                 assert(atom is val, Big.stringof);
1062 
1063                 arg = Big();
1064                 assert(!cas(&atom, &arg, base), Big.stringof);
1065                 assert(arg is val, Big.stringof);
1066                 assert(atom is val, Big.stringof);
1067             }();
1068         }
1069 
1070         shared(size_t) i;
1071 
1072         atomicOp!"+="(i, cast(size_t) 1);
1073         assert(i == 1);
1074 
1075         atomicOp!"-="(i, cast(size_t) 1);
1076         assert(i == 0);
1077 
1078         shared float f = 0.1f;
1079         atomicOp!"+="(f, 0.1f);
1080         assert(f > 0.1999f && f < 0.2001f);
1081 
1082         static if (has64BitCAS)
1083         {
1084             shared double d = 0.1;
1085             atomicOp!"+="(d, 0.1);
1086             assert(d > 0.1999 && d < 0.2001);
1087         }
1088     }
1089 
1090     @betterC pure nothrow unittest
1091     {
1092         static if (has128BitCAS)
1093         {
1094             struct DoubleValue
1095             {
1096                 long value1;
1097                 long value2;
1098             }
1099 
1100             align(16) shared DoubleValue a;
1101             atomicStore(a, DoubleValue(1,2));
            assert(a.value1 == 1 && a.value2 == 2);
1103 
1104             while (!cas(&a, DoubleValue(1,2), DoubleValue(3,4))){}
            assert(a.value1 == 3 && a.value2 == 4);
1106 
1107             align(16) DoubleValue b = atomicLoad(a);
            assert(b.value1 == 3 && b.value2 == 4);
1109         }
1110 
1111         static if (hasDWCAS)
1112         {
1113             static struct List { size_t gen; List* next; }
1114             shared(List) head;
1115             assert(cas(&head, shared(List)(0, null), shared(List)(1, cast(List*)1)));
1116             assert(head.gen == 1);
1117             assert(cast(size_t)head.next == 1);
1118         }
1119 
1120         // https://issues.dlang.org/show_bug.cgi?id=20629
1121         static struct Struct
1122         {
1123             uint a, b;
1124         }
1125         shared Struct s1 = Struct(1, 2);
1126         atomicStore(s1, Struct(3, 4));
1127         assert(cast(uint) s1.a == 3);
1128         assert(cast(uint) s1.b == 4);
1129     }
1130 
1131     // https://issues.dlang.org/show_bug.cgi?id=20844
1132     static if (hasDWCAS)
1133     {
1134         debug: // tests CAS in-contract
1135 
1136         pure nothrow unittest
1137         {
1138             import core.exception : AssertError;
1139 
1140             align(16) shared ubyte[2 * size_t.sizeof + 1] data;
1141             auto misalignedPointer = cast(size_t[2]*) &data[1];
1142             size_t[2] x;
1143 
1144             try
1145                 cas(misalignedPointer, x, x);
1146             catch (AssertError)
1147                 return;
1148 
1149             assert(0, "should have failed");
1150         }
1151     }
1152 
1153     @betterC pure nothrow @nogc @safe unittest
1154     {
1155         int a;
1156         if (casWeak!(MemoryOrder.acq_rel, MemoryOrder.raw)(&a, 0, 4))
1157             assert(a == 4);
1158     }
1159 
1160     @betterC pure nothrow unittest
1161     {
1162         // https://issues.dlang.org/show_bug.cgi?id=17821
1163         {
1164             shared ulong x = 0x1234_5678_8765_4321;
1165             atomicStore(x, 0);
1166             assert(x == 0);
1167         }
1168         {
1169             struct S
1170             {
1171                 ulong x;
1172                 alias x this;
1173             }
1174             shared S s;
1175             s = 0x1234_5678_8765_4321;
1176             atomicStore(s, 0);
1177             assert(s.x == 0);
1178         }
1179         {
1180             abstract class Logger {}
1181             shared Logger s1;
1182             Logger s2;
1183             atomicStore(s1, cast(shared) s2);
1184         }
1185     }
1186 
1187     @betterC pure nothrow unittest
1188     {
1189         static struct S { int val; }
1190         auto s = shared(S)(1);
1191 
1192         shared(S*) ptr;
1193 
1194         // head unshared
1195         shared(S)* ifThis = null;
1196         shared(S)* writeThis = &s;
1197         assert(ptr is null);
1198         assert(cas(&ptr, ifThis, writeThis));
1199         assert(ptr is writeThis);
1200 
1201         // head shared
1202         shared(S*) ifThis2 = writeThis;
1203         shared(S*) writeThis2 = null;
1204         assert(cas(&ptr, ifThis2, writeThis2));
1205         assert(ptr is null);
1206     }
1207 
1208     // === atomicFetchAdd and atomicFetchSub operations ====
1209     @betterC pure nothrow @nogc @safe unittest
1210     {
1211         shared ubyte u8 = 1;
1212         shared ushort u16 = 2;
1213         shared uint u32 = 3;
1214         shared byte i8 = 5;
1215         shared short i16 = 6;
1216         shared int i32 = 7;
1217 
1218         assert(atomicOp!"+="(u8, 8) == 9);
1219         assert(atomicOp!"+="(u16, 8) == 10);
1220         assert(atomicOp!"+="(u32, 8) == 11);
1221         assert(atomicOp!"+="(i8, 8) == 13);
1222         assert(atomicOp!"+="(i16, 8) == 14);
1223         assert(atomicOp!"+="(i32, 8) == 15);
1224         version (D_LP64)
1225         {
1226             shared ulong u64 = 4;
1227             shared long i64 = 8;
1228             assert(atomicOp!"+="(u64, 8) == 12);
1229             assert(atomicOp!"+="(i64, 8) == 16);
1230         }
1231     }
1232 
1233     @betterC pure nothrow @nogc unittest
1234     {
1235         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1236         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1237 
1238         {
1239             auto array = byteArray;
1240             byte* ptr = &array[0];
1241             byte* prevPtr = atomicFetchAdd(ptr, 3);
1242             assert(prevPtr == &array[0]);
1243             assert(*prevPtr == 1);
1244             assert(*ptr == 7);
1245         }
1246         {
1247             auto array = ulongArray;
1248             ulong* ptr = &array[0];
1249             ulong* prevPtr = atomicFetchAdd(ptr, 3);
1250             assert(prevPtr == &array[0]);
1251             assert(*prevPtr == 2);
1252             assert(*ptr == 8);
1253         }
1254     }
1255 
1256     @betterC pure nothrow @nogc @safe unittest
1257     {
1258         shared ubyte u8 = 1;
1259         shared ushort u16 = 2;
1260         shared uint u32 = 3;
1261         shared byte i8 = 5;
1262         shared short i16 = 6;
1263         shared int i32 = 7;
1264 
1265         assert(atomicOp!"-="(u8, 1) == 0);
1266         assert(atomicOp!"-="(u16, 1) == 1);
1267         assert(atomicOp!"-="(u32, 1) == 2);
1268         assert(atomicOp!"-="(i8, 1) == 4);
1269         assert(atomicOp!"-="(i16, 1) == 5);
1270         assert(atomicOp!"-="(i32, 1) == 6);
1271         version (D_LP64)
1272         {
1273             shared ulong u64 = 4;
1274             shared long i64 = 8;
1275             assert(atomicOp!"-="(u64, 1) == 3);
1276             assert(atomicOp!"-="(i64, 1) == 7);
1277         }
1278     }
1279 
1280     @betterC pure nothrow @nogc unittest
1281     {
1282         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1283         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1284 
1285         {
1286             auto array = byteArray;
1287             byte* ptr = &array[5];
1288             byte* prevPtr = atomicFetchSub(ptr, 4);
1289             assert(prevPtr == &array[5]);
1290             assert(*prevPtr == 11);
1291             assert(*ptr == 3); // https://issues.dlang.org/show_bug.cgi?id=21578
1292         }
1293         {
1294             auto array = ulongArray;
1295             ulong* ptr = &array[5];
1296             ulong* prevPtr = atomicFetchSub(ptr, 4);
1297             assert(prevPtr == &array[5]);
1298             assert(*prevPtr == 12);
1299             assert(*ptr == 4); // https://issues.dlang.org/show_bug.cgi?id=21578
1300         }
1301     }
1302 
1303     @betterC pure nothrow @nogc @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16651
1304     {
1305         shared ulong a = 2;
1306         uint b = 1;
1307         atomicOp!"-="(a, b);
1308         assert(a == 1);
1309 
1310         shared uint c = 2;
1311         ubyte d = 1;
1312         atomicOp!"-="(c, d);
1313         assert(c == 1);
1314     }
1315 
1316     pure nothrow @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16230
1317     {
1318         shared int i;
1319         static assert(is(typeof(atomicLoad(i)) == int));
1320 
1321         shared int* p;
1322         static assert(is(typeof(atomicLoad(p)) == shared(int)*));
1323 
1324         shared int[] a;
1325         static if (__traits(compiles, atomicLoad(a)))
1326         {
1327             static assert(is(typeof(atomicLoad(a)) == shared(int)[]));
1328         }
1329 
1330         static struct S { int* _impl; }
1331         shared S s;
1332         static assert(is(typeof(atomicLoad(s)) : shared S));
1333         static assert(is(typeof(atomicLoad(s)._impl) == shared(int)*));
1334         auto u = atomicLoad(s);
1335         assert(u._impl is null);
1336         u._impl = new shared int(42);
1337         assert(atomicLoad(*u._impl) == 42);
1338 
1339         static struct S2 { S s; }
1340         shared S2 s2;
1341         static assert(is(typeof(atomicLoad(s2).s) == TailShared!S));
1342 
1343         static struct S3 { size_t head; int* tail; }
1344         shared S3 s3;
1345         static if (__traits(compiles, atomicLoad(s3)))
1346         {
1347             static assert(is(typeof(atomicLoad(s3).head) == size_t));
1348             static assert(is(typeof(atomicLoad(s3).tail) == shared(int)*));
1349         }
1350 
1351         static class C { int i; }
1352         shared C c;
1353         static assert(is(typeof(atomicLoad(c)) == shared C));
1354 
1355         static struct NoIndirections { int i; }
1356         shared NoIndirections n;
1357         static assert(is(typeof(atomicLoad(n)) == NoIndirections));
1358     }
1359 
    unittest // https://issues.dlang.org/show_bug.cgi?id=21631
1361     {
1362         shared uint si1 = 45;
1363         shared uint si2 = 38;
1364         shared uint* psi = &si1;
1365 
1366         assert((&psi).cas(cast(const) psi, &si2));
1367     }
1368 }