1 // Written in the D programming language.
2 /**
3 Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/region.d)
4 */
5 module std.experimental.allocator.building_blocks.region;
6 
7 import std.experimental.allocator.building_blocks.null_allocator;
8 import std.experimental.allocator.common;
9 import std.typecons : Flag, Yes, No;
10 
11 version (OSX)
12     version = Darwin;
13 else version (iOS)
14     version = Darwin;
15 else version (TVOS)
16     version = Darwin;
17 else version (WatchOS)
18     version = Darwin;
19 
20 /**
21 A `Region` allocator allocates memory straight from one contiguous chunk.
22 There is no deallocation, and once the region is full, allocation requests
23 return `null`. Therefore, `Region`s are often used (a) in conjunction with
24 more sophisticated allocators, or (b) for very fast batch-style allocations
25 that are deallocated all at once.
26 
27 The region stores only three pointers: the current position in the store and
28 the two limits. One allocation entails rounding up the requested size for
29 alignment purposes, bumping the current pointer, and comparing it against the
30 limit.
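
In essence, the fast path is the following simplified sketch (the actual
implementation, including overflow and zero-size checks, lives in
`BorrowedRegion.allocate`):

----
// Simplified sketch of an upward-growing region's allocation fast path
void[] allocate(size_t n)
{
    const rounded = n.roundUpToAlignment(alignment); // alignment adjustment
    if (available < rounded) return null;            // compare against the limit
    auto result = _current[0 .. n];
    _current += rounded;                             // bump the current pointer
    return result;
}
----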
31 
32 `Region` deallocates the chunk of memory during destruction.
33 
34 The `minAlign` parameter establishes alignment. If $(D minAlign > 1), the
35 sizes of all allocation requests are rounded up to a multiple of `minAlign`.
36 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
37 control alignment externally.
38 
39 */
40 struct Region(ParentAllocator,
41     uint minAlign = platformAlignment,
42     Flag!"growDownwards" growDownwards = No.growDownwards)
43 {
44     static assert(minAlign.isGoodStaticAlignment);
45     static assert(ParentAllocator.alignment >= minAlign);
46 
47     import std.traits : hasMember;
48     import std.typecons : Ternary;
49 
50     // state
51     /**
52     The _parent allocator. Depending on whether `ParentAllocator` holds state
53     or not, this is a member variable or an alias for
54     `ParentAllocator.instance`.
55     */
56     static if (stateSize!ParentAllocator)
57     {
58         ParentAllocator parent;
59     }
60     else
61     {
62         alias parent = ParentAllocator.instance;
63     }
64 
65     private BorrowedRegion!(minAlign, growDownwards) _impl;
66 
67     private void* roundedBegin() const pure nothrow @trusted @nogc
68     {
69         return _impl.roundedBegin;
70     }
71 
72     private void* roundedEnd() const pure nothrow @trusted @nogc
73     {
74         return _impl.roundedEnd;
75     }
76     /**
77     Constructs a region backed by a user-provided store.
78     Assumes the memory was allocated with `ParentAllocator`.
79 
80     Params:
81         store = User-provided store backing up the region. Assumed to have been
82         allocated with `ParentAllocator`.
83         n = Bytes to allocate using `ParentAllocator`. If `parent.allocate(n)`
84         returns `null`, the region will be initialized as empty (correctly
85         initialized but unable to allocate).
86     */
87     this(ubyte[] store) pure nothrow @nogc
88     {
89         _impl = store;
90     }
91 
92     /// Ditto
93     static if (!stateSize!ParentAllocator)
94     this(size_t n)
95     {
96         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
97     }
98 
99     /// Ditto
100     static if (stateSize!ParentAllocator)
101     this(ParentAllocator parent, size_t n)
102     {
103         this.parent = parent;
104         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
105     }
106 
107     /*
108 TODO: The postblit of `Region` should be disabled because such objects
109     should not be copied around naively.
110     */
111 
112     /**
113     If `ParentAllocator` defines `deallocate`, the region defines a destructor
114     that uses `ParentAllocator.deallocate` to free the memory chunk.
115     */
116     static if (hasMember!(ParentAllocator, "deallocate"))
117     ~this()
118     {
119         with (_impl) parent.deallocate(_begin[0 .. _end - _begin]);
120     }
121 
122     /**
123     Rounds the given size to a multiple of the `alignment`
124     */
125     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
126     {
127         return _impl.goodAllocSize(n);
128     }
129 
130     /**
131     Alignment offered.
132     */
133     alias alignment = minAlign;
134 
135     /**
136     Allocates `n` bytes of memory. The shortest path involves an alignment
137     adjustment (if $(D alignment > 1)), an increment, and a comparison.
138 
139     Params:
140         n = number of bytes to allocate
141 
142     Returns:
143         A properly-aligned buffer of size `n`, or `null` if the request could
144         not be satisfied.
145     */
146     void[] allocate(size_t n) pure nothrow @trusted @nogc
147     {
148         return _impl.allocate(n);
149     }
150 
151     /**
152     Allocates `n` bytes of memory aligned at alignment `a`.
153 
154     Params:
155         n = number of bytes to allocate
156         a = alignment for the allocated block
157 
158     Returns:
159         Either a suitable block of `n` bytes aligned at `a`, or `null`.
160     */
161     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
162     {
163         return _impl.alignedAllocate(n, a);
164     }
165 
166     /// Allocates and returns all memory available to this region.
167     void[] allocateAll() pure nothrow @trusted @nogc
168     {
169         return _impl.allocateAll;
170     }
171 
172     /**
173     Expands an allocated block in place. Expansion will succeed only if the
174     block is the last allocated. Defined only if `growDownwards` is
175     `No.growDownwards`.
176     */
177     static if (growDownwards == No.growDownwards)
178     bool expand(ref void[] b, size_t delta) pure nothrow @safe @nogc
179     {
180         return _impl.expand(b, delta);
181     }
182 
183     /**
184     Deallocates `b`. This works only if `b` was obtained from the last call
185     to `allocate`; otherwise (i.e. if another allocation has occurred since) it
186     does nothing.
187 
188     Params:
189         b = Block previously obtained by a call to `allocate` against this
190         allocator (`null` is allowed).
191     */
192     bool deallocate(void[] b) pure nothrow @nogc
193     {
194         return _impl.deallocate(b);
195     }
196 
197     /**
198     Deallocates all memory allocated by this region, which can be subsequently
199     reused for new allocations.
200     */
201     bool deallocateAll() pure nothrow @nogc
202     {
203         return _impl.deallocateAll;
204     }
205 
206     /**
207     Queries whether `b` has been allocated with this region.
208 
209     Params:
210         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
211         `Ternary.no`).
212 
213     Returns:
214         `Ternary.yes` if `b` has been allocated with this region, `Ternary.no` otherwise.
215     */
216     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
217     {
218         return _impl.owns(b);
219     }
220 
221     /**
222     Returns `Ternary.yes` if no memory has been allocated in this region,
223     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
224     */
225     Ternary empty() const pure nothrow @safe @nogc
226     {
227         return _impl.empty;
228     }
229 
230     /// Nonstandard property that returns bytes available for allocation.
231     size_t available() const @safe pure nothrow @nogc
232     {
233         return _impl.available;
234     }
235 }
236 
237 ///
238 @system nothrow unittest
239 {
240     import std.algorithm.comparison : max;
241     import std.experimental.allocator.building_blocks.allocator_list
242         : AllocatorList;
243     import std.experimental.allocator.mallocator : Mallocator;
244     import std.typecons : Ternary;
245     // Create a scalable list of regions. Each gets at least 1MB at a time by
246     // using malloc.
247     auto batchAllocator = AllocatorList!(
248         (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
249     )();
250     assert(batchAllocator.empty == Ternary.yes);
251     auto b = batchAllocator.allocate(101);
252     assert(b.length == 101);
253     assert(batchAllocator.empty == Ternary.no);
254     // This will cause a second allocation
255     b = batchAllocator.allocate(2 * 1024 * 1024);
256     assert(b.length == 2 * 1024 * 1024);
257     // Destructor will free the memory
258 }
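
@system nothrow @nogc unittest
{
    // Minimal sketch: a Region over a store obtained from its own parent
    // allocator; the destructor returns the store to Mallocator.
    import std.experimental.allocator.mallocator : Mallocator;

    auto store = cast(ubyte[]) Mallocator.instance.allocate(1024);
    auto reg = Region!(Mallocator)(store);
    auto b = reg.allocate(101);
    assert(b.length == 101);
}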
259 
260 @system nothrow @nogc unittest
261 {
262     import std.experimental.allocator.mallocator : Mallocator;
263     import std.typecons : Ternary;
264 
265     static void testAlloc(Allocator)(ref Allocator a)
266     {
267         assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
268         const b = a.allocate(101);
269         assert(b.length == 101);
270         assert((() nothrow @safe @nogc => a.owns(b))() == Ternary.yes);
271 
272         // Ensure deallocate inherits from parent allocators
273         auto c = a.allocate(42);
274         assert(c.length == 42);
275         assert((() nothrow @nogc => a.deallocate(c))());
276         assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.no);
277     }
278 
279     // Create a 64 KB region allocated with malloc
280     auto reg = Region!(Mallocator, Mallocator.alignment,
281         Yes.growDownwards)(1024 * 64);
282     testAlloc(reg);
283 
284     // Create a 64 KB shared region allocated with malloc
285     auto sharedReg = SharedRegion!(Mallocator, Mallocator.alignment,
286         Yes.growDownwards)(1024 * 64);
287     testAlloc(sharedReg);
288 }
289 
290 @system nothrow @nogc unittest
291 {
292     // test 'this(ubyte[] store)' constructed regions properly clean up
293     // their inner storage after destruction
294     import std.experimental.allocator.mallocator : Mallocator;
295 
296     static shared struct LocalAllocator
297     {
298     nothrow @nogc:
299         enum alignment = Mallocator.alignment;
300         void[] buf;
301         bool deallocate(void[] b)
302         {
303             assert(buf.ptr == b.ptr && buf.length == b.length);
304             return true;
305         }
306 
307         void[] allocate(size_t n)
308         {
309             return null;
310         }
311 
312     }
313 
314     enum bufLen = 10 * Mallocator.alignment;
315     void[] tmp = Mallocator.instance.allocate(bufLen);
316 
317     LocalAllocator a;
318     a.buf = cast(typeof(a.buf)) tmp[1 .. $];
319 
320     auto reg = Region!(LocalAllocator, Mallocator.alignment,
321         Yes.growDownwards)(cast(ubyte[]) a.buf);
322     auto sharedReg = SharedRegion!(LocalAllocator, Mallocator.alignment,
323         Yes.growDownwards)(cast(ubyte[]) a.buf);
324     reg.parent = a;
325     sharedReg.parent = a;
326 
327     Mallocator.instance.deallocate(tmp);
328 }
329 
330 version (StdUnittest)
331 @system unittest
332 {
333     import std.experimental.allocator.mallocator : Mallocator;
334 
335     testAllocator!(() => Region!(Mallocator)(1024 * 64));
336     testAllocator!(() => Region!(Mallocator, Mallocator.alignment, Yes.growDownwards)(1024 * 64));
337 
338     testAllocator!(() => SharedRegion!(Mallocator)(1024 * 64));
339     testAllocator!(() => SharedRegion!(Mallocator, Mallocator.alignment, Yes.growDownwards)(1024 * 64));
340 }
341 
342 @system nothrow @nogc unittest
343 {
344     import std.experimental.allocator.mallocator : Mallocator;
345 
346     auto reg = Region!(Mallocator)(1024 * 64);
347     auto b = reg.allocate(101);
348     assert(b.length == 101);
349     assert((() pure nothrow @safe @nogc => reg.expand(b, 20))());
350     assert((() pure nothrow @safe @nogc => reg.expand(b, 73))());
351     assert((() pure nothrow @safe @nogc => !reg.expand(b, 1024 * 64))());
352     assert((() nothrow @nogc => reg.deallocateAll())());
353 }
354 
355 /**
356 A `BorrowedRegion` allocates directly from a user-provided block of memory.
357 
358 Unlike a `Region`, a `BorrowedRegion` does not own the memory it allocates from
359 and will not deallocate that memory upon destruction. Instead, it is the user's
360 responsibility to ensure that the memory is properly disposed of.
361 
362 In all other respects, a `BorrowedRegion` behaves exactly like a `Region`.
363 */
364 struct BorrowedRegion(uint minAlign = platformAlignment,
365     Flag!"growDownwards" growDownwards = No.growDownwards)
366 {
367     static assert(minAlign.isGoodStaticAlignment);
368 
369     import std.typecons : Ternary;
370 
371     // state
372     private void* _current, _begin, _end;
373 
374     private void* roundedBegin() const pure nothrow @trusted @nogc
375     {
376         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
377     }
378 
379     private void* roundedEnd() const pure nothrow @trusted @nogc
380     {
381         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
382     }
383 
384     /**
385     Constructs a region backed by a user-provided store.
386 
387     Params:
388         store = User-provided store backing up the region.
389     */
390     this(ubyte[] store) pure nothrow @nogc
391     {
392         _begin = store.ptr;
393         _end = store.ptr + store.length;
394         static if (growDownwards)
395             _current = roundedEnd();
396         else
397             _current = roundedBegin();
398     }
399 
400     /*
401     TODO: The postblit of `BorrowedRegion` should be disabled because such objects
402     should not be copied around naively.
403     */
404 
405     /**
406     Rounds the given size to a multiple of the `alignment`
407     */
408     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
409     {
410         return n.roundUpToAlignment(alignment);
411     }
412 
413     /**
414     Alignment offered.
415     */
416     alias alignment = minAlign;
417 
418     /**
419     Allocates `n` bytes of memory. The shortest path involves an alignment
420     adjustment (if $(D alignment > 1)), an increment, and a comparison.
421 
422     Params:
423         n = number of bytes to allocate
424 
425     Returns:
426         A properly-aligned buffer of size `n`, or `null` if the request could
427         not be satisfied.
428     */
429     void[] allocate(size_t n) pure nothrow @trusted @nogc
430     {
431         const rounded = goodAllocSize(n);
432         if (n == 0 || rounded < n || available < rounded) return null;
433 
434         static if (growDownwards)
435         {
436             assert(available >= rounded);
437             auto result = (_current - rounded)[0 .. n];
438             assert(result.ptr >= _begin);
439             _current = result.ptr;
440             assert(owns(result) == Ternary.yes);
441         }
442         else
443         {
444             auto result = _current[0 .. n];
445             _current += rounded;
446         }
447 
448         return result;
449     }
450 
451     /**
452     Allocates `n` bytes of memory aligned at alignment `a`.
453 
454     Params:
455         n = number of bytes to allocate
456         a = alignment for the allocated block
457 
458     Returns:
459         Either a suitable block of `n` bytes aligned at `a`, or `null`.
460     */
461     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
462     {
463         import std.math.traits : isPowerOf2;
464         assert(a.isPowerOf2);
465 
466         const rounded = goodAllocSize(n);
467         if (n == 0 || rounded < n || available < rounded) return null;
468 
469         static if (growDownwards)
470         {
471             auto tmpCurrent = _current - rounded;
472             auto result = tmpCurrent.alignDownTo(a);
473             if (result <= tmpCurrent && result >= _begin)
474             {
475                 _current = result;
476                 return cast(void[]) result[0 .. n];
477             }
478         }
479         else
480         {
481             // Just bump the pointer to the next good allocation
482             auto newCurrent = _current.alignUpTo(a);
483             if (newCurrent < _current || newCurrent > _end)
484                 return null;
485 
486             auto save = _current;
487             _current = newCurrent;
488             auto result = allocate(n);
489             if (result.ptr)
490             {
491                 assert(result.length == n);
492                 return result;
493             }
494             // Failed, rollback
495             _current = save;
496         }
497         return null;
498     }
499 
500     /// Allocates and returns all memory available to this region.
501     void[] allocateAll() pure nothrow @trusted @nogc
502     {
503         static if (growDownwards)
504         {
505             auto result = _begin[0 .. available];
506             _current = _begin;
507         }
508         else
509         {
510             auto result = _current[0 .. available];
511             _current = _end;
512         }
513         return result;
514     }
515 
516     /**
517     Expands an allocated block in place. Expansion will succeed only if the
518     block is the last allocated. Defined only if `growDownwards` is
519     `No.growDownwards`.
520     */
521     static if (growDownwards == No.growDownwards)
522     bool expand(ref void[] b, size_t delta) pure nothrow @safe @nogc
523     {
524         assert(owns(b) == Ternary.yes || b is null);
525         assert((() @trusted => b.ptr + b.length <= _current)() || b is null);
526         if (b is null || delta == 0) return delta == 0;
527         auto newLength = b.length + delta;
528         if ((() @trusted => _current < b.ptr + b.length + alignment)())
529         {
530             immutable currentGoodSize = this.goodAllocSize(b.length);
531             immutable newGoodSize = this.goodAllocSize(newLength);
532             immutable goodDelta = newGoodSize - currentGoodSize;
533             // This was the last allocation! Allocate some more and we're done.
534             if (goodDelta == 0
535                 || (() @trusted => allocate(goodDelta).length == goodDelta)())
536             {
537                 b = (() @trusted => b.ptr[0 .. newLength])();
538                 assert((() @trusted => _current < b.ptr + b.length + alignment)());
539                 return true;
540             }
541         }
542         return false;
543     }
544 
545     /**
546     Deallocates `b`. This works only if `b` was obtained from the last call
547     to `allocate`; otherwise (i.e. if another allocation has occurred since) it
548     does nothing.
549 
550     Params:
551         b = Block previously obtained by a call to `allocate` against this
552         allocator (`null` is allowed).
553     */
554     bool deallocate(void[] b) pure nothrow @nogc
555     {
556         assert(owns(b) == Ternary.yes || b.ptr is null);
557         auto rounded = goodAllocSize(b.length);
558         static if (growDownwards)
559         {
560             if (b.ptr == _current)
561             {
562                 _current += rounded;
563                 return true;
564             }
565         }
566         else
567         {
568             if (b.ptr + rounded == _current)
569             {
570                 assert(b.ptr !is null || _current is null);
571                 _current = b.ptr;
572                 return true;
573             }
574         }
575         return false;
576     }
577 
578     /**
579     Deallocates all memory allocated by this region, which can be subsequently
580     reused for new allocations.
581     */
582     bool deallocateAll() pure nothrow @nogc
583     {
584         static if (growDownwards)
585         {
586             _current = roundedEnd();
587         }
588         else
589         {
590             _current = roundedBegin();
591         }
592         return true;
593     }
594 
595     /**
596     Queries whether `b` has been allocated with this region.
597 
598     Params:
599         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
600         `Ternary.no`).
601 
602     Returns:
603         `Ternary.yes` if `b` has been allocated with this region, `Ternary.no` otherwise.
604     */
605     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
606     {
607         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
608     }
609 
610     /**
611     Returns `Ternary.yes` if no memory has been allocated in this region,
612     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
613     */
614     Ternary empty() const pure nothrow @safe @nogc
615     {
616         static if (growDownwards)
617             return Ternary(_current == roundedEnd());
618         else
619             return Ternary(_current == roundedBegin());
620     }
621 
622     /// Nonstandard property that returns bytes available for allocation.
623     size_t available() const @safe pure nothrow @nogc
624     {
625         static if (growDownwards)
626         {
627             return _current - _begin;
628         }
629         else
630         {
631             return _end - _current;
632         }
633     }
634 }
635 
636 ///
637 @system nothrow @nogc unittest
638 {
639     import std.typecons : Ternary;
640 
641     ubyte[1024] store;
642     auto myRegion = BorrowedRegion!(1)(store[]);
643 
644     assert(myRegion.empty == Ternary.yes);
645     assert(myRegion.available == store.length);
646 
647     void[] b = myRegion.allocate(101);
648 
649     assert(b.length == 101);
650     assert(myRegion.empty == Ternary.no);
651     assert(myRegion.owns(b) == Ternary.yes);
652     assert(myRegion.available == store.length - b.length);
653 
654     void[] b2 = myRegion.allocate(256);
655 
656     // Can only free the most recent allocation
657     assert(myRegion.deallocate(b) == false);
658     assert(myRegion.deallocate(b2) == true);
659 
660     myRegion.deallocateAll();
661 
662     assert(myRegion.empty == Ternary.yes);
663 }
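
@system nothrow @nogc unittest
{
    // Minimal sketch: expansion succeeds only for the most recent allocation
    ubyte[256] store;
    auto r = BorrowedRegion!(1)(store[]);

    void[] a = r.allocate(10);
    void[] b = r.allocate(10);

    assert(!r.expand(a, 1)); // `a` is not the last allocation
    assert(r.expand(b, 6));  // `b` is, so it grows in place
    assert(b.length == 16);
}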
664 
665 @system nothrow @nogc unittest
666 {
667     import std.experimental.allocator.mallocator : AlignedMallocator;
668     import std.typecons : Ternary;
669 
670     ubyte[] buf = cast(ubyte[]) AlignedMallocator.instance.alignedAllocate(64, 64);
671     auto reg = BorrowedRegion!(64, Yes.growDownwards)(buf);
672     assert(reg.alignedAllocate(10, 32).length == 10);
673     assert(!reg.available);
674 }
675 
676 /**
677 
678 `InSituRegion` is a convenient region that carries its storage within itself
679 (in the form of a statically-sized array).
680 
681 The first template argument is the size of the region and the second is the
682 needed alignment. Depending on the alignment requested and platform details,
683 the actual available storage may be smaller than the compile-time parameter. To
684 make sure that at least `n` bytes are available in the region, use
685 $(D InSituRegion!(n + a - 1, a)).
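
For example, here is a sketch of that guarantee for 64-byte alignment:

----
InSituRegion!(4096 + 64 - 1, 64) r;
assert(r.available >= 4096);
----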
686 
687 Given that the most frequent use of `InSituRegion` is as a stack allocator, it
688 allocates starting at the end on systems where the stack grows downwards, so that
689 hot memory is used first.
690 
691 */
692 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
693 {
694     import std.algorithm.comparison : max;
695     import std.conv : to;
696     import std.traits : hasMember;
697     import std.typecons : Ternary;
698     import core.thread.types : isStackGrowingDown;
699 
700     static assert(minAlign.isGoodStaticAlignment);
701     static assert(size >= minAlign);
702 
703     static if (isStackGrowingDown)
704         enum growDownwards = Yes.growDownwards;
705     else
706         enum growDownwards = No.growDownwards;
707 
708     @disable this(this);
709 
710     // state {
711     private BorrowedRegion!(minAlign, growDownwards) _impl;
712     union
713     {
714         private ubyte[size] _store = void;
715         private double _forAlignmentOnly1;
716     }
717     // }
718 
719     /**
720     An alias for `minAlign`, which must be a valid alignment (nonzero power
721     of 2). The start of the region and all allocation requests will be rounded
722     up to a multiple of the alignment.
723 
724     ----
725     InSituRegion!(4096) a1;
726     assert(a1.alignment == platformAlignment);
727     InSituRegion!(4096, 64) a2;
728     assert(a2.alignment == 64);
729     ----
730     */
731     alias alignment = minAlign;
732 
733     private void lazyInit()
734     {
735         assert(!_impl._current);
736         _impl = typeof(_impl)(_store);
737         assert(_impl._current.alignedAt(alignment));
738     }
739 
740     /**
741     Allocates `n` bytes and returns them, or `null` if the region cannot
742     accommodate the request. If $(D n == 0), the function returns an empty
743     (`null`) slice.
744     */
745     void[] allocate(size_t n)
746     {
747         // Fast path
748     entry:
749         auto result = _impl.allocate(n);
750         if (result.length == n) return result;
751         // Slow path
752         if (_impl._current) return null; // no more room
753         lazyInit;
754         assert(_impl._current);
755         goto entry;
756     }
757 
758     /**
759     As above, but the memory allocated is aligned at `a` bytes.
760     */
761     void[] alignedAllocate(size_t n, uint a)
762     {
763         // Fast path
764     entry:
765         auto result = _impl.alignedAllocate(n, a);
766         if (result.length == n) return result;
767         // Slow path
768         if (_impl._current) return null; // no more room
769         lazyInit;
770         assert(_impl._current);
771         goto entry;
772     }
773 
774     /**
775     Deallocates `b`. This works only if `b` was obtained from the last call
776     to `allocate`; otherwise (i.e. if another allocation has occurred since)
777     it does nothing.
780 
781     Params:
782         b = Block previously obtained by a call to `allocate` against this
783         allocator (`null` is allowed).
784     */
785     bool deallocate(void[] b)
786     {
787         if (!_impl._current) return b is null;
788         return _impl.deallocate(b);
789     }
790 
791     /**
792     Returns `Ternary.yes` if `b` is the result of a previous allocation,
793     `Ternary.no` otherwise.
794     */
795     Ternary owns(const void[] b) pure nothrow @safe @nogc
796     {
797         if (!_impl._current) return Ternary.no;
798         return _impl.owns(b);
799     }
800 
801     /**
802     Expands an allocated block in place. Expansion will succeed only if the
803     block is the last allocated.
804     */
805     static if (hasMember!(typeof(_impl), "expand"))
806     bool expand(ref void[] b, size_t delta)
807     {
808         if (!_impl._current) lazyInit;
809         return _impl.expand(b, delta);
810     }
811 
812     /**
813     Deallocates all memory allocated with this allocator.
814     */
815     bool deallocateAll()
816     {
817         // We don't care to lazily init the region
818         return _impl.deallocateAll;
819     }
820 
821     /**
822     Allocates all memory available with this allocator.
823     */
824     void[] allocateAll()
825     {
826         if (!_impl._current) lazyInit;
827         return _impl.allocateAll;
828     }
829 
830     /**
831     Nonstandard function that returns the bytes available for allocation.
832     */
833     size_t available()
834     {
835         if (!_impl._current) lazyInit;
836         return _impl.available;
837     }
838 }
839 
840 ///
841 @system unittest
842 {
843     // 128KB region, aligned at 16 bytes
844     InSituRegion!(128 * 1024, 16) r1;
845     auto a1 = r1.allocate(101);
846     assert(a1.length == 101);
847 
848     // 128KB region, with fallback to the garbage collector.
849     import std.experimental.allocator.building_blocks.fallback_allocator
850         : FallbackAllocator;
851     import std.experimental.allocator.building_blocks.free_list
852         : FreeList;
853     import std.experimental.allocator.building_blocks.bitmapped_block
854         : BitmappedBlock;
855     import std.experimental.allocator.gc_allocator : GCAllocator;
856     FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
857     const a2 = r2.allocate(102);
858     assert(a2.length == 102);
859 
860     // Reap with GC fallback.
861     InSituRegion!(128 * 1024, 8) tmp3;
862     FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
863     r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[]) (tmp3.allocateAll()));
864     const a3 = r3.allocate(103);
865     assert(a3.length == 103);
866 
867     // Reap/GC with a freelist for small objects up to 16 bytes.
868     InSituRegion!(128 * 1024, 64) tmp4;
869     FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
870     r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[]) (tmp4.allocateAll()));
871     const a4 = r4.allocate(104);
872     assert(a4.length == 104);
873 }
874 
875 @system pure nothrow unittest
876 {
877     import std.typecons : Ternary;
878 
879     InSituRegion!(4096, 1) r1;
880     auto a = r1.allocate(2001);
881     assert(a.length == 2001);
882     import std.conv : text;
883     assert(r1.available == 2095, text(r1.available));
884     // Ensure deallocate inherits from parent
885     assert((() nothrow @nogc => r1.deallocate(a))());
886     assert((() nothrow @nogc => r1.deallocateAll())());
887 
888     InSituRegion!(65_536, 1024*4) r2;
889     assert(r2.available <= 65_536);
890     a = r2.allocate(2001);
891     assert(a.length == 2001);
892     const void[] buff = r2.allocate(42);
893     assert((() nothrow @safe @nogc => r2.owns(buff))() == Ternary.yes);
894     assert((() nothrow @nogc => r2.deallocateAll())());
895 }
896 
897 version (CRuntime_Musl)
898 {
899     // sbrk and brk are disabled in Musl:
900     // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
901     // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
902 }
903 version (DragonFlyBSD)
904 {
905     // sbrk is deprecated in favor of mmap (we could implement a mmap + MAP_NORESERVE + PROT_NONE version)
906     // brk has been removed
907     // https://www.dragonflydigest.com/2019/02/22/22586.html
908     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/dc676eaefa61b0f47bbea1c53eab86fd5ccd78c6
909     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/4b5665564ef37dc939a3a9ffbafaab9894c18885
910     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/8618d94a0e2ff8303ad93c123a3fa598c26a116e
911 }
912 else
913 {
914     private extern(C) void* sbrk(long) nothrow @nogc;
915     private extern(C) int brk(shared void*) nothrow @nogc;
916 }
917 
918 /**
919 
920 Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
921 for Posix systems. Because `sbrk` is not thread-safe
922 $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
923 `SbrkRegion` uses a mutex internally. This implies that uncontrolled calls
924 to `brk` and `sbrk` may adversely affect the workings of $(D SbrkRegion).
926 
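A minimal usage sketch (allocations go through the `instance` shared by all
callers):

----
alias alloc = SbrkRegion!(platformAlignment).instance;
auto b = alloc.allocate(4096);
assert(b.length == 4096);
----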
927 */
928 version (CRuntime_Musl) {} else
929 version (DragonFlyBSD) {} else
930 version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
931 {
932     import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
933         pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
934         PTHREAD_MUTEX_INITIALIZER;
936     private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
937     import std.typecons : Ternary;
938 
939     static assert(minAlign.isGoodStaticAlignment);
940     static assert(size_t.sizeof == (void*).sizeof);
941     private shared void* _brkInitial, _brkCurrent;
942 
943     /**
944     Instance shared by all callers.
945     */
946     static shared SbrkRegion instance;
947 
948     /**
949     Standard allocator primitives.
950     */
951     enum uint alignment = minAlign;
952 
953     /**
954     Rounds the given size to a multiple of the `alignment`
955     */
956     size_t goodAllocSize(size_t n) shared const pure nothrow @safe @nogc
957     {
958         return n.roundUpToMultipleOf(alignment);
959     }
960 
961     /// Ditto
962     void[] allocate(size_t bytes) shared @trusted nothrow @nogc
963     {
964         // Take alignment rounding into account
965         const rounded = goodAllocSize(bytes);
966 
967         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
968         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
969             || assert(0);
970         // Assume sbrk returns the old break. Most online documentation confirms
971         // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
972         // which claims the returned value is not portable.
973         auto p = sbrk(rounded);
974         if (p == cast(void*) -1)
975         {
976             return null;
977         }
978         if (!_brkInitial)
979         {
980             _brkInitial = cast(shared) p;
981             assert(cast(size_t) _brkInitial % minAlign == 0,
982                 "Too large alignment chosen for " ~ typeof(this).stringof);
983         }
984         _brkCurrent = cast(shared) (p + rounded);
985         return p[0 .. bytes];
986     }
987 
988     /// Ditto
989     void[] alignedAllocate(size_t bytes, uint a) shared @trusted nothrow @nogc
990     {
991         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
992         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
993             || assert(0);
994         if (!_brkInitial)
995         {
996             // This is one extra call, but it'll happen only once.
997             _brkInitial = cast(shared) sbrk(0);
998             assert(cast(size_t) _brkInitial % minAlign == 0,
999                 "Too large alignment chosen for " ~ typeof(this).stringof);
1000             (_brkInitial != cast(void*) -1) || assert(0);
1001             _brkCurrent = _brkInitial;
1002         }
1003         immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
1004             cast(size_t) _brkCurrent, a) - _brkCurrent;
1005         // Still must make sure the total size is aligned to the allocator's
1006         // alignment.
1007         immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
1008 
1009         auto p = sbrk(rounded);
1010         if (p == cast(void*) -1)
1011         {
1012             return null;
1013         }
1014         _brkCurrent = cast(shared) (p + rounded);
1015         return p[delta .. delta + bytes];
1016     }
1017 
1018     /**
1019 
1020     The `expand` method may only succeed if the argument is the last block
1021     allocated. In that case, `expand` attempts to push the break pointer to
1022     the right.
1023 
1024     */
1025     bool expand(ref void[] b, size_t delta) shared nothrow @trusted @nogc
1026     {
1027         if (b is null || delta == 0) return delta == 0;
1028         assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
1029         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
1030         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
1031             || assert(0);
1032 
1033         // Take alignment rounding into account
1034         const rounded = goodAllocSize(b.length);
1035 
1036         const slack = rounded - b.length;
1037         if (delta <= slack)
1038         {
1039             b = b.ptr[0 .. b.length + delta];
1040             return true;
1041         }
1042 
1043         if (_brkCurrent != b.ptr + rounded) return false;
1044         // Great, can expand the last block
1045         delta -= slack;
1046 
1047         const roundedDelta = goodAllocSize(delta);
1048         auto p = sbrk(roundedDelta);
1049         if (p == cast(void*) -1)
1050         {
1051             return false;
1052         }
1053         _brkCurrent = cast(shared) (p + roundedDelta);
1054         b = b.ptr[0 .. b.length + slack + delta];
1055         return true;
1056     }
1057 
1058     /// Ditto
1059     Ternary owns(const void[] b) shared pure nothrow @trusted @nogc
1060     {
1061         // No need to lock here.
1062         assert(!_brkCurrent || !b || &b[0] + b.length <= _brkCurrent);
1063         return Ternary(_brkInitial && b && (&b[0] >= _brkInitial));
1064     }
1065 
1066     /**
1067 
1068     The `deallocate` method only works (and returns `true`) on systems
1069     that support reducing the break address (i.e. that accept calls to `sbrk`
1070     with negative offsets). OSX does not accept such calls. In addition, the
1071     argument must be the last block allocated.
1072 
1073     */
1074     bool deallocate(void[] b) shared nothrow @nogc
1075     {
1076         // Take alignment rounding into account
1077         const rounded = goodAllocSize(b.length);
1078         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
1079         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
1080             || assert(0);
1081         if (_brkCurrent != b.ptr + rounded) return false;
1082         assert(b.ptr >= _brkInitial);
1083         if (sbrk(-rounded) == cast(void*) -1)
1084             return false;
1085         _brkCurrent = cast(shared) b.ptr;
1086         return true;
1087     }
1088 
1089     /**
1090     The `deallocateAll` method only works (and returns `true`) on systems
1091     that support reducing the break address (i.e. that accept calls to `sbrk`
1092     with negative offsets). OSX does not accept such calls.
1093     */
1094     nothrow @nogc
1095     bool deallocateAll() shared
1096     {
1097         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
1098         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
1099             || assert(0);
1100         return !_brkInitial || brk(_brkInitial) == 0;
1101     }
1102 
1103     /// Standard allocator API.
1104     Ternary empty() shared pure nothrow @safe @nogc
1105     {
1106         // Also works when they're both null.
1107         return Ternary(_brkCurrent == _brkInitial);
1108     }
1109 }
1110 
1111 version (CRuntime_Musl) {} else
1112 version (DragonFlyBSD) {} else
1113 version (Posix) @system nothrow @nogc unittest
1114 {
1115     // Let's test the assumption that sbrk(n) returns the old address
1116     const p1 = sbrk(0);
1117     const p2 = sbrk(4096);
1118     assert(p1 == p2);
1119     const p3 = sbrk(0);
1120     assert(p3 == p2 + 4096);
1121     // Try to reset brk, but don't make a fuss if it doesn't work
1122     sbrk(-4096);
1123 }
1124 
1125 version (CRuntime_Musl) {} else
1126 version (DragonFlyBSD) {} else
1127 version (Posix) @system nothrow @nogc unittest
1128 {
1129     import std.typecons : Ternary;
1130     import std.algorithm.comparison : min;
1131     alias alloc = SbrkRegion!(min(8, platformAlignment)).instance;
1132     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.yes);
1133     auto a = alloc.alignedAllocate(2001, 4096);
1134     assert(a.length == 2001);
1135     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.no);
1136     auto oldBrkCurr = alloc._brkCurrent;
1137     auto b = alloc.allocate(2001);
1138     assert(b.length == 2001);
1139     assert((() nothrow @safe @nogc => alloc.expand(b, 0))());
1140     assert(b.length == 2001);
1141     // Expand with a small size to fit the rounded slack due to alignment
1142     assert((() nothrow @safe @nogc => alloc.expand(b, 1))());
1143     assert(b.length == 2002);
1144     // Exceed the rounded slack due to alignment
1145     assert((() nothrow @safe @nogc => alloc.expand(b, 10))());
1146     assert(b.length == 2012);
1147     assert((() nothrow @safe @nogc => alloc.owns(a))() == Ternary.yes);
1148     assert((() nothrow @safe @nogc => alloc.owns(b))() == Ternary.yes);
1149     // reducing the brk does not work on OSX
1150     version (Darwin) {} else
1151     {
1152         assert((() nothrow @nogc => alloc.deallocate(b))());
1153         // Check that expand and deallocate work well
1154         assert(oldBrkCurr == alloc._brkCurrent);
1155         assert((() nothrow @nogc => alloc.deallocate(a))());
1156         assert((() nothrow @nogc => alloc.deallocateAll())());
1157     }
1158     const void[] c = alloc.allocate(2001);
1159     assert(c.length == 2001);
1160     assert((() nothrow @safe @nogc => alloc.owns(c))() == Ternary.yes);
1161     assert((() nothrow @safe @nogc => alloc.owns(null))() == Ternary.no);
1162 }
1163 
1164 /**
1165 The thread-safe version of the `Region` allocator.
1166 Allocations and deallocations are lock-free, using $(REF cas, core, atomic).
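
A minimal sketch, mirroring the unittests below:

----
import std.experimental.allocator.mallocator : Mallocator;

auto r = SharedRegion!(Mallocator)(1024 * 64);
void[] b = r.allocate(42);
assert(b.length == 42);
----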
1167 */
1168 shared struct SharedRegion(ParentAllocator,
1169     uint minAlign = platformAlignment,
1170     Flag!"growDownwards" growDownwards = No.growDownwards)
1171 {
1172     static assert(minAlign.isGoodStaticAlignment);
1173     static assert(ParentAllocator.alignment >= minAlign);
1174 
1175     import std.traits : hasMember;
1176     import std.typecons : Ternary;
1177 
1178     // state
1179     /**
1180     The _parent allocator. Depending on whether `ParentAllocator` holds state
1181     or not, this is a member variable or an alias for
1182     `ParentAllocator.instance`.
1183     */
1184     static if (stateSize!ParentAllocator)
1185     {
1186         ParentAllocator parent;
1187     }
1188     else
1189     {
1190         alias parent = ParentAllocator.instance;
1191     }
1192     private shared SharedBorrowedRegion!(minAlign, growDownwards) _impl;
1193 
1194     private void* roundedBegin() const pure nothrow @trusted @nogc
1195     {
1196         return _impl.roundedBegin;
1197     }
1198 
1199     private void* roundedEnd() const pure nothrow @trusted @nogc
1200     {
1201         return _impl.roundedEnd;
1202     }
1203 
1204 
1205     /**
1206     Constructs a region backed by a user-provided store.
1207     Assumes the memory was allocated with `ParentAllocator`.
1208 
1209     Params:
1210         store = User-provided store backing up the region. Assumed to have been
1211         allocated with `ParentAllocator`.
1212         n = Bytes to allocate using `ParentAllocator`. If `parent.allocate(n)`
1213         returns `null`, the region will be initialized as empty (correctly
1214         initialized but unable to allocate).
1215     */
1216     this(ubyte[] store) pure nothrow @nogc
1217     {
1218         _impl = store;
1219     }
1220 
1221     /// Ditto
1222     this(size_t n)
1223     {
1224         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
1225     }
1226 
1227     /**
1228     Rounds the given size to a multiple of the `alignment`
1229     */
1230     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
1231     {
1232         return _impl.goodAllocSize(n);
1233     }
1234 
1235     /**
1236     Alignment offered.
1237     */
1238     alias alignment = minAlign;
1239 
1240     /**
1241     Allocates `n` bytes of memory. The allocation is served by atomically incrementing
1242     a pointer which keeps track of the current used space.
1243 
1244     Params:
1245         n = number of bytes to allocate
1246 
1247     Returns:
1248         A properly-aligned buffer of size `n`, or `null` if the request could
1249         not be satisfied.
1250     */
1251     void[] allocate(size_t n) pure nothrow @trusted @nogc
1252     {
1253         return _impl.allocate(n);
1254     }
1255 
1256     /**
1257     Deallocates `b`. This works only if `b` was obtained from the last call
1258     to `allocate`; otherwise (i.e. if another allocation has occurred since) it
1259     does nothing.
1260 
1261     Params:
1262         b = Block previously obtained by a call to `allocate` against this
1263         allocator (`null` is allowed).
1264     */
1265     bool deallocate(void[] b) pure nothrow @nogc
1266     {
1267         return _impl.deallocate(b);
1268     }
1269 
1270     /**
1271     Deallocates all memory allocated by this region, which can be subsequently
1272     reused for new allocations.
1273     */
1274     bool deallocateAll() pure nothrow @nogc
1275     {
1276         return _impl.deallocateAll;
1277     }
1278 
1279     /**
1280     Allocates `n` bytes of memory aligned at alignment `a`.
1281     Params:
1282         n = number of bytes to allocate
1283         a = alignment for the allocated block
1284 
1285     Returns:
1286         Either a suitable block of `n` bytes aligned at `a`, or `null`.
1287     */
1288     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
1289     {
1290         return _impl.alignedAllocate(n, a);
1291     }
1292 
1293     /**
1294     Queries whether `b` has been allocated with this region.
1295 
1296     Params:
1297         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
1298         `Ternary.no`).
1299 
1300     Returns:
1301         `Ternary.yes` if `b` has been allocated with this region, `Ternary.no` otherwise.
1302     */
1303     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
1304     {
1305         return _impl.owns(b);
1306     }
1307 
1308     /**
1309     Returns `Ternary.yes` if no memory has been allocated in this region,
1310     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
1311     */
1312     Ternary empty() const pure nothrow @safe @nogc
1313     {
1314         return _impl.empty;
1315     }
1316 
1317     /**
1318     If `ParentAllocator` defines `deallocate`, the region defines a destructor
1319     that uses `ParentAllocator.deallocate` to free the memory chunk.
1320     */
1321     static if (hasMember!(ParentAllocator, "deallocate"))
1322     ~this()
1323     {
1324         with (_impl) parent.deallocate(cast(void[]) _begin[0 .. _end - _begin]);
1325     }
1326 }
1327 
1328 @system unittest
1329 {
1330     import std.experimental.allocator.mallocator : Mallocator;
1331 
1332     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1333     {
1334         import core.thread : ThreadGroup;
1335         import std.algorithm.sorting : sort;
1336         import core.internal.spinlock : SpinLock;
1337 
1338         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1339         enum numThreads = 100;
1340         void[][numThreads] buf;
1341         size_t count = 0;
1342 
1343         void fun()
1344         {
1345             void[] b = a.allocate(63);
1346             assert(b.length == 63);
1347 
1348             lock.lock();
1349             buf[count] = b;
1350             count++;
1351             lock.unlock();
1352         }
1353 
1354         auto tg = new ThreadGroup;
1355         foreach (i; 0 .. numThreads)
1356         {
1357             tg.create(&fun);
1358         }
1359         tg.joinAll();
1360 
1361         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. numThreads]);
1362         foreach (i; 0 .. numThreads - 1)
1363         {
1364             assert(buf[i].ptr + a.goodAllocSize(buf[i].length) == buf[i + 1].ptr);
1365         }
1366 
1367         assert(!a.deallocate(buf[1]));
1368 
1369         foreach (i; 0 .. numThreads)
1370         {
1371             if (!growDownwards)
1372                 assert(a.deallocate(buf[numThreads - 1 - i]));
1373             else
1374                 assert(a.deallocate(buf[i]));
1375         }
1376 
1377         assert(a.deallocateAll());
1378         void[] b = a.allocate(63);
1379         assert(b.length == 63);
1380         assert(a.deallocate(b));
1381     }
1382 
1383     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1384         Yes.growDownwards)(1024 * 64);
1385 
1386     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1387         No.growDownwards)(1024 * 64);
1388 
1389     testAlloc(a1, true);
1390     testAlloc(a2, false);
1391 }
1392 
1393 @system unittest
1394 {
1395     import std.experimental.allocator.mallocator : Mallocator;
1396 
1397     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1398     {
1399         import core.thread : ThreadGroup;
1400         import std.algorithm.sorting : sort;
1401         import core.internal.spinlock : SpinLock;
1402 
1403         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1404         enum numThreads = 100;
1405         void[][2 * numThreads] buf;
1406         size_t count = 0;
1407 
1408         void fun()
1409         {
1410             void[] b = a.allocate(63);
1411             assert(b.length == 63);
1412 
1413             lock.lock();
1414             buf[count] = b;
1415             count++;
1416             lock.unlock();
1417 
1418             b = a.alignedAllocate(63, 32);
1419             assert(b.length == 63);
1420             assert(cast(size_t) b.ptr % 32 == 0);
1421 
1422             lock.lock();
1423             buf[count] = b;
1424             count++;
1425             lock.unlock();
1426         }
1427 
1428         auto tg = new ThreadGroup;
1429         foreach (i; 0 .. numThreads)
1430         {
1431             tg.create(&fun);
1432         }
1433         tg.joinAll();
1434 
1435         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. 2 * numThreads]);
1436         foreach (i; 0 .. 2 * numThreads - 1)
1437         {
1438             assert(buf[i].ptr + buf[i].length <= buf[i + 1].ptr);
1439         }
1440 
1441         assert(!a.deallocate(buf[1]));
1442         assert(a.deallocateAll());
1443 
1444         void[] b = a.allocate(13);
1445         assert(b.length == 13);
1446         assert(a.deallocate(b));
1447     }
1448 
1449     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1450         Yes.growDownwards)(1024 * 64);
1451 
1452     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1453         No.growDownwards)(1024 * 64);
1454 
1455     testAlloc(a1, true);
1456     testAlloc(a2, false);
1457 }
1458 
1459 /**
1460 A `SharedBorrowedRegion` allocates directly from a user-provided block of memory.
1461 
1462 Unlike a `SharedRegion`, a `SharedBorrowedRegion` does not own the memory it
1463 allocates from and will not deallocate that memory upon destruction. Instead,
1464 it is the user's responsibility to ensure that the memory is properly disposed
1465 of.
1466 
1467 In all other respects, a `SharedBorrowedRegion` behaves exactly like a `SharedRegion`.
1468 */
1469 shared struct SharedBorrowedRegion(uint minAlign = platformAlignment,
1470     Flag!"growDownwards" growDownwards = No.growDownwards)
1471 {
1472     static assert(minAlign.isGoodStaticAlignment);
1473 
1474     import std.typecons : Ternary;
1475 
1476     // state
1477     private void* _current, _begin, _end;
1478 
1479     private void* roundedBegin() shared const pure nothrow @trusted @nogc
1480     {
1481         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
1482     }
1483 
1484     private void* roundedEnd() shared const pure nothrow @trusted @nogc
1485     {
1486         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
1487     }
1488 
1489     /**
1490     Constructs a region backed by a user-provided store.
1491 
1492     Params:
1493         store = User-provided store backing up the region. Must not be aliased.
1494     */
1495     this(ubyte[] store) shared pure nothrow @nogc
1496     {
1497         _begin = cast(typeof(_begin)) store.ptr;
1498         _end = cast(typeof(_end)) (store.ptr + store.length);
1499         static if (growDownwards)
1500             _current = cast(typeof(_current)) roundedEnd();
1501         else
1502             _current = cast(typeof(_current)) roundedBegin();
1503     }
1504 
1505     /*
1506     TODO: The postblit of `SharedBorrowedRegion` should be disabled because
1507     such objects should not be copied around naively.
1508     */
1509 
1510     /**
1511     Rounds the given size to a multiple of the `alignment`
1512     */
1513     size_t goodAllocSize(size_t n) shared const pure nothrow @safe @nogc
1514     {
1515         return n.roundUpToAlignment(alignment);
1516     }
1517 
1518     /**
1519     Alignment offered.
1520     */
1521     alias alignment = minAlign;
1522 
1523     /**
1524     Allocates `n` bytes of memory. The allocation is served by atomically incrementing
1525     a pointer which keeps track of the current used space.
1526 
1527     Params:
1528         n = number of bytes to allocate
1529 
1530     Returns:
1531         A properly-aligned buffer of size `n`, or `null` if the request could
1532         not be satisfied.
1533     */
1534     void[] allocate(size_t n) shared pure nothrow @trusted @nogc
1535     {
1536         import core.atomic : cas, atomicLoad;
1537 
1538         if (n == 0) return null;
1539         const rounded = goodAllocSize(n);
1540 
1541         shared void* localCurrent, localNewCurrent;
1542         static if (growDownwards)
1543         {
1544             do
1545             {
1546                 localCurrent = atomicLoad(_current);
1547                 localNewCurrent = localCurrent - rounded;
1548                 if (localNewCurrent > localCurrent || localNewCurrent < _begin)
1549                     return null;
1550             } while (!cas(&_current, localCurrent, localNewCurrent));
1551 
1552             return cast(void[]) localNewCurrent[0 .. n];
1553         }
1554         else
1555         {
1556             do
1557             {
1558                 localCurrent = atomicLoad(_current);
1559                 localNewCurrent = localCurrent + rounded;
1560                 if (localNewCurrent < localCurrent || localNewCurrent > _end)
1561                     return null;
1562             } while (!cas(&_current, localCurrent, localNewCurrent));
1563 
1564             return cast(void[]) localCurrent[0 .. n];
1565         }
1566 
1567         assert(0, "Unexpected error in SharedBorrowedRegion.allocate");
1568     }
1569 
1570     /**
1571     Allocates `n` bytes of memory aligned at alignment `a`.
1572 
1573     Params:
1574         n = number of bytes to allocate
1575         a = alignment for the allocated block
1576 
1577     Returns:
1578         Either a suitable block of `n` bytes aligned at `a`, or `null`.
1579     */
1580     void[] alignedAllocate(size_t n, uint a) shared pure nothrow @trusted @nogc
1581     {
1582         import core.atomic : cas, atomicLoad;
1583         import std.math.traits : isPowerOf2;
1584 
1585         assert(a.isPowerOf2);
1586         if (n == 0) return null;
1587 
1588         const rounded = goodAllocSize(n);
1589         shared void* localCurrent, localNewCurrent;
1590 
1591         static if (growDownwards)
1592         {
1593             do
1594             {
1595                 localCurrent = atomicLoad(_current);
1596                 auto alignedCurrent = cast(void*)(localCurrent - rounded);
1597                 localNewCurrent = cast(shared(void*)) alignedCurrent.alignDownTo(a);
1598                 if (alignedCurrent > localCurrent || localNewCurrent > alignedCurrent ||
1599                     localNewCurrent < _begin)
1600                     return null;
1601             } while (!cas(&_current, localCurrent, localNewCurrent));
1602 
1603             return cast(void[]) localNewCurrent[0 .. n];
1604         }
1605         else
1606         {
1607             do
1608             {
1609                 localCurrent = atomicLoad(_current);
1610                 auto alignedCurrent = alignUpTo(cast(void*) localCurrent, a);
1611                 localNewCurrent = cast(shared(void*)) (alignedCurrent + rounded);
1612                 if (alignedCurrent < localCurrent || localNewCurrent < alignedCurrent ||
1613                     localNewCurrent > _end)
1614                     return null;
1615             } while (!cas(&_current, localCurrent, localNewCurrent));
1616 
1617             return cast(void[]) (localNewCurrent - rounded)[0 .. n];
1618         }
1619 
1620         assert(0, "Unexpected error in SharedBorrowedRegion.alignedAllocate");
1621     }
1622 
1623     /**
1624     Deallocates `b`. This works only if `b` was obtained from the last call
1625     to `allocate`; otherwise (i.e. if another allocation has occurred since) it
1626     does nothing.
1627 
1628     Params:
1629         b = Block previously obtained by a call to `allocate` against this
1630         allocator (`null` is allowed).
1631     */
1632     bool deallocate(void[] b) shared pure nothrow @nogc
1633     {
1634         import core.atomic : cas, atomicLoad;
1635 
1636         const rounded = goodAllocSize(b.length);
1637         shared void* localCurrent, localNewCurrent;
1638 
1639         // The cas is done only once, because only the last allocation can be reverted
1640         localCurrent = atomicLoad(_current);
1641         static if (growDownwards)
1642         {
1643             localNewCurrent = localCurrent + rounded;
1644             if (b.ptr == localCurrent)
1645                 return cas(&_current, localCurrent, localNewCurrent);
1646         }
1647         else
1648         {
1649             localNewCurrent = localCurrent - rounded;
1650             if (b.ptr == localNewCurrent)
1651                 return cas(&_current, localCurrent, localNewCurrent);
1652         }
1653 
1654         return false;
1655     }
1656 
1657     /**
1658     Deallocates all memory allocated by this region, which can be subsequently
1659     reused for new allocations.
1660     */
1661     bool deallocateAll() shared pure nothrow @nogc
1662     {
1663         import core.atomic : atomicStore;
1664         static if (growDownwards)
1665         {
1666             atomicStore(_current, cast(shared(void*)) roundedEnd());
1667         }
1668         else
1669         {
1670             atomicStore(_current, cast(shared(void*)) roundedBegin());
1671         }
1672         return true;
1673     }
1674 
1675     /**
1676     Queries whether `b` has been allocated with this region.
1677 
1678     Params:
1679         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
1680         `Ternary.no`).
1681 
1682     Returns:
1683         `Ternary.yes` if `b` has been allocated with this region, `Ternary.no` otherwise.
1684     */
1685     Ternary owns(const void[] b) shared const pure nothrow @trusted @nogc
1686     {
1687         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
1688     }
1689 
1690     /**
1691     Returns `Ternary.yes` if no memory has been allocated in this region,
1692     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
1693     */
1694     Ternary empty() shared const pure nothrow @safe @nogc
1695     {
1696         import core.atomic : atomicLoad;
1697 
1698         auto localCurrent = atomicLoad(_current);
1699         static if (growDownwards)
1700             return Ternary(localCurrent == roundedEnd());
1701         else
1702             return Ternary(localCurrent == roundedBegin());
1703     }
1704 }
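
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;

    // Minimal sketch of a SharedBorrowedRegion over user-provided storage
    ubyte[1024] store;
    auto r = SharedBorrowedRegion!(16)(store[]);

    assert(r.empty == Ternary.yes);
    void[] b = r.allocate(100);
    assert(b.length == 100);
    assert(r.owns(b) == Ternary.yes);
    assert(r.deallocate(b));
    assert(r.deallocateAll());
    assert(r.empty == Ternary.yes);
}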