// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/kernighan_ritchie.d)
*/
module std.experimental.allocator.building_blocks.kernighan_ritchie;
import std.experimental.allocator.building_blocks.null_allocator :
    NullAllocator;

//debug = KRRegion;
debug(KRRegion) import std.stdio;

// KRRegion
/**
`KRRegion` draws inspiration from the $(MREF_ALTTEXT region allocation
strategy, std,experimental,allocator,building_blocks,region) and also the
$(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book,
famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7
of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C
Programming Language"), Second Edition, Prentice Hall, 1988.

$(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator)

Initially, `KRRegion` starts in "region" mode: allocations are served from
the memory chunk in a region fashion. Thus, as long as there is enough memory
left, `KRRegion.allocate` has the performance profile of a region allocator.
Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks in an
unstructured freelist, which is not read in region mode.

Once the region cannot serve an `allocate` request, `KRRegion` switches
to "free list" mode. It sorts the list of previously deallocated blocks by
address and serves allocation requests off that free list. The allocation and
deallocation follow the pattern described by Kernighan and Ritchie.

The recommended use of `KRRegion` is as a $(I region with deallocation). If the
`KRRegion` is dimensioned appropriately, it may never enter free list mode
during its lifetime. Thus it is as fast as a simple region, while offering
deallocation at a small cost. When the region memory is exhausted, the
previously deallocated memory is still usable, at a performance cost. If the
region is not excessively large and fragmented, the linear allocation and
deallocation cost may still be compensated for by the good locality
characteristics.

If the chunk of memory managed is large, it may be desirable to switch
management to free list mode from the beginning. That way, memory may be used
more compactly than in region mode. To force free list mode, call $(D
switchToFreeList) shortly after construction or when deemed appropriate.
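
A minimal sketch of forcing free list mode (assuming a caller-provided,
word-aligned buffer):
-------
align(size_t.sizeof) ubyte[4096] storage;
auto alloc = KRRegion!()(storage);
alloc.switchToFreeList; // serve all allocations KR-style from the start
-------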

The smallest size that can be allocated is two words (16 bytes on 64-bit
systems, 8 bytes on 32-bit systems). This is because the free list management
needs two words (one for the length, the other for the next pointer in the
singly-linked list).
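
To illustrate the rounding (the exact minimum follows the platform word size):
-------
// Sub-minimum requests round up to two words:
assert(KRRegion!().goodAllocSize(1) == 2 * size_t.sizeof);
-------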

The `ParentAllocator` type parameter is the type of the allocator used to
allocate the memory chunk underlying the `KRRegion` object. Choosing the
default (`NullAllocator`) means the user is responsible for passing a buffer
at construction (and for deallocating it if necessary). Otherwise, `KRRegion`
automatically deallocates the buffer during destruction. For that reason, if
`ParentAllocator` is not `NullAllocator`, then `KRRegion` is not
copyable.
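
For example (a sketch; `Mallocator` stands in for any sized parent allocator):
-------
import std.experimental.allocator.mallocator : Mallocator;
auto a = KRRegion!Mallocator(64 * 1024); // chunk freed on destruction
align(size_t.sizeof) ubyte[4096] buf;
auto b = KRRegion!()(buf); // caller owns and manages buf
-------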

$(H4 Implementation Details)

In free list mode, `KRRegion` embeds a free blocks list onto the chunk of
memory. The free list is circular, coalesced, and sorted by address at all
times. Allocations and deallocations take time proportional to the number of
previously deallocated blocks. (In practice the cost may be lower, e.g. if
memory is deallocated in reverse order of allocation, all operations take
constant time.) Memory utilization is good (small control structure and no
per-allocation overhead). The disadvantages of free list mode include proneness
to fragmentation, a minimum allocation size of two words, and linear worst-case
allocation and deallocation times.

Similarities of `KRRegion` (in free list mode) with the
Kernighan-Ritchie allocator:

$(UL
$(LI Free blocks have variable size and are linked in a singly-linked list.)
$(LI The freelist is maintained in increasing address order, which makes
coalescing easy.)
$(LI The strategy for finding the next available block is first fit.)
$(LI The free list is circular, with the last node pointing back to the first.)
$(LI Coalescing is carried out during deallocation.)
)

Differences from the Kernighan-Ritchie allocator:

$(UL
$(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates
another chunk using operating system primitives. For better composability, $(D
KRRegion) just gets full (returns `null` on new allocation requests). The
decision to allocate more blocks is deferred to a higher-level entity. For an
example, see the example below using `AllocatorList` in conjunction with $(D
KRRegion).)
$(LI Allocated blocks do not hold a size prefix. This is because in D the size
information is available in client code at deallocation time.)
)

*/
struct KRRegion(ParentAllocator = NullAllocator)
{
    import std.experimental.allocator.common : stateSize, alignedAt;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    private static struct Node
    {
        import std.typecons : tuple, Tuple;

        Node* next;
        size_t size;

        this(this) @disable;

        void[] payload() inout
        {
            return (cast(ubyte*) &this)[0 .. size];
        }

        bool adjacent(in Node* right) const
        {
            assert(right);
            auto p = payload;
            return p.ptr < right && right < p.ptr + p.length + Node.sizeof;
        }

        bool coalesce(void* memoryEnd = null)
        {
            // Coalesce the last node before the memory end with any possible gap
            if (memoryEnd
                && memoryEnd < payload.ptr + payload.length + Node.sizeof)
            {
                size += memoryEnd - (payload.ptr + payload.length);
                return true;
            }

            if (!adjacent(next)) return false;
            size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this;
            next = next.next;
            return true;
        }

        Tuple!(void[], Node*) allocateHere(size_t bytes)
        {
            assert(bytes >= Node.sizeof);
            assert(bytes % Node.alignof == 0);
            assert(next);
            assert(!adjacent(next));
            if (size < bytes) return typeof(return)();
            assert(size >= bytes);
            immutable leftover = size - bytes;

            if (leftover >= Node.sizeof)
            {
                // There's room for another node
                auto newNode = cast(Node*) ((cast(ubyte*) &this) + bytes);
                newNode.size = leftover;
                newNode.next = next == &this ? newNode : next;
                assert(next);
                return tuple(payload, newNode);
            }

            // No slack space, just return next node
            return tuple(payload, next == &this ? null : next);
        }
    }

    // state
    /**
    If `ParentAllocator` holds state, `parent` is a public member of type
    `ParentAllocator`. Otherwise, `parent` is an `alias` for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator) ParentAllocator parent;
    else alias parent = ParentAllocator.instance;
    private void[] payload;
    private Node* root;
    private bool regionMode() const { return bytesUsedRegionMode != size_t.max; }
    private void cancelRegionMode() { bytesUsedRegionMode = size_t.max; }
    private size_t bytesUsedRegionMode = 0;

    auto byNodePtr()
    {
        static struct Range
        {
            Node* start, current;
            @property bool empty() { return !current; }
            @property Node* front() { return current; }
            void popFront()
            {
                assert(current && current.next);
                current = current.next;
                if (current == start) current = null;
            }
            @property Range save() { return this; }
        }
        import std.range : isForwardRange;
        static assert(isForwardRange!Range);
        return Range(root, root);
    }

    string toString()
    {
        import std.format : format;
        string s = "KRRegion@";
        s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1,
            payload.ptr, payload.length,
            regionMode ? "(region)" : "(freelist)");

        Node* lastNode = null;
        if (!regionMode)
        {
            foreach (node; byNodePtr)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        else
        {
            for (auto node = root; node; node = node.next)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }

        s ~= ')';
        return s;
    }

    private void assertValid(string s)
    {
        assert(!regionMode);
        if (!payload.ptr)
        {
            assert(!root, s);
            return;
        }
        if (!root)
        {
            return;
        }
        assert(root >= payload.ptr, s);
        assert(root < payload.ptr + payload.length, s);

        // Check that the list terminates
        size_t n;
        foreach (node; byNodePtr)
        {
            assert(node.next);
            assert(!node.adjacent(node.next));
            assert(n++ < payload.length / Node.sizeof, s);
        }
    }

    private Node* sortFreelist(Node* root)
    {
        // Find a monotonic run
        auto last = root;
        for (;;)
        {
            if (!last.next) return root;
            if (last > last.next) break;
            assert(last < last.next);
            last = last.next;
        }
        auto tail = last.next;
        last.next = null;
        tail = sortFreelist(tail);
        return merge(root, tail);
    }

    private Node* merge(Node* left, Node* right)
    {
        assert(left != right);
        if (!left) return right;
        if (!right) return left;
        if (left < right)
        {
            auto result = left;
            result.next = merge(left.next, right);
            return result;
        }
        auto result = right;
        result.next = merge(left, right.next);
        return result;
    }

    private void coalesceAndMakeCircular()
    {
        for (auto n = root;;)
        {
            assert(!n.next || n < n.next);
            if (!n.next)
            {
                // Convert to circular
                n.next = root;
                break;
            }
            if (n.coalesce) continue; // possibly another coalesce
            n = n.next;
        }
    }

    /**
    Create a `KRRegion`. If `ParentAllocator` is not `NullAllocator`,
    `KRRegion`'s destructor will call `parent.deallocate`.

    Params:
    b = Block of memory to serve as support for the allocator. Memory must be
    larger than two words and word-aligned.
    n = Capacity desired. This constructor is defined only if $(D
    ParentAllocator) is not `NullAllocator`.
    */
    this(ubyte[] b)
    {
        if (b.length < Node.sizeof)
        {
            // Init as empty
            assert(root is null);
            assert(payload is null);
            return;
        }
        assert(b.length >= Node.sizeof);
        assert(b.ptr.alignedAt(Node.alignof));
        assert(b.length >= 2 * Node.sizeof);
        payload = b;
        root = cast(Node*) b.ptr;
        // Initialize the free list with the whole chunk
        assert(regionMode);
        root.next = null;
        root.size = b.length;
        debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this,
            b.ptr, b.length);
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && !stateSize!ParentAllocator)
    this(size_t n)
    {
        assert(n > Node.sizeof);
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && stateSize!ParentAllocator)
    this(ParentAllocator parent, size_t n)
    {
        assert(n > Node.sizeof);
        this.parent = parent;
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(payload);
    }

    /**
    Forces free list mode. If already in free list mode, does nothing.
    Otherwise, sorts the free list accumulated so far and switches strategy for
    future allocations to KR style.
    */
    void switchToFreeList()
    {
        if (!regionMode) return;
        cancelRegionMode;
        if (!root) return;
        root = sortFreelist(root);
        coalesceAndMakeCircular;
    }

    /*
    Noncopyable
    */
    @disable this(this);

    /**
    Word-level alignment.
    */
    enum alignment = Node.alignof;

    /**
    Allocates `n` bytes. Allocation searches the list of available blocks
    until a free block with `n` or more bytes is found (first fit strategy).
    The block is split (if larger) and returned.

    Params: n = number of bytes to _allocate

    Returns: A word-aligned buffer of `n` bytes, or `null`.
    */
    void[] allocate(size_t n)
    {
        if (!n || !root) return null;
        const actualBytes = goodAllocSize(n);

        // Try the region first
        if (regionMode)
        {
            // Only look at the head of the freelist
            if (root.size >= actualBytes)
            {
                // Enough room for allocation
                bytesUsedRegionMode += actualBytes;
                void* result = root;
                immutable balance = root.size - actualBytes;
                if (balance >= Node.sizeof)
                {
                    auto newRoot = cast(Node*) (result + actualBytes);
                    newRoot.next = root.next;
                    newRoot.size = balance;
                    root = newRoot;
                }
                else
                {
                    root = null;
                    switchToFreeList;
                }
                return result[0 .. n];
            }

            // Not enough memory, switch to freelist mode and fall through
            switchToFreeList;
        }

        // Try to allocate from next after the iterating node
        for (auto pnode = root;;)
        {
            assert(!pnode.adjacent(pnode.next));
            auto k = pnode.next.allocateHere(actualBytes);
            if (k[0] !is null)
            {
                // Found a block large enough to satisfy the request
                assert(k[0].length >= n);
                if (root == pnode.next) root = k[1];
                pnode.next = k[1];
                return k[0][0 .. n];
            }

            pnode = pnode.next;
            if (pnode == root) break;
        }
        return null;
    }
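
    /// Example: a minimal sketch; the buffer and sizes are illustrative.
    @system unittest
    {
        align(size_t.sizeof) ubyte[1024] storage;
        auto alloc = KRRegion!()(storage);
        auto b = alloc.allocate(100);
        assert(b.length == 100);
        // A request exceeding the remaining capacity yields null
        assert(alloc.allocate(2048) is null);
    }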

    /**
    Deallocates `b`, which is assumed to have been previously allocated with
    this allocator. Deallocation performs a linear search in the free list to
    preserve its sorting order. It follows that, in allocators with many free
    blocks, blocks at higher addresses are slower to deallocate.

    Params: b = block to be deallocated
    */
    nothrow @nogc
    bool deallocate(void[] b)
    {
        debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this,
            b.ptr, b.length);
        if (!b.ptr) return true;
        assert(owns(b) == Ternary.yes);
        assert(b.ptr.alignedAt(Node.alignof));

        // Insert back in the freelist, keeping it sorted by address. Do not
        // coalesce at this time. Instead, do it lazily during allocation.
        auto n = cast(Node*) b.ptr;
        n.size = goodAllocSize(b.length);
        auto memoryEnd = payload.ptr + payload.length;

        if (regionMode)
        {
            assert(root);
            // Insert right after root
            bytesUsedRegionMode -= n.size;
            n.next = root.next;
            root.next = n;
            return true;
        }

        if (!root)
        {
            // What a sight for sore eyes
            root = n;
            root.next = root;

            // If the first block freed is the last one allocated,
            // maybe there's a gap after it.
            root.coalesce(memoryEnd);
            return true;
        }

        version (assert) foreach (test; byNodePtr)
        {
            assert(test != n);
        }
        // Linear search
        auto pnode = root;
        do
        {
            assert(pnode && pnode.next);
            assert(pnode != n);
            assert(pnode.next != n);

            if (pnode < pnode.next)
            {
                if (pnode > n || n > pnode.next) continue;
                // Insert in between pnode and pnode.next
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (pnode < n)
            {
                // Insert at the end of the list
                // Add any possible gap at the end of n to the length of n
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce(memoryEnd);
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (n < pnode.next)
            {
                // Insert at the front of the list
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                root = n;
                return true;
            }
        }
        while ((pnode = pnode.next) != root);
        assert(0, "Wrong parameter passed to deallocate");
    }
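
    /// Example: a minimal sketch; a freed block becomes available again.
    @system unittest
    {
        align(size_t.sizeof) ubyte[1024] storage;
        auto alloc = KRRegion!()(storage);
        auto b = alloc.allocate(100);
        assert(alloc.deallocate(b));
        b = alloc.allocate(100);
        assert(b.length == 100);
    }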

    /**
    Allocates all memory available to this allocator. If the allocator is empty,
    returns the entire available block of memory. Otherwise, it still performs
    a best-effort allocation: if there is no fragmentation (e.g. `allocate`
    has been used but not `deallocate`), allocates and returns the only
    available block of memory.

    The operation takes time proportional to the number of adjacent free blocks
    at the front of the free list. These blocks get coalesced, whether
    `allocateAll` succeeds or fails due to fragmentation.
    */
    void[] allocateAll()
    {
        if (regionMode) switchToFreeList;
        if (root && root.next == root)
            return allocate(root.size);
        return null;
    }

    ///
    @system unittest
    {
        import std.experimental.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        const b1 = alloc.allocate(2048);
        assert(b1.length == 2048);
        const b2 = alloc.allocateAll;
        assert(b2.length == 1024 * 62);
    }

    /**
    Deallocates all memory currently allocated, making the allocator ready for
    other allocations. This is a $(BIGOH 1) operation.
    */
    pure nothrow @nogc
    bool deallocateAll()
    {
        debug(KRRegion) assertValid("deallocateAll");
        debug(KRRegion) scope(exit) assertValid("deallocateAll");
        root = cast(Node*) payload.ptr;

        // Reset to regionMode
        bytesUsedRegionMode = 0;
        if (root)
        {
            root.next = null;
            root.size = payload.length;
        }
        return true;
    }
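
    /// Example: a sketch; after the call, all memory is available again.
    @system unittest
    {
        import std.typecons : Ternary;
        align(size_t.sizeof) ubyte[1024] storage;
        auto alloc = KRRegion!()(storage);
        assert(alloc.allocate(512).length == 512);
        assert(alloc.deallocateAll());
        assert(alloc.empty == Ternary.yes);
        assert(alloc.allocate(1024).length == 1024);
    }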

    /**
    Checks whether the allocator is responsible for the allocation of `b`.
    It does a simple $(BIGOH 1) range check. `b` should be a buffer either
    allocated with `this` or obtained through other means.
    */
    pure nothrow @trusted @nogc
    Ternary owns(void[] b)
    {
        debug(KRRegion) assertValid("owns");
        debug(KRRegion) scope(exit) assertValid("owns");
        return Ternary(b && payload && (&b[0] >= &payload[0])
                       && (&b[0] < &payload[0] + payload.length));
    }
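
    /// Example: a sketch distinguishing owned from foreign memory.
    @system unittest
    {
        import std.typecons : Ternary;
        align(size_t.sizeof) ubyte[1024] storage;
        auto alloc = KRRegion!()(storage);
        auto b = alloc.allocate(42);
        assert(alloc.owns(b) == Ternary.yes);
        ubyte[8] other;
        assert(alloc.owns(other[]) == Ternary.no);
    }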

    /**
    Adjusts `n` to a size suitable for allocation (two words or larger,
    word-aligned).
    */
    pure nothrow @safe @nogc
    static size_t goodAllocSize(size_t n)
    {
        import std.experimental.allocator.common : roundUpToMultipleOf;
        return n <= Node.sizeof
            ? Node.sizeof : n.roundUpToMultipleOf(alignment);
    }
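
    /// Example: a sketch of the rounding, stated in terms of the word size.
    @safe unittest
    {
        // One byte over the two-word minimum rounds up by a full word
        assert(KRRegion!().goodAllocSize(2 * size_t.sizeof + 1) == 3 * size_t.sizeof);
    }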

    /**
    Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise.
    Never returns `Ternary.unknown`.
    */
    pure nothrow @safe @nogc
    Ternary empty()
    {
        if (regionMode)
            return Ternary(bytesUsedRegionMode == 0);

        return Ternary(root && root.size == payload.length);
    }
}

/**
`KRRegion` is preferable to `Region` as a front for a general-purpose
allocator if `deallocate` is needed, yet the actual deallocation traffic is
relatively low. The example below shows a `KRRegion` using stack storage
fronting the GC allocator.
*/
@system unittest
{
    import std.experimental.allocator.building_blocks.fallback_allocator
        : fallbackAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    // KRRegion fronting a general-purpose allocator
    align(size_t.sizeof) ubyte[1024 * 128] buf;
    auto alloc = fallbackAllocator(KRRegion!()(buf), GCAllocator.instance);
    auto b = alloc.allocate(100);
    assert(b.length == 100);
    assert((() pure nothrow @safe @nogc => alloc.primary.owns(b))() == Ternary.yes);
}

/**
The code below defines a scalable allocator consisting of 1 MB (or larger)
blocks fetched from the operating system via `MmapAllocator`. Each block is
organized as a KR-style heap. More blocks are allocated and freed on an
as-needed basis.

This is the closest example to the allocator introduced in the K$(AMP)R book.
It should perform slightly better because instead of searching through one
large free list, it searches through several shorter lists in LRU order. Also,
it actually returns memory to the operating system when possible.
*/
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    AllocatorList!(n => KRRegion!MmapAllocator(max(n * 16, 1024 * 1024))) alloc;
}

@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks fetched
    from the C heap via `Mallocator`. Each block is organized as a KR-style
    heap. More blocks are allocated and freed on an as-needed basis.
    */
    AllocatorList!(n => KRRegion!Mallocator(max(n * 16, 1024 * 1024)),
        NullAllocator) alloc;
    void[][50] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(array[i].ptr);
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}

@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks of
    memory-mapped pages obtained through `MmapAllocator`. Each block is
    organized as a KR-style heap. More blocks are allocated and freed on an
    as-needed basis.
    */
    AllocatorList!((n) {
        auto result = KRRegion!MmapAllocator(max(n * 2, 1024 * 1024));
        return result;
    }) alloc;
    void[][99] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        foreach (j; 0 .. i)
        {
            assert(array[i].ptr != array[j].ptr);
        }
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}

version (StdUnittest)
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.common : testAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    testAllocator!(() => AllocatorList!(
        n => KRRegion!GCAllocator(max(n * 16, 1024 * 1024)))());
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto alloc = KRRegion!GCAllocator(1024 * 1024);

    void[][] array;
    foreach (i; 1 .. 4)
    {
        array ~= alloc.allocate(i);
        assert(array[$ - 1].length == i);
    }
    () nothrow @nogc { alloc.deallocate(array[1]); }();
    () nothrow @nogc { alloc.deallocate(array[0]); }();
    () nothrow @nogc { alloc.deallocate(array[2]); }();
    assert(alloc.allocateAll().length == 1024 * 1024);
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    const store = alloc.allocate(KRRegion!().sizeof);
    auto p = cast(KRRegion!()*) store.ptr;
    import core.lifetime : emplace;
    import core.stdc.string : memcpy;
    import std.conv : text;

    memcpy(p, &alloc, alloc.sizeof);
    emplace(&alloc);

    void[][100] array;
    foreach (i; 0 .. array.length)
    {
        auto length = 100 * i + 1;
        array[i] = p.allocate(length);
        assert(array[i].length == length, text(array[i].length));
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { p.deallocate(array[i]); }();
    }
    auto b = p.allocateAll();
    assert(b.length == 1024 * 1024 - KRRegion!().sizeof, text(b.length));
}

@system unittest
{
    import std.typecons : Ternary;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    auto p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
    assert((() nothrow @nogc => alloc.deallocateAll())());
    assert(alloc.empty() == Ternary.yes);
    p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
}

@system unittest
{
    import std.random : randomCover;
    import std.typecons : Ternary;

    // Both sequences must work on either system

    // A sequence of allocs which generates the error described in https://issues.dlang.org/show_bug.cgi?id=16564
    // that is a gap at the end of buf from the perspective of the allocator

    // for 64 bit systems (leftover balance = 8 bytes < 16)
    int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8];

    // for 32 bit systems (leftover balance < 8)
    int[] sizes32 = [81412, 107068, 49892, 23768];


    void test(int[] sizes)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        foreach (b; bufs.randomCover)
        {
            () nothrow @nogc { a.deallocate(b); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64);
    test(sizes32);
}

@system unittest
{
    import std.typecons : Ternary;

    // For 64 bits, we allocate in multiples of 8, but the minimum alloc size is 16.
    // This can create gaps.
    // This test is an example of such a case. The gap is formed between the block
    // allocated for the second value in sizes and the third. There is also a gap
    // at the very end. (total lost 2 * word)

    int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8];
    int[] sizes32 = [81412, 107068, 49892, 23768];

    int word64 = 8;
    int word32 = 4;

    void test(int[] sizes, int word)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        () nothrow @nogc { a.deallocate(bufs[1]); }();
        bufs ~= a.allocate(sizes[1] - word);

        () nothrow @nogc { a.deallocate(bufs[0]); }();
        foreach (i; 2 .. bufs.length)
        {
            () nothrow @nogc { a.deallocate(bufs[i]); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64, word64);
    test(sizes32, word32);
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto a = KRRegion!GCAllocator(1024 * 1024);
    assert((() pure nothrow @safe @nogc => a.goodAllocSize(1))() == typeof(*a.root).sizeof);
}
{
    import std.typecons : Ternary;

    align(size_t.sizeof) ubyte[1024] b;
    auto alloc = KRRegion!()(b);

    auto k = alloc.allocate(128);
    assert(k.length == 128);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(512);
    assert(k.length == 512);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(1024);
    assert(k.length == 1024);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);
}