///
module stdx.allocator.building_blocks.kernighan_ritchie;
import stdx.allocator.building_blocks.null_allocator;

//debug = KRRegion;
debug(KRRegion) import std.stdio;

// KRRegion
/**
$(D KRRegion) draws inspiration from the $(MREF_ALTTEXT region allocation
strategy, std,experimental,allocator,building_blocks,region) and also the
$(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book,
famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7
of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C
Programming Language"), Second Edition, Prentice Hall, 1988.

$(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator)

Initially, `KRRegion` starts in "region" mode: allocations are served from
the memory chunk in a region fashion. Thus, as long as there is enough memory
left, $(D KRRegion.allocate) has the performance profile of a region allocator.
Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks in an
unstructured freelist, which is not read in region mode.

Once the region cannot serve an $(D allocate) request, $(D KRRegion) switches
to "free list" mode. It sorts the list of previously deallocated blocks by
address and serves allocation requests off that free list. The allocation and
deallocation follow the pattern described by Kernighan and Ritchie.
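
For illustration, a minimal sketch of this lifecycle (the buffer and sizes are
arbitrary; the explicit `align` mirrors the word-alignment requirement on the
buffer):

---
align(size_t.sizeof) ubyte[4096] buf;
auto alloc = KRRegion!()(buf);
auto a = alloc.allocate(512);  // served in region fashion
alloc.deallocate(a);           // O(1) insertion into the freelist
auto b = alloc.allocate(4096); // region exhausted: switches to free list mode
assert(b.length == 4096);
---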

The recommended use of `KRRegion` is as a $(I region with deallocation). If the
`KRRegion` is dimensioned appropriately, it may never need to enter free list
mode during its lifetime. Thus it is as fast as a simple region, whilst
offering deallocation at a small cost. When the region memory is exhausted,
the previously deallocated memory is still usable, at a performance cost. If
the region is not excessively large and fragmented, the linear allocation and
deallocation cost may still be compensated for by the good locality
characteristics.

If the chunk of memory managed is large, it may be desirable to switch
management to free list mode from the beginning. That way, memory may be used
more compactly than in region mode. To force free list mode, call $(D
switchToFreeList) shortly after construction or when deemed appropriate.

The smallest size that can be allocated is two words (16 bytes on 64-bit
systems, 8 bytes on 32-bit systems). This is because the free list management
needs two words (one for the length, the other for the next pointer in the
singly-linked list).

The $(D ParentAllocator) type parameter is the type of the allocator used to
allocate the memory chunk underlying the $(D KRRegion) object. Choosing the
default ($(D NullAllocator)) means the user is responsible for passing a buffer
at construction (and for deallocating it if necessary). Otherwise, $(D KRRegion)
automatically deallocates the buffer during destruction. For that reason, if
$(D ParentAllocator) is not $(D NullAllocator), then $(D KRRegion) is not
copyable.
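
For illustration, a sketch of the two configurations (sizes are arbitrary, and
`GCAllocator` stands in for any parent allocator):

---
// Default: the user supplies and owns the buffer
auto a = KRRegion!()(new ubyte[1024 * 1024]);
// With a parent allocator: the buffer is allocated in the constructor and
// deallocated in the destructor; the object is not copyable
auto b = KRRegion!GCAllocator(1024 * 1024);
---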

$(H4 Implementation Details)

In free list mode, $(D KRRegion) embeds a free blocks list onto the chunk of
memory. The free list is circular, coalesced, and sorted by address at all
times. Allocations and deallocations take time proportional to the number of
previously deallocated blocks. (In practice the cost may be lower, e.g. if
memory is deallocated in reverse order of allocation, all operations take
constant time.) Memory utilization is good (small control structure and no
per-allocation overhead). The disadvantages of freelist mode include proneness
to fragmentation, a minimum allocation size of two words, and linear worst-case
allocation and deallocation times.

Similarities of `KRRegion` (in free list mode) with the
Kernighan-Ritchie allocator:

$(UL
$(LI Free blocks have variable size and are linked in a singly-linked list.)
$(LI The freelist is maintained in increasing address order, which makes
coalescing easy.)
$(LI The strategy for finding the next available block is first fit.)
$(LI The free list is circular, with the last node pointing back to the first.)
$(LI Coalescing is carried out during deallocation.)
)

Differences from the Kernighan-Ritchie allocator:

$(UL
$(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates
another chunk using operating system primitives. For better composability, $(D
KRRegion) just gets full (returns $(D null) on new allocation requests). The
decision to allocate more blocks is deferred to a higher-level entity. For an
example, see the example below using $(D AllocatorList) in conjunction with $(D
KRRegion).)
$(LI Allocated blocks do not hold a size prefix. This is because in D the size
information is available in client code at deallocation time.)
)

*/
struct KRRegion(ParentAllocator = NullAllocator)
{
    import stdx.allocator.common : stateSize, alignedAt;
    import stdx.allocator.internal : Ternary;

    private static struct Node
    {
        import mir.functional : RefTuple;

        alias Tuple = RefTuple!(void[], Node*);

        Node* next;
        size_t size;

        this(this) @disable;

        void[] payload() inout
        {
            return (cast(ubyte*) &this)[0 .. size];
        }

        // Blocks count as adjacent even when separated by a gap smaller than
        // Node.sizeof: such slivers arise from size rounding and cannot hold
        // a freelist node, so they are absorbed during coalescing.
        bool adjacent(in Node* right) const
        {
            assert(right);
            auto p = payload;
            return p.ptr < right && right < p.ptr + p.length + Node.sizeof;
        }

        bool coalesce(void* memoryEnd = null)
        {
            // Coalesce the last node before the memory end with any possible gap
            if (memoryEnd
                && memoryEnd < payload.ptr + payload.length + Node.sizeof)
            {
                size += memoryEnd - (payload.ptr + payload.length);
                return true;
            }

            if (!adjacent(next)) return false;
            size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this;
            next = next.next;
            return true;
        }

        Tuple allocateHere(size_t bytes)
        {
            assert(bytes >= Node.sizeof);
            assert(bytes % Node.alignof == 0);
            assert(next);
            assert(!adjacent(next));
            if (size < bytes) return typeof(return)();
            assert(size >= bytes);
            immutable leftover = size - bytes;

            if (leftover >= Node.sizeof)
            {
                // There's room for another node
                auto newNode = cast(Node*) ((cast(ubyte*) &this) + bytes);
                newNode.size = leftover;
                newNode.next = next == &this ? newNode : next;
                assert(next);
                return Tuple(payload, newNode);
            }

            // No slack space, just return next node
            return Tuple(payload, next == &this ? null : next);
        }
    }

    // state
    /**
    If $(D ParentAllocator) holds state, $(D parent) is a public member of type
    $(D ParentAllocator). Otherwise, $(D parent) is an $(D alias) for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator) ParentAllocator parent;
    else alias parent = ParentAllocator.instance;
    private void[] payload;
    private Node* root;
    private bool regionMode = true;

    // Ranges over the circular freelist, yielding each node exactly once
    // (meaningful in free list mode only).
    auto byNodePtr()
    {
        static struct Range
        {
            Node* start, current;
            @property bool empty() { return !current; }
            @property Node* front() { return current; }
            void popFront()
            {
                assert(current && current.next);
                current = current.next;
                if (current == start) current = null;
            }
            @property Range save() { return this; }
        }
        import std.range : isForwardRange;
        static assert(isForwardRange!Range);
        return Range(root, root);
    }

    string toString()()
    {
        import std.format : format;
        string s = "KRRegion@";
        s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1,
            payload.ptr, payload.length,
            regionMode ? "(region)" : "(freelist)");

        Node* lastNode = null;
        if (!regionMode)
        {
            foreach (node; byNodePtr)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        else
        {
            for (auto node = root; node; node = node.next)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }

        s ~= ')';
        return s;
    }

    private void assertValid(string s)
    {
        assert(!regionMode);
        if (!payload.ptr)
        {
            assert(!root, s);
            return;
        }
        if (!root)
        {
            return;
        }
        assert(root >= payload.ptr, s);
        assert(root < payload.ptr + payload.length, s);

        // Check that the list terminates
        size_t n;
        foreach (node; byNodePtr)
        {
            assert(node.next);
            assert(!node.adjacent(node.next));
            assert(n++ < payload.length / Node.sizeof, s);
        }
    }

    // Sorts the freelist in increasing address order with a natural merge
    // sort; called once when switching to free list mode.
    private Node* sortFreelist(Node* root)
    {
        // Find a monotonic run
        auto last = root;
        for (;;)
        {
            if (!last.next) return root;
            if (last > last.next) break;
            assert(last < last.next);
            last = last.next;
        }
        auto tail = last.next;
        last.next = null;
        tail = sortFreelist(tail);
        return merge(root, tail);
    }

    private Node* merge(Node* left, Node* right)
    {
        assert(left != right);
        if (!left) return right;
        if (!right) return left;
        if (left < right)
        {
            auto result = left;
            result.next = merge(left.next, right);
            return result;
        }
        auto result = right;
        result.next = merge(left, right.next);
        return result;
    }

    private void coalesceAndMakeCircular()
    {
        for (auto n = root;;)
        {
            assert(!n.next || n < n.next);
            if (!n.next)
            {
                // Convert to circular
                n.next = root;
                break;
            }
            if (n.coalesce) continue; // possibly another coalesce
            n = n.next;
        }
    }

    /**
    Create a $(D KRRegion). If $(D ParentAllocator) is not $(D NullAllocator),
    $(D KRRegion)'s destructor will call $(D parent.deallocate).

    Params:
    b = Block of memory to serve as support for the allocator. Memory must be
    larger than two words and word-aligned.
    n = Capacity desired. This constructor is defined only if $(D
    ParentAllocator) is not $(D NullAllocator).
    */
    this(ubyte[] b)
    {
        if (b.length < Node.sizeof)
        {
            // Init as empty
            assert(root is null);
            assert(payload is null);
            return;
        }
        assert(b.length >= Node.sizeof);
        assert(b.ptr.alignedAt(Node.alignof));
        assert(b.length >= 2 * Node.sizeof);
        payload = b;
        root = cast(Node*) b.ptr;
        // Initialize the free list with the entire block
        assert(regionMode);
        root.next = null;
        root.size = b.length;
        debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this,
            b.ptr, b.length);
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator))
    this(size_t n)
    {
        assert(n > Node.sizeof);
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator)
        && __traits(hasMember, ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(payload);
    }

    /**
    Forces free list mode. If already in free list mode, does nothing.
    Otherwise, sorts the free list accumulated so far and switches strategy for
    future allocations to KR style.
    */
    void switchToFreeList()
    {
        if (!regionMode) return;
        regionMode = false;
        if (!root) return;
        root = sortFreelist(root);
        coalesceAndMakeCircular;
    }
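
    // A sketch of the recommended pattern for large chunks: force free list
    // mode right after construction. GCAllocator serves here only as a
    // convenient parent allocator.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 1024);
        alloc.switchToFreeList;
        auto b = alloc.allocate(100);
        assert(b.length == 100);
        assert(alloc.deallocate(b));
    }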

    /*
    Noncopyable
    */
    @disable this(this);

    /**
    Word-level alignment.
    */
    enum alignment = Node.alignof;

    /**
    Allocates $(D n) bytes. Allocation searches the list of available blocks
    until a free block with $(D n) or more bytes is found (first fit strategy).
    The block is split (if larger) and returned.

    Params: n = number of bytes to _allocate

    Returns: A word-aligned buffer of $(D n) bytes, or $(D null).
    */
    void[] allocate(size_t n)
    {
        if (!n || !root) return null;
        const actualBytes = goodAllocSize(n);

        // Try the region first
        if (regionMode)
        {
            // Only look at the head of the freelist
            if (root.size >= actualBytes)
            {
                // Enough room for allocation
                void* result = root;
                immutable balance = root.size - actualBytes;
                if (balance >= Node.sizeof)
                {
                    auto newRoot = cast(Node*) (result + actualBytes);
                    newRoot.next = root.next;
                    newRoot.size = balance;
                    root = newRoot;
                }
                else
                {
                    root = null;
                    switchToFreeList;
                }
                return result[0 .. n];
            }

            // Not enough memory, switch to freelist mode and fall through
            switchToFreeList;
        }

        // Free list mode: walk the circular list, allocating from the node
        // after pnode (first fit)
        for (auto pnode = root;;)
        {
            assert(!pnode.adjacent(pnode.next));
            auto k = pnode.next.allocateHere(actualBytes);
            if (k[0] !is null)
            {
                // Found a block that fits
                assert(k[0].length >= n);
                if (root == pnode.next) root = k[1];
                pnode.next = k[1];
                return k[0][0 .. n];
            }

            pnode = pnode.next;
            if (pnode == root) break;
        }
        return null;
    }
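
    // A sketch of basic allocation: requests are rounded up internally, and
    // zero-byte requests yield null.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        auto b = alloc.allocate(100);
        assert(b.length == 100);
        assert(alloc.allocate(0) is null);
    }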

    /**
    Deallocates $(D b), which is assumed to have been previously allocated with
    this allocator. Deallocation performs a linear search in the free list to
    preserve its sorting order. It follows that, when many blocks are free,
    blocks at higher addresses are slower to deallocate.

    Params: b = block to be deallocated
    */
    bool deallocate(void[] b)
    {
        debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this,
            b.ptr, b.length);
        if (!b.ptr) return true;
        assert(owns(b) == Ternary.yes);
        assert(b.ptr.alignedAt(Node.alignof));

        // Insert back in the freelist, keeping it sorted by address. Do not
        // coalesce at this time. Instead, do it lazily during allocation.
        auto n = cast(Node*) b.ptr;
        n.size = goodAllocSize(b.length);
        auto memoryEnd = payload.ptr + payload.length;

        if (regionMode)
        {
            assert(root);
            // Insert right after root
            n.next = root.next;
            root.next = n;
            return true;
        }

        if (!root)
        {
            // What a sight for sore eyes
            root = n;
            root.next = root;

            // If the first block freed is the last one allocated,
            // maybe there's a gap after it.
            root.coalesce(memoryEnd);
            return true;
        }

        version(assert) foreach (test; byNodePtr)
        {
            assert(test != n);
        }
        // Linear search
        auto pnode = root;
        do
        {
            assert(pnode && pnode.next);
            assert(pnode != n);
            assert(pnode.next != n);
            if (pnode < pnode.next)
            {
                if (pnode >= n || n >= pnode.next) continue;
                // Insert in between pnode and pnode.next
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (pnode < n)
            {
                // Insert at the end of the list
                // Add any possible gap at the end of n to the length of n
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce(memoryEnd);
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (n < pnode.next)
            {
                // Insert at the front of the list
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                root = n;
                return true;
            }
        }
        while ((pnode = pnode.next) != root);
        assert(0, "Wrong parameter passed to deallocate");
    }
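
    // A sketch of deallocation: blocks may be returned in any order; LIFO
    // order, as below, keeps the freelist search short.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        auto a = alloc.allocate(100);
        auto b = alloc.allocate(200);
        assert(alloc.deallocate(b));
        assert(alloc.deallocate(a));
    }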

    /**
    Allocates all memory available to this allocator. If the allocator is empty,
    returns the entire available block of memory. Otherwise, it still performs
    a best-effort allocation: if there is no fragmentation (e.g. $(D allocate)
    has been used but not $(D deallocate)), allocates and returns the only
    available block of memory.

    The operation takes time proportional to the number of adjacent free blocks
    at the front of the free list. These blocks get coalesced, whether
    $(D allocateAll) succeeds or fails due to fragmentation.
    */
    void[] allocateAll()
    {
        if (regionMode) switchToFreeList;
        if (root && root.next == root)
            return allocate(root.size);
        return null;
    }

    ///
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        const b1 = alloc.allocate(2048);
        assert(b1.length == 2048);
        const b2 = alloc.allocateAll;
        assert(b2.length == 1024 * 62);
    }

    /**
    Deallocates all memory currently allocated, making the allocator ready for
    other allocations. This is a $(BIGOH 1) operation.
    */
    bool deallocateAll()
    {
        debug(KRRegion) assertValid("deallocateAll");
        debug(KRRegion) scope(exit) assertValid("deallocateAll");
        root = cast(Node*) payload.ptr;
        // Reinitialize the free list with the entire block
        if (root)
        {
            root.next = root;
            root.size = payload.length;
        }
        return true;
    }
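
    // A sketch: deallocateAll discards all allocations at once, leaving the
    // allocator empty again.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        import stdx.allocator.internal : Ternary;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        auto b = alloc.allocate(100);
        assert(b.length == 100);
        assert(alloc.deallocateAll());
        assert(alloc.empty == Ternary.yes);
    }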

    /**
    Checks whether the allocator is responsible for the allocation of $(D b).
    It does a simple $(BIGOH 1) range check. $(D b) should be a buffer either
    allocated with $(D this) or obtained through other means.
    */
    Ternary owns(void[] b)
    {
        debug(KRRegion) assertValid("owns");
        debug(KRRegion) scope(exit) assertValid("owns");
        return Ternary(b.ptr >= payload.ptr
            && b.ptr < payload.ptr + payload.length);
    }
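
    // A sketch: owns is a simple range check against the backing buffer.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        import stdx.allocator.internal : Ternary;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        auto b = alloc.allocate(42);
        assert(alloc.owns(b) == Ternary.yes);
        assert(alloc.owns(new ubyte[42]) == Ternary.no);
    }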

    /**
    Adjusts $(D n) to a size suitable for allocation (two words or larger,
    word-aligned).
    */
    static size_t goodAllocSize(size_t n)
    {
        import stdx.allocator.common : roundUpToMultipleOf;
        return n <= Node.sizeof
            ? Node.sizeof : n.roundUpToMultipleOf(alignment);
    }
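
    // A sketch of the rounding policy: two words minimum, then word-multiple
    // rounding.
    @system unittest
    {
        assert(goodAllocSize(1) == Node.sizeof);
        assert(goodAllocSize(Node.sizeof + 1) == Node.sizeof + alignment);
    }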

    /**
    Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise.
    Never returns `Ternary.unknown`.
    */
    Ternary empty()
    {
        return Ternary(root && root.size == payload.length);
    }
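
    // A sketch: a freshly constructed KRRegion is empty; any allocation makes
    // it non-empty.
    @system unittest
    {
        import stdx.allocator.gc_allocator : GCAllocator;
        import stdx.allocator.internal : Ternary;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        assert(alloc.empty == Ternary.yes);
        auto b = alloc.allocate(100);
        assert(b.length == 100);
        assert(alloc.empty == Ternary.no);
    }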
}

/**
$(D KRRegion) is preferable to $(D Region) as a front for a general-purpose
allocator if $(D deallocate) is needed, yet the actual deallocation traffic is
relatively low. The example below shows a $(D KRRegion) using stack storage
fronting the GC allocator.
*/
@system unittest
{
    import stdx.allocator.building_blocks.fallback_allocator
        : fallbackAllocator;
    import stdx.allocator.gc_allocator : GCAllocator;
    import stdx.allocator.internal : Ternary;
    // KRRegion fronting a general-purpose allocator
    ubyte[1024 * 128] buf;
    auto alloc = fallbackAllocator(KRRegion!()(buf), GCAllocator.instance);
    auto b = alloc.allocate(100);
    assert(b.length == 100);
    assert(alloc.primary.owns(b) == Ternary.yes);
}

/**
The code below defines a scalable allocator consisting of 1 MB (or larger)
blocks fetched via $(D MmapAllocator) directly from the operating system. Each
block is organized as a KR-style heap. More blocks are allocated and freed as
needed.

This is the closest example to the allocator introduced in the K$(AMP)R book.
It should perform slightly better because instead of searching through one
large free list, it searches through several shorter lists in LRU order. Also,
it actually returns memory to the operating system when possible.
*/
@system unittest
{
    import mir.utility : max;
    import stdx.allocator.building_blocks.allocator_list
        : AllocatorList;
    import stdx.allocator.mmap_allocator : MmapAllocator;
    AllocatorList!(n => KRRegion!MmapAllocator(max(n * 16, 1024u * 1024))) alloc;
}

@system unittest
{
    import mir.utility : max;
    import stdx.allocator.building_blocks.allocator_list
        : AllocatorList;
    import stdx.allocator.mallocator : Mallocator;
    import stdx.allocator.internal : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks fetched
    from the C heap via Mallocator. Each block is organized as a KR-style
    heap. More blocks are allocated and freed as needed.
    */
    AllocatorList!(n => KRRegion!Mallocator(max(n * 16, 1024u * 1024)),
        NullAllocator) alloc;
    void[][50] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(array[i].ptr);
        assert(alloc.owns(array[i]) == Ternary.yes);
        alloc.deallocate(array[i]);
    }
}

@system unittest
{
    import mir.utility : max;
    import stdx.allocator.building_blocks.allocator_list
        : AllocatorList;
    import stdx.allocator.mmap_allocator : MmapAllocator;
    import stdx.allocator.internal : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks fetched
    from the operating system via MmapAllocator. Each block is organized as a
    KR-style heap. More blocks are allocated and freed as needed.
    */
    AllocatorList!((n) {
        auto result = KRRegion!MmapAllocator(max(n * 2, 1024u * 1024));
        return result;
    }) alloc;
    void[][99] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        foreach (j; 0 .. i)
        {
            assert(array[i].ptr != array[j].ptr);
        }
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(alloc.owns(array[i]) == Ternary.yes);
        alloc.deallocate(array[i]);
    }
}

@system unittest
{
    import mir.utility : max;
    import stdx.allocator.building_blocks.allocator_list
        : AllocatorList;
    import stdx.allocator.common : testAllocator;
    import stdx.allocator.gc_allocator : GCAllocator;
    testAllocator!(() => AllocatorList!(
        n => KRRegion!GCAllocator(max(n * 16, 1024u * 1024)))());
}

@system unittest
{
    import stdx.allocator.gc_allocator : GCAllocator;

    auto alloc = KRRegion!GCAllocator(1024u * 1024);

    void[][] array;
    foreach (i; 1 .. 4)
    {
        array ~= alloc.allocate(i);
        assert(array[$ - 1].length == i);
    }
    alloc.deallocate(array[1]);
    alloc.deallocate(array[0]);
    alloc.deallocate(array[2]);
    assert(alloc.allocateAll().length == 1024u * 1024);
}

@system unittest
{
    import std.conv : text;
    import stdx.allocator.gc_allocator : GCAllocator;
    import stdx.allocator.internal : Ternary;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024u * 1024)));
    const store = alloc.allocate(KRRegion!().sizeof);
    auto p = cast(KRRegion!()* ) store.ptr;
    import core.stdc.string : memcpy;
    import mir.conv : emplace;

    memcpy(p, &alloc, alloc.sizeof);
    emplace(&alloc);

    void[][100] array;
    foreach (i; 0 .. array.length)
    {
        auto length = 100 * i + 1;
        array[i] = p.allocate(length);
        assert(array[i].length == length, text(array[i].length));
        assert(p.owns(array[i]) == Ternary.yes);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(p.owns(array[i]) == Ternary.yes);
        p.deallocate(array[i]);
    }
    auto b = p.allocateAll();
    assert(b.length == 1024u * 1024 - KRRegion!().sizeof, text(b.length));
}

@system unittest
{
    import stdx.allocator.gc_allocator : GCAllocator;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024u * 1024)));
    auto p = alloc.allocateAll();
    assert(p.length == 1024u * 1024);
    alloc.deallocateAll();
    p = alloc.allocateAll();
    assert(p.length == 1024u * 1024);
}

@system unittest
{
    import stdx.allocator.building_blocks;
    import std.random;
    import stdx.allocator.internal : Ternary;

    // Both sequences must work on either system

    // A sequence of allocations that triggers the bug described in issue
    // 16564, that is, a gap at the end of buf from the allocator's perspective

    // for 64-bit systems (leftover balance = 8 bytes < 16)
    int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8];

    // for 32-bit systems (leftover balance < 8)
    int[] sizes32 = [81412, 107068, 49892, 23768];


    static if (__VERSION__ >= 2072) {
        mixin(`
        void test(int[] sizes)
        {
            align(size_t.sizeof) ubyte[256 * 1024] buf;
            auto a = KRRegion!()(buf);

            void[][] bufs;

            foreach (size; sizes)
            {
                bufs ~= a.allocate(size);
            }

            foreach (b; bufs.randomCover)
            {
                a.deallocate(b);
            }

            assert(a.empty == Ternary.yes);
        }

        test(sizes64);
        test(sizes32);
        `);
    }
}

@system unittest
{
    import stdx.allocator.building_blocks;
    import std.random;
    import stdx.allocator.internal : Ternary;

    // On 64-bit systems we allocate in multiples of 8, but the minimum
    // allocation size is 16. This can create gaps. This test is an example of
    // such a case: a gap forms between the blocks allocated for the second
    // and third values in sizes, and there is another gap at the very end.
    // (In total, 2 * word bytes are lost.)

    int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8];
    int[] sizes32 = [81412, 107068, 49892, 23768];

    int word64 = 8;
    int word32 = 4;

    static if (__VERSION__ >= 2072) {
        mixin(`
        void test(int[] sizes, int word)
        {
            align(size_t.sizeof) ubyte[256 * 1024] buf;
            auto a = KRRegion!()(buf);

            void[][] bufs;

            foreach (size; sizes)
            {
                bufs ~= a.allocate(size);
            }

            a.deallocate(bufs[1]);
            bufs ~= a.allocate(sizes[1] - word);

            a.deallocate(bufs[0]);
            foreach (i; 2 .. bufs.length)
            {
                a.deallocate(bufs[i]);
            }

            assert(a.empty == Ternary.yes);
        }

        test(sizes64, word64);
        test(sizes32, word32);
        `);
    }
}