1 ///
2 module stdx.allocator.building_blocks.region;
3 
4 import stdx.allocator.building_blocks.null_allocator;
5 import stdx.allocator.common;
6 import std.typecons : Flag, Yes, No;
7 
8 /**
9 A $(D Region) allocator allocates memory straight from one contiguous chunk.
10 There is no deallocation, and once the region is full, allocation requests
return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with
more sophisticated allocators; or (b) for batch-style, very fast allocations
that are all deallocated at once.
14 
The region stores only three pointers: the current position in the store and
the store's two limits. One allocation entails rounding up the allocation size
for alignment purposes, bumping the current pointer, and comparing it against
the limit.
19 
20 If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
21 deallocates the chunk of memory during destruction.
22 
23 The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
24 sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
25 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
26 control alignment externally.
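
A minimal usage sketch (assuming a stack buffer; the constructor itself rounds
the store to the region's alignment):

----
ubyte[1024] buf;
auto r = Region!()(buf[]);
auto p = r.allocate(32);
assert(p.length == 32);
// Individual blocks are generally not freed; reset wholesale instead:
r.deallocateAll();
----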
27 
28 */
29 struct Region(ParentAllocator = NullAllocator,
30     uint minAlign = platformAlignment,
31     Flag!"growDownwards" growDownwards = No.growDownwards)
32 {
33     static assert(minAlign.isGoodStaticAlignment);
34     static assert(ParentAllocator.alignment >= minAlign);
35 
36     import stdx.allocator.internal : Ternary;
37 
38     // state
39     /**
40     The _parent allocator. Depending on whether $(D ParentAllocator) holds state
41     or not, this is a member variable or an alias for
42     `ParentAllocator.instance`.
43     */
44     static if (stateSize!ParentAllocator)
45     {
46         ParentAllocator parent;
47     }
48     else
49     {
50         alias parent = ParentAllocator.instance;
51     }
52     private void* _current, _begin, _end;
53 
54     /**
55     Constructs a region backed by a user-provided store. Assumes $(D store) is
56     aligned at $(D minAlign). Also assumes the memory was allocated with $(D
57     ParentAllocator) (if different from $(D NullAllocator)).
58 
59     Params:
60     store = User-provided store backing up the region. $(D store) must be
61     aligned at $(D minAlign) (enforced with $(D assert)). If $(D
62     ParentAllocator) is different from $(D NullAllocator), memory is assumed to
63     have been allocated with $(D ParentAllocator).
    n = Bytes to allocate using $(D ParentAllocator). This constructor is
    defined only if $(D ParentAllocator) is different from $(D NullAllocator).
    If $(D parent.allocate(n)) returns $(D null), the region will be
    initialized as empty (correctly initialized but unable to allocate).
68     */
69     this(ubyte[] store)
70     {
71         store = cast(ubyte[])(store.roundUpToAlignment(alignment));
72         store = store[0 .. $.roundDownToAlignment(alignment)];
73         assert(store.ptr.alignedAt(minAlign));
74         assert(store.length % minAlign == 0);
75         _begin = store.ptr;
76         _end = store.ptr + store.length;
77         static if (growDownwards)
78             _current = _end;
79         else
80             _current = store.ptr;
81     }
82 
83     /// Ditto
84     static if (!is(ParentAllocator == NullAllocator))
85     this(size_t n)
86     {
87         this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
88     }
89 
90     /*
    TODO: The postblit of $(D Region) should be disabled because such objects
    should not be copied around naively.
93     */
94 
95     /**
    If `ParentAllocator` is not `NullAllocator` and defines `deallocate`, the
    region defines a destructor that uses `ParentAllocator.deallocate` to free
    the memory chunk.
98     */
99     static if (!is(ParentAllocator == NullAllocator)
100         && __traits(hasMember, ParentAllocator, "deallocate"))
101     ~this()
102     {
103         parent.deallocate(_begin[0 .. _end - _begin]);
104     }
105 
106 
107     /**
108     Alignment offered.
109     */
110     alias alignment = minAlign;
111 
112     /**
113     Allocates $(D n) bytes of memory. The shortest path involves an alignment
114     adjustment (if $(D alignment > 1)), an increment, and a comparison.
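
    A sketch of both outcomes (using $(D minAlign == 1), so sizes are taken
    verbatim):

    ----
    ubyte[64] buf;
    auto r = Region!(NullAllocator, 1)(buf[]);
    assert(r.allocate(48).length == 48); // fits
    assert(r.allocate(32) is null);      // only 16 bytes left
    ----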
115 
116     Params:
117     n = number of bytes to allocate
118 
119     Returns:
    A properly-aligned buffer of size $(D n), or $(D null) if the request could
    not be satisfied.
122     */
123     void[] allocate(size_t n)
124     {
125         static if (growDownwards)
126         {
127             if (available < n) return null;
128             static if (minAlign > 1)
129                 const rounded = n.roundUpToAlignment(alignment);
130             else
131                 alias rounded = n;
132             assert(available >= rounded);
133             auto result = (_current - rounded)[0 .. n];
134             assert(result.ptr >= _begin);
135             _current = result.ptr;
136             assert(owns(result) == Ternary.yes);
137             return result;
138         }
139         else
140         {
141             auto result = _current[0 .. n];
142             static if (minAlign > 1)
143                 const rounded = n.roundUpToAlignment(alignment);
144             else
145                 alias rounded = n;
146             _current += rounded;
147             if (_current <= _end) return result;
148             // Slow path, backtrack
149             _current -= rounded;
150             return null;
151         }
152     }
153 
154     /**
155     Allocates $(D n) bytes of memory aligned at alignment $(D a).
156 
157     Params:
158     n = number of bytes to allocate
159     a = alignment for the allocated block
160 
161     Returns:
162     Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
163     */
164     void[] alignedAllocate(size_t n, uint a)
165     {
166         import stdx.allocator.internal : isPowerOf2;
167         assert(a.isPowerOf2);
168         static if (growDownwards)
169         {
170             const available = _current - _begin;
171             if (available < n) return null;
172             auto result = (_current - n).alignDownTo(a)[0 .. n];
173             if (result.ptr >= _begin)
174             {
175                 _current = result.ptr;
176                 return result;
177             }
178         }
179         else
180         {
181             // Just bump the pointer to the next good allocation
182             auto save = _current;
183             _current = _current.alignUpTo(a);
184             auto result = allocate(n);
185             if (result.ptr)
186             {
187                 assert(result.length == n);
188                 return result;
189             }
190             // Failed, rollback
191             _current = save;
192         }
193         return null;
194     }
195 
196     /// Allocates and returns all memory available to this region.
197     void[] allocateAll()
198     {
199         static if (growDownwards)
200         {
201             auto result = _begin[0 .. available];
202             _current = _begin;
203         }
204         else
205         {
206             auto result = _current[0 .. available];
207             _current = _end;
208         }
209         return result;
210     }
211 
212     /**
213     Expands an allocated block in place. Expansion will succeed only if the
214     block is the last allocated. Defined only if `growDownwards` is
215     `No.growDownwards`.
216     */
217     static if (growDownwards == No.growDownwards)
218     bool expand(ref void[] b, size_t delta)
219     {
220         assert(owns(b) == Ternary.yes || b.ptr is null);
221         assert(b.ptr + b.length <= _current || b.ptr is null);
222         if (!b.ptr) return delta == 0;
223         auto newLength = b.length + delta;
224         if (_current < b.ptr + b.length + alignment)
225         {
226             // This was the last allocation! Allocate some more and we're done.
227             if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
228                 || allocate(delta).length == delta)
229             {
230                 b = b.ptr[0 .. newLength];
231                 assert(_current < b.ptr + b.length + alignment);
232                 return true;
233             }
234         }
235         return false;
236     }
237 
238     /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. if another allocation has occurred since)
    it does nothing.
244 
245     Params:
246     b = Block previously obtained by a call to $(D allocate) against this
247     allocator ($(D null) is allowed).
248     */
249     bool deallocate(void[] b)
250     {
251         assert(owns(b) == Ternary.yes || b.ptr is null);
252         static if (growDownwards)
253         {
254             if (b.ptr == _current)
255             {
256                 _current += this.goodAllocSize(b.length);
257                 return true;
258             }
259         }
260         else
261         {
262             if (b.ptr + this.goodAllocSize(b.length) == _current)
263             {
264                 assert(b.ptr !is null || _current is null);
265                 _current = b.ptr;
266                 return true;
267             }
268         }
269         return false;
270     }
271 
272     /**
273     Deallocates all memory allocated by this region, which can be subsequently
274     reused for new allocations.
275     */
276     bool deallocateAll()
277     {
278         static if (growDownwards)
279         {
280             _current = _end;
281         }
282         else
283         {
284             _current = _begin;
285         }
286         return true;
287     }
288 
289     /**
290     Queries whether $(D b) has been allocated with this region.
291 
292     Params:
    b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
    returns $(D Ternary.no)).
295 
296     Returns:
297     $(D true) if $(D b) has been allocated with this region, $(D false)
298     otherwise.
299     */
300     Ternary owns(void[] b) const
301     {
302         return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
303     }
304 
305     /**
306     Returns `Ternary.yes` if no memory has been allocated in this region,
307     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
308     */
    Ternary empty() const
    {
        static if (growDownwards)
            return Ternary(_current == _end);
        else
            return Ternary(_current == _begin);
    }
313 
314     /// Nonstandard property that returns bytes available for allocation.
315     size_t available() const
316     {
317         static if (growDownwards)
318         {
319             return _current - _begin;
320         }
321         else
322         {
323             return _end - _current;
324         }
325     }
326 }
327 
328 ///
329 @system unittest
330 {
331     import mir.utility : max;
332     import stdx.allocator.building_blocks.allocator_list
333         : AllocatorList;
334     import stdx.allocator.mallocator : Mallocator;
    // Create a scalable list of regions. Each gets at least 1 MB at a time,
    // allocated with malloc.
337     auto batchAllocator = AllocatorList!(
338         (size_t n) => Region!Mallocator(max(n, 1024u * 1024))
339     )();
340     auto b = batchAllocator.allocate(101);
341     assert(b.length == 101);
342     // This will cause a second allocation
343     b = batchAllocator.allocate(2 * 1024 * 1024);
344     assert(b.length == 2 * 1024 * 1024);
345     // Destructor will free the memory
346 }
347 
348 @system unittest
349 {
350     import stdx.allocator.mallocator : Mallocator;
351     // Create a 64 KB region allocated with malloc
352     auto reg = Region!(Mallocator, Mallocator.alignment,
353         Yes.growDownwards)(1024 * 64);
354     const b = reg.allocate(101);
355     assert(b.length == 101);
356     // Destructor will free the memory
357 }
358 
359 /**
360 
361 $(D InSituRegion) is a convenient region that carries its storage within itself
362 (in the form of a statically-sized array).
363 
364 The first template argument is the size of the region and the second is the
365 needed alignment. Depending on the alignment requested and platform details,
366 the actual available storage may be smaller than the compile-time parameter. To
367 make sure that at least $(D n) bytes are available in the region, use
368 $(D InSituRegion!(n + a - 1, a)).
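
For example, the following region is guaranteed to offer at least 4 KB (a
sketch; reading $(D available) initializes the region lazily):

----
InSituRegion!(4096 + 64 - 1, 64) r;
assert(r.available >= 4096);
----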
369 
Given that the most frequent use of `InSituRegion` is as a stack allocator, it
allocates starting at the end on systems where the stack grows downwards, so
that hot memory is used first.
373 
374 */
375 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
376 {
377     import mir.utility : max;
378     import stdx.allocator.internal : Ternary;
379 
380     static assert(minAlign.isGoodStaticAlignment);
381     static assert(size >= minAlign);
382 
383     version (X86) enum growDownwards = Yes.growDownwards;
384     else version (X86_64) enum growDownwards = Yes.growDownwards;
385     else version (ARM) enum growDownwards = Yes.growDownwards;
386     else version (AArch64) enum growDownwards = Yes.growDownwards;
387     else version (PPC) enum growDownwards = Yes.growDownwards;
388     else version (PPC64) enum growDownwards = Yes.growDownwards;
389     else version (MIPS32) enum growDownwards = Yes.growDownwards;
390     else version (MIPS64) enum growDownwards = Yes.growDownwards;
391     else version (SPARC) enum growDownwards = Yes.growDownwards;
392     else version (SystemZ) enum growDownwards = Yes.growDownwards;
393     else version (WebAssembly) enum growDownwards = Yes.growDownwards;
394     else static assert(0, "Dunno how the stack grows on this architecture.");
395 
396     @disable this(this);
397 
398     // state {
399     private Region!(NullAllocator, minAlign, growDownwards) _impl;
400     union
401     {
402         private ubyte[size] _store = void;
403         private double _forAlignmentOnly1 = void;
404     }
405     // }
406 
407     /**
408     An alias for $(D minAlign), which must be a valid alignment (nonzero power
409     of 2). The start of the region and all allocation requests will be rounded
410     up to a multiple of the alignment.
411 
412     ----
413     InSituRegion!(4096) a1;
414     assert(a1.alignment == platformAlignment);
415     InSituRegion!(4096, 64) a2;
416     assert(a2.alignment == 64);
417     ----
418     */
419     alias alignment = minAlign;
420 
421     private void lazyInit()
422     {
423         assert(!_impl._current);
424         _impl = typeof(_impl)(_store);
425         assert(_impl._current.alignedAt(alignment));
426     }
427 
428     /**
    Allocates $(D n) bytes and returns them, or $(D null) if the region cannot
    accommodate the request. For efficiency reasons, if $(D n == 0) the
    function returns an empty non-null slice.
432     */
433     void[] allocate(size_t n)
434     {
435         // Fast path
436     entry:
437         auto result = _impl.allocate(n);
438         if (result.length == n) return result;
439         // Slow path
440         if (_impl._current) return null; // no more room
441         lazyInit;
442         assert(_impl._current);
443         goto entry;
444     }
445 
446     /**
447     As above, but the memory allocated is aligned at $(D a) bytes.
448     */
449     void[] alignedAllocate(size_t n, uint a)
450     {
451         // Fast path
452     entry:
453         auto result = _impl.alignedAllocate(n, a);
454         if (result.length == n) return result;
455         // Slow path
456         if (_impl._current) return null; // no more room
457         lazyInit;
458         assert(_impl._current);
459         goto entry;
460     }
461 
462     /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. if another allocation has occurred since)
    it does nothing.
468 
469     Params:
470     b = Block previously obtained by a call to $(D allocate) against this
471     allocator ($(D null) is allowed).
472     */
473     bool deallocate(void[] b)
474     {
475         if (!_impl._current) return b is null;
476         return _impl.deallocate(b);
477     }
478 
479     /**
480     Returns `Ternary.yes` if `b` is the result of a previous allocation,
481     `Ternary.no` otherwise.
482     */
483     Ternary owns(void[] b)
484     {
485         if (!_impl._current) return Ternary.no;
486         return _impl.owns(b);
487     }
488 
489     /**
490     Expands an allocated block in place. Expansion will succeed only if the
491     block is the last allocated.
492     */
493     static if (__traits(hasMember, typeof(_impl), "expand"))
494     bool expand(ref void[] b, size_t delta)
495     {
496         if (!_impl._current) lazyInit;
497         return _impl.expand(b, delta);
498     }
499 
500     /**
501     Deallocates all memory allocated with this allocator.
502     */
503     bool deallocateAll()
504     {
        // No need to lazily initialize the region just to reset it
506         return _impl.deallocateAll;
507     }
508 
509     /**
510     Allocates all memory available with this allocator.
511     */
512     void[] allocateAll()
513     {
514         if (!_impl._current) lazyInit;
515         return _impl.allocateAll;
516     }
517 
518     /**
519     Nonstandard function that returns the bytes available for allocation.
520     */
521     size_t available()
522     {
523         if (!_impl._current) lazyInit;
524         return _impl.available;
525     }
526 }
527 
528 ///
529 @system unittest
530 {
    // 128KB region with 16-byte alignment
532     InSituRegion!(128 * 1024, 16) r1;
533     auto a1 = r1.allocate(101);
534     assert(a1.length == 101);
535 
536     // 128KB region, with fallback to the garbage collector.
537     import stdx.allocator.building_blocks.fallback_allocator
538         : FallbackAllocator;
539     import stdx.allocator.building_blocks.free_list
540         : FreeList;
541     import stdx.allocator.building_blocks.bitmapped_block
542         : BitmappedBlock;
543     import stdx.allocator.gc_allocator : GCAllocator;
544     FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
545     const a2 = r2.allocate(102);
546     assert(a2.length == 102);
547 
548     // Reap with GC fallback.
549     InSituRegion!(128 * 1024, 8) tmp3;
550     FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
551     r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
552     const a3 = r3.allocate(103);
553     assert(a3.length == 103);
554 
555     // Reap/GC with a freelist for small objects up to 16 bytes.
556     InSituRegion!(128 * 1024, 64) tmp4;
557     FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
558     r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
559     const a4 = r4.allocate(104);
560     assert(a4.length == 104);
561 }
562 
563 @system unittest
564 {
565     InSituRegion!(4096, 1) r1;
566     auto a = r1.allocate(2001);
567     assert(a.length == 2001);
568     import std.conv : to;
569     assert(r1.available == 2095, r1.available.to!string);
570 
571     InSituRegion!(65_536, 1024*4) r2;
572     assert(r2.available <= 65_536);
573     a = r2.allocate(2001);
574     assert(a.length == 2001);
575 }
576 
577 private extern(C) void* sbrk(long);
578 private extern(C) int brk(shared void*);
579 
580 /**
581 
Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
for Posix systems. Because $(D sbrk) is not thread-safe
$(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
$(D SbrkRegion) serializes access through an internal mutex. This implies that
uncontrolled calls to $(D brk) and $(D sbrk) made elsewhere may adversely
affect the workings of $(D SbrkRegion).
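
A minimal usage sketch (Posix only; the allocator is stateless, so the shared
$(D instance) can be used directly):

----
alias A = SbrkRegion!();
auto p = A.instance.allocate(4096);
assert(p.length == 4096);
----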
588 
589 */
590 version(Posix) struct SbrkRegion(uint minAlign = platformAlignment)
591 {
592     import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
593         pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
594         PTHREAD_MUTEX_INITIALIZER;
595     private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
596     import stdx.allocator.internal : Ternary;
597 
598     static assert(minAlign.isGoodStaticAlignment);
599     static assert(size_t.sizeof == (void*).sizeof);
600     private static shared void* _brkInitial, _brkCurrent;
601 
602     /**
603     Instance shared by all callers.
604     */
605     enum SbrkRegion instance = SbrkRegion();
606 
607     /**
608     Standard allocator primitives.
609     */
610     enum uint alignment = minAlign;
611 
612     /// Ditto
613     static void[] allocate(size_t bytes)
614     {
615         static if (minAlign > 1)
616             const rounded = bytes.roundUpToMultipleOf(alignment);
617         else
618             alias rounded = bytes;
619         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
620         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
621             || assert(0);
622         // Assume sbrk returns the old break. Most online documentation confirms
623         // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
624         // which claims the returned value is not portable.
625         auto p = sbrk(rounded);
626         if (p == cast(void*) -1)
627         {
628             return null;
629         }
630         if (!_brkInitial)
631         {
632             _brkInitial = cast(shared) p;
633             assert(cast(size_t) _brkInitial % minAlign == 0,
634                 "Too large alignment chosen for " ~ typeof(this).stringof);
635         }
636         _brkCurrent = cast(shared) (p + rounded);
637         return p[0 .. bytes];
638     }
639 
640     /// Ditto
641     static void[] alignedAllocate(size_t bytes, uint a)
642     {
643         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
644         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
645             || assert(0);
646         if (!_brkInitial)
647         {
648             // This is one extra call, but it'll happen only once.
649             _brkInitial = cast(shared) sbrk(0);
650             assert(cast(size_t) _brkInitial % minAlign == 0,
651                 "Too large alignment chosen for " ~ typeof(this).stringof);
652             (_brkInitial != cast(void*) -1) || assert(0);
653             _brkCurrent = _brkInitial;
654         }
655         immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
656             cast(size_t) _brkCurrent, a) - _brkCurrent;
657         // Still must make sure the total size is aligned to the allocator's
658         // alignment.
659         immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
660 
661         auto p = sbrk(rounded);
662         if (p == cast(void*) -1)
663         {
664             return null;
665         }
666         _brkCurrent = cast(shared) (p + rounded);
667         return p[delta .. delta + bytes];
668     }
669 
670     /**
671 
672     The $(D expand) method may only succeed if the argument is the last block
673     allocated. In that case, $(D expand) attempts to push the break pointer to
674     the right.
675 
676     */
677     static bool expand(ref void[] b, size_t delta)
678     {
679         if (b is null) return delta == 0;
680         assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
681         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
682         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
683             || assert(0);
684         if (_brkCurrent != b.ptr + b.length) return false;
685         // Great, can expand the last block
686         static if (minAlign > 1)
687             const rounded = delta.roundUpToMultipleOf(alignment);
688         else
            alias rounded = delta;
690         auto p = sbrk(rounded);
691         if (p == cast(void*) -1)
692         {
693             return false;
694         }
695         _brkCurrent = cast(shared) (p + rounded);
696         b = b.ptr[0 .. b.length + delta];
697         return true;
698     }
699 
700     /// Ditto
701     static Ternary owns(void[] b)
702     {
703         // No need to lock here.
704         assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
705         return Ternary(_brkInitial && b.ptr >= _brkInitial);
706     }
707 
708     /**
709 
    The $(D deallocate) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such calls. In addition, the
    argument must be the last block allocated.
714 
715     */
716     static bool deallocate(void[] b)
717     {
718         static if (minAlign > 1)
719             const rounded = b.length.roundUpToMultipleOf(alignment);
720         else
721             const rounded = b.length;
722         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
723         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
724             || assert(0);
725         if (_brkCurrent != b.ptr + rounded) return false;
726         assert(b.ptr >= _brkInitial);
727         if (sbrk(-rounded) == cast(void*) -1)
728             return false;
729         _brkCurrent = cast(shared) b.ptr;
730         return true;
731     }
732 
733     /**
    The $(D deallocateAll) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such calls.
737     */
738     static bool deallocateAll()
739     {
740         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
741         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
742             || assert(0);
743         return !_brkInitial || brk(_brkInitial) == 0;
744     }
745 
746     /// Standard allocator API.
747     Ternary empty()
748     {
749         // Also works when they're both null.
750         return Ternary(_brkCurrent == _brkInitial);
751     }
752 }
753 
754 version(Posix) @system unittest
755 {
756     // Let's test the assumption that sbrk(n) returns the old address
757     const p1 = sbrk(0);
758     const p2 = sbrk(4096);
759     assert(p1 == p2);
760     const p3 = sbrk(0);
761     assert(p3 == p2 + 4096);
762     // Try to reset brk, but don't make a fuss if it doesn't work
763     sbrk(-4096);
764 }
765 
766 version(Posix) @system unittest
767 {
768     import stdx.allocator.internal : Ternary;
769     alias alloc = SbrkRegion!(8).instance;
770     auto a = alloc.alignedAllocate(2001, 4096);
771     assert(a.length == 2001);
772     auto b = alloc.allocate(2001);
773     assert(b.length == 2001);
774     assert(alloc.owns(a) == Ternary.yes);
775     assert(alloc.owns(b) == Ternary.yes);
776     // reducing the brk does not work on OSX
777     version(OSX) {} else
778     {
779         assert(alloc.deallocate(b));
780         assert(alloc.deallocateAll);
781     }
782 }