///
module std.experimental.allocator.building_blocks.region;

import std.experimental.allocator.building_blocks.null_allocator;
import std.experimental.allocator.common;
import std.typecons : Flag, Yes, No;

version (OSX)
    version = Darwin;
else version (iOS)
    version = Darwin;
else version (TVOS)
    version = Darwin;
else version (WatchOS)
    version = Darwin;
/**
A $(D Region) allocator allocates memory straight from one contiguous chunk.
There is no deallocation, and once the region is full, allocation requests
return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with
more sophisticated allocators; or (b) for batch-style very fast allocations
that deallocate everything at once.

The region only stores three pointers, corresponding to the current position in
the store and the limits. One allocation entails rounding up the allocation
size for alignment purposes, bumping the current pointer, and comparing it
against the limit.

If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
deallocates the chunk of memory during destruction.

The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
control alignment externally.
*/
struct Region(ParentAllocator = NullAllocator,
    uint minAlign = platformAlignment,
    Flag!"growDownwards" growDownwards = No.growDownwards)
{
    static assert(minAlign.isGoodStaticAlignment);
    static assert(ParentAllocator.alignment >= minAlign);

    import std.traits : hasMember;
    import std.typecons : Ternary;

    // state
    /**
    The _parent allocator. Depending on whether $(D ParentAllocator) holds state
    or not, this is a member variable or an alias for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator)
    {
        ParentAllocator parent;
    }
    else
    {
        alias parent = ParentAllocator.instance;
    }
    private void* _current, _begin, _end;
    /**
    Constructs a region backed by a user-provided store. Assumes $(D store) is
    aligned at $(D minAlign). Also assumes the memory was allocated with $(D
    ParentAllocator) (if different from $(D NullAllocator)).

    Params:
    store = User-provided store backing up the region. $(D store) must be
    aligned at $(D minAlign) (enforced with $(D assert)). If $(D
    ParentAllocator) is different from $(D NullAllocator), memory is assumed to
    have been allocated with $(D ParentAllocator).
    n = Bytes to allocate using $(D ParentAllocator). This constructor is only
    defined if $(D ParentAllocator) is different from $(D NullAllocator). If
    $(D parent.allocate(n)) returns $(D null), the region will be initialized
    as empty (correctly initialized but unable to allocate).
    */
    this(ubyte[] store)
    {
        store = cast(ubyte[])(store.roundUpToAlignment(alignment));
        store = store[0 .. $.roundDownToAlignment(alignment)];
        assert(store.ptr.alignedAt(minAlign));
        assert(store.length % minAlign == 0);
        _begin = store.ptr;
        _end = store.ptr + store.length;
        static if (growDownwards)
            _current = _end;
        else
            _current = store.ptr;
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator))
    this(size_t n)
    {
        this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
    }
    /*
    TODO: The postblit of $(D Region) should be disabled because such objects
    should not be copied around naively.
    */

    /**
    If `ParentAllocator` is not `NullAllocator` and defines `deallocate`, the
    region defines a destructor that uses `ParentAllocator.deallocate` to free
    the memory chunk.
    */
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(_begin[0 .. _end - _begin]);
    }

    /**
    Alignment offered.
    */
    alias alignment = minAlign;
    /**
    Allocates $(D n) bytes of memory. The shortest path involves an alignment
    adjustment (if $(D alignment > 1)), an increment, and a comparison.

    Params:
    n = number of bytes to allocate

    Returns:
    A properly-aligned buffer of size $(D n) or $(D null) if request could not
    be satisfied.
    */
    void[] allocate(size_t n)
    {
        static if (growDownwards)
        {
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            // Check against the rounded size; comparing against n alone could
            // let a request overrun the start of the store.
            if (available < rounded) return null;
            auto result = (_current - rounded)[0 .. n];
            assert(result.ptr >= _begin);
            _current = result.ptr;
            assert(owns(result) == Ternary.yes);
            return result;
        }
        else
        {
            auto result = _current[0 .. n];
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            _current += rounded;
            if (_current <= _end) return result;
            // Slow path, backtrack
            _current -= rounded;
            return null;
        }
    }
    /**
    Allocates $(D n) bytes of memory aligned at alignment $(D a).

    Params:
    n = number of bytes to allocate
    a = alignment for the allocated block

    Returns:
    Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        import std.math : isPowerOf2;
        assert(a.isPowerOf2);
        static if (growDownwards)
        {
            const available = _current - _begin;
            if (available < n) return null;
            auto result = (_current - n).alignDownTo(a)[0 .. n];
            if (result.ptr >= _begin)
            {
                _current = result.ptr;
                return result;
            }
        }
        else
        {
            // Just bump the pointer to the next good allocation
            auto save = _current;
            _current = _current.alignUpTo(a);
            auto result = allocate(n);
            if (result.ptr)
            {
                assert(result.length == n);
                return result;
            }
            // Failed, rollback
            _current = save;
        }
        return null;
    }
    /// Allocates and returns all memory available to this region.
    void[] allocateAll()
    {
        static if (growDownwards)
        {
            auto result = _begin[0 .. available];
            _current = _begin;
        }
        else
        {
            auto result = _current[0 .. available];
            _current = _end;
        }
        return result;
    }
    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated. Defined only if `growDownwards` is
    `No.growDownwards`.
    */
    static if (growDownwards == No.growDownwards)
    bool expand(ref void[] b, size_t delta)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        assert(b.ptr + b.length <= _current || b.ptr is null);
        if (!b.ptr) return delta == 0;
        auto newLength = b.length + delta;
        if (_current < b.ptr + b.length + alignment)
        {
            // This was the last allocation! Allocate some more and we're done.
            if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
                || allocate(delta).length == delta)
            {
                b = b.ptr[0 .. newLength];
                assert(_current < b.ptr + b.length + alignment);
                return true;
            }
        }
        return false;
    }
    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. another allocation has occurred since) it
    does nothing.

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    bool deallocate(void[] b)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        static if (growDownwards)
        {
            if (b.ptr == _current)
            {
                _current += this.goodAllocSize(b.length);
                return true;
            }
        }
        else
        {
            if (b.ptr + this.goodAllocSize(b.length) == _current)
            {
                assert(b.ptr !is null || _current is null);
                _current = b.ptr;
                return true;
            }
        }
        return false;
    }
    /**
    Deallocates all memory allocated by this region, which can be subsequently
    reused for new allocations.
    */
    bool deallocateAll()
    {
        static if (growDownwards)
        {
            _current = _end;
        }
        else
        {
            _current = _begin;
        }
        return true;
    }
    /**
    Queries whether $(D b) has been allocated with this region.

    Params:
    b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
    returns $(D Ternary.no)).

    Returns:
    $(D Ternary.yes) if $(D b) has been allocated with this region,
    $(D Ternary.no) otherwise.
    */
    Ternary owns(void[] b) const
    {
        return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
    }

    /**
    Returns `Ternary.yes` if no memory has been allocated in this region,
    `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
    */
    Ternary empty() const
    {
        // A downward-growing region starts allocating from _end, so it is
        // empty while _current still sits there, not at _begin.
        static if (growDownwards)
            return Ternary(_current == _end);
        else
            return Ternary(_current == _begin);
    }
    /// Nonstandard property that returns bytes available for allocation.
    size_t available() const
    {
        static if (growDownwards)
        {
            return _current - _begin;
        }
        else
        {
            return _end - _current;
        }
    }
}
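
// A minimal illustrative sketch (not part of the upstream sources): a
// `Yes.growDownwards` region over a caller-provided buffer hands out memory
// from the top of the store, so successive allocations move toward the start.
// The buffer size here is an arbitrary assumption for the example.
@system unittest
{
    ubyte[256] store;
    auto r = Region!(NullAllocator, platformAlignment,
        Yes.growDownwards)(store[]);
    auto a = r.allocate(platformAlignment);
    auto b = r.allocate(platformAlignment);
    // The second allocation sits immediately below the first one in memory.
    assert(b.ptr + platformAlignment == a.ptr);
}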
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a scalable list of regions. Each gets at least 1MB at a time by
    // using malloc.
    auto batchAllocator = AllocatorList!(
        (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
    )();
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    // This will cause a second allocation
    b = batchAllocator.allocate(2 * 1024 * 1024);
    assert(b.length == 2 * 1024 * 1024);
    // Destructor will free the memory
}

@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a 64 KB region allocated with malloc
    auto reg = Region!(Mallocator, Mallocator.alignment,
        Yes.growDownwards)(1024 * 64);
    const b = reg.allocate(101);
    assert(b.length == 101);
    // Destructor will free the memory
}
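
// A minimal illustrative sketch (not part of the upstream sources): it
// exercises the documented last-block semantics of `expand` and `deallocate`
// on a region backed by a caller-provided static buffer. The buffer name and
// sizes are arbitrary assumptions for the example.
@system unittest
{
    import std.typecons : Ternary;
    ubyte[4096] store;
    auto r = Region!()(store[]);
    auto a = r.allocate(32);
    assert(a.length == 32);
    // Expanding the most recent allocation succeeds while room remains.
    assert(r.expand(a, 16));
    assert(a.length == 48);
    // Deallocating the last block rewinds the bump pointer...
    assert(r.deallocate(a));
    // ...so the region reports itself empty again.
    assert(r.empty == Ternary.yes);
}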
/**
$(D InSituRegion) is a convenient region that carries its storage within itself
(in the form of a statically-sized array).

The first template argument is the size of the region and the second is the
needed alignment. Depending on the alignment requested and platform details,
the actual available storage may be smaller than the compile-time parameter. To
make sure that at least $(D n) bytes are available in the region, use
$(D InSituRegion!(n + a - 1, a)).

Given that the most frequent use of `InSituRegion` is as a stack allocator, it
allocates starting at the end on systems where stack grows downwards, such that
hot memory is used first.
*/
struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
{
    import std.algorithm.comparison : max;
    import std.conv : to;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size >= minAlign);

    version (X86) enum growDownwards = Yes.growDownwards;
    else version (X86_64) enum growDownwards = Yes.growDownwards;
    else version (ARM) enum growDownwards = Yes.growDownwards;
    else version (AArch64) enum growDownwards = Yes.growDownwards;
    else version (HPPA) enum growDownwards = No.growDownwards;
    else version (PPC) enum growDownwards = Yes.growDownwards;
    else version (PPC64) enum growDownwards = Yes.growDownwards;
    else version (MIPS32) enum growDownwards = Yes.growDownwards;
    else version (MIPS64) enum growDownwards = Yes.growDownwards;
    else version (RISCV32) enum growDownwards = Yes.growDownwards;
    else version (RISCV64) enum growDownwards = Yes.growDownwards;
    else version (SPARC) enum growDownwards = Yes.growDownwards;
    else version (SPARC64) enum growDownwards = Yes.growDownwards;
    else version (SystemZ) enum growDownwards = Yes.growDownwards;
    else static assert(0, "Dunno how the stack grows on this architecture.");

    @disable this(this);

    // state {
    private Region!(NullAllocator, minAlign, growDownwards) _impl;
    union
    {
        private ubyte[size] _store = void;
        private double _forAlignmentOnly1 = void;
    }
    // }
    /**
    An alias for $(D minAlign), which must be a valid alignment (nonzero power
    of 2). The start of the region and all allocation requests will be rounded
    up to a multiple of the alignment.

    ----
    InSituRegion!(4096) a1;
    assert(a1.alignment == platformAlignment);
    InSituRegion!(4096, 64) a2;
    assert(a2.alignment == 64);
    ----
    */
    alias alignment = minAlign;

    private void lazyInit()
    {
        assert(!_impl._current);
        _impl = typeof(_impl)(_store);
        assert(_impl._current.alignedAt(alignment));
    }
    /**
    Allocates $(D n) bytes and returns them, or $(D null) if the region cannot
    accommodate the request. For efficiency reasons, if $(D n == 0) the
    function returns an empty non-null slice.
    */
    void[] allocate(size_t n)
    {
        // Fast path
    entry:
        auto result = _impl.allocate(n);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    As above, but the memory allocated is aligned at $(D a) bytes.
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        // Fast path
    entry:
        auto result = _impl.alignedAllocate(n, a);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }
    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. another allocation has occurred since) it
    does nothing.

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    bool deallocate(void[] b)
    {
        if (!_impl._current) return b is null;
        return _impl.deallocate(b);
    }

    /**
    Returns `Ternary.yes` if `b` is the result of a previous allocation,
    `Ternary.no` otherwise.
    */
    Ternary owns(void[] b)
    {
        if (!_impl._current) return Ternary.no;
        return _impl.owns(b);
    }
    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated.
    */
    static if (hasMember!(typeof(_impl), "expand"))
    bool expand(ref void[] b, size_t delta)
    {
        if (!_impl._current) lazyInit;
        return _impl.expand(b, delta);
    }

    /**
    Deallocates all memory allocated with this allocator.
    */
    bool deallocateAll()
    {
        // We don't care to lazily init the region
        return _impl.deallocateAll;
    }

    /**
    Allocates all memory available with this allocator.
    */
    void[] allocateAll()
    {
        if (!_impl._current) lazyInit;
        return _impl.allocateAll;
    }

    /**
    Nonstandard function that returns the bytes available for allocation.
    */
    size_t available()
    {
        if (!_impl._current) lazyInit;
        return _impl.available;
    }
}
@system unittest
{
    // 128KB region, allocated to x86's cache line
    InSituRegion!(128 * 1024, 16) r1;
    auto a1 = r1.allocate(101);
    assert(a1.length == 101);

    // 128KB region, with fallback to the garbage collector.
    import std.experimental.allocator.building_blocks.fallback_allocator
        : FallbackAllocator;
    import std.experimental.allocator.building_blocks.free_list
        : FreeList;
    import std.experimental.allocator.building_blocks.bitmapped_block
        : BitmappedBlock;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
    const a2 = r2.allocate(102);
    assert(a2.length == 102);

    // Reap with GC fallback.
    InSituRegion!(128 * 1024, 8) tmp3;
    FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
    r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
    const a3 = r3.allocate(103);
    assert(a3.length == 103);

    // Reap/GC with a freelist for small objects up to 16 bytes.
    InSituRegion!(128 * 1024, 64) tmp4;
    FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
    r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
    const a4 = r4.allocate(104);
    assert(a4.length == 104);
}

@system unittest
{
    InSituRegion!(4096, 1) r1;
    auto a = r1.allocate(2001);
    assert(a.length == 2001);
    import std.conv : text;
    assert(r1.available == 2095, text(r1.available));

    InSituRegion!(65_536, 1024*4) r2;
    assert(r2.available <= 65_536);
    a = r2.allocate(2001);
    assert(a.length == 2001);
}
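
// A minimal illustrative sketch (not part of the upstream sources):
// `InSituRegion` initializes lazily, so `owns` answers `Ternary.no` until the
// first allocation touches the in-situ store. The sizes used are arbitrary
// assumptions for the example.
@system unittest
{
    import std.typecons : Ternary;
    InSituRegion!(4096) r;
    assert(r.owns(null) == Ternary.no); // not initialized yet
    auto b = r.allocate(42);
    assert(b.length == 42);
    assert(r.owns(b) == Ternary.yes);
    // Only the most recent allocation can be given back.
    assert(r.deallocate(b));
}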
version (CRuntime_Musl)
{
    // sbrk and brk are disabled in Musl:
    // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
    // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
}
version (DragonFlyBSD)
{
    // sbrk is deprecated in favor of mmap (we could implement a mmap + MAP_NORESERVE + PROT_NONE version)
    // brk has been removed
    // https://www.dragonflydigest.com/2019/02/22/22586.html
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/dc676eaefa61b0f47bbea1c53eab86fd5ccd78c6
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/4b5665564ef37dc939a3a9ffbafaab9894c18885
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/8618d94a0e2ff8303ad93c123a3fa598c26a116e
}
else
{
    private extern(C) void* sbrk(long) nothrow @nogc;
    private extern(C) int brk(shared void*) nothrow @nogc;
}
/**
Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
for Posix systems. Due to the fact that $(D sbrk) is not thread-safe
$(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
$(D SbrkRegion) uses a mutex internally. This implies that uncontrolled calls
to $(D brk) and $(D sbrk) may affect the workings of $(D SbrkRegion)
adversely.
*/
version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
{
    import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
        pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
        PTHREAD_MUTEX_INITIALIZER;

    private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size_t.sizeof == (void*).sizeof);
    private shared void* _brkInitial, _brkCurrent;

    /**
    Instance shared by all callers.
    */
    static shared SbrkRegion instance;

    /**
    Standard allocator primitives.
    */
    enum uint alignment = minAlign;
    /// Ditto
    void[] allocate(size_t bytes) shared
    {
        static if (minAlign > 1)
            const rounded = bytes.roundUpToMultipleOf(alignment);
        else
            alias rounded = bytes;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        // Assume sbrk returns the old break. Most online documentation confirms
        // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
        // which claims the returned value is not portable.
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        if (!_brkInitial)
        {
            _brkInitial = cast(shared) p;
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[0 .. bytes];
    }
    /// Ditto
    void[] alignedAllocate(size_t bytes, uint a) shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (!_brkInitial)
        {
            // This is one extra call, but it'll happen only once.
            _brkInitial = cast(shared) sbrk(0);
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
            (_brkInitial != cast(void*) -1) || assert(0);
            _brkCurrent = _brkInitial;
        }
        immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
            cast(size_t) _brkCurrent, a) - _brkCurrent;
        // Still must make sure the total size is aligned to the allocator's
        // alignment.
        immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);

        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[delta .. delta + bytes];
    }
    /**
    The $(D expand) method may only succeed if the argument is the last block
    allocated. In that case, $(D expand) attempts to push the break pointer to
    the right.
    */
    bool expand(ref void[] b, size_t delta) shared
    {
        if (b is null) return delta == 0;
        assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + b.length) return false;
        // Great, can expand the last block
        static if (minAlign > 1)
            const rounded = delta.roundUpToMultipleOf(alignment);
        else
            alias rounded = delta; // was `bytes`, which is not in scope here
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return false;
        }
        _brkCurrent = cast(shared) (p + rounded);
        b = b.ptr[0 .. b.length + delta];
        return true;
    }
    /// Ditto
    Ternary owns(void[] b) shared
    {
        // No need to lock here.
        assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
        return Ternary(_brkInitial && b.ptr >= _brkInitial);
    }
    /**
    The $(D deallocate) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such. In addition the argument
    must be the last block allocated.
    */
    bool deallocate(void[] b) shared
    {
        static if (minAlign > 1)
            const rounded = b.length.roundUpToMultipleOf(alignment);
        else
            const rounded = b.length;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + rounded) return false;
        assert(b.ptr >= _brkInitial);
        if (sbrk(-rounded) == cast(void*) -1)
            return false;
        _brkCurrent = cast(shared) b.ptr;
        return true;
    }
    /**
    The $(D deallocateAll) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such.
    */
    bool deallocateAll() shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        return !_brkInitial || brk(_brkInitial) == 0;
    }

    /// Standard allocator API.
    Ternary empty()
    {
        // Also works when they're both null.
        return Ternary(_brkCurrent == _brkInitial);
    }
}
version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) @system nothrow @nogc unittest
{
    // Let's test the assumption that sbrk(n) returns the old address
    const p1 = sbrk(0);
    const p2 = sbrk(4096);
    assert(p1 == p2);
    const p3 = sbrk(0);
    assert(p3 == p2 + 4096);
    // Try to reset brk, but don't make a fuss if it doesn't work
    sbrk(-4096);
}
version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) @system nothrow @nogc unittest
{
    import std.typecons : Ternary;
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.alignedAllocate(2001, 4096);
    assert(a.length == 2001);
    auto b = alloc.allocate(2001);
    assert(b.length == 2001);
    assert(alloc.owns(a) == Ternary.yes);
    assert(alloc.owns(b) == Ternary.yes);
    // reducing the brk does not work on OSX
    version (Darwin) {} else
    {
        assert(alloc.deallocate(b));
        assert(alloc.deallocateAll);
    }
}
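
// A minimal illustrative sketch (not part of the upstream sources): `expand`
// pushes the break to the right, but only for the block that currently ends
// at the break, per the documentation above. Request sizes are multiples of
// the alignment so the block's end coincides with the break.
version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) @system nothrow @nogc unittest
{
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.allocate(96);
    assert(a.length == 96);
    auto b = alloc.allocate(96);
    assert(b.length == 96);
    // `a` no longer ends at the break, so in-place expansion must fail.
    assert(!alloc.expand(a, 8));
    // `b` ends at the break; expansion simply moves the break further right.
    assert(alloc.expand(b, 8));
    assert(b.length == 104);
}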