///
module std.experimental.allocator.building_blocks.region;

import std.experimental.allocator.building_blocks.null_allocator;
import std.experimental.allocator.common;
import std.typecons : Flag, Yes, No;

/**
A $(D Region) allocator allocates memory straight from one contiguous chunk.
There is no deallocation, and once the region is full, allocation requests
return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with
more sophisticated allocators; or (b) for batch-style very fast allocations
that deallocate everything at once.

The region only stores three pointers, corresponding to the current position in
the store and the limits. One allocation entails rounding up the allocation
size for alignment purposes, bumping the current pointer, and comparing it
against the limit.

If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
deallocates the chunk of memory during destruction.

The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
control alignment externally.

*/
struct Region(ParentAllocator = NullAllocator,
    uint minAlign = platformAlignment,
    Flag!"growDownwards" growDownwards = No.growDownwards)
{
    static assert(minAlign.isGoodStaticAlignment);
    static assert(ParentAllocator.alignment >= minAlign);

    import std.traits : hasMember;
    import std.typecons : Ternary;

    // state
    /**
    The _parent allocator. Depending on whether $(D ParentAllocator) holds state
    or not, this is a member variable or an alias for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator)
    {
        ParentAllocator parent;
    }
    else
    {
        alias parent = ParentAllocator.instance;
    }
    private void* _current, _begin, _end;

    /**
    Constructs a region backed by a user-provided store. Assumes $(D store) is
    aligned at $(D minAlign). Also assumes the memory was allocated with $(D
    ParentAllocator) (if different from $(D NullAllocator)).

    Params:
    store = User-provided store backing up the region. $(D store) must be
    aligned at $(D minAlign) (enforced with $(D assert)). If $(D
    ParentAllocator) is different from $(D NullAllocator), memory is assumed to
    have been allocated with $(D ParentAllocator).
    n = Bytes to allocate using $(D ParentAllocator). This constructor is only
    defined if $(D ParentAllocator) is different from $(D NullAllocator). If
    $(D parent.allocate(n)) returns $(D null), the region will be initialized
    as empty (correctly initialized but unable to allocate).
    */
    this(ubyte[] store)
    {
        store = cast(ubyte[])(store.roundUpToAlignment(alignment));
        store = store[0 .. $.roundDownToAlignment(alignment)];
        assert(store.ptr.alignedAt(minAlign));
        assert(store.length % minAlign == 0);
        _begin = store.ptr;
        _end = store.ptr + store.length;
        static if (growDownwards)
            _current = _end;
        else
            _current = store.ptr;
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator))
    this(size_t n)
    {
        this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
    }

    /*
    TODO: The postblit of $(D Region) should be disabled because such objects
    should not be copied around naively.
    */

    /**
    If `ParentAllocator` is not `NullAllocator` and defines `deallocate`,
    the region defines a destructor that uses `ParentAllocator.deallocate`
    to free the memory chunk.
    */
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(_begin[0 .. _end - _begin]);
    }


    /**
    Alignment offered.
    */
    alias alignment = minAlign;

    /**
    Allocates $(D n) bytes of memory. The shortest path involves an alignment
    adjustment (if $(D alignment > 1)), an increment, and a comparison.

    Params:
    n = number of bytes to allocate

    Returns:
    A properly-aligned buffer of size $(D n) or $(D null) if request could not
    be satisfied.
    */
    void[] allocate(size_t n)
    {
        static if (growDownwards)
        {
            if (available < n) return null;
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            assert(available >= rounded);
            auto result = (_current - rounded)[0 .. n];
            assert(result.ptr >= _begin);
            _current = result.ptr;
            assert(owns(result) == Ternary.yes);
            return result;
        }
        else
        {
            auto result = _current[0 .. n];
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            _current += rounded;
            if (_current <= _end) return result;
            // Slow path, backtrack
            _current -= rounded;
            return null;
        }
    }

    /**
    Allocates $(D n) bytes of memory aligned at alignment $(D a).

    Params:
    n = number of bytes to allocate
    a = alignment for the allocated block

    Returns:
    Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        import std.math : isPowerOf2;
        assert(a.isPowerOf2);
        static if (growDownwards)
        {
            const available = _current - _begin;
            if (available < n) return null;
            auto result = (_current - n).alignDownTo(a)[0 .. n];
            if (result.ptr >= _begin)
            {
                _current = result.ptr;
                return result;
            }
        }
        else
        {
            // Just bump the pointer to the next good allocation
            auto save = _current;
            _current = _current.alignUpTo(a);
            auto result = allocate(n);
            if (result.ptr)
            {
                assert(result.length == n);
                return result;
            }
            // Failed, rollback
            _current = save;
        }
        return null;
    }

    /// Allocates and returns all memory available to this region.
    void[] allocateAll()
    {
        static if (growDownwards)
        {
            auto result = _begin[0 .. available];
            _current = _begin;
        }
        else
        {
            auto result = _current[0 .. available];
            _current = _end;
        }
        return result;
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated. Defined only if `growDownwards` is
    `No.growDownwards`.
    */
    static if (growDownwards == No.growDownwards)
    bool expand(ref void[] b, size_t delta)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        assert(b.ptr + b.length <= _current || b.ptr is null);
        if (!b.ptr) return delta == 0;
        auto newLength = b.length + delta;
        if (_current < b.ptr + b.length + alignment)
        {
            // This was the last allocation! Allocate some more and we're done.
            if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
                || allocate(delta).length == delta)
            {
                b = b.ptr[0 .. newLength];
                assert(_current < b.ptr + b.length + alignment);
                return true;
            }
        }
        return false;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. another allocation has occurred since) it
    does nothing and returns $(D false).

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    bool deallocate(void[] b)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        static if (growDownwards)
        {
            if (b.ptr == _current)
            {
                _current += this.goodAllocSize(b.length);
                return true;
            }
        }
        else
        {
            if (b.ptr + this.goodAllocSize(b.length) == _current)
            {
                assert(b.ptr !is null || _current is null);
                _current = b.ptr;
                return true;
            }
        }
        return false;
    }

    /**
    Deallocates all memory allocated by this region, which can be subsequently
    reused for new allocations.
    */
    bool deallocateAll()
    {
        static if (growDownwards)
        {
            _current = _end;
        }
        else
        {
            _current = _begin;
        }
        return true;
    }

    /**
    Queries whether $(D b) has been allocated with this region.

    Params:
    b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
    returns $(D Ternary.no)).

    Returns:
    $(D Ternary.yes) if $(D b) has been allocated with this region,
    $(D Ternary.no) otherwise.
    */
    Ternary owns(void[] b) const
    {
        return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
    }

    /**
    Returns `Ternary.yes` if no memory has been allocated in this region,
    `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
    */
    Ternary empty() const
    {
        // A downward-growing region starts allocating from _end.
        static if (growDownwards)
            return Ternary(_current == _end);
        else
            return Ternary(_current == _begin);
    }

    /// Nonstandard property that returns bytes available for allocation.
    size_t available() const
    {
        static if (growDownwards)
        {
            return _current - _begin;
        }
        else
        {
            return _end - _current;
        }
    }
}

///
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a scalable list of regions. Each gets at least 1MB at a time by
    // using malloc.
    auto batchAllocator = AllocatorList!(
        (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
    )();
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    // This will cause a second allocation
    b = batchAllocator.allocate(2 * 1024 * 1024);
    assert(b.length == 2 * 1024 * 1024);
    // Destructor will free the memory
}

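// A minimal sketch (added for illustration, not part of the original module):
// as documented above, alignedAllocate bumps the current pointer to the
// requested alignment before carving out the block.
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    // 64 KB region backed by malloc, default (upward-growing) layout
    auto reg = Region!(Mallocator)(1024 * 64);
    auto a = reg.alignedAllocate(101, 4096);
    assert(a.length == 101);
    assert(cast(size_t) a.ptr % 4096 == 0);
    // Destructor will free the memory
}
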
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a 64 KB region allocated with malloc
    auto reg = Region!(Mallocator, Mallocator.alignment,
        Yes.growDownwards)(1024 * 64);
    const b = reg.allocate(101);
    assert(b.length == 101);
    // Destructor will free the memory
}

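// A minimal usage sketch (added for illustration, not part of the original
// module): a Region over a user-provided buffer, exercising the documented
// semantics that deallocate and expand only succeed on the last allocation.
@system unittest
{
    import std.typecons : Ternary;
    ubyte[1024] storage;
    auto reg = Region!()(storage[]);
    auto a = reg.allocate(11);
    assert(a.length == 11);
    assert(reg.owns(a) == Ternary.yes);
    auto b = reg.allocate(21);
    // Only the most recent allocation can be freed or expanded.
    assert(!reg.deallocate(a));
    assert(reg.deallocate(b));
    assert(reg.expand(a, 5));
    assert(a.length == 16);
    // Wholesale deallocation always succeeds.
    assert(reg.deallocateAll());
    assert(reg.empty == Ternary.yes);
}
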
/**

$(D InSituRegion) is a convenient region that carries its storage within itself
(in the form of a statically-sized array).

The first template argument is the size of the region and the second is the
needed alignment. Depending on the alignment requested and platform details,
the actual available storage may be smaller than the compile-time parameter. To
make sure that at least $(D n) bytes are available in the region, use
$(D InSituRegion!(n + a - 1, a)).

Given that the most frequent use of `InSituRegion` is as a stack allocator, it
allocates starting at the end on systems where the stack grows downwards, such
that hot memory is used first.

*/
struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
{
    import std.algorithm.comparison : max;
    import std.conv : to;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size >= minAlign);

    version (X86) enum growDownwards = Yes.growDownwards;
    else version (X86_64) enum growDownwards = Yes.growDownwards;
    else version (ARM) enum growDownwards = Yes.growDownwards;
    else version (AArch64) enum growDownwards = Yes.growDownwards;
    else version (PPC) enum growDownwards = Yes.growDownwards;
    else version (PPC64) enum growDownwards = Yes.growDownwards;
    else version (MIPS32) enum growDownwards = Yes.growDownwards;
    else version (MIPS64) enum growDownwards = Yes.growDownwards;
    else version (SPARC) enum growDownwards = Yes.growDownwards;
    else version (SystemZ) enum growDownwards = Yes.growDownwards;
    else static assert(0, "Dunno how the stack grows on this architecture.");

    @disable this(this);

    // state {
    private Region!(NullAllocator, minAlign, growDownwards) _impl;
    union
    {
        private ubyte[size] _store = void;
        private double _forAlignmentOnly1 = void;
    }
    // }

    /**
    An alias for $(D minAlign), which must be a valid alignment (nonzero power
    of 2). The start of the region and all allocation requests will be rounded
    up to a multiple of the alignment.

    ----
    InSituRegion!(4096) a1;
    assert(a1.alignment == platformAlignment);
    InSituRegion!(4096, 64) a2;
    assert(a2.alignment == 64);
    ----
    */
    alias alignment = minAlign;

    private void lazyInit()
    {
        assert(!_impl._current);
        _impl = typeof(_impl)(_store);
        assert(_impl._current.alignedAt(alignment));
    }

    /**
    Allocates $(D n) bytes and returns them, or $(D null) if the region cannot
    accommodate the request. For efficiency reasons, if $(D n == 0) the
    function returns an empty non-null slice.
    */
    void[] allocate(size_t n)
    {
        // Fast path
    entry:
        auto result = _impl.allocate(n);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    As above, but the memory allocated is aligned at $(D a) bytes.
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        // Fast path
    entry:
        auto result = _impl.alignedAllocate(n, a);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last call
    to $(D allocate); otherwise (i.e. another allocation has occurred since) it
    does nothing and returns $(D false).

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    bool deallocate(void[] b)
    {
        if (!_impl._current) return b is null;
        return _impl.deallocate(b);
    }

    /**
    Returns `Ternary.yes` if `b` is the result of a previous allocation,
    `Ternary.no` otherwise.
    */
    Ternary owns(void[] b)
    {
        if (!_impl._current) return Ternary.no;
        return _impl.owns(b);
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated.
    */
    static if (hasMember!(typeof(_impl), "expand"))
    bool expand(ref void[] b, size_t delta)
    {
        if (!_impl._current) lazyInit;
        return _impl.expand(b, delta);
    }

    /**
    Deallocates all memory allocated with this allocator.
    */
    bool deallocateAll()
    {
        // We don't care to lazily init the region
        return _impl.deallocateAll;
    }

    /**
    Allocates all memory available with this allocator.
    */
    void[] allocateAll()
    {
        if (!_impl._current) lazyInit;
        return _impl.allocateAll;
    }

    /**
    Nonstandard function that returns the bytes available for allocation.
    */
    size_t available()
    {
        if (!_impl._current) lazyInit;
        return _impl.available;
    }
}

///
@system unittest
{
    // 128KB region, aligned at 16 bytes
    InSituRegion!(128 * 1024, 16) r1;
    auto a1 = r1.allocate(101);
    assert(a1.length == 101);

    // 128KB region, with fallback to the garbage collector.
    import std.experimental.allocator.building_blocks.fallback_allocator
        : FallbackAllocator;
    import std.experimental.allocator.building_blocks.free_list
        : FreeList;
    import std.experimental.allocator.building_blocks.bitmapped_block
        : BitmappedBlock;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
    const a2 = r2.allocate(102);
    assert(a2.length == 102);

    // Reap with GC fallback.
    InSituRegion!(128 * 1024, 8) tmp3;
    FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
    r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
    const a3 = r3.allocate(103);
    assert(a3.length == 103);

    // Reap/GC with a freelist for small objects up to 16 bytes.
    InSituRegion!(128 * 1024, 64) tmp4;
    FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
    r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
    const a4 = r4.allocate(104);
    assert(a4.length == 104);
}

@system unittest
{
    InSituRegion!(4096, 1) r1;
    auto a = r1.allocate(2001);
    assert(a.length == 2001);
    import std.conv : text;
    assert(r1.available == 2095, text(r1.available));

    InSituRegion!(65_536, 1024*4) r2;
    assert(r2.available <= 65_536);
    a = r2.allocate(2001);
    assert(a.length == 2001);
}

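// A minimal sketch (added for illustration, not part of the original module):
// InSituRegion forwards the last-allocation-only deallocate semantics of the
// underlying Region, and deallocateAll makes the whole store reusable.
@system unittest
{
    import std.typecons : Ternary;
    InSituRegion!(4096) r;
    auto a = r.allocate(42);
    auto b = r.allocate(42);
    assert(r.owns(b) == Ternary.yes);
    // Only the most recent allocation can be returned to the region.
    assert(!r.deallocate(a));
    assert(r.deallocate(b));
    // Everything can be reclaimed wholesale.
    assert(r.deallocateAll());
    assert(r.allocate(4000).length == 4000);
}
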
private extern(C) void* sbrk(long);
private extern(C) int brk(shared void*);

/**

Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
for Posix systems. Because $(D sbrk) is not thread-safe
$(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
$(D SbrkRegion) uses a mutex internally. This implies that uncontrolled calls
to $(D brk) and $(D sbrk) may affect the workings of $(D SbrkRegion) adversely.

*/
version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
{
    import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
        pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
        PTHREAD_MUTEX_INITIALIZER;
    private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size_t.sizeof == (void*).sizeof);
    private shared void* _brkInitial, _brkCurrent;

    /**
    Instance shared by all callers.
    */
    static shared SbrkRegion instance;

    /**
    Standard allocator primitives.
    */
    enum uint alignment = minAlign;

    /// Ditto
    void[] allocate(size_t bytes) shared
    {
        static if (minAlign > 1)
            const rounded = bytes.roundUpToMultipleOf(alignment);
        else
            alias rounded = bytes;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        // Assume sbrk returns the old break. Most online documentation confirms
        // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
        // which claims the returned value is not portable.
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        if (!_brkInitial)
        {
            _brkInitial = cast(shared) p;
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[0 .. bytes];
    }

    /// Ditto
    void[] alignedAllocate(size_t bytes, uint a) shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (!_brkInitial)
        {
            // This is one extra call, but it'll happen only once.
            _brkInitial = cast(shared) sbrk(0);
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
            (_brkInitial != cast(void*) -1) || assert(0);
            _brkCurrent = _brkInitial;
        }
        immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
            cast(size_t) _brkCurrent, a) - _brkCurrent;
        // Still must make sure the total size is aligned to the allocator's
        // alignment.
        immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);

        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[delta .. delta + bytes];
    }

    /**

    The $(D expand) method may only succeed if the argument is the last block
    allocated. In that case, $(D expand) attempts to push the break pointer to
    the right.

    */
    bool expand(ref void[] b, size_t delta) shared
    {
        if (b is null) return delta == 0;
        assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + b.length) return false;
        // Great, can expand the last block
        static if (minAlign > 1)
            const rounded = delta.roundUpToMultipleOf(alignment);
        else
            alias rounded = delta;
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return false;
        }
        _brkCurrent = cast(shared) (p + rounded);
        b = b.ptr[0 .. b.length + delta];
        return true;
    }

    /// Ditto
    Ternary owns(void[] b) shared
    {
        // No need to lock here.
        assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
        return Ternary(_brkInitial && b.ptr >= _brkInitial);
    }

    /**

    The $(D deallocate) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such. In addition the argument
    must be the last block allocated.

    */
    bool deallocate(void[] b) shared
    {
        static if (minAlign > 1)
            const rounded = b.length.roundUpToMultipleOf(alignment);
        else
            const rounded = b.length;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + rounded) return false;
        assert(b.ptr >= _brkInitial);
        if (sbrk(-rounded) == cast(void*) -1)
            return false;
        _brkCurrent = cast(shared) b.ptr;
        return true;
    }

    /**
    The $(D deallocateAll) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such.
    */
    bool deallocateAll() shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        return !_brkInitial || brk(_brkInitial) == 0;
    }

    /// Standard allocator API.
    Ternary empty()
    {
        // Also works when they're both null.
        return Ternary(_brkCurrent == _brkInitial);
    }
}

version (Posix) @system unittest
{
    // Let's test the assumption that sbrk(n) returns the old address
    const p1 = sbrk(0);
    const p2 = sbrk(4096);
    assert(p1 == p2);
    const p3 = sbrk(0);
    assert(p3 == p2 + 4096);
    // Try to reset brk, but don't make a fuss if it doesn't work
    sbrk(-4096);
}

version (Posix) @system unittest
{
    import std.typecons : Ternary;
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.alignedAllocate(2001, 4096);
    assert(a.length == 2001);
    auto b = alloc.allocate(2001);
    assert(b.length == 2001);
    assert(alloc.owns(a) == Ternary.yes);
    assert(alloc.owns(b) == Ternary.yes);
    // reducing the brk does not work on OSX
    version (OSX) {} else
    {
        assert(alloc.deallocate(b));
        assert(alloc.deallocateAll);
    }
}
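
// A minimal sketch (added for illustration, not part of the original module):
// SbrkRegion.expand can only grow the most recently allocated block, by
// pushing the program break further to the right.
version (Posix) @system unittest
{
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.allocate(120);
    assert(a.length == 120);
    auto b = alloc.allocate(120);
    assert(b.length == 120);
    // Only the last block borders the break, so only it can be expanded.
    assert(!alloc.expand(a, 16));
    assert(alloc.expand(b, 16));
    assert(b.length == 136);
}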