1 ///
2 module std.experimental.allocator.building_blocks.region;
3
4 import std.experimental.allocator.building_blocks.null_allocator;
5 import std.experimental.allocator.common;
6 import std.typecons : Flag, Yes, No;
7
8 /**
9 A $(D Region) allocator allocates memory straight from one contiguous chunk.
10 There is no deallocation, and once the region is full, allocation requests
11 return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with
12 more sophisticated allocators; or (b) for very fast, batch-style allocations
13 that are deallocated all at once.
14
15 The region only stores three pointers, corresponding to the current position in
16 the store and the limits. One allocation entails rounding up the allocation
17 size for alignment purposes, bumping the current pointer, and comparing it
18 against the limit.
19
20 If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
21 deallocates the chunk of memory during destruction.
22
23 The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
24 sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
25 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
26 control alignment externally.
27
28 */
29 struct Region(ParentAllocator = NullAllocator,
30 uint minAlign = platformAlignment,
31 Flag!"growDownwards" growDownwards = No.growDownwards)
32 {
33 static assert(minAlign.isGoodStaticAlignment);
34 static assert(ParentAllocator.alignment >= minAlign);
35
36 import std.traits : hasMember;
37 import std.typecons : Ternary;
38
39 // state
40 /**
41 The _parent allocator. Depending on whether $(D ParentAllocator) holds state
42 or not, this is a member variable or an alias for
43 `ParentAllocator.instance`.
44 */
45 static if (stateSize!ParentAllocator)
46 {
47 ParentAllocator parent;
48 }
49 else
50 {
51 alias parent = ParentAllocator.instance;
52 }
53 private void* _current, _begin, _end;
54
55 /**
56 Constructs a region backed by a user-provided store. Assumes $(D store) is
57 aligned at $(D minAlign). Also assumes the memory was allocated with $(D
58 ParentAllocator) (if different from $(D NullAllocator)).
59
60 Params:
61 store = User-provided store backing up the region. $(D store) must be
62 aligned at $(D minAlign) (enforced with $(D assert)). If $(D
63 ParentAllocator) is different from $(D NullAllocator), memory is assumed to
64 have been allocated with $(D ParentAllocator).
65 n = Bytes to allocate using $(D ParentAllocator). This constructor is only
66 defined if $(D ParentAllocator) is different from $(D NullAllocator). If
67 $(D parent.allocate(n)) returns $(D null), the region will be initialized
68 as empty (correctly initialized but unable to allocate).
69 */
70 this(ubyte[] store)
71 {
72 store = cast(ubyte[])(store.roundUpToAlignment(alignment));
73 store = store[0 .. $.roundDownToAlignment(alignment)];
74 assert(store.ptr.alignedAt(minAlign));
75 assert(store.length % minAlign == 0);
76 _begin = store.ptr;
77 _end = store.ptr + store.length;
78 static if (growDownwards)
79 _current = _end;
80 else
81 _current = store.ptr;
82 }
83
84 /// Ditto
85 static if (!is(ParentAllocator == NullAllocator))
86 this(size_t n)
87 {
88 this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
89 }
90
91 /*
92 TODO: The postblit of $(D Region) should be disabled because such objects
93 should not be copied around naively.
94 */
95
96 /**
97 If `ParentAllocator` is not `NullAllocator` and defines `deallocate`, the region defines
98 a destructor that uses `ParentAllocator.deallocate` to free the memory chunk.
99 */
100 static if (!is(ParentAllocator == NullAllocator)
101 && hasMember!(ParentAllocator, "deallocate"))
102 ~this()
103 {
104 parent.deallocate(_begin[0 .. _end - _begin]);
105 }
106
107
108 /**
109 Alignment offered.
110 */
111 alias alignment = minAlign;
112
113 /**
114 Allocates $(D n) bytes of memory. The shortest path involves an alignment
115 adjustment (if $(D alignment > 1)), an increment, and a comparison.
116
117 Params:
118 n = number of bytes to allocate
119
120 Returns:
121 A properly-aligned buffer of size $(D n) or $(D null) if request could not
122 be satisfied.
123 */
124 void[] allocate(size_t n)
125 {
126 static if (growDownwards)
127 {
128 static if (minAlign > 1)
129 const rounded = n.roundUpToAlignment(alignment);
130 else
131 alias rounded = n;
132 // Round before checking so that the rounded request also fits.
133 if (available < rounded) return null;
134 auto result = (_current - rounded)[0 .. n];
135 assert(result.ptr >= _begin);
136 _current = result.ptr;
137 assert(owns(result) == Ternary.yes);
138 return result;
139 }
140 else
141 {
142 auto result = _current[0 .. n];
143 static if (minAlign > 1)
144 const rounded = n.roundUpToAlignment(alignment);
145 else
146 alias rounded = n;
147 _current += rounded;
148 if (_current <= _end) return result;
149 // Slow path, backtrack
150 _current -= rounded;
151 return null;
152 }
153 }
154
155 /**
156 Allocates $(D n) bytes of memory aligned at alignment $(D a).
157
158 Params:
159 n = number of bytes to allocate
160 a = alignment for the allocated block
161
162 Returns:
163 Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
164 */
165 void[] alignedAllocate(size_t n, uint a)
166 {
167 import std.math : isPowerOf2;
168 assert(a.isPowerOf2);
169 static if (growDownwards)
170 {
171 const available = _current - _begin;
172 if (available < n) return null;
173 auto result = (_current - n).alignDownTo(a)[0 .. n];
174 if (result.ptr >= _begin)
175 {
176 _current = result.ptr;
177 return result;
178 }
179 }
180 else
181 {
182 // Just bump the pointer to the next good allocation
183 auto save = _current;
184 _current = _current.alignUpTo(a);
185 auto result = allocate(n);
186 if (result.ptr)
187 {
188 assert(result.length == n);
189 return result;
190 }
191 // Failed, rollback
192 _current = save;
193 }
194 return null;
195 }
196
197 /// Allocates and returns all memory available to this region.
198 void[] allocateAll()
199 {
200 static if (growDownwards)
201 {
202 auto result = _begin[0 .. available];
203 _current = _begin;
204 }
205 else
206 {
207 auto result = _current[0 .. available];
208 _current = _end;
209 }
210 return result;
211 }
212
213 /**
214 Expands an allocated block in place. Expansion will succeed only if the
215 block is the last allocated. Defined only if `growDownwards` is
216 `No.growDownwards`.
217 */
218 static if (growDownwards == No.growDownwards)
219 bool expand(ref void[] b, size_t delta)
220 {
221 assert(owns(b) == Ternary.yes || b.ptr is null);
222 assert(b.ptr + b.length <= _current || b.ptr is null);
223 if (!b.ptr) return delta == 0;
224 auto newLength = b.length + delta;
225 if (_current < b.ptr + b.length + alignment)
226 {
227 // This was the last allocation! Allocate some more and we're done.
228 if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
229 || allocate(delta).length == delta)
230 {
231 b = b.ptr[0 .. newLength];
232 assert(_current < b.ptr + b.length + alignment);
233 return true;
234 }
235 }
236 return false;
237 }
238
239 /**
240 Deallocates $(D b). This works only if $(D b) was obtained as the result of the
241 last call to $(D allocate); otherwise (i.e. if another allocation has occurred
242 since) it does nothing and returns $(D false). Because of these restricted
243 semantics, $(D Region) is best suited for batch-style allocations that are
244 released all at once with $(D deallocateAll).
245
246 Params:
247 b = Block previously obtained by a call to $(D allocate) against this
248 allocator ($(D null) is allowed).
249 */
250 bool deallocate(void[] b)
251 {
252 assert(owns(b) == Ternary.yes || b.ptr is null);
253 static if (growDownwards)
254 {
255 if (b.ptr == _current)
256 {
257 _current += this.goodAllocSize(b.length);
258 return true;
259 }
260 }
261 else
262 {
263 if (b.ptr + this.goodAllocSize(b.length) == _current)
264 {
265 assert(b.ptr !is null || _current is null);
266 _current = b.ptr;
267 return true;
268 }
269 }
270 return false;
271 }
272
273 /**
274 Deallocates all memory allocated by this region, which can be subsequently
275 reused for new allocations.
276 */
277 bool deallocateAll()
278 {
279 static if (growDownwards)
280 {
281 _current = _end;
282 }
283 else
284 {
285 _current = _begin;
286 }
287 return true;
288 }
289
290 /**
291 Queries whether $(D b) has been allocated with this region.
292
293 Params:
294 b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
295 returns $(D Ternary.no)).
296
297 Returns:
298 $(D Ternary.yes) if $(D b) has been allocated with this region, $(D Ternary.no)
299 otherwise.
300 */
301 Ternary owns(void[] b) const
302 {
303 return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
304 }
305
306 /**
307 Returns `Ternary.yes` if no memory has been allocated in this region,
308 `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
309 */
310 Ternary empty() const
311 {
312 return Ternary(_current == (growDownwards ? _end : _begin));
313 }
314
315 /// Nonstandard property that returns bytes available for allocation.
316 size_t available() const
317 {
318 static if (growDownwards)
319 {
320 return _current - _begin;
321 }
322 else
323 {
324 return _end - _current;
325 }
326 }
327 }
328
329 ///
330 @system unittest
331 {
332 import std.algorithm.comparison : max;
333 import std.experimental.allocator.building_blocks.allocator_list
334 : AllocatorList;
335 import std.experimental.allocator.mallocator : Mallocator;
336 // Create a scalable list of regions. Each gets at least 1MB at a time by
337 // using malloc.
338 auto batchAllocator = AllocatorList!(
339 (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
340 )();
341 auto b = batchAllocator.allocate(101);
342 assert(b.length == 101);
343 // This will cause a second allocation
344 b = batchAllocator.allocate(2 * 1024 * 1024);
345 assert(b.length == 2 * 1024 * 1024);
346 // Destructor will free the memory
347 }
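
// Illustrative example (a minimal sketch using only the documented API above;
// not part of the upstream sources): a Region can also be carved out of a
// buffer the caller already owns via the ubyte[] constructor, and
// alignedAllocate returns blocks aligned at the requested power of two.
@system unittest
{
    ubyte[1024] storage;
    auto reg = Region!(NullAllocator)(storage[]);
    // With the default minAlign (platformAlignment), requests are rounded up,
    // so `available` shrinks by the rounded size, not the requested size.
    auto a = reg.allocate(11);
    assert(a.length == 11);
    auto b = reg.alignedAllocate(100, 64);
    assert(b.length == 100);
    assert(cast(size_t) b.ptr % 64 == 0);
}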
348
349 @system unittest
350 {
351 import std.experimental.allocator.mallocator : Mallocator;
352 // Create a 64 KB region allocated with malloc
353 auto reg = Region!(Mallocator, Mallocator.alignment,
354 Yes.growDownwards)(1024 * 64);
355 const b = reg.allocate(101);
356 assert(b.length == 101);
357 // Destructor will free the memory
358 }
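
// Illustrative example (a minimal sketch, not part of the upstream sources):
// deallocate and expand act only on the most recent allocation, and
// deallocateAll rewinds the whole region for reuse.
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    auto reg = Region!(Mallocator)(1024 * 64);
    auto a = reg.allocate(32);
    auto b = reg.allocate(32);
    // Only the last allocation can be freed or grown in place.
    assert(!reg.deallocate(a));
    assert(reg.deallocate(b));
    assert(reg.expand(a, 16));
    assert(a.length == 48);
    assert(reg.deallocateAll());
    assert(reg.empty == Ternary.yes);
    // Destructor will free the memory
}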
359
360 /**
361
362 $(D InSituRegion) is a convenient region that carries its storage within itself
363 (in the form of a statically-sized array).
364
365 The first template argument is the size of the region and the second is the
366 needed alignment. Depending on the alignment requested and platform details,
367 the actual available storage may be smaller than the compile-time parameter. To
368 make sure that at least $(D n) bytes are available in the region, use
369 $(D InSituRegion!(n + a - 1, a)).
370
371 Given that the most frequent use of `InSituRegion` is as a stack allocator, it
372 allocates starting at the end on systems where the stack grows downwards, so that
373 hot memory is used first.
374
375 */
376 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
377 {
378 import std.algorithm.comparison : max;
379 import std.conv : to;
380 import std.traits : hasMember;
381 import std.typecons : Ternary;
382
383 static assert(minAlign.isGoodStaticAlignment);
384 static assert(size >= minAlign);
385
386 version (X86) enum growDownwards = Yes.growDownwards;
387 else version (X86_64) enum growDownwards = Yes.growDownwards;
388 else version (ARM) enum growDownwards = Yes.growDownwards;
389 else version (AArch64) enum growDownwards = Yes.growDownwards;
390 else version (HPPA) enum growDownwards = No.growDownwards;
391 else version (PPC) enum growDownwards = Yes.growDownwards;
392 else version (PPC64) enum growDownwards = Yes.growDownwards;
393 else version (MIPS32) enum growDownwards = Yes.growDownwards;
394 else version (MIPS64) enum growDownwards = Yes.growDownwards;
395 else version (RISCV32) enum growDownwards = Yes.growDownwards;
396 else version (RISCV64) enum growDownwards = Yes.growDownwards;
397 else version (SPARC) enum growDownwards = Yes.growDownwards;
398 else version (SPARC64) enum growDownwards = Yes.growDownwards;
399 else version (SystemZ) enum growDownwards = Yes.growDownwards;
400 else static assert(0, "Don't know how the stack grows on this architecture.");
401
402 @disable this(this);
403
404 // state {
405 private Region!(NullAllocator, minAlign, growDownwards) _impl;
406 union
407 {
408 private ubyte[size] _store = void;
409 private double _forAlignmentOnly1 = void;
410 }
411 // }
412
413 /**
414 An alias for $(D minAlign), which must be a valid alignment (nonzero power
415 of 2). The start of the region and all allocation requests will be rounded
416 up to a multiple of the alignment.
417
418 ----
419 InSituRegion!(4096) a1;
420 assert(a1.alignment == platformAlignment);
421 InSituRegion!(4096, 64) a2;
422 assert(a2.alignment == 64);
423 ----
424 */
425 alias alignment = minAlign;
426
427 private void lazyInit()
428 {
429 assert(!_impl._current);
430 _impl = typeof(_impl)(_store);
431 assert(_impl._current.alignedAt(alignment));
432 }
433
434 /**
435 Allocates $(D n) bytes and returns them, or $(D null) if the region cannot
436 accommodate the request. For efficiency reasons, if $(D n == 0) the
437 function returns an empty non-null slice.
438 */
439 void[] allocate(size_t n)
440 {
441 // Fast path
442 entry:
443 auto result = _impl.allocate(n);
444 if (result.length == n) return result;
445 // Slow path
446 if (_impl._current) return null; // no more room
447 lazyInit;
448 assert(_impl._current);
449 goto entry;
450 }
451
452 /**
453 As above, but the memory allocated is aligned at $(D a) bytes.
454 */
455 void[] alignedAllocate(size_t n, uint a)
456 {
457 // Fast path
458 entry:
459 auto result = _impl.alignedAllocate(n, a);
460 if (result.length == n) return result;
461 // Slow path
462 if (_impl._current) return null; // no more room
463 lazyInit;
464 assert(_impl._current);
465 goto entry;
466 }
467
468 /**
469 Deallocates $(D b). This works only if $(D b) was obtained as the result of the
470 last call to $(D allocate); otherwise (i.e. if another allocation has occurred
471 since) it does nothing and returns $(D false). If the region has not yet been
472 initialized (i.e. nothing has been allocated), the call succeeds only for
473 $(D b is null).
474
475 Params:
476 b = Block previously obtained by a call to $(D allocate) against this
477 allocator ($(D null) is allowed).
478 */
479 bool deallocate(void[] b)
480 {
481 if (!_impl._current) return b is null;
482 return _impl.deallocate(b);
483 }
484
485 /**
486 Returns `Ternary.yes` if `b` is the result of a previous allocation,
487 `Ternary.no` otherwise.
488 */
489 Ternary owns(void[] b)
490 {
491 if (!_impl._current) return Ternary.no;
492 return _impl.owns(b);
493 }
494
495 /**
496 Expands an allocated block in place. Expansion will succeed only if the
497 block is the last allocated.
498 */
499 static if (hasMember!(typeof(_impl), "expand"))
500 bool expand(ref void[] b, size_t delta)
501 {
502 if (!_impl._current) lazyInit;
503 return _impl.expand(b, delta);
504 }
505
506 /**
507 Deallocates all memory allocated with this allocator.
508 */
509 bool deallocateAll()
510 {
511 // We don't care to lazily init the region
512 return _impl.deallocateAll;
513 }
514
515 /**
516 Allocates all memory available with this allocator.
517 */
518 void[] allocateAll()
519 {
520 if (!_impl._current) lazyInit;
521 return _impl.allocateAll;
522 }
523
524 /**
525 Nonstandard function that returns the bytes available for allocation.
526 */
527 size_t available()
528 {
529 if (!_impl._current) lazyInit;
530 return _impl.available;
531 }
532 }
533
534 ///
535 @system unittest
536 {
537 // 128KB region, aligned at 16 bytes
538 InSituRegion!(128 * 1024, 16) r1;
539 auto a1 = r1.allocate(101);
540 assert(a1.length == 101);
541
542 // 128KB region, with fallback to the garbage collector.
543 import std.experimental.allocator.building_blocks.fallback_allocator
544 : FallbackAllocator;
545 import std.experimental.allocator.building_blocks.free_list
546 : FreeList;
547 import std.experimental.allocator.building_blocks.bitmapped_block
548 : BitmappedBlock;
549 import std.experimental.allocator.gc_allocator : GCAllocator;
550 FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
551 const a2 = r2.allocate(102);
552 assert(a2.length == 102);
553
554 // Reap with GC fallback.
555 InSituRegion!(128 * 1024, 8) tmp3;
556 FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
557 r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
558 const a3 = r3.allocate(103);
559 assert(a3.length == 103);
560
561 // Reap/GC with a freelist for small objects up to 16 bytes.
562 InSituRegion!(128 * 1024, 64) tmp4;
563 FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
564 r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
565 const a4 = r4.allocate(104);
566 assert(a4.length == 104);
567 }
568
569 @system unittest
570 {
571 InSituRegion!(4096, 1) r1;
572 auto a = r1.allocate(2001);
573 assert(a.length == 2001);
574 import std.conv : text;
575 assert(r1.available == 2095, text(r1.available));
576
577 InSituRegion!(65_536, 1024*4) r2;
578 assert(r2.available <= 65_536);
579 a = r2.allocate(2001);
580 assert(a.length == 2001);
581 }
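
// Illustrative example (a minimal sketch, not part of the upstream sources):
// InSituRegion offers the same last-allocation deallocate semantics as Region,
// regardless of which way it grows on the current architecture.
@system unittest
{
    import std.typecons : Ternary;
    InSituRegion!(4096) r;
    auto a = r.allocate(64);
    assert(a.length == 64);
    assert(r.owns(a) == Ternary.yes);
    auto b = r.allocate(32);
    // Only the most recent allocation can be given back.
    assert(!r.deallocate(a));
    assert(r.deallocate(b));
    assert(r.deallocate(a));
    // deallocateAll always succeeds and makes the whole store reusable.
    assert(r.deallocateAll());
}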
582
583 private extern(C) void* sbrk(long);
584 private extern(C) int brk(shared void*);
585
586 /**
587
588 Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
589 for Posix systems. Because $(D sbrk) is not thread-safe
590 $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
591 $(D SbrkRegion) uses a mutex internally. This implies that uncontrolled calls
592 to $(D brk) and $(D sbrk) may adversely affect the workings of
593 $(D SbrkRegion).
594
595 */
596 version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
597 {
598 import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
599 pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
600 PTHREAD_MUTEX_INITIALIZER;
601 private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
602 import std.typecons : Ternary;
603
604 static assert(minAlign.isGoodStaticAlignment);
605 static assert(size_t.sizeof == (void*).sizeof);
606 private shared void* _brkInitial, _brkCurrent;
607
608 /**
609 Instance shared by all callers.
610 */
611 static shared SbrkRegion instance;
612
613 /**
614 Standard allocator primitives.
615 */
616 enum uint alignment = minAlign;
617
618 /// Ditto
619 void[] allocate(size_t bytes) shared
620 {
621 static if (minAlign > 1)
622 const rounded = bytes.roundUpToMultipleOf(alignment);
623 else
624 alias rounded = bytes;
625 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
626 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
627 || assert(0);
628 // Assume sbrk returns the old break. Most online documentation confirms
629 // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
630 // which claims the returned value is not portable.
631 auto p = sbrk(rounded);
632 if (p == cast(void*) -1)
633 {
634 return null;
635 }
636 if (!_brkInitial)
637 {
638 _brkInitial = cast(shared) p;
639 assert(cast(size_t) _brkInitial % minAlign == 0,
640 "Too large alignment chosen for " ~ typeof(this).stringof);
641 }
642 _brkCurrent = cast(shared) (p + rounded);
643 return p[0 .. bytes];
644 }
645
646 /// Ditto
647 void[] alignedAllocate(size_t bytes, uint a) shared
648 {
649 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
650 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
651 || assert(0);
652 if (!_brkInitial)
653 {
654 // This is one extra call, but it'll happen only once.
655 _brkInitial = cast(shared) sbrk(0);
656 assert(cast(size_t) _brkInitial % minAlign == 0,
657 "Too large alignment chosen for " ~ typeof(this).stringof);
658 (_brkInitial != cast(void*) -1) || assert(0);
659 _brkCurrent = _brkInitial;
660 }
661 immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
662 cast(size_t) _brkCurrent, a) - _brkCurrent;
663 // Still must make sure the total size is aligned to the allocator's
664 // alignment.
665 immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
666
667 auto p = sbrk(rounded);
668 if (p == cast(void*) -1)
669 {
670 return null;
671 }
672 _brkCurrent = cast(shared) (p + rounded);
673 return p[delta .. delta + bytes];
674 }
675
676 /**
677
678 The $(D expand) method may only succeed if the argument is the last block
679 allocated. In that case, $(D expand) attempts to push the break pointer to
680 the right.
681
682 */
683 bool expand(ref void[] b, size_t delta) shared
684 {
685 if (b is null) return delta == 0;
686 assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
687 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
688 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
689 || assert(0);
690 if (_brkCurrent != b.ptr + b.length) return false;
691 // Great, can expand the last block
692 static if (minAlign > 1)
693 const rounded = delta.roundUpToMultipleOf(alignment);
694 else
695 alias rounded = delta;
696 auto p = sbrk(rounded);
697 if (p == cast(void*) -1)
698 {
699 return false;
700 }
701 _brkCurrent = cast(shared) (p + rounded);
702 b = b.ptr[0 .. b.length + delta];
703 return true;
704 }
705
706 /// Ditto
707 Ternary owns(void[] b) shared
708 {
709 // No need to lock here.
710 assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
711 return Ternary(_brkInitial && b.ptr >= _brkInitial);
712 }
713
714 /**
715
716 The $(D deallocate) method only works (and returns $(D true)) on systems
717 that support reducing the break address (i.e. accept calls to $(D sbrk)
718 with negative offsets). OSX does not accept such calls. In addition, the argument
719 must be the last block allocated.
720
721 */
722 bool deallocate(void[] b) shared
723 {
724 static if (minAlign > 1)
725 const rounded = b.length.roundUpToMultipleOf(alignment);
726 else
727 const rounded = b.length;
728 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
729 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
730 || assert(0);
731 if (_brkCurrent != b.ptr + rounded) return false;
732 assert(b.ptr >= _brkInitial);
733 if (sbrk(-rounded) == cast(void*) -1)
734 return false;
735 _brkCurrent = cast(shared) b.ptr;
736 return true;
737 }
738
739 /**
740 The $(D deallocateAll) method only works (and returns $(D true)) on systems
741 that support reducing the break address (i.e. accept calls to $(D sbrk)
742 with negative offsets). OSX does not accept such calls.
743 */
744 bool deallocateAll() shared
745 {
746 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
747 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
748 || assert(0);
749 return !_brkInitial || brk(_brkInitial) == 0;
750 }
751
752 /// Standard allocator API.
753 Ternary empty() shared
754 {
755 // Also works when they're both null.
756 return Ternary(_brkCurrent == _brkInitial);
757 }
758 }
759
760 version (Posix) @system unittest
761 {
762 // Let's test the assumption that sbrk(n) returns the old address
763 const p1 = sbrk(0);
764 const p2 = sbrk(4096);
765 assert(p1 == p2);
766 const p3 = sbrk(0);
767 assert(p3 == p2 + 4096);
768 // Try to reset brk, but don't make a fuss if it doesn't work
769 sbrk(-4096);
770 }
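
// Illustrative example (a minimal sketch, not part of the upstream sources):
// alignedAllocate hands back a block aligned at the requested boundary, even
// though sbrk itself gives no alignment guarantee beyond the allocator's own.
version (Posix) @system unittest
{
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.alignedAllocate(100, 256);
    assert(a.length == 100);
    assert(cast(size_t) a.ptr % 256 == 0);
}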
771
772 version (Posix) @system unittest
773 {
774 import std.typecons : Ternary;
775 alias alloc = SbrkRegion!(8).instance;
776 auto a = alloc.alignedAllocate(2001, 4096);
777 assert(a.length == 2001);
778 auto b = alloc.allocate(2001);
779 assert(b.length == 2001);
780 assert(alloc.owns(a) == Ternary.yes);
781 assert(alloc.owns(b) == Ternary.yes);
782 // reducing the brk does not work on OSX
783 version (OSX) {} else
784 {
785 assert(alloc.deallocate(b));
786 assert(alloc.deallocateAll);
787 }
788 }
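
// Illustrative example (a minimal sketch, not part of the upstream sources):
// expand grows the most recent block by pushing the break pointer further;
// once another allocation follows, the block can no longer be expanded.
version (Posix) @system unittest
{
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.allocate(1000);
    assert(a.length == 1000);
    // `a` is the last block allocated, so it can grow in place.
    assert(alloc.expand(a, 1000));
    assert(a.length == 2000);
    auto b = alloc.allocate(16);
    assert(b.length == 16);
    // `a` is no longer adjacent to the break, so expansion fails.
    assert(!alloc.expand(a, 1));
    // Note: like the tests above, this does not attempt to shrink the break.
}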