2 module std.experimental.allocator.building_blocks.region;
4 import std.experimental.allocator.building_blocks.null_allocator;
5 import std.experimental.allocator.common;
6 import std.typecons : Flag, Yes, No;
9 A $(D Region) allocator allocates memory straight from one contiguous chunk.
10 There is no deallocation, and once the region is full, allocation requests
11 return $(D null). Therefore, $(D Region)s are often used (a) in conjunction with
12 more sophisticated allocators; or (b) for batch-style very fast allocations
13 that deallocate everything at once.
15 The region only stores three pointers, corresponding to the current position in
16 the store and the limits. One allocation entails rounding up the allocation
17 size for alignment purposes, bumping the current pointer, and comparing it
20 If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
21 deallocates the chunk of memory during destruction.
23 The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
24 sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
25 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
26 control alignment externally.
29 struct Region(ParentAllocator = NullAllocator,
30 uint minAlign = platformAlignment,
31 Flag!"growDownwards" growDownwards = No.growDownwards)
33 static assert(minAlign.isGoodStaticAlignment);
34 static assert(ParentAllocator.alignment >= minAlign);
36 import std.traits : hasMember;
37 import std.typecons : Ternary;
41 The _parent allocator. Depending on whether $(D ParentAllocator) holds state
42 or not, this is a member variable or an alias for
43 `ParentAllocator.instance`.
45 static if (stateSize!ParentAllocator)
47 ParentAllocator parent;
51 alias parent = ParentAllocator.instance;
53 private void* _current, _begin, _end;
56 Constructs a region backed by a user-provided store. Assumes $(D store) is
57 aligned at $(D minAlign). Also assumes the memory was allocated with $(D
58 ParentAllocator) (if different from $(D NullAllocator)).
61 store = User-provided store backing up the region. $(D store) must be
62 aligned at $(D minAlign) (enforced with $(D assert)). If $(D
63 ParentAllocator) is different from $(D NullAllocator), memory is assumed to
64 have been allocated with $(D ParentAllocator).
65 n = Bytes to allocate using $(D ParentAllocator). This constructor is only
66 defined if $(D ParentAllocator) is different from $(D NullAllocator). If
67 $(D parent.allocate(n)) returns $(D null), the region will be initialized
68 as empty (correctly initialized but unable to allocate).
72 store = cast(ubyte[])(store.roundUpToAlignment(alignment));
73 store = store[0 .. $.roundDownToAlignment(alignment)];
74 assert(store.ptr.alignedAt(minAlign));
75 assert(store.length % minAlign == 0);
77 _end = store.ptr + store.length;
78 static if (growDownwards)
85 static if (!is(ParentAllocator == NullAllocator))
88 this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
92 TODO: The postblit of $(D BasicRegion) should be disabled because such objects
93 should not be copied around naively.
97 If `ParentAllocator` is not `NullAllocator` and defines `deallocate`, the region defines a destructor that uses `ParentAllocator.deallocate` to free the
100 static if (!is(ParentAllocator == NullAllocator)
101 && hasMember!(ParentAllocator, "deallocate"))
104 parent.deallocate(_begin[0 .. _end - _begin]);
111 alias alignment = minAlign;
/**
Allocates `n` bytes of memory. The fast path involves an alignment
adjustment (if `alignment > 1`), a pointer bump, and a single comparison
against the store's limit.

Params:
n = number of bytes to allocate

Returns:
A properly-aligned buffer of size `n`, or `null` if the request could
not be satisfied.
*/
void[] allocate(size_t n)
{
    static if (growDownwards)
    {
        if (available < n) return null;
        static if (minAlign > 1)
            const rounded = n.roundUpToAlignment(alignment);
        else
            alias rounded = n;
        assert(available >= rounded);
        auto result = (_current - rounded)[0 .. n];
        assert(result.ptr >= _begin);
        _current = result.ptr;
        assert(owns(result) == Ternary.yes);
        return result;
    }
    else
    {
        auto result = _current[0 .. n];
        static if (minAlign > 1)
            const rounded = n.roundUpToAlignment(alignment);
        else
            alias rounded = n;
        _current += rounded;
        if (_current <= _end) return result;
        // Slow path: the bump overshot the end of the store; backtrack.
        _current -= rounded;
        return null;
    }
}
156 Allocates $(D n) bytes of memory aligned at alignment $(D a).
159 n = number of bytes to allocate
160 a = alignment for the allocated block
163 Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
165 void[] alignedAllocate(size_t n, uint a)
167 import std.math : isPowerOf2;
168 assert(a.isPowerOf2);
169 static if (growDownwards)
171 const available = _current - _begin;
172 if (available < n) return null;
173 auto result = (_current - n).alignDownTo(a)[0 .. n];
174 if (result.ptr >= _begin)
176 _current = result.ptr;
182 // Just bump the pointer to the next good allocation
183 auto save = _current;
184 _current = _current.alignUpTo(a);
185 auto result = allocate(n);
188 assert(result.length == n);
197 /// Allocates and returns all memory available to this region.
200 static if (growDownwards)
202 auto result = _begin[0 .. available];
207 auto result = _current[0 .. available];
214 Expands an allocated block in place. Expansion will succeed only if the
215 block is the last allocated. Defined only if `growDownwards` is
218 static if (growDownwards == No.growDownwards)
219 bool expand(ref void[] b, size_t delta)
221 assert(owns(b) == Ternary.yes || b.ptr is null);
222 assert(b.ptr + b.length <= _current || b.ptr is null);
223 if (!b.ptr) return delta == 0;
224 auto newLength = b.length + delta;
225 if (_current < b.ptr + b.length + alignment)
227 // This was the last allocation! Allocate some more and we're done.
228 if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
229 || allocate(delta).length == delta)
231 b = b.ptr[0 .. newLength];
232 assert(_current < b.ptr + b.length + alignment);
240 Deallocates $(D b). This works only if $(D b) was obtained as the last call
241 to $(D allocate); otherwise (i.e. another allocation has occurred since) it
242 does nothing. This gives $(D deallocate) stack-like (last-in, first-out)
243 semantics: only the most recently allocated block can actually be freed,
244 and callers must not rely on it for general-purpose deallocation.
247 b = Block previously obtained by a call to $(D allocate) against this
248 allocator ($(D null) is allowed).
250 bool deallocate(void[] b)
252 assert(owns(b) == Ternary.yes || b.ptr is null);
253 static if (growDownwards)
255 if (b.ptr == _current)
257 _current += this.goodAllocSize(b.length);
263 if (b.ptr + this.goodAllocSize(b.length) == _current)
265 assert(b.ptr !is null || _current is null);
274 Deallocates all memory allocated by this region, which can be subsequently
275 reused for new allocations.
279 static if (growDownwards)
291 Queries whether $(D b) has been allocated with this region.
294 b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
298 $(D true) if $(D b) has been allocated with this region, $(D false)
301 Ternary owns(void[] b) const
303 return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
307 Returns `Ternary.yes` if no memory has been allocated in this region,
308 `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
310 Ternary empty() const
312 return Ternary(_current == _begin);
315 /// Nonstandard property that returns bytes available for allocation.
316 size_t available() const
318 static if (growDownwards)
320 return _current - _begin;
324 return _end - _current;
332 import std.algorithm.comparison : max;
333 import std.experimental.allocator.building_blocks.allocator_list
335 import std.experimental.allocator.mallocator : Mallocator;
336 // Create a scalable list of regions. Each gets at least 1MB at a time by
338 auto batchAllocator = AllocatorList!(
339 (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
341 auto b = batchAllocator.allocate(101);
342 assert(b.length == 101);
343 // This will cause a second allocation
344 b = batchAllocator.allocate(2 * 1024 * 1024);
345 assert(b.length == 2 * 1024 * 1024);
346 // Destructor will free the memory
351 import std.experimental.allocator.mallocator : Mallocator;
352 // Create a 64 KB region allocated with malloc
353 auto reg = Region!(Mallocator, Mallocator.alignment,
354 Yes.growDownwards)(1024 * 64);
355 const b = reg.allocate(101);
356 assert(b.length == 101);
357 // Destructor will free the memory
362 $(D InSituRegion) is a convenient region that carries its storage within itself
363 (in the form of a statically-sized array).
365 The first template argument is the size of the region and the second is the
366 needed alignment. Depending on the alignment requested and platform details,
367 the actual available storage may be smaller than the compile-time parameter. To
368 make sure that at least $(D n) bytes are available in the region, use
369 $(D InSituRegion!(n + a - 1, a)).
371 Given that the most frequent use of `InSituRegion` is as a stack allocator, it
372 allocates starting at the end on systems where stack grows downwards, such that
373 hot memory is used first.
376 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
378 import std.algorithm.comparison : max;
379 import std.conv : to;
380 import std.traits : hasMember;
381 import std.typecons : Ternary;
383 static assert(minAlign.isGoodStaticAlignment);
384 static assert(size >= minAlign);
386 version (X86) enum growDownwards = Yes.growDownwards;
387 else version (X86_64) enum growDownwards = Yes.growDownwards;
388 else version (ARM) enum growDownwards = Yes.growDownwards;
389 else version (AArch64) enum growDownwards = Yes.growDownwards;
390 else version (PPC) enum growDownwards = Yes.growDownwards;
391 else version (PPC64) enum growDownwards = Yes.growDownwards;
392 else version (MIPS32) enum growDownwards = Yes.growDownwards;
393 else version (MIPS64) enum growDownwards = Yes.growDownwards;
394 else version (SPARC) enum growDownwards = Yes.growDownwards;
395 else version (SystemZ) enum growDownwards = Yes.growDownwards;
396 else static assert(0, "Dunno how the stack grows on this architecture.");
401 private Region!(NullAllocator, minAlign, growDownwards) _impl;
404 private ubyte[size] _store = void;
405 private double _forAlignmentOnly1 = void;
410 An alias for $(D minAlign), which must be a valid alignment (nonzero power
411 of 2). The start of the region and all allocation requests will be rounded
412 up to a multiple of the alignment.
415 InSituRegion!(4096) a1;
416 assert(a1.alignment == platformAlignment);
417 InSituRegion!(4096, 64) a2;
418 assert(a2.alignment == 64);
421 alias alignment = minAlign;
423 private void lazyInit()
425 assert(!_impl._current);
426 _impl = typeof(_impl)(_store);
427 assert(_impl._current.alignedAt(alignment));
431 Allocates $(D bytes) and returns them, or $(D null) if the region cannot
432 accommodate the request. For efficiency reasons, if $(D bytes == 0) the
433 function returns an empty non-null slice.
435 void[] allocate(size_t n)
439 auto result = _impl.allocate(n);
440 if (result.length == n) return result;
442 if (_impl._current) return null; // no more room
444 assert(_impl._current);
449 As above, but the memory allocated is aligned at $(D a) bytes.
451 void[] alignedAllocate(size_t n, uint a)
455 auto result = _impl.alignedAllocate(n, a);
456 if (result.length == n) return result;
458 if (_impl._current) return null; // no more room
460 assert(_impl._current);
465 Deallocates $(D b). This works only if $(D b) was obtained as the last call
466 to $(D allocate); otherwise (i.e. another allocation has occurred since) it
467 does nothing. This gives $(D deallocate) stack-like (last-in, first-out)
468 semantics: only the most recently allocated block can actually be freed,
469 and callers must not rely on it for general-purpose deallocation.
472 b = Block previously obtained by a call to $(D allocate) against this
473 allocator ($(D null) is allowed).
475 bool deallocate(void[] b)
477 if (!_impl._current) return b is null;
478 return _impl.deallocate(b);
482 Returns `Ternary.yes` if `b` is the result of a previous allocation,
483 `Ternary.no` otherwise.
485 Ternary owns(void[] b)
487 if (!_impl._current) return Ternary.no;
488 return _impl.owns(b);
492 Expands an allocated block in place. Expansion will succeed only if the
493 block is the last allocated.
495 static if (hasMember!(typeof(_impl), "expand"))
496 bool expand(ref void[] b, size_t delta)
498 if (!_impl._current) lazyInit;
499 return _impl.expand(b, delta);
503 Deallocates all memory allocated with this allocator.
507 // We don't care to lazily init the region
508 return _impl.deallocateAll;
512 Allocates all memory available with this allocator.
516 if (!_impl._current) lazyInit;
517 return _impl.allocateAll;
521 Nonstandard function that returns the bytes available for allocation.
525 if (!_impl._current) lazyInit;
526 return _impl.available;
533 // 128KB region, allocated to x86's cache line
534 InSituRegion!(128 * 1024, 16) r1;
535 auto a1 = r1.allocate(101);
536 assert(a1.length == 101);
538 // 128KB region, with fallback to the garbage collector.
539 import std.experimental.allocator.building_blocks.fallback_allocator
541 import std.experimental.allocator.building_blocks.free_list
543 import std.experimental.allocator.building_blocks.bitmapped_block
545 import std.experimental.allocator.gc_allocator : GCAllocator;
546 FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
547 const a2 = r2.allocate(102);
548 assert(a2.length == 102);
550 // Reap with GC fallback.
551 InSituRegion!(128 * 1024, 8) tmp3;
552 FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
553 r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
554 const a3 = r3.allocate(103);
555 assert(a3.length == 103);
557 // Reap/GC with a freelist for small objects up to 16 bytes.
558 InSituRegion!(128 * 1024, 64) tmp4;
559 FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
560 r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
561 const a4 = r4.allocate(104);
562 assert(a4.length == 104);
567 InSituRegion!(4096, 1) r1;
568 auto a = r1.allocate(2001);
569 assert(a.length == 2001);
570 import std.conv : text;
571 assert(r1.available == 2095, text(r1.available));
573 InSituRegion!(65_536, 1024*4) r2;
574 assert(r2.available <= 65_536);
575 a = r2.allocate(2001);
576 assert(a.length == 2001);
579 private extern(C) void* sbrk(long);
580 private extern(C) int brk(shared void*);
584 Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
585 for Posix systems. Due to the fact that $(D sbrk) is not thread-safe
586 $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
587 $(D SbrkRegion) uses a mutex internally. This implies
588 that uncontrolled calls to $(D brk) and $(D sbrk) may affect the workings of $(D
589 SbrkRegion) adversely.
592 version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
594 import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
595 pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
596 PTHREAD_MUTEX_INITIALIZER;
597 private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
598 import std.typecons : Ternary;
600 static assert(minAlign.isGoodStaticAlignment);
601 static assert(size_t.sizeof == (void*).sizeof);
602 private shared void* _brkInitial, _brkCurrent;
605 Instance shared by all callers.
607 static shared SbrkRegion instance;
610 Standard allocator primitives.
612 enum uint alignment = minAlign;
615 void[] allocate(size_t bytes) shared
617 static if (minAlign > 1)
618 const rounded = bytes.roundUpToMultipleOf(alignment);
620 alias rounded = bytes;
621 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
622 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
624 // Assume sbrk returns the old break. Most online documentation confirms
625 // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
626 // which claims the returned value is not portable.
627 auto p = sbrk(rounded);
628 if (p == cast(void*) -1)
634 _brkInitial = cast(shared) p;
635 assert(cast(size_t) _brkInitial % minAlign == 0,
636 "Too large alignment chosen for " ~ typeof(this).stringof);
638 _brkCurrent = cast(shared) (p + rounded);
639 return p[0 .. bytes];
643 void[] alignedAllocate(size_t bytes, uint a) shared
645 pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
646 scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
650 // This is one extra call, but it'll happen only once.
651 _brkInitial = cast(shared) sbrk(0);
652 assert(cast(size_t) _brkInitial % minAlign == 0,
653 "Too large alignment chosen for " ~ typeof(this).stringof);
654 (_brkInitial != cast(void*) -1) || assert(0);
655 _brkCurrent = _brkInitial;
657 immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
658 cast(size_t) _brkCurrent, a) - _brkCurrent;
659 // Still must make sure the total size is aligned to the allocator's
661 immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
663 auto p = sbrk(rounded);
664 if (p == cast(void*) -1)
668 _brkCurrent = cast(shared) (p + rounded);
669 return p[delta .. delta + bytes];
/**
The `expand` method may only succeed if the argument is the last block
allocated. In that case, `expand` attempts to push the break pointer
upward to accommodate `delta` extra bytes.

Params:
b = block to expand (`null` is allowed; must otherwise be the most
    recently allocated block)
delta = number of additional bytes requested

Returns: `true` if the block was expanded in place, `false` otherwise.
*/
bool expand(ref void[] b, size_t delta) shared
{
    if (b is null) return delta == 0;
    assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
    pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
    scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
        || assert(0);
    if (_brkCurrent != b.ptr + b.length) return false;
    // Great, can expand the last block
    static if (minAlign > 1)
        const rounded = delta.roundUpToMultipleOf(alignment);
    else
        // BUG FIX: was `alias rounded = bytes;` — `bytes` is not in scope
        // in this method (the parameter is named `delta`), so the template
        // failed to compile whenever minAlign == 1.
        alias rounded = delta;
    auto p = sbrk(rounded);
    if (p == cast(void*) -1)
    {
        return false;
    }
    _brkCurrent = cast(shared) (p + rounded);
    b = b.ptr[0 .. b.length + delta];
    return true;
}
703 Ternary owns(void[] b) shared
705 // No need to lock here.
706 assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
707 return Ternary(_brkInitial && b.ptr >= _brkInitial);
/**
The `deallocate` method only works (and returns `true`) on systems that
support reducing the break address, i.e. accept calls to `sbrk` with
negative offsets. OSX does not accept such. In addition, the argument
must be the last block allocated.

Params:
b = block to deallocate (must be the most recently allocated block)

Returns: `true` if the break was successfully lowered, `false` otherwise.
*/
bool deallocate(void[] b) shared
{
    // Recompute the size allocate() actually reserved for this block.
    static if (minAlign > 1)
        const rounded = b.length.roundUpToMultipleOf(alignment);
    else
        const rounded = b.length;
    pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
    scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
        || assert(0);
    if (_brkCurrent != b.ptr + rounded) return false;
    assert(b.ptr >= _brkInitial);
    if (sbrk(-rounded) == cast(void*) -1)
        return false;
    _brkCurrent = cast(shared) b.ptr;
    return true;
}
/**
The `deallocateAll` method only works (and returns `true`) on systems
that support reducing the break address, i.e. accept calls to `sbrk`
with negative offsets. OSX does not accept such.

Returns: `true` if the break was reset to its initial value (or was
never moved), `false` otherwise.
*/
bool deallocateAll() shared
{
    pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
    scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
        || assert(0);
    if (!_brkInitial) return true;
    if (brk(_brkInitial) != 0) return false;
    // FIX: keep the bookkeeping consistent so that empty() reports
    // Ternary.yes after a successful deallocateAll (previously _brkCurrent
    // was left pointing past the lowered break).
    _brkCurrent = _brkInitial;
    return true;
}
748 /// Standard allocator API.
751 // Also works when they're both null.
752 return Ternary(_brkCurrent == _brkInitial);
756 version (Posix) @system unittest
758 // Let's test the assumption that sbrk(n) returns the old address
760 const p2 = sbrk(4096);
763 assert(p3 == p2 + 4096);
764 // Try to reset brk, but don't make a fuss if it doesn't work
768 version (Posix) @system unittest
770 import std.typecons : Ternary;
771 alias alloc = SbrkRegion!(8).instance;
772 auto a = alloc.alignedAllocate(2001, 4096);
773 assert(a.length == 2001);
774 auto b = alloc.allocate(2001);
775 assert(b.length == 2001);
776 assert(alloc.owns(a) == Ternary.yes);
777 assert(alloc.owns(b) == Ternary.yes);
778 // reducing the brk does not work on OSX
779 version (OSX) {} else
781 assert(alloc.deallocate(b));
782 assert(alloc.deallocateAll);