#include <limits>
#include <new>
#include <type_traits>

#include <sys/mman.h>
#include <unistd.h>
// On OpenBSD mem used as stack should be marked MAP_STACK
#if !defined(MAP_STACK)
static_assert(std::is_trivial<T>::value,
"lazy_allocator must only be used with trivial types");
+#ifndef LAZY_ALLOCATOR_USES_NEW
+ /* Pad the requested size to be a multiple of page size, so that we can
+ properly restrict read and write access to our guard pages only */
+ static size_type getAlignmentPadding(size_type requestedSize, size_type pageSize)
+ {
+ size_type remaining = requestedSize % pageSize;
+ if (remaining == 0) {
+ return 0;
+ }
+ return pageSize - remaining;
+ }
+#endif /* LAZY_ALLOCATOR_USES_NEW */
+
pointer
allocate(size_type const n)
{
#ifdef LAZY_ALLOCATOR_USES_NEW
  return static_cast<pointer>(::operator new(n * sizeof(value_type)));
#else /* LAZY_ALLOCATOR_USES_NEW */
  /* This implements a very basic protection against stack overflow
     by placing two guard pages around the requested memory: one
     page right before the new stack and one right after.
     The guard pages cannot be read or written to, any attempt to
     do so will trigger an immediate access violation, terminating
     the program.
     This is much better than the default behaviour for two reasons:
     1/ the program is stopped right before corrupting memory, which
        prevents random corruption
     2/ it's easy to find the point where the stack overflow occurred
     The memory overhead is two pages (usually 4k on Linux) per stack,
     and the runtime CPU overhead is one call to mprotect() for every
     stack allocation.
  */
  /* sysconf() returns long; cast once so all the arithmetic below stays
     in the unsigned size_type domain. */
  static const size_type pageSize = static_cast<size_type>(sysconf(_SC_PAGESIZE));

  /* Reject requests whose byte count would overflow size_type — an
     unchecked n * sizeof(value_type) could otherwise wrap and map a
     mapping far smaller than the caller asked for. */
  if (n > std::numeric_limits<size_type>::max() / sizeof(value_type)) {
    throw std::bad_alloc();
  }
  const size_type requestedSize = n * sizeof(value_type);
  const size_type padding = getAlignmentPadding(requestedSize, pageSize);
  const size_type allocatedSize = requestedSize + padding + (pageSize * 2);

  /* Map everything PROT_NONE first; only the interior is opened up below,
     leaving the first and last page as inaccessible guard pages. */
  void* p = mmap(nullptr, allocatedSize,
                 PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_STACK, -1, 0);
  if (p == MAP_FAILED) {
    throw std::bad_alloc();
  }
  char* basePointer = static_cast<char*>(p);
  void* usablePointer = basePointer + pageSize;
  int res = mprotect(usablePointer, requestedSize + padding, PROT_READ | PROT_WRITE);
  if (res != 0) {
    /* use the size we just computed instead of recomputing it */
    munmap(p, allocatedSize);
    throw std::bad_alloc();
  }
  return static_cast<pointer>(usablePointer);
#endif /* LAZY_ALLOCATOR_USES_NEW */
}
void
deallocate(pointer const ptr, size_type const n) noexcept
{
#ifdef LAZY_ALLOCATOR_USES_NEW
#if defined(__cpp_sized_deallocation) && (__cpp_sized_deallocation >= 201309)
  ::operator delete(ptr, n * sizeof(value_type));
#else
  (void)n;
  ::operator delete(ptr);
#endif
#else /* LAZY_ALLOCATOR_USES_NEW */
  /* Recompute the full mapping size — usable region plus the two guard
     pages — exactly as allocate() did for the same n, then unmap starting
     at the leading guard page (one page before the pointer we handed out). */
  static const size_type pageSize = static_cast<size_type>(sysconf(_SC_PAGESIZE));

  const size_type requestedSize = n * sizeof(value_type);
  const size_type padding = getAlignmentPadding(requestedSize, pageSize);
  const size_type allocatedSize = requestedSize + padding + (pageSize * 2);

  void* basePointer = static_cast<char*>(ptr) - pageSize;
  munmap(basePointer, allocatedSize);
#endif /* LAZY_ALLOCATOR_USES_NEW */
}
void construct(T*) const noexcept {} /* deliberate no-op: T is trivial (enforced by the static_assert above), so elements need no construction — this is what makes the allocator "lazy" */