Don't expose guard pages to malloc_stack API consumers (#54591)
Whether or not a guard page is in effect is an implementation detail, and
consumers of the `malloc_stack` API should not have to worry about it.
In particular, if a stack of a certain size is requested, a stack of that
size should be delivered, not one that is silently reduced on some systems
because we park a guard page in that range.

This also helps consumers of the gcext API that implement stack scanning
(such as GAP.jl), as they no longer have to worry about running into those
guard pages.
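
As a sketch of the contract this establishes, consider a hypothetical gcext-style conservative stack scanner (the names below are illustrative, not from the commit or from GAP.jl). Every byte of the bufsz-byte buffer returned by malloc_stack is now mapped and readable, so scanning the full range cannot fault:

#include <stddef.h>

/* Hypothetical malloc_stack consumer, e.g. a gcext-based conservative
 * stack scanner. After this change the guard page, if any, lives below
 * the returned pointer, outside [stkbuf, stkbuf + bufsz). */
static void scan_stack_words(void *stkbuf, size_t bufsz,
                             void (*visit_word)(void *word))
{
    void **slot = (void **)stkbuf;
    void **end = (void **)((char *)stkbuf + bufsz);
    for (; slot < end; slot++)
        visit_word(*slot);  /* previously this could land on a PROT_NONE guard page */
}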
fingolfin authored Jun 9, 2024
1 parent 8a7d8f4 commit 5dfd57d
Showing 1 changed file with 29 additions and 1 deletion.

src/gc-stacks.c
@@ -22,13 +22,22 @@
 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
+#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
+#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
+#else
+const size_t jl_guard_size = 0;
+#endif
+
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -39,6 +48,7 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
@@ -47,6 +57,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
@@ -72,16 +88,22 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 # else
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+#endif
+
     void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
 
-#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
+#ifdef JL_USE_GUARD_PAGE
     // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 #endif
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
@@ -91,6 +113,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
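
For reference, the scheme the diff implements can be demonstrated standalone. The following is a minimal sketch assuming a POSIX system with mmap/mprotect; the names and the one-page guard size are illustrative (Julia rounds jl_guard_size up to jl_page_size, as above):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define GUARD_SIZE 4096  /* illustrative: one page */

/* Over-allocate, protect the low pages, and hand back a pointer past the
 * guard, so the caller sees a fully usable buffer of the requested size. */
static void *alloc_guarded(size_t bufsz)
{
    void *stk = mmap(NULL, bufsz + GUARD_SIZE, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stk == MAP_FAILED)
        return NULL;
    if (mprotect(stk, GUARD_SIZE, PROT_NONE) == -1) {
        munmap(stk, bufsz + GUARD_SIZE);
        return NULL;
    }
    return (char *)stk + GUARD_SIZE;
}

/* Free must undo exactly what alloc did: step back over the guard and
 * release the full mapping. */
static void free_guarded(void *stkbuf, size_t bufsz)
{
    munmap((char *)stkbuf - GUARD_SIZE, bufsz + GUARD_SIZE);
}

int main(void)
{
    size_t bufsz = 64 * 1024;
    void *stk = alloc_guarded(bufsz);
    if (stk == NULL)
        return 1;
    memset(stk, 0, bufsz);  /* the entire requested range is writable */
    free_guarded(stk, bufsz);
    puts("ok");
    return 0;
}

The key invariant is symmetry: free_guarded must undo exactly the pointer and size adjustments that alloc_guarded made, which is why the updated free_stack recomputes guard_size the same way malloc_stack does.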
