author | Rich Felker <dalias@aerifal.cx> | 2020-06-03 19:17:19 -0400
committer | Rich Felker <dalias@aerifal.cx> | 2020-06-03 19:17:19 -0400
commit | eaa0f2496700c238e7e3c112d36445f3aee06ff1 (patch)
tree | 41979600733c58af880b832bed70bc51814b2bc9 /src/malloc/malloc.c
parent | e07138b880dcc72d3f1b3a2d2c1af75c2484c8d7 (diff)
download | musl-eaa0f2496700c238e7e3c112d36445f3aee06ff1.tar.gz musl-eaa0f2496700c238e7e3c112d36445f3aee06ff1.tar.bz2 musl-eaa0f2496700c238e7e3c112d36445f3aee06ff1.tar.xz musl-eaa0f2496700c238e7e3c112d36445f3aee06ff1.zip
move __expand_heap into malloc.c
this function is no longer used elsewhere, and moving it reduces the
number of source files specific to the malloc implementation.
Diffstat (limited to 'src/malloc/malloc.c')
-rw-r--r-- | src/malloc/malloc.c | 64
1 file changed, 64 insertions, 0 deletions
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 20598ec3..df3ea1be 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -126,6 +126,70 @@ void __dump_heap(int x)
 }
 #endif
 
+/* This function returns true if the interval [old,new]
+ * intersects the 'len'-sized interval below &libc.auxv
+ * (interpreted as the main-thread stack) or below &b
+ * (the current stack). It is used to defend against
+ * buggy brk implementations that can cross the stack. */
+
+static int traverses_stack_p(uintptr_t old, uintptr_t new)
+{
+	const uintptr_t len = 8<<20;
+	uintptr_t a, b;
+
+	b = (uintptr_t)libc.auxv;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	b = (uintptr_t)&b;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	return 0;
+}
+
+/* Expand the heap in-place if brk can be used, or otherwise via mmap,
+ * using an exponential lower bound on growth by mmap to make
+ * fragmentation asymptotically irrelevant. The size argument is both
+ * an input and an output, since the caller needs to know the size
+ * allocated, which will be larger than requested due to page alignment
+ * and mmap minimum size rules. The caller is responsible for locking
+ * to prevent concurrent calls. */
+
+static void *__expand_heap(size_t *pn)
+{
+	static uintptr_t brk;
+	static unsigned mmap_step;
+	size_t n = *pn;
+
+	if (n > SIZE_MAX/2 - PAGE_SIZE) {
+		errno = ENOMEM;
+		return 0;
+	}
+	n += -n & PAGE_SIZE-1;
+
+	if (!brk) {
+		brk = __syscall(SYS_brk, 0);
+		brk += -brk & PAGE_SIZE-1;
+	}
+
+	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
+	    && __syscall(SYS_brk, brk+n)==brk+n) {
+		*pn = n;
+		brk += n;
+		return (void *)(brk-n);
+	}
+
+	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
+	if (n < min) n = min;
+	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (area == MAP_FAILED) return 0;
+	*pn = n;
+	mmap_step++;
+	return area;
+}
+
 static struct chunk *expand_heap(size_t n)
 {
 	static void *end;
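For readers unfamiliar with the two arithmetic idioms in __expand_heap, the following standalone sketch (not part of the commit; PAGE_SIZE is hardcoded to an assumed 4096, where musl takes it from its own headers) shows how `n += -n & PAGE_SIZE-1` rounds a size up to a page multiple, and how the `PAGE_SIZE << mmap_step/2` lower bound doubles every second mmap call:

	/* Minimal illustration of the rounding and growth idioms above.
	 * Not musl code; PAGE_SIZE is an assumed value. */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		/* -n & (PAGE_SIZE-1) is the distance from n to the next
		 * page boundary, and 0 when n is already page-aligned,
		 * so the sum rounds n up to a multiple of PAGE_SIZE. */
		size_t n = 5000;
		n += -n & (PAGE_SIZE-1);
		printf("5000 rounds up to %zu\n", n); /* prints 8192 */

		/* The mmap fallback never maps fewer than
		 * PAGE_SIZE << mmap_step/2 bytes, so the minimum mapping
		 * size doubles once every two mmap calls. */
		for (unsigned mmap_step = 0; mmap_step < 8; mmap_step++)
			printf("step %u: minimum mmap size %zu\n",
			       mmap_step, (size_t)PAGE_SIZE << mmap_step/2);
		return 0;
	}

Because the minimum mapping size grows geometrically, even a workload whose every expansion falls back to mmap creates only a logarithmic number of distinct mappings relative to total heap size, which is what the commit's comment means by making fragmentation asymptotically irrelevant.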