author    Rich Felker <dalias@aerifal.cx>  2014-04-02 17:57:15 -0400
committer Rich Felker <dalias@aerifal.cx>  2014-04-02 17:57:15 -0400
commit    5446303328adf4b4e36d9fba21848e6feb55fab4
tree      bb513d417244a07c5699018e17e341be41ca8c1f
parent    91d5aa06572d2660122f9a06ed242fef0383f292
avoid malloc failure for small requests when brk can't be extended
this issue mainly affects PIE binaries and execution of programs via direct invocation of the dynamic linker binary: depending on kernel behavior, in these cases the initial brk may be placed at a location where it cannot be extended, due to conflicting adjacent maps. when brk fails, mmap is used instead to expand the heap. in order to avoid expensive bookkeeping for managing fragmentation by merging these new heap regions, the minimum size for new heap regions increases exponentially in the number of regions. this limits the number of regions, and thereby the number of fixed fragmentation points, to a quantity which is logarithmic with respect to the size of the virtual address space and thus negligible. the exponential growth is tuned so as to avoid expanding the heap by more than approximately 50% of its current total size.
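To make the tuning concrete: the minimum size of the k-th mmap'd region is PAGE_SIZE << k/2, so the floor doubles every two regions. The following standalone calculation (illustrative only, not part of the patch; it assumes 4k pages and names like `page`/`total` are local to the sketch) prints the schedule and each new floor as a fraction of the space already obtained at those minimums:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page = 4096;  /* assumed page size for illustration */
        unsigned long total = 0;    /* space obtained so far, at the minimum sizes */
        for (unsigned k = 0; k < 14; k++) {
            /* the patch's schedule: (size_t)PAGE_SIZE << mal.mmap_step/2 */
            unsigned long min = page << k/2;
            if (total)
                printf("region %2u: floor %8lu bytes (%3.0f%% of prior %lu)\n",
                       k, min, 100.0 * min / total, total);
            else
                printf("region %2u: floor %8lu bytes\n", k, min);
            total += min;
        }
        return 0;
    }

After the first few steps the ratio settles into an alternation approaching 50% and 33%, consistent with the commit message's "approximately 50%" figure. And since even at these minimum sizes roughly 40 regions exhaust a 32-bit address space (and roughly 70 a 48-bit one), the region count is indeed logarithmic in the size of the address space.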
Diffstat (limited to 'src')

 src/malloc/malloc.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index d6ad9041..7932a975 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -37,6 +37,7 @@ static struct {
 	struct bin bins[64];
 	int brk_lock[2];
 	int free_lock[2];
+	unsigned mmap_step;
 } mal;
@@ -162,7 +163,28 @@ static struct chunk *expand_heap(size_t n)
 	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
 	n = new - mal.brk;
 
-	if (__brk(new) != new) goto fail;
+	if (__brk(new) != new) {
+		size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
+		n += -n & PAGE_SIZE-1;
+		if (n < min) n = min;
+		void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+		if (area == MAP_FAILED) goto fail;
+
+		mal.mmap_step++;
+		area = (char *)area + SIZE_ALIGN - OVERHEAD;
+		w = area;
+		n -= SIZE_ALIGN;
+		w->psize = 0 | C_INUSE;
+		w->csize = n | C_INUSE;
+		w = NEXT_CHUNK(w);
+		w->psize = n | C_INUSE;
+		w->csize = 0 | C_INUSE;
+
+		unlock(mal.brk_lock);
+
+		return area;
+	}
 
 	w = MEM_TO_CHUNK(new);
 	w->psize = n | C_INUSE;
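
A note on the new branch: n += -n & PAGE_SIZE-1 rounds the request up to a whole page, and the header writes frame the fresh mapping as one big in-use chunk whose psize field and trailing zero-size header both carry C_INUSE, so ordinary splitting works inside the region while coalescing can never walk off either end of the map; that is what makes per-region bookkeeping unnecessary. The sketch below replays the framing outside musl; struct chunk and the SIZE_ALIGN/OVERHEAD/C_INUSE/NEXT_CHUNK macros are simplified local stand-ins for the malloc.c internals, not the exact definitions:

    #include <stdio.h>
    #include <sys/mman.h>

    /* simplified local stand-ins for malloc.c internals (assumptions,
       not the exact musl definitions) */
    struct chunk { size_t psize, csize; };

    #define SIZE_ALIGN (4*sizeof(size_t))
    #define SIZE_MASK  (-SIZE_ALIGN)
    #define OVERHEAD   (2*sizeof(size_t))
    #define C_INUSE    ((size_t)1)
    #define CHUNK_SIZE(c) ((c)->csize & SIZE_MASK)
    #define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))

    int main(void)
    {
        size_t n = 16*4096;  /* stand-in for the page-rounded request size */
        void *area = mmap(0, n, PROT_READ|PROT_WRITE,
                          MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) return 1;

        /* same framing steps as the patch: shift so the chunk's user
           memory lands SIZE_ALIGN-aligned, then write the headers */
        area = (char *)area + SIZE_ALIGN - OVERHEAD;
        struct chunk *w = area;
        n -= SIZE_ALIGN;
        w->psize = 0 | C_INUSE;  /* in-use "previous": no backward coalescing */
        w->csize = n | C_INUSE;  /* one big in-use chunk spanning the region */
        w = NEXT_CHUNK(w);
        w->psize = n | C_INUSE;
        w->csize = 0 | C_INUSE;  /* zero-size in-use tail: no forward coalescing */

        printf("chunk at %p, usable size %zu bytes\n",
               area, CHUNK_SIZE((struct chunk *)area));
        return 0;
    }

The SIZE_ALIGN - OVERHEAD shift keeps the chunk's user memory aligned to SIZE_ALIGN, matching what the brk path produces, so callers of expand_heap see no difference between the two paths.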