author    Rich Felker <dalias@aerifal.cx>  2015-08-07 19:19:49 +0000
committer Rich Felker <dalias@aerifal.cx>  2015-08-07 19:19:49 +0000
commit    c3761622e8168b0c6453637ac82e70b09af3e8e9 (patch)
tree      90e92a2e312a3ad7389b53b402b1aef4b4fe1008 /src/malloc
parent    3c43c0761e1725fd5f89a9c028cbf43250abb913 (diff)
mitigate blow-up of heap size under malloc/free contention
during calls to free, any free chunks adjacent to the chunk being freed are momentarily held in allocated state for the purpose of merging, possibly leaving little or no available free memory for other threads to allocate. under this condition, other threads will attempt to expand the heap rather than waiting to use memory that will soon be available. the race window where this happens is normally very small, but became huge when free chooses to use madvise to release unused physical memory, causing unbounded heap size growth.

this patch drastically shrinks the race window for unwanted heap expansion by performing madvise with the bin lock held and marking the bin non-empty in the binmask before making the expensive madvise syscall. testing by Timo Teräs has shown this approach to be a suitable mitigation.

more invasive changes to the synchronization between malloc and free would be needed to completely eliminate the problem. it's not clear whether such changes would improve or worsen typical-case performance, or whether this would be a worthwhile direction to take malloc development.
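the ordering at the heart of the fix can be sketched in isolation. the following is a simplified, hypothetical model, not musl's internals: mark_bin_nonempty, free_tail, and the bare binmap variable are illustrative stand-ins, and bin lock handling is elided. it shows why the binmap bit is set before the expensive syscall:

/* simplified sketch, not musl's code: publish availability in the
 * binmap first, then do the expensive page-freeing syscall, so that
 * concurrent allocators see the bin as non-empty during the madvise
 * window instead of expanding the heap. */
#include <stdatomic.h>
#include <stdint.h>
#include <sys/mman.h>

static _Atomic uint64_t binmap;  /* one bit per bin: bin may be non-empty */

static void mark_bin_nonempty(int i)
{
    uint64_t mask = 1ULL << i;
    if (!(atomic_load(&binmap) & mask))
        atomic_fetch_or(&binmap, mask); /* plays the role of a_or_64 */
}

/* hypothetical tail of a free path; bin locking is elided */
static void free_tail(int i, void *a, size_t len)
{
    mark_bin_nonempty(i);           /* cheap: make the chunk visible first */
    madvise(a, len, MADV_DONTNEED); /* expensive: done after publishing */
}

int main(void)
{
    void *p = mmap(0, 4096, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return 1;
    free_tail(3, p, 4096); /* arbitrary bin index, for illustration only */
    return 0;
}

in the diff below, this is realized by moving the binmap update ahead of the final size stores, and moving the madvise to the end of the bin insertion path, so the expensive syscall runs only after the chunk is already visible to allocators, with the bin lock still held.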
Diffstat (limited to 'src/malloc')
-rw-r--r--  src/malloc/malloc.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index eb68d554..b90636cc 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -464,18 +464,6 @@ void free(void *p)
if (next->psize != self->csize) a_crash();
for (;;) {
- /* Replace middle of large chunks with fresh zero pages */
- if (reclaim && (self->psize & next->csize & C_INUSE)) {
- uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
- uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
-#if 1
- __madvise((void *)a, b-a, MADV_DONTNEED);
-#else
- __mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
-#endif
- }
-
if (self->psize & next->csize & C_INUSE) {
self->csize = final_size | C_INUSE;
next->psize = final_size | C_INUSE;
@@ -505,6 +493,9 @@ void free(void *p)
}
}
+ if (!(mal.binmap & 1ULL<<i))
+ a_or_64(&mal.binmap, 1ULL<<i);
+
self->csize = final_size;
next->psize = final_size;
unlock(mal.free_lock);
@@ -514,8 +505,17 @@ void free(void *p)
self->next->prev = self;
self->prev->next = self;
- if (!(mal.binmap & 1ULL<<i))
- a_or_64(&mal.binmap, 1ULL<<i);
+ /* Replace middle of large chunks with fresh zero pages */
+ if (reclaim) {
+ uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
+ uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
+#if 1
+ __madvise((void *)a, b-a, MADV_DONTNEED);
+#else
+ __mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+#endif
+ }
unlock_bin(i);
}
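for context, the allocation side consults the binmask to decide whether to reuse a binned chunk or expand the heap, which is why publishing the bit early narrows the race. a minimal sketch of that decision, under the same caveats as above (find_bin and the bare binmap are assumed stand-ins for musl's bin scan, not its actual code):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t binmap;

/* return the lowest non-empty bin with index >= i, or -1 if none;
 * in the -1 case the caller would fall back to expanding the heap,
 * which is exactly the path the early binmap update tries to avoid. */
static int find_bin(int i)
{
    uint64_t avail = atomic_load(&binmap) & ((uint64_t)-1 << i);
    if (!avail) return -1;
    return __builtin_ctzll(avail); /* gcc/clang count-trailing-zeros */
}

int main(void)
{
    atomic_fetch_or(&binmap, 1ULL << 7); /* pretend bin 7 is non-empty */
    return find_bin(3) == 7 ? 0 : 1;
}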