author     Rich Felker <dalias@aerifal.cx>  2015-03-04 09:29:39 -0500
committer  Rich Felker <dalias@aerifal.cx>  2015-03-04 09:29:39 -0500
commit     7a81fe3710be0128d29071e76c5acbea3d84277b (patch)
tree       1ba3a7b2433dc4671a5b516996ee9051044cc622
parent     56fbaa3bbe73f12af2bfbbcf2adb196e6f9fe264 (diff)
fix init race that could lead to deadlock in malloc init code
the malloc init code provided its own version of pthread_once type logic, including the exact same bug that was fixed in pthread_once in commit 0d0c2f40344640a2a6942dda156509593f51db5d.

since this code is called adjacent to expand_heap, which takes a lock, there is no reason to have pthread_once-type initialization. simply moving the init code into the interval where expand_heap already holds its lock on the brk achieves the same result with much less synchronization logic, and allows the buggy code to be eliminated rather than just fixed.
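For context, a minimal sketch of the locking pattern the patch adopts: one-time setup folded into a critical section the function must enter anyway, guarded by a plain flag. This is illustrative only; the names (grow_heap, heap_lock, arena) and the use of a POSIX mutex are assumptions made for the sketch, whereas musl's expand_heap takes its own lock on mal.brk_lock and obtains memory with __brk(), as the diff below shows.

#include <pthread.h>
#include <stddef.h>

/* Stand-in for the address space the real code obtains via __brk(). */
static unsigned char arena[1 << 20];

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
static int heap_init;           /* written and read only with heap_lock held */
static unsigned char *heap_brk; /* current end of the "heap" */

/* Return a pointer to n fresh bytes, or NULL when the arena is exhausted. */
static void *grow_heap(size_t n)
{
	void *ret = 0;

	pthread_mutex_lock(&heap_lock);

	/* One-time setup done inside the critical section the function
	 * needs anyway: every access to heap_init happens under heap_lock,
	 * so a plain int is sufficient; no atomics, no futex wait/wake, and
	 * no separate once-gate that could race. */
	if (!heap_init) {
		heap_brk = arena;
		heap_init = 1;
	}

	if (n <= sizeof arena - (size_t)(heap_brk - arena)) {
		ret = heap_brk;
		heap_brk += n;
	}

	pthread_mutex_unlock(&heap_lock);
	return ret;
}

int main(void)
{
	return grow_heap(64) ? 0 : 1;
}

The contrast with the removed init_malloc in the diff is the point of the commit: once the init flag is only ever touched under a lock that must be taken anyway, the a_swap/__wait/__wake machinery becomes unnecessary, and the bug it shared with the old pthread_once logic disappears along with it.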
-rw-r--r--   src/malloc/malloc.c   53
1 file changed, 14 insertions, 39 deletions
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 70c7b3f3..4f61807b 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -154,11 +154,22 @@ void __dump_heap(int x)
static struct chunk *expand_heap(size_t n)
{
+ static int init;
struct chunk *w;
uintptr_t new;
lock(mal.brk_lock);
+ if (!init) {
+ mal.brk = __brk(0);
+#ifdef SHARED
+ mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
+#endif
+ mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
+ mal.heap = (void *)mal.brk;
+ init = 1;
+ }
+
if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
n = new - mal.brk;
@@ -186,6 +197,9 @@ static struct chunk *expand_heap(size_t n)
return area;
}
+ w = MEM_TO_CHUNK(mal.heap);
+ w->psize = 0 | C_INUSE;
+
w = MEM_TO_CHUNK(new);
w->psize = n | C_INUSE;
w->csize = 0 | C_INUSE;
@@ -203,44 +217,6 @@ fail:
return 0;
}
-static int init_malloc(size_t n)
-{
- static volatile int init, waiters;
- int state;
- struct chunk *c;
-
- if (init == 2) return 0;
-
- while ((state=a_swap(&init, 1)) == 1)
- __wait(&init, &waiters, 1, 1);
- if (state) {
- a_store(&init, 2);
- return 0;
- }
-
- mal.brk = __brk(0);
-#ifdef SHARED
- mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
-#endif
- mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
-
- c = expand_heap(n);
-
- if (!c) {
- a_store(&init, 0);
- if (waiters) __wake(&init, 1, 1);
- return -1;
- }
-
- mal.heap = (void *)c;
- c->psize = 0 | C_INUSE;
- free(CHUNK_TO_MEM(c));
-
- a_store(&init, 2);
- if (waiters) __wake(&init, -1, 1);
- return 1;
-}
-
static int adjust_size(size_t *n)
{
/* Result of pointer difference must fit in ptrdiff_t. */
@@ -375,7 +351,6 @@ void *malloc(size_t n)
for (;;) {
uint64_t mask = mal.binmap & -(1ULL<<i);
if (!mask) {
- if (init_malloc(n) > 0) continue;
c = expand_heap(n);
if (!c) return 0;
if (alloc_rev(c)) {