author     Max Rees <maxcrees@me.com>    2020-05-22 17:44:58 -0500
committer  Max Rees <maxcrees@me.com>    2020-06-15 04:10:52 -0500
commit     83fedda09fa002476aa5d8b1175233791a1b4934
tree       9b7b981872aaeaa1a692103eb60d7e51872adcc4 /system/musl
parent     18c6e5665fe1dadc47fad19f3f9629a5f9040b2b
system/musl: integrate MT->single thread transition fix from upstream
https://www.openwall.com/lists/musl/2020/05/22/10
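
The underlying bug: musl's LOCK() backends skip locking entirely when the
process looks single-threaded, so a thread that observes
threads_minus_1 == 0 just after the last other thread exits also skips the
barrier that would make that thread's final writes visible. A minimal
standalone sketch of the broken pattern (C11 atomics stand in for musl's
internal a_* primitives; this is an illustration, not musl source):

	#include <stdatomic.h>

	static atomic_int threads_minus_1;        /* threads beyond the first */
	static atomic_flag lk = ATOMIC_FLAG_INIT; /* one global lock */

	/* Pre-fix pattern: skip the lock when the process looks
	 * single-threaded.  If the last extra thread exited just before
	 * the load below, the early return also skips the lock's acquire
	 * barrier (and, when inlined, the compiler barrier), so this
	 * thread may reuse stale values -- e.g. malloc chunk in-use bits
	 * -- written by the exiting thread. */
	static void broken_lock(void)
	{
		if (!atomic_load_explicit(&threads_minus_1, memory_order_relaxed))
			return;                   /* barrier lost here */
		while (atomic_flag_test_and_set_explicit(&lk, memory_order_acquire))
			;                         /* spin; musl futex-waits */
	}

The four upstream patches below first make the unlink ordering safe, then
replace this check outright.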
Diffstat (limited to 'system/musl')
-rw-r--r--  system/musl/APKBUILD               |   4 +++-
-rw-r--r--  system/musl/threads_minus_1.patch  | 267 ++++++++++++++
2 files changed, 270 insertions(+), 1 deletion(-)
diff --git a/system/musl/APKBUILD b/system/musl/APKBUILD
index f7596ebb5..8517b148b 100644
--- a/system/musl/APKBUILD
+++ b/system/musl/APKBUILD
@@ -1,7 +1,7 @@
 # Maintainer: A. Wilcox <awilfox@adelielinux.org>
 pkgname=musl
 pkgver=1.2.0
-pkgrel=0
+pkgrel=1
 pkgdesc="System library (libc) implementation"
 url="https://www.musl-libc.org/"
 arch="all"
@@ -26,6 +26,7 @@ source="https://musl.libc.org/releases/$pkgname-$pkgver.tar.gz
 	3001-make-real-lastlog-h.patch
 	handle-aux-at_base.patch
 	fgetspent_r.patch
+	threads_minus_1.patch
 	ldconfig
 	getent.c
@@ -121,6 +122,7 @@ f01ab92b9d385c15369c0bb7d95e1bc06a009c8851e363517d0ba1bae3fc2647af69fc2f363b5d96
 88ae443dbb8e0a4368235bdc3a1c5c7b718495afa75e06deb8e01becc76cb1f0d6964589e2204fc749c9c1b3190b8b9ac1ae2c0099cab8e2ce3ec877103d4332  3001-make-real-lastlog-h.patch
 6a7ff16d95b5d1be77e0a0fbb245491817db192176496a57b22ab037637d97a185ea0b0d19da687da66c2a2f5578e4343d230f399d49fe377d8f008410974238  handle-aux-at_base.patch
 ded41235148930f8cf781538f7d63ecb0c65ea4e8ce792565f3649ee2523592a76b2a166785f0b145fc79f5852fd1fb1729a7a09110b3b8f85cba3912e790807  fgetspent_r.patch
+68830961e297d9a499f3b609be84848ad5d3326a1af56e9e54a40ecd972c48da11532c51da572d45e0df3574d63191e7ae0d3a1b84a029365f8d00691de96952  threads_minus_1.patch
 cb71d29a87f334c75ecbc911becde7be825ab30d8f39fa6d64cb53812a7c9abaf91d9804c72540e5be3ddd3c84cfe7fd9632274309005cb8bcdf9a9b09b4b923  ldconfig
 378d70e65bcc65bb4e1415354cecfa54b0c1146dfb24474b69e418cdbf7ad730472cd09f6f103e1c99ba6c324c9560bccdf287f5889bbc3ef0bdf0e08da47413  getent.c
 9d42d66fb1facce2b85dad919be5be819ee290bd26ca2db00982b2f8e055a0196290a008711cbe2b18ec9eee8d2270e3b3a4692c5a1b807013baa5c2b70a2bbf  iconv.c"
diff --git a/system/musl/threads_minus_1.patch b/system/musl/threads_minus_1.patch
new file mode 100644
index 000000000..05307d97c
--- /dev/null
+++ b/system/musl/threads_minus_1.patch
@@ -0,0 +1,267 @@
+https://www.openwall.com/lists/musl/2020/05/22/10
+
+From 4d5aa20a94a2d3fae3e69289dc23ecafbd0c16c4 Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:35:14 -0400
+Subject: [PATCH 1/4] reorder thread list unlink in pthread_exit after all
+ locks
+
+since the backend for LOCK() skips locking if single-threaded, it's
+unsafe to make the process appear single-threaded before the last use
+of lock.
+
+this fixes potential unsynchronized access to a linked list via
+__dl_thread_cleanup.
+---
+ src/thread/pthread_create.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
+index 5f491092..6a3b0c21 100644
+--- a/src/thread/pthread_create.c
++++ b/src/thread/pthread_create.c
+@@ -90,14 +90,7 @@ _Noreturn void __pthread_exit(void *result)
+ 		exit(0);
+ 	}
+ 
+-	/* At this point we are committed to thread termination. Unlink
+-	 * the thread from the list. This change will not be visible
+-	 * until the lock is released, which only happens after SYS_exit
+-	 * has been called, via the exit futex address pointing at the lock. */
+-	libc.threads_minus_1--;
+-	self->next->prev = self->prev;
+-	self->prev->next = self->next;
+-	self->prev = self->next = self;
++	/* At this point we are committed to thread termination. */
+ 
+ 	/* Process robust list in userspace to handle non-pshared mutexes
+ 	 * and the detached thread case where the robust list head will
+@@ -121,6 +114,16 @@ _Noreturn void __pthread_exit(void *result)
+ 	__do_orphaned_stdio_locks();
+ 	__dl_thread_cleanup();
+ 
++	/* Last, unlink thread from the list. This change will not be visible
++	 * until the lock is released, which only happens after SYS_exit
++	 * has been called, via the exit futex address pointing at the lock.
++	 * This needs to happen after any possible calls to LOCK() that might
++	 * skip locking if libc.threads_minus_1 is zero. */
++	libc.threads_minus_1--;
++	self->next->prev = self->prev;
++	self->prev->next = self->next;
++	self->prev = self->next = self;
++
+ 	/* This atomic potentially competes with a concurrent pthread_detach
+ 	 * call; the loser is responsible for freeing thread resources. */
+ 	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+--
+2.21.0
+
+From e01b5939b38aea5ecbe41670643199825874b26c Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Thu, 21 May 2020 23:32:45 -0400
+Subject: [PATCH 2/4] don't use libc.threads_minus_1 as relaxed atomic for
+ skipping locks
+
+after all but the last thread exits, the next thread to observe
+libc.threads_minus_1==0 and conclude that it can skip locking fails to
+synchronize with any changes to memory that were made by the
+last-exiting thread. this can produce data races.
+
+on some archs, at least x86, memory synchronization is unlikely to be
+a problem; however, with the inline locks in malloc, skipping the lock
+also eliminated the compiler barrier, and caused code that needed to
+re-check chunk in-use bits after obtaining the lock to reuse a stale
+value, possibly from before the process became single-threaded. this
+in turn produced corruption of the heap state.
+
+some uses of libc.threads_minus_1 remain, especially for allocation of
+new TLS in the dynamic linker; otherwise, it could be removed
+entirely. it's made non-volatile to reflect that the remaining
+accesses are only made under lock on the thread list.
+
+instead of libc.threads_minus_1, libc.threaded is now used for
+skipping locks. the difference is that libc.threaded is permanently
+true once an additional thread has been created. this will produce
+some performance regression in processes that are mostly
+single-threaded but occasionally creating threads. in the future it
+may be possible to bring back the full lock-skipping, but more care
+needs to be taken to produce a safe design.
+---
+ src/internal/libc.h | 2 +-
+ src/malloc/malloc.c | 2 +-
+ src/thread/__lock.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index ac97dc7e..c0614852 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -21,7 +21,7 @@ struct __libc {
+ 	int can_do_threads;
+ 	int threaded;
+ 	int secure;
+-	volatile int threads_minus_1;
++	int threads_minus_1;
+ 	size_t *auxv;
+ 	struct tls_module *tls_head;
+ 	size_t tls_size, tls_align, tls_cnt;
+diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
+index 96982596..2553a62e 100644
+--- a/src/malloc/malloc.c
++++ b/src/malloc/malloc.c
+@@ -26,7 +26,7 @@ int __malloc_replaced;
+ 
+ static inline void lock(volatile int *lk)
+ {
+-	if (libc.threads_minus_1)
++	if (libc.threaded)
+ 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+ }
+ 
+diff --git a/src/thread/__lock.c b/src/thread/__lock.c
+index 45557c88..5b9b144e 100644
+--- a/src/thread/__lock.c
++++ b/src/thread/__lock.c
+@@ -18,7 +18,7 @@
+ 
+ void __lock(volatile int *l)
+ {
+-	if (!libc.threads_minus_1) return;
++	if (!libc.threaded) return;
+ 	/* fast path: INT_MIN for the lock, +1 for the congestion */
+ 	int current = a_cas(l, 0, INT_MIN + 1);
+ 	if (!current) return;
+--
+2.21.0
+
+From f12888e9eb9eed60cc266b899dcafecb4752964a Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:25:38 -0400
+Subject: [PATCH 3/4] cut down size of some libc struct members
+
+these are all flags that can be single-byte values.
+---
+ src/internal/libc.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index c0614852..d47f58e0 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -18,9 +18,9 @@ struct tls_module {
+ };
+ 
+ struct __libc {
+-	int can_do_threads;
+-	int threaded;
+-	int secure;
++	char can_do_threads;
++	char threaded;
++	char secure;
+ 	int threads_minus_1;
+ 	size_t *auxv;
+ 	struct tls_module *tls_head;
+--
+2.21.0
+
+From 8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:45:47 -0400
+Subject: [PATCH 4/4] restore lock-skipping for processes that return to
+ single-threaded state
+
+the design used here relies on the barrier provided by the first lock
+operation after the process returns to single-threaded state to
+synchronize with actions by the last thread that exited. by storing
+the intent to change modes in the same object used to detect whether
+locking is needed, it's possible to avoid an extra (possibly costly)
+memory load after the lock is taken.
+---
+ src/internal/libc.h         | 1 +
+ src/malloc/malloc.c         | 5 ++++-
+ src/thread/__lock.c         | 4 +++-
+ src/thread/pthread_create.c | 8 ++++----
+ 4 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index d47f58e0..619bba86 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -21,6 +21,7 @@ struct __libc {
+ 	char can_do_threads;
+ 	char threaded;
+ 	char secure;
++	volatile signed char need_locks;
+ 	int threads_minus_1;
+ 	size_t *auxv;
+ 	struct tls_module *tls_head;
+diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
+index 2553a62e..a803d4c9 100644
+--- a/src/malloc/malloc.c
++++ b/src/malloc/malloc.c
+@@ -26,8 +26,11 @@ int __malloc_replaced;
+ 
+ static inline void lock(volatile int *lk)
+ {
+-	if (libc.threaded)
++	int need_locks = libc.need_locks;
++	if (need_locks) {
+ 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
++		if (need_locks < 0) libc.need_locks = 0;
++	}
+ }
+ 
+ static inline void unlock(volatile int *lk)
+diff --git a/src/thread/__lock.c b/src/thread/__lock.c
+index 5b9b144e..60eece49 100644
+--- a/src/thread/__lock.c
++++ b/src/thread/__lock.c
+@@ -18,9 +18,11 @@
+ 
+ void __lock(volatile int *l)
+ {
+-	if (!libc.threaded) return;
++	int need_locks = libc.need_locks;
++	if (!need_locks) return;
+ 	/* fast path: INT_MIN for the lock, +1 for the congestion */
+ 	int current = a_cas(l, 0, INT_MIN + 1);
++	if (need_locks < 0) libc.need_locks = 0;
+ 	if (!current) return;
+ 	/* A first spin loop, for medium congestion. */
+ 	for (unsigned i = 0; i < 10; ++i) {
+diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
+index 6a3b0c21..6bdfb44f 100644
+--- a/src/thread/pthread_create.c
++++ b/src/thread/pthread_create.c
+@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *result)
+ 	 * until the lock is released, which only happens after SYS_exit
+ 	 * has been called, via the exit futex address pointing at the lock.
+ 	 * This needs to happen after any possible calls to LOCK() that might
+-	 * skip locking if libc.threads_minus_1 is zero. */
+-	libc.threads_minus_1--;
++	 * skip locking if process appears single-threaded. */
++	if (!--libc.threads_minus_1) libc.need_locks = -1;
+ 	self->next->prev = self->prev;
+ 	self->prev->next = self->next;
+ 	self->prev = self->next = self;
+@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
+ 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
+ 
+ 	__tl_lock();
+-	libc.threads_minus_1++;
++	if (!libc.threads_minus_1++) libc.need_locks = 1;
+ 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
+ 
+ 	/* All clone failures translate to EAGAIN. If explicit scheduling
+@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
+ 		new->next->prev = new;
+ 		new->prev->next = new;
+ 	} else {
+-		libc.threads_minus_1--;
++		if (!--libc.threads_minus_1) libc.need_locks = 0;
+ 	}
+ 	__tl_unlock();
+ 	__restore_sigs(&set);
+--
+2.21.0
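
Taken together, patches 2-4 replace the raw threads_minus_1 check with a
small state machine. A condensed model of that protocol follows (a sketch
for illustration only: the "libc" struct, on_thread_create/on_thread_exit,
and the GCC __sync builtin are stand-ins for musl's real internals, which
use a_swap/__wait under the thread-list lock):

	/* need_locks:  0 = single-threaded, locks skippable
	 *              1 = multithreaded, locks required
	 *             -1 = just returned to single-threaded; the next
	 *                  locked section provides the barrier, then 0 */
	static struct {
		volatile signed char need_locks;
		int threads_minus_1;
	} libc;

	static void on_thread_create(void)  /* under the thread-list lock */
	{
		if (!libc.threads_minus_1++) libc.need_locks = 1;
	}

	static void on_thread_exit(void)    /* under the thread-list lock */
	{
		if (!--libc.threads_minus_1) libc.need_locks = -1;
	}

	static void lock(volatile int *lk)
	{
		int need_locks = libc.need_locks;
		if (!need_locks) return;    /* 0 only set after a barrier */
		while (__sync_lock_test_and_set(lk, 1))
			;                   /* spin; musl __wait()s instead */
		/* Only after taking the lock (a synchronization point with
		 * the exited thread's final writes) may -1 drop to 0. */
		if (need_locks < 0) libc.need_locks = 0;
	}

The design choice worth noting: the exit intent (-1) lives in the same
byte consulted to decide whether to lock at all, so the fast path still
costs a single load, yet a process that returns to single-threaded state
regains lock-free operation after one locked section.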