author     Rich Felker <dalias@aerifal.cx>    2015-04-10 00:26:34 -0400
committer  Rich Felker <dalias@aerifal.cx>    2015-04-10 00:26:34 -0400
commit     12e1e324683a1d381b7f15dd36c99b37dd44d940 (patch)
tree       1347010df052cd8697558df4ca998d7631ea3312 /src/thread
parent     25748db301c242d36718c6708ffd2b67a456483a (diff)
process robust list in pthread_exit to fix detached thread use-after-unmap
the robust list head lies in the thread structure, which is unmapped before exit for detached threads. this leaves the kernel unable to process the exiting thread's robust list, and with a dangling pointer which may happen to point to new unrelated data at the time the kernel processes it.

userspace processing of the robust list was already needed for non-pshared robust mutexes in order to perform private futex wakes rather than the shared ones the kernel would do, but it was conditional on linking pthread_mutexattr_setrobust and did not bother processing the pshared mutexes in the list, which requires additional logic for the robust list pending slot in case pthread_exit is interrupted by asynchronous process termination.

the new robust list processing code is linked unconditionally (inlined in pthread_exit), handles both private and shared mutexes, and also removes the kernel's reference to the robust list before unmapping and exit if the exiting thread is detached.
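For context, a minimal standalone sketch (not musl code, and not part of this patch) of the kernel-side robust list ABI the message refers to: each thread hands the kernel a robust_list_head via set_robust_list, and in musl that head is embedded in the thread structure, which is exactly what becomes invalid once a detached thread unmaps itself. Struct and syscall names come from linux/futex.h and sys/syscall.h; the helper name and main are illustrative only, and a real program would not make this call itself since the C library normally owns the registration.

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* One robust_list_head per thread; the kernel walks it when the thread dies.
 * The list_op_pending slot plays the same role as the "pending" slot the
 * commit message mentions: it names an entry whose lock or unlock was in
 * progress if the thread is terminated mid-operation. */
static struct robust_list_head head;

static long register_robust_list(void)
{
	head.list.next = &head.list;   /* empty circular list */
	head.futex_offset = 0;         /* byte offset from a list entry to its futex word */
	head.list_op_pending = 0;      /* no operation in flight */
	/* Passing a null head unregisters it, as the patch does before __unmapself;
	 * 3*sizeof(long) in the patch equals sizeof(struct robust_list_head) here. */
	return syscall(SYS_set_robust_list, &head, sizeof head);
}

int main(void)
{
	return register_robust_list() ? 1 : 0;
}
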
Diffstat (limited to 'src/thread')
-rw-r--r--   src/thread/pthread_create.c             29
-rw-r--r--   src/thread/pthread_mutexattr_setrobust.c 24
2 files changed, 27 insertions, 26 deletions
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index b6a7a5ef..893773fa 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -4,10 +4,13 @@
#include "libc.h"
#include <sys/mman.h>
#include <string.h>
+#include <stddef.h>
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
+void __vm_lock_impl(int);
+void __vm_unlock_impl(void);
static void dummy_0()
{
@@ -15,7 +18,6 @@ static void dummy_0()
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
-weak_alias(dummy_0, __do_private_robust_list);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
_Noreturn void __pthread_exit(void *result)
@@ -72,7 +74,25 @@ _Noreturn void __pthread_exit(void *result)
a_dec(&libc.bytelocale_cnt_minus_1);
}
- __do_private_robust_list();
+ /* Process robust list in userspace to handle non-pshared mutexes
+ * and the detached thread case where the robust list head will
+ * be invalid when the kernel would process it. */
+ __vm_lock_impl(+1);
+ volatile void *volatile *rp;
+ while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
+ pthread_mutex_t *m = (void *)((char *)rp
+ - offsetof(pthread_mutex_t, _m_next));
+ int waiters = m->_m_waiters;
+ int priv = (m->_m_type & 128) ^ 128;
+ self->robust_list.pending = rp;
+ self->robust_list.head = *rp;
+ int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
+ self->robust_list.pending = 0;
+ if (cont < 0 || waiters)
+ __wake(&m->_m_lock, 1, priv);
+ }
+ __vm_unlock_impl();
+
__do_orphaned_stdio_locks();
if (self->detached && self->map_base) {
@@ -85,6 +105,11 @@ _Noreturn void __pthread_exit(void *result)
* detached later (== 2), we need to clear it here. */
if (self->detached == 2) __syscall(SYS_set_tid_address, 0);
+ /* Robust list will no longer be valid, and was already
+ * processed above, so unregister it with the kernel. */
+ if (self->robust_list.off)
+ __syscall(SYS_set_robust_list, 0, 3*sizeof(long));
+
/* The following call unmaps the thread's stack mapping
* and then exits without touching the stack. */
__unmapself(self->map_base, self->map_size);
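
For a user-level view of what the processing loop above guarantees, here is an illustrative example using only standard POSIX calls (nothing musl-internal, and not part of this patch). The a_swap above stores self->tid|0x40000000 into the lock word; 0x40000000 matches the futex owner-died bit from linux/futex.h, so the next locker of an abandoned robust mutex learns that the previous owner terminated. The demo uses a joinable thread for simple sequencing; the bug fixed here is specific to detached threads, whose robust list head is unmapped along with the rest of the thread structure.

/* build: cc demo.c -lpthread */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t m;

/* Locks the robust mutex and then exits without unlocking it. */
static void *abandoning_thread(void *arg)
{
	pthread_mutex_lock(&m);
	return arg;
}

int main(void)
{
	pthread_mutexattr_t a;
	pthread_mutexattr_init(&a);
	pthread_mutexattr_setrobust(&a, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &a);
	pthread_mutexattr_destroy(&a);

	pthread_t t;
	pthread_create(&t, 0, abandoning_thread, 0);
	pthread_join(t, 0);

	/* The dead owner's exit-time robust list processing marks the lock
	 * word owner-died, so this returns EOWNERDEAD instead of blocking. */
	int r = pthread_mutex_lock(&m);
	printf("lock after owner exit: %s\n", strerror(r));
	if (r == EOWNERDEAD)
		pthread_mutex_consistent(&m);   /* recover state, then use normally */
	pthread_mutex_unlock(&m);
	return 0;
}
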
diff --git a/src/thread/pthread_mutexattr_setrobust.c b/src/thread/pthread_mutexattr_setrobust.c
index d0627889..dcfa4cf1 100644
--- a/src/thread/pthread_mutexattr_setrobust.c
+++ b/src/thread/pthread_mutexattr_setrobust.c
@@ -1,28 +1,4 @@
#include "pthread_impl.h"
-#include <stddef.h>
-
-void __do_private_robust_list()
-{
- pthread_t self = __pthread_self();
- volatile void *volatile *p;
- volatile void *volatile *prev;
- volatile void *volatile *next;
- pthread_mutex_t *m;
-
- prev = &self->robust_list.head;
- for (p=self->robust_list.head; p&&p!=&self->robust_list.head; p=next) {
- next = *p;
- m = (void *)((char *)p - offsetof(pthread_mutex_t, _m_next));
- if (!(m->_m_type & 128)) {
- int waiters = m->_m_waiters;
- *prev = next;
- int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
- if (cont < 0 || waiters) __wake(&m->_m_lock, 1, 1);
- } else {
- prev = p;
- }
- }
-}
int pthread_mutexattr_setrobust(pthread_mutexattr_t *a, int robust)
{