diff options
author | Rich Felker <dalias@aerifal.cx> | 2014-08-17 00:46:26 -0400 |
---|---|---|
committer | Rich Felker <dalias@aerifal.cx> | 2014-08-17 00:46:26 -0400 |
commit | de7e99c58508ca70f0b1b8ef259a823a3766c434 (patch) | |
tree | 68fbc616cb85a0a5a2992f64080d19b25855088f /src/thread | |
parent | d338b506e39b1e2c68366b12be90704c635602ce (diff) | |
download | musl-de7e99c58508ca70f0b1b8ef259a823a3766c434.tar.gz musl-de7e99c58508ca70f0b1b8ef259a823a3766c434.tar.bz2 musl-de7e99c58508ca70f0b1b8ef259a823a3766c434.tar.xz musl-de7e99c58508ca70f0b1b8ef259a823a3766c434.zip |
make pointers used in robust list volatile
when manipulating the robust list, the order of stores matters,
because the code may be asynchronously interrupted by a fatal signal
and the kernel will then access the robust list in what is essentially
an async-signal context.
previously, aliasing considerations made it seem unlikely that a
compiler could reorder the stores, but proving that no incorrect
reordering could occur would have been extremely difficult. instead
I've opted to make all the pointers used as part of the robust list,
including those in the robust list head and in the individual mutexes,
volatile.
in addition, the format of the robust list has been changed to point
back to the head at the end, rather than ending with a null pointer.
this is to match the documented kernel robust list ABI. the null
pointer, which was previously used, only worked because faults during
access terminate the robust list processing.
Diffstat (limited to 'src/thread')
-rw-r--r-- | src/thread/pthread_mutex_trylock.c | 8 | ||||
-rw-r--r-- | src/thread/pthread_mutex_unlock.c | 7 | ||||
-rw-r--r-- | src/thread/pthread_mutexattr_setrobust.c | 10 |
3 files changed, 16 insertions, 9 deletions
diff --git a/src/thread/pthread_mutex_trylock.c b/src/thread/pthread_mutex_trylock.c
index 8d256614..31587e1f 100644
--- a/src/thread/pthread_mutex_trylock.c
+++ b/src/thread/pthread_mutex_trylock.c
@@ -9,6 +9,7 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 	if (!self->robust_list.off) {
 		__syscall(SYS_set_robust_list,
 			&self->robust_list, 3*sizeof(long));
+		self->robust_list.head = &self->robust_list.head;
 		self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
 	}
@@ -29,10 +30,11 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 		return EBUSY;
 	}
 
-	m->_m_next = self->robust_list.head;
+	volatile void *next = self->robust_list.head;
+	m->_m_next = next;
 	m->_m_prev = &self->robust_list.head;
-	if (self->robust_list.head)
-		self->robust_list.head[-1] = &m->_m_next;
+	if (next != &self->robust_list.head) *(volatile void *volatile *)
+		((char *)next - sizeof(void *)) = &m->_m_next;
 	self->robust_list.head = &m->_m_next;
 	self->robust_list.pending = 0;
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 42837516..46761d9a 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -21,8 +21,11 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			__vm_lock_impl(+1);
 		}
-		*(void **)m->_m_prev = m->_m_next;
-		if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+		volatile void *prev = m->_m_prev;
+		volatile void *next = m->_m_next;
+		*(volatile void *volatile *)prev = next;
+		if (next != &self->robust_list.head) *(volatile void *volatile *)
+			((char *)next - sizeof(void *)) = prev;
 	}
 	cont = a_swap(&m->_m_lock, (type & 8) ? 0x40000000 : 0);
 	if (type != PTHREAD_MUTEX_NORMAL && !priv) {
diff --git a/src/thread/pthread_mutexattr_setrobust.c b/src/thread/pthread_mutexattr_setrobust.c
index 8948cbaf..d0627889 100644
--- a/src/thread/pthread_mutexattr_setrobust.c
+++ b/src/thread/pthread_mutexattr_setrobust.c
@@ -4,16 +4,18 @@ void __do_private_robust_list()
 {
 	pthread_t self = __pthread_self();
-	void **p, **prev, **next;
+	volatile void *volatile *p;
+	volatile void *volatile *prev;
+	volatile void *volatile *next;
 	pthread_mutex_t *m;
 
-	for (prev=0, p=self->robust_list.head; p; p=next) {
+	prev = &self->robust_list.head;
+	for (p=self->robust_list.head; p&&p!=&self->robust_list.head; p=next) {
 		next = *p;
 		m = (void *)((char *)p - offsetof(pthread_mutex_t, _m_next));
 		if (!(m->_m_type & 128)) {
 			int waiters = m->_m_waiters;
-			if (prev) *prev = next;
-			else self->robust_list.head = next;
+			*prev = next;
 			int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
 			if (cont < 0 || waiters) __wake(&m->_m_lock, 1, 1);
 		} else {