From de7e99c58508ca70f0b1b8ef259a823a3766c434 Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Sun, 17 Aug 2014 00:46:26 -0400
Subject: make pointers used in robust list volatile

when manipulating the robust list, the order of stores matters,
because the code may be asynchronously interrupted by a fatal signal
and the kernel will then access the robust list in what is essentially
an async-signal context.

previously, aliasing considerations made it seem unlikely that a
compiler could reorder the stores, but proving that they could not be
reordered incorrectly would have been extremely difficult. instead
I've opted to make all the pointers used as part of the robust list,
including those in the robust list head and in the individual mutexes,
volatile.

in addition, the format of the robust list has been changed to point
back to the head at the end, rather than ending with a null pointer.
this is to match the documented kernel robust list ABI. the null
pointer, which was previously used, only worked because faults during
access terminate the robust list processing.
---
 src/thread/pthread_mutex_trylock.c       |  8 +++++---
 src/thread/pthread_mutex_unlock.c        |  7 +++++--
 src/thread/pthread_mutexattr_setrobust.c | 10 ++++++----
 3 files changed, 16 insertions(+), 9 deletions(-)

(limited to 'src/thread')

diff --git a/src/thread/pthread_mutex_trylock.c b/src/thread/pthread_mutex_trylock.c
index 8d256614..31587e1f 100644
--- a/src/thread/pthread_mutex_trylock.c
+++ b/src/thread/pthread_mutex_trylock.c
@@ -9,6 +9,7 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 
 	if (!self->robust_list.off) {
 		__syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
+		self->robust_list.head = &self->robust_list.head;
 		self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
 	}
 
@@ -29,10 +30,11 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 		return EBUSY;
 	}
 
-	m->_m_next = self->robust_list.head;
+	volatile void *next = self->robust_list.head;
+	m->_m_next = next;
 	m->_m_prev = &self->robust_list.head;
-	if (self->robust_list.head)
-		self->robust_list.head[-1] = &m->_m_next;
+	if (next != &self->robust_list.head) *(volatile void *volatile *)
+		((char *)next - sizeof(void *)) = &m->_m_next;
 	self->robust_list.head = &m->_m_next;
 	self->robust_list.pending = 0;
 
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 42837516..46761d9a 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -21,8 +21,11 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			__vm_lock_impl(+1);
 		}
-		*(void **)m->_m_prev = m->_m_next;
-		if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+		volatile void *prev = m->_m_prev;
+		volatile void *next = m->_m_next;
+		*(volatile void *volatile *)prev = next;
+		if (next != &self->robust_list.head) *(volatile void *volatile *)
+			((char *)next - sizeof(void *)) = prev;
 	}
 	cont = a_swap(&m->_m_lock, (type & 8) ? 0x40000000 : 0);
 	if (type != PTHREAD_MUTEX_NORMAL && !priv) {
diff --git a/src/thread/pthread_mutexattr_setrobust.c b/src/thread/pthread_mutexattr_setrobust.c
index 8948cbaf..d0627889 100644
--- a/src/thread/pthread_mutexattr_setrobust.c
+++ b/src/thread/pthread_mutexattr_setrobust.c
@@ -4,16 +4,18 @@
 void __do_private_robust_list()
 {
 	pthread_t self = __pthread_self();
-	void **p, **prev, **next;
+	volatile void *volatile *p;
+	volatile void *volatile *prev;
+	volatile void *volatile *next;
 	pthread_mutex_t *m;
 
-	for (prev=0, p=self->robust_list.head; p; p=next) {
+	prev = &self->robust_list.head;
+	for (p=self->robust_list.head; p&&p!=&self->robust_list.head; p=next) {
 		next = *p;
 		m = (void *)((char *)p - offsetof(pthread_mutex_t, _m_next));
 		if (!(m->_m_type & 128)) {
 			int waiters = m->_m_waiters;
-			if (prev) *prev = next;
-			else self->robust_list.head = next;
+			*prev = next;
 			int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
 			if (cont < 0 || waiters) __wake(&m->_m_lock, 1, 1);
 		} else {
-- 
cgit v1.2.3-70-g09d2
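
For reference, the "documented kernel robust list ABI" that the new
circular list format matches is declared in the Linux UAPI header
include/uapi/linux/futex.h; a sketch of it follows, with the kernel's
__user annotations dropped and the comments paraphrased from the
kernel's documentation. Its three word-sized members are also why the
SYS_set_robust_list call in the trylock hunk above registers the head
with a length of 3*sizeof(long).

	/* One list entry, embedded in each robust mutex.  The list is
	 * documented as circular: the last entry points back at the
	 * list head instead of holding a null pointer. */
	struct robust_list {
		struct robust_list *next;
	};

	/* The per-thread head registered via set_robust_list(2).  On
	 * thread death the kernel walks the list, adding futex_offset
	 * to each entry's address to locate the futex word it must
	 * mark as owner-died, and checks list_op_pending to recover a
	 * mutex the thread was in the middle of (un)queuing. */
	struct robust_list_head {
		struct robust_list list;
		long futex_offset;
		struct robust_list *list_op_pending;
	};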