author    Rich Felker <dalias@aerifal.cx>  2015-03-03 22:50:02 -0500
committer Rich Felker <dalias@aerifal.cx>  2015-03-03 22:50:02 -0500
commit    56fbaa3bbe73f12af2bfbbcf2adb196e6f9fe264 (patch)
tree      09fbe371b108e25bb2f9c90e74da356359950d29 /src/thread
parent    eb4bd8d8bb5c9f535ee8250edd4efbd3d4f84c5a (diff)
make all objects used with atomic operations volatile
the memory model we use internally for atomics permits plain loads of values which may be subject to concurrent modification without requiring that a special load function be used. since a compiler is free to make transformations that alter the number of loads or the way in which loads are performed, the compiler is theoretically free to break this usage. the most obvious concern is with atomic cas constructs: something of the form tmp=*p;a_cas(p,tmp,f(tmp)); could be transformed to a_cas(p,*p,f(*p)); where the latter is intended to show multiple loads of *p whose resulting values might fail to be equal; this would break the atomicity of the whole operation. but even more fundamental breakage is possible.

with the changes being made now, objects that may be modified by atomics are modeled as volatile, and the atomic operations performed on them by other threads are modeled as asynchronous stores by hardware which happens to be acting on the request of another thread. such modeling of course does not itself address memory synchronization between cores/cpus, but that aspect was already handled. this all seems less than ideal, but it's the best we can do without mandating a C11 compiler and using the C11 model for atomics.

in the case of pthread_once_t, the ABI type of the underlying object is not volatile-qualified. so we are assuming that accessing the object through a volatile-qualified lvalue via casts yields volatile access semantics. the language of the C standard is somewhat unclear on this matter, but this is an assumption the linux kernel also makes, and seems to be the correct interpretation of the standard.
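As an illustration of the cas concern described above, the following is a minimal sketch, not part of this commit or of the musl tree. It assumes an a_cas(p, t, s) primitive behaving like musl's internal one: atomically store s to *p only if *p equals t, returning the previous value. The helper and the counter below are hypothetical; a_cas is emulated with a GCC/Clang builtin so the fragment is self-contained.

/* Hypothetical stand-in for musl's internal a_cas(): atomically
 * compare *p with t and, if equal, store s; the value previously
 * held in *p is returned.  Emulated here with a compiler builtin. */
static inline int a_cas(volatile int *p, int t, int s)
{
	return __sync_val_compare_and_swap(p, t, s);
}

static volatile int counter;  /* volatile, per this commit's model */

/* tmp=*p; a_cas(p,tmp,f(tmp)); retried until the cas succeeds.
 * If counter were a plain int, the compiler could legally reload it
 * for each use, effectively producing a_cas(p,*p,f(*p)) with two
 * loads that may observe different values; the volatile qualifier
 * pins each access to exactly one load. */
void increment_mod_1000(void)
{
	int tmp, old;
	do {
		tmp = counter;
		old = a_cas(&counter, tmp, (tmp + 1) % 1000);
	} while (old != tmp);
}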
Diffstat (limited to 'src/thread')
-rw-r--r--  src/thread/pthread_atfork.c          |  2
-rw-r--r--  src/thread/pthread_barrier_wait.c    |  8
-rw-r--r--  src/thread/pthread_cond_timedwait.c  | 10
-rw-r--r--  src/thread/pthread_key_create.c      |  2
-rw-r--r--  src/thread/pthread_once.c            |  2
-rw-r--r--  src/thread/sem_open.c                |  2
-rw-r--r--  src/thread/sem_timedwait.c           |  2
-rw-r--r--  src/thread/synccall.c                |  4
-rw-r--r--  src/thread/vmlock.c                  |  2
9 files changed, 18 insertions, 16 deletions
diff --git a/src/thread/pthread_atfork.c b/src/thread/pthread_atfork.c
index 95fce207..a40d7f63 100644
--- a/src/thread/pthread_atfork.c
+++ b/src/thread/pthread_atfork.c
@@ -8,7 +8,7 @@ static struct atfork_funcs {
struct atfork_funcs *prev, *next;
} *funcs;
-static int lock[2];
+static volatile int lock[2];
void __fork_handler(int who)
{
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index e15abb84..bfeb3464 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -54,10 +54,10 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
struct instance
{
- int count;
- int last;
- int waiters;
- int finished;
+ volatile int count;
+ volatile int last;
+ volatile int waiters;
+ volatile int finished;
};
int pthread_barrier_wait(pthread_barrier_t *b)
diff --git a/src/thread/pthread_cond_timedwait.c b/src/thread/pthread_cond_timedwait.c
index f5fd08c0..27b1a99a 100644
--- a/src/thread/pthread_cond_timedwait.c
+++ b/src/thread/pthread_cond_timedwait.c
@@ -29,8 +29,8 @@ int __pthread_setcancelstate(int, int *);
struct waiter {
struct waiter *prev, *next;
- int state, barrier;
- int *notify;
+ volatile int state, barrier;
+ volatile int *notify;
};
/* Self-synchronized-destruction-safe lock functions */
@@ -67,7 +67,8 @@ enum {
int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
{
struct waiter node = { 0 };
- int e, seq, *fut, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
+ int e, seq, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
+ volatile int *fut;
if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
return EPERM;
@@ -175,7 +176,8 @@ done:
int __private_cond_signal(pthread_cond_t *c, int n)
{
struct waiter *p, *first=0;
- int ref = 0, cur;
+ volatile int ref = 0;
+ int cur;
lock(&c->_c_lock);
for (p=c->_c_tail; n && p; p=p->prev) {
diff --git a/src/thread/pthread_key_create.c b/src/thread/pthread_key_create.c
index bfcd5976..198ae56e 100644
--- a/src/thread/pthread_key_create.c
+++ b/src/thread/pthread_key_create.c
@@ -3,7 +3,7 @@
volatile size_t __pthread_tsd_size = sizeof(void *) * PTHREAD_KEYS_MAX;
void *__pthread_tsd_main[PTHREAD_KEYS_MAX] = { 0 };
-static void (*keys[PTHREAD_KEYS_MAX])(void *);
+static void (*volatile keys[PTHREAD_KEYS_MAX])(void *);
static void nodtor(void *dummy)
{
diff --git a/src/thread/pthread_once.c b/src/thread/pthread_once.c
index df655ef9..a8f8aeb1 100644
--- a/src/thread/pthread_once.c
+++ b/src/thread/pthread_once.c
@@ -40,7 +40,7 @@ int __pthread_once(pthread_once_t *control, void (*init)(void))
{
/* Return immediately if init finished before, but ensure that
* effects of the init routine are visible to the caller. */
- if (*control == 2) {
+ if (*(volatile int *)control == 2) {
a_barrier();
return 0;
}
diff --git a/src/thread/sem_open.c b/src/thread/sem_open.c
index 9a95d257..ab884a42 100644
--- a/src/thread/sem_open.c
+++ b/src/thread/sem_open.c
@@ -20,7 +20,7 @@ static struct {
sem_t *sem;
int refcnt;
} *semtab;
-static int lock[2];
+static volatile int lock[2];
#define FLAGS (O_RDWR|O_NOFOLLOW|O_CLOEXEC|O_NONBLOCK)
diff --git a/src/thread/sem_timedwait.c b/src/thread/sem_timedwait.c
index a7488df7..8132eb1b 100644
--- a/src/thread/sem_timedwait.c
+++ b/src/thread/sem_timedwait.c
@@ -19,7 +19,7 @@ int sem_timedwait(sem_t *restrict sem, const struct timespec *restrict at)
int r;
a_inc(sem->__val+1);
a_cas(sem->__val, 0, -1);
- pthread_cleanup_push(cleanup, sem->__val+1);
+ pthread_cleanup_push(cleanup, (void *)(sem->__val+1));
r = __timedwait_cp(sem->__val, -1, CLOCK_REALTIME, at, sem->__val[2]);
pthread_cleanup_pop(1);
if (r && r != EINTR) {
diff --git a/src/thread/synccall.c b/src/thread/synccall.c
index 47d070b4..000ec4e3 100644
--- a/src/thread/synccall.c
+++ b/src/thread/synccall.c
@@ -14,8 +14,8 @@ static struct chain {
sem_t target_sem, caller_sem;
} *volatile head;
-static int synccall_lock[2];
-static int target_tid;
+static volatile int synccall_lock[2];
+static volatile int target_tid;
static void (*callback)(void *), *context;
static volatile int dummy = 0;
weak_alias(dummy, __block_new_threads);
diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
index aba9e311..125c6dc9 100644
--- a/src/thread/vmlock.c
+++ b/src/thread/vmlock.c
@@ -1,6 +1,6 @@
#include "pthread_impl.h"
-static int vmlock[2];
+static volatile int vmlock[2];
void __vm_lock(int inc)
{