author | Rich Felker <dalias@aerifal.cx> | 2011-09-27 13:50:29 -0400
---|---|---
committer | Rich Felker <dalias@aerifal.cx> | 2011-09-27 13:50:29 -0400
commit | 60164570111873175111cf8a5b973375e492eee9 (patch) |
tree | 1557931c4e0bacd2be217965be08031d5e3a4e51 | /src/thread/pthread_barrier_wait.c
parent | 3f39c9b3130cd6c142d358159879b799370a6663 (diff) |
process-shared barrier support, based on discussion with bdonlan
this implementation is rather heavy-weight, but it's the first
solution i've found that's actually correct. all waiters actually wait
twice at the barrier so that they can synchronize exit, and they hold
a "vm lock" that prevents changes to virtual memory mappings (and
blocks pthread_barrier_destroy) until all waiters are finished
inspecting the barrier.
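The tear-down side of that vm lock is not part of this file's diff; presumably munmap and pthread_barrier_destroy take the lock in the opposite (negative) sense so that they block until every waiter has dropped its hold. A rough, hypothetical sketch of such a caller, using only the __vm_lock/__vm_unlock interface added in the diff below (the function name destructive_op is illustrative only):

```c
/* Hypothetical caller of the vm lock's negative sense (not in this diff).
 * Barrier waiters hold the lock positively via __vm_lock(+1); a negative
 * holder blocks while the count is positive, and holds off new positive
 * holders until it releases. */
void __vm_lock(int), __vm_unlock(void);   /* as defined in the diff below */

static void teardown_sketch(void (*destructive_op)(void))
{
	__vm_lock(-1);    /* wait until no pthread_barrier_wait is inspecting */
	destructive_op(); /* e.g. change the mapping or reuse the memory */
	__vm_unlock();    /* drop the negative hold and wake blocked users */
}
```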
thus, it is safe for any thread to destroy and/or unmap the barrier's
memory as soon as pthread_barrier_wait returns, without further
synchronization.
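A hedged usage sketch of that guarantee, under assumed details (anonymous shared mapping, two processes, error checking omitted): each process calls pthread_barrier_wait() on a process-shared barrier and then immediately unmaps it, with the parent additionally destroying it first, and no extra handshake in between.

```c
#define _DEFAULT_SOURCE            /* for MAP_ANONYMOUS */
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Place the barrier itself in memory shared across fork(). */
	pthread_barrier_t *b = mmap(0, sizeof *b, PROT_READ|PROT_WRITE,
	                            MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	pthread_barrierattr_t a;
	pthread_barrierattr_init(&a);
	pthread_barrierattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
	pthread_barrier_init(b, &a, 2);
	pthread_barrierattr_destroy(&a);

	pid_t pid = fork();
	pthread_barrier_wait(b);          /* parent and child meet here */

	/* The point of the commit: no further handshake is needed before
	 * tearing the barrier down or unmapping its memory. */
	if (pid > 0) pthread_barrier_destroy(b);
	munmap(b, sizeof *b);
	if (pid > 0) waitpid(pid, 0, 0);
	return 0;
}
```

The guarantee quoted above is what makes the immediate munmap() in both processes valid, even though the other process may still be finishing its own call to pthread_barrier_wait().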
Diffstat (limited to 'src/thread/pthread_barrier_wait.c')
-rw-r--r-- | src/thread/pthread_barrier_wait.c | 74 |
1 file changed, 67 insertions, 7 deletions
```diff
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index aed1adc8..db432ba3 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -1,5 +1,62 @@
 #include "pthread_impl.h"
 
+static int vmlock[2];
+
+void __vm_lock(int inc)
+{
+	for (;;) {
+		int v = vmlock[0];
+		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
+		else if (a_cas(vmlock, v, v+inc)==v) break;
+	}
+}
+
+void __vm_unlock(void)
+{
+	if (vmlock[0]>0) a_dec(vmlock);
+	else a_inc(vmlock);
+	if (vmlock[1]) __wake(vmlock, 1, 1);
+}
+
+static int pshared_barrier_wait(pthread_barrier_t *b)
+{
+	int limit = (b->_b_limit & INT_MAX) + 1;
+	int seq;
+	int ret = 0;
+
+	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;
+
+	while (a_swap(&b->_b_lock, 1))
+		__wait(&b->_b_lock, &b->_b_waiters, 1, 0);
+
+	seq = b->_b_seq;
+
+	if (++b->_b_count == limit) {
+		ret = PTHREAD_BARRIER_SERIAL_THREAD;
+		b->_b_seq++;
+		__wake(&b->_b_seq, -1, 0);
+	} else {
+		a_store(&b->_b_lock, 0);
+		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
+		__wait(&b->_b_seq, 0, seq, 0);
+	}
+
+	__vm_lock(+1);
+
+	if (a_fetch_add(&b->_b_count, -1)==1) {
+		b->_b_seq++;
+		__wake(&b->_b_seq, -1, 0);
+		a_store(&b->_b_lock, 0);
+		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
+	} else {
+		__wait(&b->_b_seq, 0, seq+1, 0);
+	}
+
+	__vm_unlock();
+
+	return 0;
+}
+
 struct instance
 {
 	int count;
@@ -16,9 +73,12 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 	/* Trivial case: count was set at 1 */
 	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;
 
+	/* Process-shared barriers require a separate, inefficient wait */
+	if (limit < 0) return pshared_barrier_wait(b);
+
 	/* Otherwise we need a lock on the barrier object */
 	while (a_swap(&b->_b_lock, 1))
-		__wait(&b->_b_lock, &b->_b_waiters, 1, 0);
+		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
 	inst = b->_b_inst;
 
 	/* First thread to enter the barrier becomes the "instance owner" */
@@ -27,7 +87,7 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		int spins = 10000;
 		b->_b_inst = inst = &new_inst;
 		a_store(&b->_b_lock, 0);
-		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
+		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
 		while (spins-- && !inst->finished)
 			a_spin();
 		a_inc(&inst->finished);
@@ -40,19 +100,19 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 	if (++inst->count == limit) {
 		b->_b_inst = 0;
 		a_store(&b->_b_lock, 0);
-		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
+		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
 		a_store(&inst->last, 1);
 		if (inst->waiters)
-			__wake(&inst->last, -1, 0);
+			__wake(&inst->last, -1, 1);
 	} else {
 		a_store(&b->_b_lock, 0);
-		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
-		__wait(&inst->last, &inst->waiters, 0, 0);
+		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
+		__wait(&inst->last, &inst->waiters, 0, 1);
 	}
 
 	/* Last thread to exit the barrier wakes the instance owner */
 	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
-		__wake(&inst->finished, 1, 0);
+		__wake(&inst->finished, 1, 1);
 
 	return 0;
 }
```
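As a reading aid for the new pshared path above: pshared_barrier_wait rendezvouses on _b_seq twice, once to release all waiters and a second time, while the vm lock is held, to drain them so that the count and the barrier lock are back in their initial state before any thread can begin a new round. Below is a hypothetical, stripped-down sketch of that double rendezvous using C11 atomics and spinning in place of futexes; it has no vm lock, so it illustrates only the counting structure, not the unmap guarantee.

```c
/* Hypothetical sketch of the "wait twice" structure (not musl code).
 * The caller sets limit once before use; the other fields start at zero. */
#include <stdatomic.h>
#include <sched.h>

struct toy_barrier {
	int limit;              /* number of participating threads */
	atomic_int arrived;     /* round 1: threads that have arrived */
	atomic_int leaving;     /* round 2: threads not yet done with round 1 */
	atomic_int seq;         /* bumped twice per complete round */
};

void toy_wait(struct toy_barrier *b)
{
	int seq = atomic_load(&b->seq);

	/* Round 1: ordinary rendezvous; the last arrival resets the count
	 * and releases everyone else. */
	atomic_fetch_add(&b->leaving, 1);
	if (atomic_fetch_add(&b->arrived, 1) + 1 == b->limit) {
		atomic_store(&b->arrived, 0);
		atomic_fetch_add(&b->seq, 1);
	} else {
		while (atomic_load(&b->seq) == seq) sched_yield();
	}

	/* Round 2: the last thread through here bumps seq again, so no
	 * caller returns before every caller has finished round 1 and the
	 * counters are back in their initial state for the next round. */
	if (atomic_fetch_sub(&b->leaving, 1) == 1)
		atomic_fetch_add(&b->seq, 1);
	else
		while (atomic_load(&b->seq) == seq + 1) sched_yield();
}
```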