author	Rich Felker <dalias@aerifal.cx>	2012-08-17 17:13:53 -0400
committer	Rich Felker <dalias@aerifal.cx>	2012-08-17 17:13:53 -0400
commit	da8d0fc4fa3490f418a438b7e0830f9af312d41f (patch)
tree	2faddbd4253ce4cc0ef9f9216fce451d2f9b9c47 /src
parent	11458e5b098319cf3e2d05c8cbaa74d58db740e3 (diff)
fix extremely rare but dangerous race condition in robust mutexes
if new shared mappings of files/devices/shared memory can be made between the time a robust mutex is unlocked and its subsequent removal from the pending slot in the robustlist header, the kernel can inadvertently corrupt data in the newly-mapped pages when the process terminates. i am fixing the bug by using the same global vm lock mechanism that was used to fix the race condition with unmapping barriers after pthread_barrier_wait returns.
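To make the window concrete, here is an illustrative sketch (not part of the commit; the mutex object, file descriptor, and page size are placeholders) of the two activities whose interleaving the message describes. The fix has pthread_mutex_unlock hold the vm lock from just before the lock word is released until the pending slot is cleared, so the unmap/remap pair below cannot slide into that span; the unmap path takes the opposite side of the same lock, per the earlier barrier fix the message references.

	#include <pthread.h>
	#include <sys/mman.h>

	/* thread_a unlocks a robust, process-shared mutex that lives in a
	 * shared mapping; thread_b recycles the pages under it. if the
	 * process dies while the kernel's robust-list pending slot still
	 * points at the mutex, the exit-time robust-list walk writes an
	 * owner-died value through the stale pointer, corrupting
	 * thread_b's fresh mapping. */

	static void *thread_a(void *arg)
	{
		pthread_mutex_t *m = arg;  /* robust + pshared, in a shared map */
		pthread_mutex_unlock(m);   /* the race window opens inside here */
		return 0;
	}

	static void *thread_b(void *arg)
	{
		void *page = arg;          /* page containing the mutex */
		munmap(page, 4096);
		mmap(page, 4096, PROT_READ|PROT_WRITE,
		     MAP_SHARED|MAP_FIXED, 3 /* placeholder fd */, 0);
		return 0;
	}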
Diffstat (limited to 'src')
-rw-r--r--	src/thread/pthread_barrier_wait.c	23
-rw-r--r--	src/thread/pthread_mutex_unlock.c	8
-rw-r--r--	src/thread/vmlock.c	22
3 files changed, 33 insertions(+), 20 deletions(-)
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index 6052925a..5e603380 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -1,22 +1,7 @@
 #include "pthread_impl.h"
 
-static int vmlock[2];
-
-void __vm_lock(int inc)
-{
-	for (;;) {
-		int v = vmlock[0];
-		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
-		else if (a_cas(vmlock, v, v+inc)==v) break;
-	}
-}
-
-void __vm_unlock(void)
-{
-	int inc = vmlock[0]>0 ? -1 : 1;
-	if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
-		__wake(vmlock, -1, 1);
-}
+void __vm_lock_impl(int);
+void __vm_unlock_impl(void);
 
 static int pshared_barrier_wait(pthread_barrier_t *b)
 {
@@ -41,7 +26,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
 			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
 	}
 
-	__vm_lock(+1);
+	__vm_lock_impl(+1);
 
 	/* Ensure all threads have a vm lock before proceeding */
 	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
@@ -62,7 +47,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
 	if (v==INT_MIN+1 || (v==1 && w))
 		__wake(&b->_b_lock, 1, 0);
 
-	__vm_unlock();
+	__vm_unlock_impl();
 
 	return ret;
 }
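The vm lock usage this file keeps exists because a waiter may legally unmap a process-shared barrier as soon as its own wait returns; the "Ensure all threads have a vm lock" comment above is guarding exactly that. A minimal illustration (the function and its arguments are hypothetical):

	#include <pthread.h>
	#include <sys/mman.h>

	/* each process maps the same shared region, waits once, then
	 * unmaps. the vm lock keeps this munmap from completing while
	 * other waiters are still touching the mapping. */
	static void wait_then_unmap(pthread_barrier_t *b, void *map, size_t len)
	{
		pthread_barrier_wait(b);
		munmap(map, len);
	}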
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index fdf9fc10..5fc0f4e5 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -1,5 +1,8 @@
 #include "pthread_impl.h"
 
+void __vm_lock_impl(int);
+void __vm_unlock_impl(void);
+
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
 	pthread_t self;
@@ -20,11 +23,14 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+			__vm_lock_impl(+1);
 		}
 	}
 	cont = a_swap(&m->_m_lock, 0);
-	if (robust)
+	if (robust) {
 		self->robust_list.pending = 0;
+		__vm_unlock_impl();
+	}
 	if (waiters || cont<0)
 		__wake(&m->_m_lock, 1, 0);
 	return 0;
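Read linearly, the robust-unlock path now orders its steps as follows (condensed from the hunk above, with the non-robust path and error checks omitted):

	self->robust_list.pending = &m->_m_next;  /* kernel may act on this at exit */
	*(void **)m->_m_prev = m->_m_next;        /* unlink from the robust list */
	if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
	__vm_lock_impl(+1);                       /* block unmapping before release */
	cont = a_swap(&m->_m_lock, 0);            /* release the lock word */
	self->robust_list.pending = 0;            /* window closed */
	__vm_unlock_impl();                       /* unmapping may proceed */

Taking the vm lock before the a_swap is essential: once the lock word is zero, another process can acquire, unlock, and unmap the mutex, so the lock must already be held at the moment the window opens.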
diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
new file mode 100644
index 00000000..aba9e311
--- /dev/null
+++ b/src/thread/vmlock.c
@@ -0,0 +1,22 @@
+#include "pthread_impl.h"
+
+static int vmlock[2];
+
+void __vm_lock(int inc)
+{
+	for (;;) {
+		int v = vmlock[0];
+		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
+		else if (a_cas(vmlock, v, v+inc)==v) break;
+	}
+}
+
+void __vm_unlock(void)
+{
+	int inc = vmlock[0]>0 ? -1 : 1;
+	if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
+		__wake(vmlock, -1, 1);
+}
+
+weak_alias(__vm_lock, __vm_lock_impl);
+weak_alias(__vm_unlock, __vm_unlock_impl);
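The lock itself is a two-sided counting lock: vmlock[0] counts holders, any number of same-sign holders (+1 or -1) may enter concurrently, a holder of the opposite sign waits, and vmlock[1] is the waiter count used by __wait/__wake. The weak_alias pair at the end is what the declarations in pthread_barrier_wait.c and pthread_mutex_unlock.c resolve against; in musl's internal libc.h the macro is:

	#define weak_alias(old, new) \
		extern __typeof(old) new __attribute__((weak, alias(#old)))

so __vm_lock_impl and __vm_unlock_impl are simply weak names for the same functions; exposing the weak aliases rather than the primary symbols presumably leaves other translation units free to interpose stubs without link conflicts.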