author     Rich Felker <dalias@aerifal.cx>  2015-07-28 18:40:18 +0000
committer  Rich Felker <dalias@aerifal.cx>  2015-07-28 18:40:18 +0000
commit     3c43c0761e1725fd5f89a9c028cbf43250abb913 (patch)
tree       f8f504aaafc22502a8aec0a4ba58c60856096495 /arch
parent     fe7582f4f92152ab60e9523bf146fe28ceae51f6 (diff)
fix missing synchronization in atomic store on i386 and x86_64
despite being strongly ordered, the x86 memory model does not preclude
reordering of loads across earlier stores. while a plain store
suffices as a release barrier, we actually need a full barrier, since
users of a_store subsequently load a waiter count to determine whether
to issue a futex wait, and using a stale count will result in soft
(fail-to-wake) deadlocks. these deadlocks were observed in malloc and
are possible with stdio locks and other libc-internal locking.
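
The failure mode is easiest to see in a lock's unlock path. Below is a minimal sketch assuming the patched x86_64 a_store(); the fakelock structure and futex_wake_one() helper are illustrative stand-ins, not musl's actual lock internals.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

/* patched x86_64 a_store(), as in the diff below */
static inline void a_store(volatile int *p, int x)
{
	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
}

/* raw futex wake of one waiter; illustrative only, musl uses its own
 * internal wrapper */
static void futex_wake_one(volatile int *addr)
{
	syscall(SYS_futex, addr, FUTEX_WAKE, 1, 0, 0, 0);
}

struct fakelock {
	volatile int state;   /* 0 = unlocked, nonzero = locked */
	volatile int waiters; /* threads blocked in futex wait */
};

static void fakelock_unlock(struct fakelock *l)
{
	a_store(&l->state, 0);   /* release the lock */
	if (l->waiters)          /* this load must not move above the store:
	                          * with a plain mov, the cpu can satisfy it
	                          * with a stale zero, skip the wake, and
	                          * leave a freshly-slept waiter stuck */
		futex_wake_one(&l->state);
}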
on i386, an atomic operation on the caller's stack is used as the
barrier rather than performing the store itself using xchg; this
avoids the need to read the cache line on which the store is being
performed. mfence is used on x86_64 where it's always available, and
could be used on i386 with the appropriate cpu model checks if it's
shown to perform better.
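
For reference, the two full-barrier idioms the commit chooses between can be written as standalone helpers; these are illustrative only and not part of musl's atomic.h interface.

/* i386 (32-bit builds): a locked no-op read-modify-write on the
 * caller's stack. the stack line is already held exclusively by this
 * core, so this serializes without having to read the line a_store
 * just wrote, which an xchg-based store would require. */
static inline void full_barrier_i386(void)
{
	__asm__ __volatile__( "lock ; orl $0,(%%esp)" ::: "memory" );
}

/* x86_64: mfence, always available there. on i386 it would need an
 * SSE2 capability check (e.g. via cpuid), since baseline i386 lacks
 * the instruction. */
static inline void full_barrier_x86_64(void)
{
	__asm__ __volatile__( "mfence" ::: "memory" );
}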
Diffstat (limited to 'arch')
 arch/i386/atomic.h   | 2 +-
 arch/x32/atomic.h    | 2 +-
 arch/x86_64/atomic.h | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
index 95fecbdc..25441df2 100644
--- a/arch/i386/atomic.h
+++ b/arch/i386/atomic.h
@@ -88,7 +88,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_store(volatile int *p, int x)
 {
-	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
+	__asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()
diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
index b2014cc0..2ab1f7a2 100644
--- a/arch/x32/atomic.h
+++ b/arch/x32/atomic.h
@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_store(volatile int *p, int x)
 {
-	__asm__( "mov %1, %0" : "=m"(*p) : "r"(x) : "memory" );
+	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index b2014cc0..2ab1f7a2 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_store(volatile int *p, int x)
 {
-	__asm__( "mov %1, %0" : "=m"(*p) : "r"(x) : "memory" );
+	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()