author    Rich Felker <dalias@aerifal.cx>  2016-01-22 00:16:53 +0000
committer Rich Felker <dalias@aerifal.cx>  2016-01-22 00:16:53 +0000
commit    e24984efd5c6ac5ea8e6cb6cd914fa8435d458bc (patch)
tree      eb38313534ed61fc81c346056bd1a37d399614bb /arch/i386
parent    369b22f9c4aebba2b8fe905db8469b2667572ee1 (diff)
clean up i386 atomics for new atomics framework
this commit mostly makes consistent things like spacing, function ordering in atomic_arch.h, argument names, use of volatile, etc. the fake 64-bit and/or atomics are also removed because the shared atomic.h does a better job of implementing them; it avoids making two atomic memory accesses when only one 32-bit half needs to be touched.

no major overhaul is needed or possible because x86 actually has native versions of all the usual atomic operations, rather than using ll/sc or needing cas loops.
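for reference, the trick the message attributes to the shared atomic.h can be sketched roughly as follows: compose the 64-bit and/or out of the 32-bit primitives and skip any half the operation would leave unchanged. this is only an illustration, not the actual shared atomic.h code; sketch_and_64 and sketch_or_64 are made-up names, and the union indexing assumes little-endian, which holds on i386.

	#include <stdint.h>

	/* illustration only: 64-bit and/or built from the 32-bit a_and/a_or
	 * primitives, touching a half only when the operation changes it */
	static inline void sketch_and_64(volatile uint64_t *p, uint64_t v)
	{
		union { uint64_t v; uint32_t r[2]; } u = { v };
		/* and with an all-ones half is a no-op; skip it */
		if (u.r[0]+1) a_and((volatile int *)p, u.r[0]);
		if (u.r[1]+1) a_and((volatile int *)p+1, u.r[1]);
	}

	static inline void sketch_or_64(volatile uint64_t *p, uint64_t v)
	{
		union { uint64_t v; uint32_t r[2]; } u = { v };
		/* or with a zero half is a no-op; skip it */
		if (u.r[0]) a_or((volatile int *)p, u.r[0]);
		if (u.r[1]) a_or((volatile int *)p+1, u.r[1]);
	}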
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/atomic_arch.h | 124
1 file changed, 58 insertions, 66 deletions
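as a point of contrast for the message's last remark: on an architecture with only compare-and-swap, even fetch-add needs a retry loop, roughly like the hypothetical generic fallback below (cas_loop_fetch_add is a made-up name). on i386, the single lock ; xadd in the diff that follows replaces the whole loop.

	/* hypothetical cas-loop fallback, built on the a_cas primitive;
	 * i386 needs none of this because xadd performs the whole
	 * read-modify-write in one locked instruction */
	static inline int cas_loop_fetch_add(volatile int *p, int v)
	{
		int old;
		do old = *p;
		while (a_cas(p, old, old+v) != old);
		return old;
	}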
diff --git a/arch/i386/atomic_arch.h b/arch/i386/atomic_arch.h
index 5de862ed..6e67c4ce 100644
--- a/arch/i386/atomic_arch.h
+++ b/arch/i386/atomic_arch.h
@@ -1,99 +1,68 @@
-#define a_ctz_64 a_ctz_64
-static inline int a_ctz_64(uint64_t x)
-{
- int r;
- __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
- : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
- return r;
-}
-
-#define a_ctz_l a_ctz_l
-static inline int a_ctz_l(unsigned long x)
-{
- long r;
- __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
- return r;
-}
-
-#define a_and_64 a_and_64
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
-{
- __asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
- : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
-}
-
-#define a_or_64 a_or_64
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
-{
- __asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
- : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
-}
-
-#define a_or_l a_or_l
-static inline void a_or_l(volatile void *p, long v)
-{
- __asm__( "lock ; orl %1, %0"
- : "=m"(*(long *)p) : "r"(v) : "memory" );
-}
-
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
- __asm__( "lock ; cmpxchg %3, %1"
+ __asm__ __volatile__ (
+ "lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
return t;
}
-#define a_or a_or
-static inline void a_or(volatile int *p, int v)
+#define a_swap a_swap
+static inline int a_swap(volatile int *p, int v)
{
- __asm__( "lock ; orl %1, %0"
- : "=m"(*p) : "r"(v) : "memory" );
+ __asm__ __volatile__(
+ "xchg %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
}
-#define a_and a_and
-static inline void a_and(volatile int *p, int v)
+#define a_fetch_add a_fetch_add
+static inline int a_fetch_add(volatile int *p, int v)
{
- __asm__( "lock ; andl %1, %0"
- : "=m"(*p) : "r"(v) : "memory" );
+ __asm__ __volatile__(
+ "lock ; xadd %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
}
-#define a_swap a_swap
-static inline int a_swap(volatile int *x, int v)
+#define a_and a_and
+static inline void a_and(volatile int *p, int v)
{
- __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
- return v;
+ __asm__ __volatile__(
+ "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
}
-#define a_fetch_add a_fetch_add
-static inline int a_fetch_add(volatile int *x, int v)
+#define a_or a_or
+static inline void a_or(volatile int *p, int v)
{
- __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
- return v;
+ __asm__ __volatile__(
+ "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
}
#define a_inc a_inc
-static inline void a_inc(volatile int *x)
+static inline void a_inc(volatile int *p)
{
- __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
}
#define a_dec a_dec
-static inline void a_dec(volatile int *x)
+static inline void a_dec(volatile int *p)
{
- __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
+ __asm__ __volatile__(
+ "lock ; decl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
}
#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
- __asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
-}
-
-#define a_spin a_spin
-static inline void a_spin()
-{
- __asm__ __volatile__( "pause" : : : "memory" );
+ __asm__ __volatile__(
+ "mov %1, %0 ; lock ; orl $0,(%%esp)"
+ : "=m"(*p) : "r"(x) : "memory" );
}
#define a_barrier a_barrier
@@ -102,8 +71,31 @@ static inline void a_barrier()
__asm__ __volatile__( "" : : : "memory" );
}
+#define a_spin a_spin
+static inline void a_spin()
+{
+ __asm__ __volatile__( "pause" : : : "memory" );
+}
+
#define a_crash a_crash
static inline void a_crash()
{
__asm__ __volatile__( "hlt" : : : "memory" );
}
+
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
+{
+ int r;
+ __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; add $32,%0\n1:"
+ : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
+ return r;
+}
+
+#define a_ctz_l a_ctz_l
+static inline int a_ctz_l(unsigned long x)
+{
+ long r;
+ __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
+ return r;
+}
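a quick usage sketch, not part of the commit: the primitives above compose into a minimal test-and-set spinlock. note that a_store follows its plain mov with lock ; orl $0,(%%esp), a locked no-op on the stack that acts as a cheap full barrier, giving the store sequentially consistent ordering without mfence. sketch_lock and sketch_unlock are made-up names.

	/* hypothetical spinlock built on a_swap/a_spin/a_store; xchg is
	 * implicitly locked on x86, so the swap doubles as the acquire */
	static inline void sketch_lock(volatile int *l)
	{
		while (a_swap(l, 1))
			a_spin();	/* pause eases spinning on hyperthreads */
	}

	static inline void sketch_unlock(volatile int *l)
	{
		a_store(l, 0);	/* trailing locked orl fences the store */
	}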