summaryrefslogtreecommitdiff
path: root/arch/x86_64
diff options
context:
space:
mode:
authorRich Felker <dalias@aerifal.cx>2014-07-27 21:50:24 -0400
committerRich Felker <dalias@aerifal.cx>2014-07-27 21:50:24 -0400
commit90e51e45f57eb0e1564b3610b9bbd768215e5d6d (patch)
tree1691097a5acf25c5f30d03940c0e11416d56d226 /arch/x86_64
parentc394763d350f92ff1dbfb79fcd7124da47bc7043 (diff)
downloadmusl-90e51e45f57eb0e1564b3610b9bbd768215e5d6d.tar.gz
musl-90e51e45f57eb0e1564b3610b9bbd768215e5d6d.tar.bz2
musl-90e51e45f57eb0e1564b3610b9bbd768215e5d6d.tar.xz
musl-90e51e45f57eb0e1564b3610b9bbd768215e5d6d.zip
clean up unused and inconsistent atomics in arch dirs
the a_cas_l, a_swap_l, a_swap_p, and a_store_l operations were probably used a long time ago when only i386 and x86_64 were supported. as other archs were added, support for them was inconsistent, and they are obviously not in use at present. having them around potentially confuses readers working on new ports, and the type-punning hacks and inconsistent use of types in their definitions is not a style I wish to perpetuate in the source tree, so removing them seems appropriate.
Diffstat (limited to 'arch/x86_64')
-rw-r--r--arch/x86_64/atomic.h25
1 file changed, 0 insertions, 25 deletions
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 124b37ac..ae0a576c 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -27,11 +27,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
: "=m"(*p) : "r"(v) : "memory" );
}
-static inline void a_store_l(volatile void *p, long x)
-{
- __asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
-}
-
static inline void a_or_l(volatile void *p, long v)
{
__asm__( "lock ; or %1, %0"
@@ -45,13 +40,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return t;
}
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
- __asm__( "lock ; cmpxchg %3, %1"
- : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
- return t;
-}
-
static inline int a_cas(volatile int *p, int t, int s)
{
__asm__( "lock ; cmpxchg %3, %1"
@@ -59,17 +47,6 @@ static inline int a_cas(volatile int *p, int t, int s)
return t;
}
-static inline void *a_swap_p(void *volatile *x, void *v)
-{
- __asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
- return v;
-}
-static inline long a_swap_l(volatile void *x, long v)
-{
- __asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
- return v;
-}
-
static inline void a_or(volatile void *p, int v)
{
__asm__( "lock ; or %1, %0"
@@ -88,8 +65,6 @@ static inline int a_swap(volatile int *x, int v)
return v;
}
-#define a_xchg a_swap
-
static inline int a_fetch_add(volatile int *x, int v)
{
__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );