author    rofl0r <retnyg@gmx.net>  2014-01-07 22:43:34 +0100
committer rofl0r <retnyg@gmx.net>  2014-02-23 11:07:18 +0100
commit    323272db175204b951f119dae4bd99ef05e20f13 (patch)
tree      70329156d5189294b1e9e7f9c7c326924ad62e35  /arch/x32/atomic.h
parent    0f169cbb79c39a5b15f7a27d9283cdeb6e122b8f (diff)
import vanilla x86_64 code as x32
Diffstat (limited to 'arch/x32/atomic.h')
-rw-r--r--  arch/x32/atomic.h  125
1 file changed, 125 insertions, 0 deletions
diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
new file mode 100644
index 00000000..124b37ac
--- /dev/null
+++ b/arch/x32/atomic.h
@@ -0,0 +1,125 @@
+#ifndef _INTERNAL_ATOMIC_H
+#define _INTERNAL_ATOMIC_H
+
+#include <stdint.h>
+
+static inline int a_ctz_64(uint64_t x)
+{
+ __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
+
+static inline int a_ctz_l(unsigned long x)
+{
+ __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
+
+static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+{
+ __asm__( "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void a_store_l(volatile void *p, long x)
+{
+ __asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
+}
+
+static inline void a_or_l(volatile void *p, long v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*(long *)p) : "r"(v) : "memory" );
+}
+
+static inline void *a_cas_p(volatile void *p, void *t, void *s)
+{
+ __asm__( "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+static inline long a_cas_l(volatile void *p, long t, long s)
+{
+ __asm__( "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+static inline int a_cas(volatile int *p, int t, int s)
+{
+ __asm__( "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+static inline void *a_swap_p(void *volatile *x, void *v)
+{
+ __asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
+ return v;
+}
+static inline long a_swap_l(volatile void *x, long v)
+{
+ __asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
+ return v;
+}
+
+static inline void a_or(volatile void *p, int v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*(int *)p) : "r"(v) : "memory" );
+}
+
+static inline void a_and(volatile void *p, int v)
+{
+ __asm__( "lock ; and %1, %0"
+ : "=m"(*(int *)p) : "r"(v) : "memory" );
+}
+
+static inline int a_swap(volatile int *x, int v)
+{
+ __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
+ return v;
+}
+
+#define a_xchg a_swap
+
+static inline int a_fetch_add(volatile int *x, int v)
+{
+ __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
+ return v;
+}
+
+static inline void a_inc(volatile int *x)
+{
+ __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
+}
+
+static inline void a_dec(volatile int *x)
+{
+ __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
+}
+
+static inline void a_store(volatile int *p, int x)
+{
+ __asm__( "mov %1, %0" : "=m"(*p) : "r"(x) : "memory" );
+}
+
+static inline void a_spin(void)
+{
+ __asm__ __volatile__( "pause" : : : "memory" );
+}
+
+static inline void a_crash(void)
+{
+ __asm__ __volatile__( "hlt" : : : "memory" );
+}
+
+
+#endif
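
As a usage illustration (not part of the commit): primitives like these are what musl's internals build locks and reference counts from. Below is a minimal sketch of a test-and-set spinlock on top of a_cas, a_spin, and a_store; the lock variable and the spin_lock/spin_unlock names are hypothetical, not taken from the tree.

/* Sketch only: a spinlock composed from the atomics above. */
#include "atomic.h"

static volatile int lock;	/* 0 = unlocked, 1 = held */

static void spin_lock(void)
{
	/* a_cas returns the old value of *p; it stores 1 only if the
	 * old value was 0, so a zero return means we took the lock. */
	while (a_cas(&lock, 0, 1))
		a_spin();	/* pause; eases contention with SMT siblings */
}

static void spin_unlock(void)
{
	/* plain atomic store with a compiler barrier; on x86's strong
	 * memory model this suffices as the unlock on this arch */
	a_store(&lock, 0);
}

Note that the cmpxchg-based a_cas returns the previous value rather than a success flag; callers compare it against the expected value t, which is why the loop condition above is just the return value.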