author	Rich Felker <dalias@aerifal.cx>	2014-07-19 15:51:12 -0400
committer	Rich Felker <dalias@aerifal.cx>	2014-07-19 15:51:12 -0400
commit	bcad48439494820989f5867c3f8ccfa6aae2909f (patch)
tree	96b9f234bd245c189e8a7a1cef9545efb8fe6b58 /arch
parent	a294f539c78c6ba0a2786ef3c5b2a1210a33864e (diff)
fix missing barrier instructions in mips atomic asm
previously I had wrongly assumed the ll/sc instructions also provided memory synchronization; apparently they do not. this commit adds sync instructions before and after each atomic operation and changes the atomic store to simply use sync before and after a plain store, rather than a useless compare-and-swap.
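
As an illustration of why the barriers matter (a hypothetical usage sketch, not part of this commit): if a_cas and a_store provide only atomicity without ordering, a lock built on them lets the CPU move the protected accesses outside the critical section.

/* Hypothetical example. It assumes the a_cas()/a_store() primitives from
 * arch/mips/atomic.h are visible here (e.g. through musl's internal
 * "atomic.h" header). Without the sync barriers added by this commit,
 * the ll/sc pair is atomic but imposes no memory ordering, so the store
 * to data could become visible after the unlocking a_store(&lock, 0),
 * and a reader that then acquires the lock could still observe stale data. */
#include "atomic.h"

static volatile int lock;
static volatile int data;

static void publish(int v)
{
	while (a_cas(&lock, 0, 1));  /* acquire: needs a barrier after ll/sc */
	data = v;                    /* protected store */
	a_store(&lock, 0);           /* release: needs a barrier before the store */
}

static int consume(void)
{
	int v;
	while (a_cas(&lock, 0, 1));  /* acquire */
	v = data;
	a_store(&lock, 0);           /* release */
	return v;
}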
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/atomic.h	32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/arch/mips/atomic.h b/arch/mips/atomic.h
index 6731d17b..9dcd1555 100644
--- a/arch/mips/atomic.h
+++ b/arch/mips/atomic.h
@@ -29,12 +29,14 @@ static inline int a_cas(volatile int *p, int t, int s)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %2\n"
" bne %0, %3, 1f\n"
" addu %1, %4, $0\n"
" sc %1, %2\n"
" beq %1, $0, 1b\n"
" nop\n"
+ " sync\n"
"1: \n"
".set pop\n"
: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
@@ -59,12 +61,13 @@ static inline int a_swap(volatile int *x, int v)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %2\n"
" addu %1, %3, $0\n"
" sc %1, %2\n"
" beq %1, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
return old;
@@ -77,12 +80,13 @@ static inline int a_fetch_add(volatile int *x, int v)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %2\n"
" addu %1, %0, %3\n"
" sc %1, %2\n"
" beq %1, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
return old;
@@ -95,12 +99,13 @@ static inline void a_inc(volatile int *x)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %1\n"
" addu %0, %0, 1\n"
" sc %0, %1\n"
" beq %0, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*x) : : "memory" );
}
@@ -112,31 +117,28 @@ static inline void a_dec(volatile int *x)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %1\n"
" subu %0, %0, 1\n"
" sc %0, %1\n"
" beq %0, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*x) : : "memory" );
}
static inline void a_store(volatile int *p, int x)
{
- int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"
".set noreorder\n"
- "1: ll %0, %1\n"
- " addu %0, %2, $0\n"
- " sc %0, %1\n"
- " beq %0, $0, 1b\n"
- " nop\n"
- "1: \n"
+ " sync\n"
+ " sw %1, %0\n"
+ " sync\n"
".set pop\n"
- : "=&r"(dummy), "+m"(*p) : "r"(x) : "memory" );
+ : "+m"(*p) : "r"(x) : "memory" );
}
static inline void a_spin()
@@ -155,12 +157,13 @@ static inline void a_and(volatile int *p, int v)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %1\n"
" and %0, %0, %2\n"
" sc %0, %1\n"
" beq %0, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
@@ -172,12 +175,13 @@ static inline void a_or(volatile int *p, int v)
".set push\n"
".set mips2\n"
".set noreorder\n"
+ " sync\n"
"1: ll %0, %1\n"
" or %0, %0, %2\n"
" sc %0, %1\n"
" beq %0, $0, 1b\n"
" nop\n"
- "1: \n"
+ " sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
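
For reference, after this patch a_store() collapses to a plain store bracketed by full barriers. The sketch below is reassembled from the a_store hunk above; the function signature is the one shown in its context lines.

static inline void a_store(volatile int *p, int x)
{
	/* sync on both sides orders the plain sw against surrounding
	 * accesses; no ll/sc retry loop is needed for a simple store. */
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		"	sync\n"
		"	sw	%1, %0\n"
		"	sync\n"
		".set pop\n"
		: "+m"(*p) : "r"(x) : "memory" );
}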