author     Rich Felker <dalias@aerifal.cx>    2019-05-11 19:44:21 -0400
committer  Rich Felker <dalias@aerifal.cx>    2019-05-11 19:44:21 -0400
commit     bdb0817599325e6ca6838717dfe18290695a59a2 (patch)
tree       9ec908dfc35ac48bef7bf19126147289a3561eb2 /arch/i386
parent     c8798ef974d21c338a7d8d874a402978ffc6168e (diff)
improve i386 inline syscall asm on non-broken compilers
we have to avoid using ebx unconditionally in asm constraints for i386, because gcc 3 and 4, and possibly other simplistic compilers (pcc?), implement PIC by making ebx a fixed-use register, and disallow its use for anything else. rather than hard-coding knowledge of which compilers work (at least gcc 5+ and clang), perform a configure test; this should give us the good codegen on any new compilers we don't yet know about.

swapping ebx and edx is kept for 1- and 2-arg syscalls because it avoids having any spills/stack-frame at all in small functions. for 6-arg, if ebx is directly usable, the complex shuffling introduced in commit c8798ef974d21c338a7d8d874a402978ffc6168e can be avoided, and ebp can be loaded the same way ebx is in 5-arg syscalls for compilers that don't support direct use of ebx.
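[editor's note] The configure test itself is not part of this diff. As a sketch, the probe presumably only needs to check whether a "b" (ebx) asm constraint is accepted under -fPIC, and define BROKEN_EBX_ASM when it is not; the file and function names here are illustrative, not quoted from musl's configure script:

	/* probe.c -- compiled with something like: cc -fPIC -c probe.c
	 * On gcc 3/4, -fPIC pins ebx as the GOT pointer, so the "b"
	 * constraint below is rejected and compilation fails; configure
	 * can then add -DBROKEN_EBX_ASM to CFLAGS. On gcc 5+ and clang
	 * it compiles, and the direct-ebx syscall paths are used. */
	int probe(int x)
	{
		__asm__ ( "" : "+b"(x) );
		return x;
	}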
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/syscall_arch.h  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/i386/syscall_arch.h b/arch/i386/syscall_arch.h
index 29e141f2..22b0b28b 100644
--- a/arch/i386/syscall_arch.h
+++ b/arch/i386/syscall_arch.h
@@ -36,30 +36,49 @@ static inline long __syscall2(long n, long a1, long a2)
static inline long __syscall3(long n, long a1, long a2, long a3)
{
unsigned long __ret;
+#if !defined(__PIC__) || !defined(BROKEN_EBX_ASM)
+ __asm__ __volatile__ (SYSCALL_INSNS : "=a"(__ret) : "a"(n), "b"(a1), "c"(a2), "d"(a3) : "memory");
+#else
__asm__ __volatile__ (SYSCALL_INSNS_34 : "=a"(__ret) : "a"(n), "D"(a1), "c"(a2), "d"(a3) : "memory");
+#endif
return __ret;
}
static inline long __syscall4(long n, long a1, long a2, long a3, long a4)
{
unsigned long __ret;
+#if !defined(__PIC__) || !defined(BROKEN_EBX_ASM)
+ __asm__ __volatile__ (SYSCALL_INSNS : "=a"(__ret) : "a"(n), "b"(a1), "c"(a2), "d"(a3), "S"(a4) : "memory");
+#else
__asm__ __volatile__ (SYSCALL_INSNS_34 : "=a"(__ret) : "a"(n), "D"(a1), "c"(a2), "d"(a3), "S"(a4) : "memory");
+#endif
return __ret;
}
static inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
{
unsigned long __ret;
+#if !defined(__PIC__) || !defined(BROKEN_EBX_ASM)
+ __asm__ __volatile__ (SYSCALL_INSNS
+ : "=a"(__ret) : "a"(n), "b"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
+#else
__asm__ __volatile__ ("pushl %2 ; push %%ebx ; mov 4(%%esp),%%ebx ; " SYSCALL_INSNS " ; pop %%ebx ; add $4,%%esp"
: "=a"(__ret) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
+#endif
return __ret;
}
static inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6)
{
- unsigned long __ret, a1a6[2] = { a1, a6 };
+ unsigned long __ret;
+#if !defined(__PIC__) || !defined(BROKEN_EBX_ASM)
+ __asm__ __volatile__ ("pushl %7 ; push %%ebp ; mov 4(%%esp),%%ebp ; " SYSCALL_INSNS " ; pop %%ebp ; add $4,%%esp"
+ : "=a"(__ret) : "a"(n), "b"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5), "g"(a6) : "memory");
+#else
+ unsigned long a1a6[2] = { a1, a6 };
__asm__ __volatile__ ("pushl %1 ; push %%ebx ; push %%ebp ; mov 8(%%esp),%%ebx ; mov 4(%%ebx),%%ebp ; mov (%%ebx),%%ebx ; " SYSCALL_INSNS " ; pop %%ebp ; pop %%ebx ; add $4,%%esp"
: "=a"(__ret) : "g"(&a1a6), "a"(n), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
+#endif
return __ret;
}
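
[editor's note] For context when reading the fallback branches above: SYSCALL_INSNS expands to the i386 syscall entry sequence, and the _12/_34 variants, defined earlier in syscall_arch.h (not shown in this hunk), wrap it in an ebx swap. Roughly, per the surrounding file:

	#define SYSCALL_INSNS_12 "xchg %%ebx,%%edx ; " SYSCALL_INSNS " ; xchg %%ebx,%%edx"
	#define SYSCALL_INSNS_34 "xchg %%ebx,%%edi ; " SYSCALL_INSNS " ; xchg %%ebx,%%edi"

	static inline long __syscall1(long n, long a1)
	{
		unsigned long __ret;
		/* a1 is handed to the compiler in edx ("d"), a register it
		 * can always allocate; the xchg moves it into ebx only for
		 * the duration of the syscall, so no spill is needed. */
		__asm__ __volatile__ (SYSCALL_INSNS_12 : "=a"(__ret) : "a"(n), "d"(a1) : "memory");
		return __ret;
	}

Because xchg is its own inverse, the same instruction restores the caller's ebx on the way out, which is why the 1- and 2-arg forms need no stack frame at all, as the commit message notes.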
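As a usage sketch (a hypothetical caller, not part of this commit), a 3-argument syscall goes through __syscall3 above:

	/* On a compiler with a usable ebx, this compiles to loading
	 * n/a1/a2/a3 straight into eax/ebx/ecx/edx with no spills;
	 * with BROKEN_EBX_ASM, a1 goes in edi and SYSCALL_INSNS_34
	 * swaps it into ebx around the syscall. */
	static long write3(int fd, const void *buf, unsigned long len)
	{
		return __syscall3(4 /* SYS_write on i386 */, fd, (long)buf, (long)len);
	}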