diff options
author | Rich Felker <dalias@aerifal.cx> | 2015-11-09 22:36:38 -0500 |
---|---|---|
committer | Rich Felker <dalias@aerifal.cx> | 2015-11-09 22:36:38 -0500 |
commit | 9f290a49bf9ee247d540d3c83875288a7991699c (patch) | |
tree | d9904f2b9698083ae64c122849a3dc3279f5856a | |
parent | cf40375e8fd14fc02a850af90b145d324d0381b5 (diff) | |
download | musl-9f290a49bf9ee247d540d3c83875288a7991699c.tar.gz musl-9f290a49bf9ee247d540d3c83875288a7991699c.tar.bz2 musl-9f290a49bf9ee247d540d3c83875288a7991699c.tar.xz musl-9f290a49bf9ee247d540d3c83875288a7991699c.zip |
remove non-working pre-armv4t support from arm asm
the idea of the three-instruction sequence being removed was to be
able to return to thumb code when used on armv4t+ from a thumb caller,
but also to be able to run on armv4 without the bx instruction
available (in which case the low bit of lr would always be 0).

however, without compiler support for generating such a sequence from
C code, which does not exist and which there is unlikely to be
interest in implementing, there is little point in having it in the
asm, and it would likely be easier to add pre-armv4t support via
enhanced linker handling of R_ARM_V4BX than at the compiler level.

removing this code simplifies adding support for building libc in
thumb2-only form (for cortex-m).
-rw-r--r-- | arch/arm/reloc.h | 5 | ||||
-rw-r--r-- | arch/arm/src/arm/atomics.s | 6 | ||||
-rw-r--r-- | crt/arm/crtn.s | 4 | ||||
-rw-r--r-- | src/internal/arm/syscall.s | 2 | ||||
-rw-r--r-- | src/setjmp/arm/longjmp.s | 4 | ||||
-rw-r--r-- | src/setjmp/arm/setjmp.s | 4 | ||||
-rw-r--r-- | src/string/armel/memcpy.s | 4 | ||||
-rw-r--r-- | src/thread/arm/clone.s | 2 | ||||
-rw-r--r-- | src/thread/arm/syscall_cp.s | 2 |
9 files changed, 2 insertions, 31 deletions
diff --git a/arch/arm/reloc.h b/arch/arm/reloc.h index e1ef3506..6227bd1e 100644 --- a/arch/arm/reloc.h +++ b/arch/arm/reloc.h @@ -28,10 +28,5 @@ #define REL_TPOFF R_ARM_TLS_TPOFF32 //#define REL_TLSDESC R_ARM_TLS_DESC -#ifdef __thumb__ #define CRTJMP(pc,sp) __asm__ __volatile__( \ "mov sp,%1 ; bx %0" : : "r"(pc), "r"(sp) : "memory" ) -#else -#define CRTJMP(pc,sp) __asm__ __volatile__( \ - "mov sp,%1 ; tst %0,#1 ; moveq pc,%0 ; bx %0" : : "r"(pc), "r"(sp) : "memory" ) -#endif diff --git a/arch/arm/src/arm/atomics.s b/arch/arm/src/arm/atomics.s index f241cc02..ecf3f05a 100644 --- a/arch/arm/src/arm/atomics.s +++ b/arch/arm/src/arm/atomics.s @@ -11,8 +11,6 @@ __a_barrier: .global __a_barrier_dummy .hidden __a_barrier_dummy __a_barrier_dummy: - tst lr,#1 - moveq pc,lr bx lr .global __a_barrier_oldkuser .hidden __a_barrier_oldkuser @@ -24,8 +22,6 @@ __a_barrier_oldkuser: mov lr,pc mov pc,ip pop {r0,r1,r2,r3,ip,lr} - tst lr,#1 - moveq pc,lr bx lr .global __a_barrier_v6 .hidden __a_barrier_v6 @@ -53,8 +49,6 @@ __a_cas_dummy: ldr r0,[r2] subs r0,r3,r0 streq r1,[r2] - tst lr,#1 - moveq pc,lr bx lr .global __a_cas_v6 .hidden __a_cas_v6 diff --git a/crt/arm/crtn.s b/crt/arm/crtn.s index 1b626c0a..b3eca856 100644 --- a/crt/arm/crtn.s +++ b/crt/arm/crtn.s @@ -1,11 +1,7 @@ .section .init pop {r0,lr} - tst lr,#1 - moveq pc,lr bx lr .section .fini pop {r0,lr} - tst lr,#1 - moveq pc,lr bx lr diff --git a/src/internal/arm/syscall.s b/src/internal/arm/syscall.s index 2028456c..28cb17ba 100644 --- a/src/internal/arm/syscall.s +++ b/src/internal/arm/syscall.s @@ -11,6 +11,4 @@ __syscall: ldmfd ip,{r3,r4,r5,r6} svc 0 ldmfd sp!,{r4,r5,r6,r7} - tst lr,#1 - moveq pc,lr bx lr diff --git a/src/setjmp/arm/longjmp.s b/src/setjmp/arm/longjmp.s index 82bce832..7db93e8a 100644 --- a/src/setjmp/arm/longjmp.s +++ b/src/setjmp/arm/longjmp.s @@ -33,9 +33,7 @@ longjmp: ldcl p1, cr13, [ip], #8 ldcl p1, cr14, [ip], #8 ldcl p1, cr15, [ip], #8 -3: tst lr,#1 - moveq pc,lr - bx lr +3: bx lr .hidden __hwcap 
1: .word __hwcap-1b diff --git a/src/setjmp/arm/setjmp.s b/src/setjmp/arm/setjmp.s index 32db7d87..61425ad5 100644 --- a/src/setjmp/arm/setjmp.s +++ b/src/setjmp/arm/setjmp.s @@ -35,9 +35,7 @@ setjmp: stcl p1, cr13, [ip], #8 stcl p1, cr14, [ip], #8 stcl p1, cr15, [ip], #8 -3: tst lr,#1 - moveq pc,lr - bx lr +3: bx lr .hidden __hwcap 1: .word __hwcap-1b diff --git a/src/string/armel/memcpy.s b/src/string/armel/memcpy.s index 54164030..b16be0d6 100644 --- a/src/string/armel/memcpy.s +++ b/src/string/armel/memcpy.s @@ -189,8 +189,6 @@ less_than_32_left: /* we're done! restore everything and return */ 1: ldmfd sp!, {r5-r11} ldmfd sp!, {r0, r4, lr} - tst lr, #1 - moveq pc, lr bx lr /********************************************************************/ @@ -378,6 +376,4 @@ copy_last_3_and_return: /* we're done! restore sp and spilled registers and return */ add sp, sp, #28 ldmfd sp!, {r0, r4, lr} - tst lr, #1 - moveq pc, lr bx lr diff --git a/src/thread/arm/clone.s b/src/thread/arm/clone.s index d146999b..b7fb788b 100644 --- a/src/thread/arm/clone.s +++ b/src/thread/arm/clone.s @@ -15,8 +15,6 @@ __clone: tst r0,r0 beq 1f ldmfd sp!,{r4,r5,r6,r7} - tst lr,#1 - moveq pc,lr bx lr 1: mov r0,r6 diff --git a/src/thread/arm/syscall_cp.s b/src/thread/arm/syscall_cp.s index 96ce6135..64528b39 100644 --- a/src/thread/arm/syscall_cp.s +++ b/src/thread/arm/syscall_cp.s @@ -22,8 +22,6 @@ __cp_begin: svc 0 __cp_end: ldmfd sp!,{r4,r5,r6,r7,lr} - tst lr,#1 - moveq pc,lr bx lr __cp_cancel: ldmfd sp!,{r4,r5,r6,r7,lr} |