The LE and BE code here is essentially the same; the only difference is that
the BE code emits the old-style ELFv1 function descriptors.  So, to fix the
build errors, the (descriptor-free) LE code is used whenever the ELFv2 ABI is
in effect, regardless of endianness.
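
For reference, here is a minimal sketch of the two function layouts this patch
switches between, using a hypothetical symbol "my_func" (a placeholder, not a
Valgrind symbol); the hunks below follow the same pattern at each site:

    #if defined(_CALL_ELF) && _CALL_ELF == 2
    /* ELFv2: no function descriptor.  The global entry point materialises
       the TOC pointer (r2) from r12, and .localentry marks the entry point
       used by callers that share the same TOC.  ("my_func" is a placeholder
       used only for this sketch.) */
            .globl  my_func
            .type   my_func,@function
    my_func:
    0:      addis   2,12,.TOC.-0b@ha
            addi    2,2,.TOC.-0b@l
            .localentry my_func,.-my_func
            /* ... function body ... */
            blr
            .size   my_func,.-my_func
    #else
    /* ELFv1: "my_func" names a three-doubleword descriptor in .opd (code
       address, TOC pointer, static chain); the code itself lives at the
       dot-prefixed symbol ".my_func". */
            .section ".opd","aw"
            .align  3
    my_func:
            .quad   .my_func,.TOC.@tocbase,0
            .previous
            .type   .my_func,@function
            .globl  .my_func
    .my_func:
            /* ... function body ... */
            blr
            .size   .my_func,.-.my_func
    #endif

GCC predefines _CALL_ELF as 2 when building for the ELFv2 ABI (the default on
LE, selectable on BE with -mabi=elfv2), so testing _CALL_ELF selects the right
form independently of endianness.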
--- valgrind-3.13.0/coregrind/m_libcsetjmp.c 2017-05-31 10:14:45.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_libcsetjmp.c 2018-05-25 20:07:37.007835735 -0500
@@ -149,7 +149,7 @@
/* ------------ ppc64-linux ------------ */
-#if defined(VGP_ppc64be_linux)
+#if defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
__asm__(
".section \".toc\",\"aw\"" "\n"
@@ -268,7 +268,8 @@
".previous" "\n"
);
-#elif defined(VGP_ppc64le_linux)
+#elif defined(VGP_ppc64le_linux) || \
+ (defined(VGP_ppc64be_linux) && defined(_CALL_ELF) && _CALL_ELF == 2)
__asm__(
".section \".toc\",\"aw\"" "\n"
--- valgrind-3.13.0/coregrind/m_main.c.old 2017-05-31 10:14:52.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_main.c 2018-05-30 19:01:00.534083618 -0500
@@ -2585,7 +2585,7 @@
"\ttrap\n"
".previous\n"
);
-#elif defined(VGP_ppc64be_linux)
+#elif defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
asm("\n"
/* PPC64 ELF ABI says '_start' points to a function descriptor.
So we must have one, and that is what goes into the .opd section. */
@@ -2631,9 +2631,9 @@
"\tnop\n"
"\ttrap\n"
);
-#elif defined(VGP_ppc64le_linux)
-/* Little Endian uses ELF version 2 but in the future may also
- * support other ELF versions.
+#elif defined(VGP_ppc64le_linux) || \
+ (defined(VGP_ppc64be_linux) && defined(_CALL_ELF) && _CALL_ELF == 2)
+/* PowerPC 64 ELF version 2 does not use function descriptors.
*/
asm("\n"
"\t.align 2\n"
--- valgrind-3.13.0/coregrind/m_syscall.c.old 2017-05-31 10:14:29.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_syscall.c 2018-05-30 19:02:00.984023769 -0500
@@ -470,7 +470,7 @@
".previous\n"
);
-#elif defined(VGP_ppc64be_linux)
+#elif defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
/* Due to the need to return 65 bits of result, this is completely
different from the ppc32 case. The single arg register points to a
7-word block containing the syscall # and the 6 args. The syscall
@@ -506,7 +506,8 @@
" blr\n"
);
-#elif defined(VGP_ppc64le_linux)
+#elif defined(VGP_ppc64le_linux) || \
+ (defined(VGP_ppc64be_linux) && defined(_CALL_ELF) && _CALL_ELF == 2)
/* Due to the need to return 65 bits of result, this is completely
different from the ppc32 case. The single arg register points to a
7-word block containing the syscall # and the 6 args. The syscall
--- valgrind-3.13.0/coregrind/m_signals.c.old 2017-05-31 10:14:52.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_signals.c 2018-05-30 22:12:46.082692356 -0500
@@ -889,7 +889,7 @@
" sc\n" \
".previous\n"
-#elif defined(VGP_ppc64be_linux)
+#elif defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
# define _MY_SIGRETURN(name) \
".align 2\n" \
".globl my_sigreturn\n" \
@@ -904,7 +904,8 @@
" li 0, " #name "\n" \
" sc\n"
-#elif defined(VGP_ppc64le_linux)
+#elif defined(VGP_ppc64le_linux) || \
+ (defined(VGP_ppc64be_linux) && defined(_CALL_ELF) && _CALL_ELF == 2)
/* Little Endian supports ELF version 2. In the future, it may
* support other versions.
*/
--- valgrind-3.13.0/coregrind/m_syswrap/syswrap-ppc64-linux.c.old 2017-05-31 10:14:39.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_syswrap/syswrap-ppc64-linux.c 2018-05-30 22:15:42.112518074 -0500
@@ -71,12 +71,12 @@
// r4 = retaddr
// r5 = function descriptor
// r6 = arg1
-/* On PPC64, a func ptr is represented by a TOC entry ptr.
+/* On ELFv1, a func ptr is represented by a TOC entry ptr.
This TOC entry contains three words; the first word is the function
address, the second word is the TOC ptr (r2), and the third word is
the static chain value. */
asm(
-#if defined(VGP_ppc64be_linux)
+#if defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
" .align 2\n"
" .globl vgModuleLocal_call_on_new_stack_0_1\n"
" .section \".opd\",\"aw\"\n"
@@ -126,7 +126,7 @@
" bctr\n\t" // jump to dst
" trap\n" // should never get here
#else
-// ppc64le_linux
+// ppc64le_linux, or ELFv2 ABI on BE
" .align 2\n"
" .globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
@@ -211,7 +211,7 @@
// See priv_syswrap-linux.h for arg profile.
asm(
-#if defined(VGP_ppc64be_linux)
+#if defined(VGP_ppc64be_linux) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
" .align 2\n"
" .globl do_syscall_clone_ppc64_linux\n"
" .section \".opd\",\"aw\"\n"
--- valgrind-3.13.0/coregrind/m_syswrap/syscall-ppc64be-linux.S.old 2017-05-31 10:14:39.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_syswrap/syscall-ppc64be-linux.S 2018-05-30 22:18:31.742350130 -0500
@@ -29,7 +29,7 @@
#include "pub_core_basics_asm.h"
-#if defined(VGP_ppc64be_linux)
+#if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
#include "pub_core_vkiscnums_asm.h"
#include "libvex_guest_offsets.h"
@@ -76,12 +76,25 @@
.align 2
.globl ML_(do_syscall_for_client_WRK)
+#if _CALL_ELF == 2
+.type .ML_(do_syscall_for_client_WRK),@function
+ML_(do_syscall_for_client_WRK):
+0: addis 2,12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry ML_(do_syscall_for_client_WRK), .-ML_(do_syscall_for_client_WRK)
+#else
.section ".opd","aw"
.align 3
-ML_(do_syscall_for_client_WRK):
+ML_(do_syscall_for_client_WRK):
.quad .ML_(do_syscall_for_client_WRK),.TOC.@tocbase,0
.previous
-.type .ML_(do_syscall_for_client_WRK),@function
+#endif
+.type .ML_(do_syscall_for_client_WRK),@function
.globl .ML_(do_syscall_for_client_WRK)
.ML_(do_syscall_for_client_WRK):
/* make a stack frame */
@@ -145,7 +158,11 @@
/* failure: return 0x8000 | error code */
7: ori 3,3,0x8000 /* FAILURE -- ensure return value is nonzero */
b 5b
-
+ .size .ML_(do_syscall_for_client_WRK),.-.ML_(do_syscall_for_client_WRK)
.section .rodata
/* export the ranges so that
VG_(fixup_guest_state_after_syscall_interrupted) can do the
@@ -162,7 +179,7 @@
ML_(blksys_committed): .quad 4b
ML_(blksys_finished): .quad 5b
-#endif // defined(VGP_ppc64be_linux)
+#endif // defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC
--- valgrind-3.13.0/coregrind/m_dispatch/dispatch-ppc64be-linux.S.old 2017-05-31 10:14:33.000000000 -0500
+++ valgrind-3.13.0/coregrind/m_dispatch/dispatch-ppc64be-linux.S 2018-05-30 22:39:37.951096498 -0500
@@ -30,12 +30,21 @@
#include "pub_core_basics_asm.h"
-#if defined(VGP_ppc64be_linux)
+#if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h" /* for OFFSET_ppc64_CIA */
+/* NOTE: PPC64 supports Big Endian and Little Endian.  It also supports
+   the ELF version 1 and ELF version 2 ABIs.
+
+   Currently LE always uses ELF version 2, while BE traditionally uses
+   ELF version 1 but may also be built for ELF version 2.  So the
+   _CALL_ELF macro is used in the assembly functions to select code for
+   a specific ELF version independently of the endianness of the machine.
+   The test "#if _CALL_ELF == 2" checks whether ELF version 2 is in use.
+*/
/* References to globals via the TOC */
@@ -75,14 +84,26 @@
.section ".text"
.align 2
.globl VG_(disp_run_translations)
+#if _CALL_ELF == 2
+.type VG_(disp_run_translations),@function
+VG_(disp_run_translations):
+.type .VG_(disp_run_translations),@function
+#else
.section ".opd","aw"
.align 3
VG_(disp_run_translations):
.quad .VG_(disp_run_translations),.TOC.@tocbase,0
.previous
.type .VG_(disp_run_translations),@function
+#endif
.globl .VG_(disp_run_translations)
.VG_(disp_run_translations):
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_run_translations), .-VG_(disp_run_translations)
+#endif
+
/* r3 holds two_words */
/* r4 holds guest_state */
/* r5 holds host_addr */
@@ -229,8 +250,13 @@
/* make a stack frame for the code we are calling */
stdu 1,-48(1)
- /* Set up the guest state ptr */
+ /* Set up the guest state ptr */
mr 31,4 /* r31 (generated code gsp) = r4 */
+#if _CALL_ELF == 2
+/* for the ELFv2 ABI we need to set up the TOC pointer (r2) from r12 */
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+#endif
/* and jump into the code cache. Chained translations in
the code cache run, until for whatever reason, they can't
@@ -385,6 +411,9 @@
mtlr 0
addi 1,1,624 /* stack_size */
blr
+#if _CALL_ELF == 2
+ .size VG_(disp_run_translations),.-VG_(disp_run_translations)
+#endif
/*----------------------------------------------------*/
@@ -395,15 +424,25 @@
.section ".text"
.align 2
.globl VG_(disp_cp_chain_me_to_slowEP)
- .section ".opd","aw"
+#if _CALL_ELF == 2
+ .type VG_(disp_cp_chain_me_to_slowEP),@function
+ VG_(disp_cp_chain_me_to_slowEP):
+#else
+ .section ".opd","aw"
.align 3
VG_(disp_cp_chain_me_to_slowEP):
.quad .VG_(disp_cp_chain_me_to_slowEP),.TOC.@tocbase,0
.previous
+#endif
.type .VG_(disp_cp_chain_me_to_slowEP),@function
.globl .VG_(disp_cp_chain_me_to_slowEP)
.VG_(disp_cp_chain_me_to_slowEP):
- /* We got called. The return address indicates
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_cp_chain_me_to_slowEP), .-VG_(disp_cp_chain_me_to_slowEP)
+#endif
+ /* We got called. The return address indicates
where the patching needs to happen. Collect
the return address and, exit back to C land,
handing the caller the pair (Chain_me_S, RA) */
@@ -415,20 +454,33 @@
*/
subi 7,7,20+4+4
b .postamble
+#if _CALL_ELF == 2
+ .size VG_(disp_cp_chain_me_to_slowEP),.-VG_(disp_cp_chain_me_to_slowEP)
+#endif
/* ------ Chain me to fast entry point ------ */
.section ".text"
.align 2
.globl VG_(disp_cp_chain_me_to_fastEP)
- .section ".opd","aw"
+#if _CALL_ELF == 2
+ .type VG_(disp_cp_chain_me_to_fastEP),@function
+VG_(disp_cp_chain_me_to_fastEP):
+#else
+ .section ".opd","aw"
.align 3
VG_(disp_cp_chain_me_to_fastEP):
.quad .VG_(disp_cp_chain_me_to_fastEP),.TOC.@tocbase,0
.previous
+#endif
.type .VG_(disp_cp_chain_me_to_fastEP),@function
.globl .VG_(disp_cp_chain_me_to_fastEP)
.VG_(disp_cp_chain_me_to_fastEP):
- /* We got called. The return address indicates
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_cp_chain_me_to_fastEP), .-VG_(disp_cp_chain_me_to_fastEP)
+#endif
+ /* We got called. The return address indicates
where the patching needs to happen. Collect
the return address and, exit back to C land,
handing the caller the pair (Chain_me_S, RA) */
@@ -440,20 +492,33 @@
*/
subi 7,7,20+4+4
b .postamble
+#if _CALL_ELF == 2
+ .size VG_(disp_cp_chain_me_to_fastEP),.-VG_(disp_cp_chain_me_to_fastEP)
+#endif
/* ------ Indirect but boring jump ------ */
.section ".text"
.align 2
.globl VG_(disp_cp_xindir)
- .section ".opd","aw"
+#if _CALL_ELF == 2
+ .type VG_(disp_cp_xindir),@function
+VG_(disp_cp_xindir):
+#else
+ .section ".opd","aw"
.align 3
VG_(disp_cp_xindir):
.quad .VG_(disp_cp_xindir),.TOC.@tocbase,0
.previous
+#endif
.type .VG_(disp_cp_xindir),@function
.globl .VG_(disp_cp_xindir)
.VG_(disp_cp_xindir):
- /* Where are we going? */
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_cp_xindir), .-VG_(disp_cp_xindir)
+#endif
+ /* Where are we going? */
ld 3,OFFSET_ppc64_CIA(31)
/* stats only */
@@ -479,6 +544,9 @@
/* Found a match. Jump to .host. */
mtctr 7
bctr
+#if _CALL_ELF == 2
+ .size VG_(disp_cp_xindir),.-VG_(disp_cp_xindir)
+#endif
.fast_lookup_failed:
/* stats only */
@@ -496,39 +564,64 @@
.section ".text"
.align 2
.globl VG_(disp_cp_xassisted)
- .section ".opd","aw"
+#if _CALL_ELF == 2
+ .type VG_(disp_cp_xassisted),@function
+VG_(disp_cp_xassisted):
+#else
+ .section ".opd","aw"
.align 3
VG_(disp_cp_xassisted):
.quad .VG_(disp_cp_xassisted),.TOC.@tocbase,0
.previous
- .type .VG_(disp_cp_xassisted),@function
+#endif
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_cp_xassisted), .-VG_(disp_cp_xassisted)
+#endif
+ .type .VG_(disp_cp_xassisted),@function
.globl .VG_(disp_cp_xassisted)
.VG_(disp_cp_xassisted):
/* r31 contains the TRC */
mr 6,31
li 7,0
b .postamble
+#if _CALL_ELF == 2
+ .size VG_(disp_cp_xassisted),.-VG_(disp_cp_xassisted)
+#endif
/* ------ Event check failed ------ */
.section ".text"
.align 2
.globl VG_(disp_cp_evcheck_fail)
- .section ".opd","aw"
+#if _CALL_ELF == 2
+ .type VG_(disp_cp_evcheck_fail),@function
+VG_(disp_cp_evcheck_fail):
+#else
+ .section ".opd","aw"
.align 3
VG_(disp_cp_evcheck_fail):
.quad .VG_(disp_cp_evcheck_fail),.TOC.@tocbase,0
.previous
+#endif
+#if _CALL_ELF == 2
+0: addis 2, 12,.TOC.-0b@ha
+ addi 2,2,.TOC.-0b@l
+ .localentry VG_(disp_cp_evcheck_fail), .-VG_(disp_cp_evcheck_fail)
+#endif
.type .VG_(disp_cp_evcheck_fail),@function
.globl .VG_(disp_cp_evcheck_fail)
.VG_(disp_cp_evcheck_fail):
li 6,VG_TRC_INNER_COUNTERZERO
li 7,0
b .postamble
+#if _CALL_ELF == 2
+ .size VG_(disp_cp_evcheck_fail),.-VG_(disp_cp_evcheck_fail)
+#endif
-
.size .VG_(disp_run_translations), .-.VG_(disp_run_translations)
-#endif // defined(VGP_ppc64be_linux)
+#endif // defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC