author     Denys Vlasenko <vda.linux@googlemail.com>    2015-02-10 18:30:56 +0100
committer  Rich Felker <dalias@aerifal.cx>              2015-02-10 18:53:31 -0500
commit     bf2071eda32528ee8b0bb89544152646684a2cf3
tree       d2d826f84c6a2d23be25019208bf80b6dee3f071
parent     6a5242e4cb2f9c695f613dc312ed5e1bb8008912
x86_64/memset: simple optimizations
"and $0xff,%esi" is a six-byte insn (81 e6 ff 00 00 00), can use
4-byte "movzbl %sil,%esi" (40 0f b6 f6) instead.
64-bit imul is slow; move it as far up as possible so that the result
(rax) has more time to become ready by the time we start using it
in memory stores.
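For context, the patched prologue (excerpted from the diff below) starts the
multiply right after the 0x0101...01 constant is materialized, ahead of the
length check, so %rax has several cycles to become ready before the first
store reads it:

	mov $0x101010101010101,%rax
	# 64-bit imul has 3-7 cycles latency, launch early
	imul %rsi,%rax

	cmp $16,%rdx
	jb 1f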
There is no need to shuffle registers in preparation for "rep stosq"
if we are not going to take that code path. Thus, the patch moves the
"jump if len < 16" instructions up, and changes the alternate code path
to use rdx and rdi instead of rcx and r8.
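Abridged excerpt of the reordered dispatch from the patched code below: the
%rcx/%r8 setup is now done only on the rep stosq path, while the short-fill
path indexes with %rdx and returns %rdi directly:

	cmp $16,%rdx
	jb 1f              # short fill: skip the rep stosq register setup

	mov %rdx,%rcx      # needed only by the rep stosq path
	mov %rdi,%r8
	shr $3,%rcx

	# ... short-fill path (label 1:) does its stores via %rdx ...

1:	mov %rdi,%rax      # final tail returns dest directly instead of via %r8
	ret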
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
-rw-r--r--  src/string/x86_64/memset.s  |  30
1 file changed, 16 insertions, 14 deletions
diff --git a/src/string/x86_64/memset.s b/src/string/x86_64/memset.s
index fc06eef8..263336b5 100644
--- a/src/string/x86_64/memset.s
+++ b/src/string/x86_64/memset.s
@@ -1,41 +1,43 @@
 .global memset
 .type memset,@function
 memset:
-	and $0xff,%esi
+	movzbl %sil,%esi
 	mov $0x101010101010101,%rax
-	mov %rdx,%rcx
-	mov %rdi,%r8
+	# 64-bit imul has 3-7 cycles latency, launch early
 	imul %rsi,%rax
-	cmp $16,%rcx
+
+	cmp $16,%rdx
 	jb 1f
 
-	mov %rax,-8(%rdi,%rcx)
+	mov %rdx,%rcx
+	mov %rdi,%r8
 	shr $3,%rcx
+	mov %rax,-8(%rdi,%rdx)
 	rep stosq
 	mov %r8,%rax
 	ret
 
-1:	test %ecx,%ecx
+1:	test %edx,%edx
 	jz 1f
 
 	mov %al,(%rdi)
-	mov %al,-1(%rdi,%rcx)
-	cmp $2,%ecx
+	mov %al,-1(%rdi,%rdx)
+	cmp $2,%edx
 	jbe 1f
 
 	mov %al,1(%rdi)
-	mov %al,-2(%rdi,%rcx)
-	cmp $4,%ecx
+	mov %al,-2(%rdi,%rdx)
+	cmp $4,%edx
 	jbe 1f
 
 	mov %eax,(%rdi)
-	mov %eax,-4(%rdi,%rcx)
-	cmp $8,%ecx
+	mov %eax,-4(%rdi,%rdx)
+	cmp $8,%edx
 	jbe 1f
 
 	mov %eax,4(%rdi)
-	mov %eax,-8(%rdi,%rcx)
+	mov %eax,-8(%rdi,%rdx)
 
-1:	mov %r8,%rax
+1:	mov %rdi,%rax
 	ret