- 论坛徽章:
- 0
|
以下是我基于 glibc 的 64-bit memcpy（SSSE3 版本）的部分代码，有兴趣的可以看看：
/* Name of the exported entry point.  Predefine MEMCPY before inclusion
   to build mempcpy/memmove flavours of the same body.  */
#ifndef MEMCPY
# define MEMCPY ssse3_memcpy_64
#endif
/* L(x): assembler-local label -- the ".L" prefix keeps it out of the
   symbol table (glibc sysdep.h convention).  */
#ifndef L
# define L(label) .L##label
#endif
/* ALIGN(n): align to a 2^n-byte boundary.  */
#ifndef ALIGN
# define ALIGN(n) .p2align n
#endif
/* CFI unwind-info markers; defined here only if the build environment
   has not already provided them.  */
#ifndef cfi_startproc
# define cfi_startproc .cfi_startproc
#endif
#ifndef cfi_endproc
# define cfi_endproc .cfi_endproc
#endif
/* ENTRY/END: declare a 16-byte-aligned global function symbol and emit
   its unwind and .size bookkeeping.  */
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
.globl name; \
.p2align 4; \
name: \
cfi_startproc
#endif
#ifndef END
# define END(name) \
cfi_endproc; \
.size name, .-name
#endif
/* A jump-table entry I is stored as a 32-bit offset relative to the
   table base B.  */
#define JMPTBL(I, B) I - B
/* Branch to an entry in a jump table.  TABLE is a jump table with
   relative offsets.  INDEX is a register that contains the index into
   the jump table.  SCALE is the scale of INDEX.  Clobbers %r11 and
   INDEX itself; the trailing ud2 traps if the indirect jump falls
   through (it never should).  */
#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
lea TABLE(%rip), %r11; \
movslq (%r11, INDEX, SCALE), INDEX; \
lea (%r11, INDEX), INDEX; \
jmp *INDEX; \
ud2
.section .text.ssse3,"ax",@progbits
/*
 * void *ssse3_memcpy_64 (void *dst, const void *src, size_t len)
 *
 * SSSE3-optimized memcpy for x86-64; also builds mempcpy/memmove when
 * USE_AS_MEMPCPY / USE_AS_MEMMOVE are defined.
 *
 * ABI:      SysV AMD64.
 * In:       rdi = dst, rsi = src, rdx = len.
 * Out:      rax = dst (memcpy/memmove) or dst + len (mempcpy).
 * Clobbers: rcx, rdx, rsi, rdi, r8, r9, r11, xmm0-xmm9, flags.
 *
 * Strategy:
 *   len < 144  -> jump-table dispatch to fixed-size copy stubs.
 *   len >= 144 -> align dst to 16 bytes, then run a PALIGNR-based
 *                 128-bytes-per-iteration loop keyed on (src & 15);
 *                 copies larger than the data cache switch to
 *                 non-temporal (movntdq) "gobble" loops.
 *
 * NOTE(review): the original paste truncated six stores to
 * "movdqu %xmm0, (%r"; they have been restored to "(%r8)", which is
 * the register loaded earlier with the unaligned head (forward paths)
 * or tail (backward paths) destination exactly so this saved vector
 * can be written there.
 *
 * NOTE(review): the poster elided the shl_2..shl_15 variants and the
 * jump tables (table_144_bytes_fwd/bwd, shl_table_fwd/bwd) plus labels
 * such as L(bwd_write_0bytes); this chunk does not assemble until
 * those are restored.
 */
ENTRY (MEMCPY)
	mov	%rdi, %rax		/* return value = original dst */
#ifdef USE_AS_MEMPCPY
	add	%rdx, %rax		/* mempcpy returns dst + len */
#endif
#ifdef USE_AS_MEMMOVE
	cmp	%rsi, %rdi
	jb	L(copy_forward)		/* dst < src: forward copy is safe */
	je	L(bwd_write_0bytes)	/* dst == src: nothing to move */
	cmp	$144, %rdx
	jae	L(copy_backward)	/* dst > src, large: copy backward */
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4)
L(copy_forward):
#endif
	cmp	$144, %rdx
	jae	L(144bytesormore)
L(fwd_write_less32bytes):
#ifndef USE_AS_MEMMOVE
	/* Compares only the low 8 bits of src/dst -- presumably a cheap
	   direction heuristic for the small-copy stubs, not a full
	   overlap check (memcpy does not guarantee overlap anyway).  */
	cmp	%dil, %sil
	jbe	L(bk_write)
#endif
	add	%rdx, %rsi		/* small copies run end-relative */
	add	%rdx, %rdi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4)
#ifndef USE_AS_MEMMOVE
L(bk_write):
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4)
#endif
	ALIGN (4)
L(144bytesormore):
#ifndef USE_AS_MEMMOVE
	cmp	%dil, %sil
	jle	L(copy_backward)	/* signed compare, as in the
					   original -- kept verbatim */
#endif
	movdqu	(%rsi), %xmm0		/* save first 16 src bytes; they
					   cover the unaligned head and
					   are stored to (%r8) at the end
					   of whichever loop runs */
	mov	%rdi, %r8		/* r8 = original dst (head store) */
	and	$-16, %rdi
	add	$16, %rdi		/* rdi = dst rounded up to 16 */
	mov	%rdi, %r9
	sub	%r8, %r9		/* r9 = bytes consumed by aligning */
	sub	%r9, %rdx
	add	%r9, %rsi
	mov	%rsi, %r9
	and	$0xf, %r9		/* r9 = src misalignment (shift) */
	jz	L(shl_0)
#ifdef DATA_CACHE_SIZE
	mov	$DATA_CACHE_SIZE, %rcx
#else
	mov	__x86_64_data_cache_size(%rip), %rcx
#endif
	cmp	%rcx, %rdx
	jae	L(gobble_mem_fwd)	/* bigger than L1d: NT-store path */
	lea	L(shl_table_fwd)(%rip), %r11
	sub	$0x80, %rdx		/* loops run while rdx >= 0x80 */
	movslq	(%r11, %r9, 4), %r9
	add	%r11, %r9
	jmp	*%r9			/* dispatch to L(shl_<shift>) */
	ud2
	ALIGN (4)
L(copy_backward):
#ifdef DATA_CACHE_SIZE
	mov	$DATA_CACHE_SIZE, %rcx
#else
	mov	__x86_64_data_cache_size(%rip), %rcx
#endif
	shl	$1, %rcx
	cmp	%rcx, %rdx
	ja	L(gobble_mem_bwd)	/* > 2*L1d: NT-store path (it
					   re-derives xmm0/r8 itself) */
	add	%rdx, %rdi		/* point rdi/rsi past the end */
	add	%rdx, %rsi
	movdqu	-16(%rsi), %xmm0	/* save last 16 src bytes (tail) */
	lea	-16(%rdi), %r8		/* r8 = where the tail store goes */
	mov	%rdi, %r9
	and	$0xf, %r9
	xor	%r9, %rdi		/* align end-of-dst down to 16 */
	sub	%r9, %rsi
	sub	%r9, %rdx
	mov	%rsi, %r9
	and	$0xf, %r9		/* r9 = src misalignment (shift) */
	jz	L(shl_0_bwd)
	lea	L(shl_table_bwd)(%rip), %r11
	sub	$0x80, %rdx
	movslq	(%r11, %r9, 4), %r9
	add	%r11, %r9
	jmp	*%r9			/* dispatch to L(shl_<shift>_bwd) */
	ud2
	ALIGN (4)
L(shl_0):
	/* src and dst are now mutually 16-byte aligned.  */
	mov	%rdx, %r9
	shr	$8, %r9
	add	%rdx, %r9		/* r9 = len + len/256 */
#ifdef DATA_CACHE_SIZE
	cmp	$DATA_CACHE_SIZE_HALF, %r9
#else
	cmp	__x86_64_data_cache_size_half(%rip), %r9
#endif
	jae	L(gobble_mem_fwd)
	sub	$0x80, %rdx
	ALIGN (4)
L(shl_0_loop):
	/* Fully aligned 128-byte copy per iteration.  */
	movdqa	(%rsi), %xmm1
	movdqa	%xmm1, (%rdi)
	movaps	0x10(%rsi), %xmm2
	movaps	%xmm2, 0x10(%rdi)
	movaps	0x20(%rsi), %xmm3
	movaps	%xmm3, 0x20(%rdi)
	movaps	0x30(%rsi), %xmm4
	movaps	%xmm4, 0x30(%rdi)
	movaps	0x40(%rsi), %xmm1
	movaps	%xmm1, 0x40(%rdi)
	movaps	0x50(%rsi), %xmm2
	movaps	%xmm2, 0x50(%rdi)
	movaps	0x60(%rsi), %xmm3
	movaps	%xmm3, 0x60(%rdi)
	movaps	0x70(%rsi), %xmm4
	movaps	%xmm4, 0x70(%rdi)
	sub	$0x80, %rdx
	lea	0x80(%rsi), %rsi
	lea	0x80(%rdi), %rdi
	jae	L(shl_0_loop)
	movdqu	%xmm0, (%r8)		/* store saved unaligned head */
	add	$0x80, %rdx		/* rdx = remaining tail bytes */
	add	%rdx, %rsi
	add	%rdx, %rdi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4)
	ALIGN (4)
L(shl_0_bwd):
	sub	$0x80, %rdx
L(copy_backward_loop):
	/* Aligned 128-byte backward copy per iteration.  */
	movaps	-0x10(%rsi), %xmm1
	movaps	%xmm1, -0x10(%rdi)
	movaps	-0x20(%rsi), %xmm2
	movaps	%xmm2, -0x20(%rdi)
	movaps	-0x30(%rsi), %xmm3
	movaps	%xmm3, -0x30(%rdi)
	movaps	-0x40(%rsi), %xmm4
	movaps	%xmm4, -0x40(%rdi)
	movaps	-0x50(%rsi), %xmm5
	movaps	%xmm5, -0x50(%rdi)
	movaps	-0x60(%rsi), %xmm5
	movaps	%xmm5, -0x60(%rdi)
	movaps	-0x70(%rsi), %xmm5
	movaps	%xmm5, -0x70(%rdi)
	movaps	-0x80(%rsi), %xmm5
	movaps	%xmm5, -0x80(%rdi)
	sub	$0x80, %rdx
	lea	-0x80(%rdi), %rdi
	lea	-0x80(%rsi), %rsi
	jae	L(copy_backward_loop)
	movdqu	%xmm0, (%r8)		/* store saved unaligned tail */
	add	$0x80, %rdx
	sub	%rdx, %rdi
	sub	%rdx, %rsi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4)
	ALIGN (4)
L(shl_1):
	/* src is dst_alignment+1: load aligned from rsi-1 and stitch
	   adjacent vectors together with palignr $1.  */
	sub	$0x80, %rdx
	movaps	-0x01(%rsi), %xmm1
	movaps	0x0f(%rsi), %xmm2
	movaps	0x1f(%rsi), %xmm3
	movaps	0x2f(%rsi), %xmm4
	movaps	0x3f(%rsi), %xmm5
	movaps	0x4f(%rsi), %xmm6
	movaps	0x5f(%rsi), %xmm7
	movaps	0x6f(%rsi), %xmm8
	movaps	0x7f(%rsi), %xmm9
	lea	0x80(%rsi), %rsi
	palignr	$1, %xmm8, %xmm9
	movaps	%xmm9, 0x70(%rdi)
	palignr	$1, %xmm7, %xmm8
	movaps	%xmm8, 0x60(%rdi)
	palignr	$1, %xmm6, %xmm7
	movaps	%xmm7, 0x50(%rdi)
	palignr	$1, %xmm5, %xmm6
	movaps	%xmm6, 0x40(%rdi)
	palignr	$1, %xmm4, %xmm5
	movaps	%xmm5, 0x30(%rdi)
	palignr	$1, %xmm3, %xmm4
	movaps	%xmm4, 0x20(%rdi)
	palignr	$1, %xmm2, %xmm3
	movaps	%xmm3, 0x10(%rdi)
	palignr	$1, %xmm1, %xmm2
	movaps	%xmm2, (%rdi)
	lea	0x80(%rdi), %rdi
	jae	L(shl_1)
	movdqu	%xmm0, (%r8)		/* store saved unaligned head */
	add	$0x80, %rdx
	add	%rdx, %rdi
	add	%rdx, %rsi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4)
	ALIGN (4)
L(shl_1_bwd):
	/* Backward variant of shl_1; rdx was already biased by -0x80
	   at the dispatch site.  */
	movaps	-0x01(%rsi), %xmm1
	movaps	-0x11(%rsi), %xmm2
	palignr	$1, %xmm2, %xmm1
	movaps	%xmm1, -0x10(%rdi)
	movaps	-0x21(%rsi), %xmm3
	palignr	$1, %xmm3, %xmm2
	movaps	%xmm2, -0x20(%rdi)
	movaps	-0x31(%rsi), %xmm4
	palignr	$1, %xmm4, %xmm3
	movaps	%xmm3, -0x30(%rdi)
	movaps	-0x41(%rsi), %xmm5
	palignr	$1, %xmm5, %xmm4
	movaps	%xmm4, -0x40(%rdi)
	movaps	-0x51(%rsi), %xmm6
	palignr	$1, %xmm6, %xmm5
	movaps	%xmm5, -0x50(%rdi)
	movaps	-0x61(%rsi), %xmm7
	palignr	$1, %xmm7, %xmm6
	movaps	%xmm6, -0x60(%rdi)
	movaps	-0x71(%rsi), %xmm8
	palignr	$1, %xmm8, %xmm7
	movaps	%xmm7, -0x70(%rdi)
	movaps	-0x81(%rsi), %xmm9
	palignr	$1, %xmm9, %xmm8
	movaps	%xmm8, -0x80(%rdi)
	sub	$0x80, %rdx
	lea	-0x80(%rdi), %rdi
	lea	-0x80(%rsi), %rsi
	jae	L(shl_1_bwd)
	movdqu	%xmm0, (%r8)		/* store saved unaligned tail */
	add	$0x80, %rdx
	sub	%rdx, %rdi
	sub	%rdx, %rsi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4)
/* NOTE(review): L(shl_2)..L(shl_15), their _bwd variants, and the jump
   tables were elided by the poster at this point ("。。。").  */
	ALIGN (4)
L(gobble_mem_fwd):
	/* Huge forward copy: stream with non-temporal stores.  */
	movdqu	(%rsi), %xmm1
	movdqu	%xmm0, (%r8)		/* store saved unaligned head */
	movdqa	%xmm1, (%rdi)
	sub	$16, %rdx
	add	$16, %rsi
	add	$16, %rdi
#ifdef SHARED_CACHE_SIZE_HALF
	mov	$SHARED_CACHE_SIZE_HALF, %rcx
#else
	mov	__x86_64_shared_cache_size_half(%rip), %rcx
#endif
#ifdef USE_AS_MEMMOVE
	mov	%rsi, %r9
	sub	%rdi, %r9		/* r9 = src - dst (overlap gap) */
	cmp	%rdx, %r9
	jae	L(memmove_is_memcpy_fwd) /* no overlap within len */
	cmp	%rcx, %r9
	jbe	L(ll_cache_copy_fwd_start) /* overlapping window fits in
					      LLC: avoid NT stores */
L(memmove_is_memcpy_fwd):
#endif
	cmp	%rcx, %rdx
	ja	L(bigger_in_fwd)
	mov	%rdx, %rcx
L(bigger_in_fwd):
	sub	%rcx, %rdx		/* rdx = NT portion, rcx = cached */
	cmp	$0x1000, %rdx
	jbe	L(ll_cache_copy_fwd)	/* NT portion too small to pay off */
	mov	%rcx, %r9
	shl	$3, %r9
	cmp	%r9, %rdx
	jbe	L(2steps_copy_fwd)
	add	%rcx, %rdx		/* NT part dominates: do it all NT */
	xor	%rcx, %rcx
L(2steps_copy_fwd):
	sub	$0x80, %rdx
L(gobble_mem_fwd_loop):
	sub	$0x80, %rdx
	prefetcht0 0x200(%rsi)
	prefetcht0 0x300(%rsi)
	movdqu	(%rsi), %xmm0
	movdqu	0x10(%rsi), %xmm1
	movdqu	0x20(%rsi), %xmm2
	movdqu	0x30(%rsi), %xmm3
	movdqu	0x40(%rsi), %xmm4
	movdqu	0x50(%rsi), %xmm5
	movdqu	0x60(%rsi), %xmm6
	movdqu	0x70(%rsi), %xmm7
	lfence				/* as in original: order loads
					   ahead of the NT stores */
	movntdq	%xmm0, (%rdi)
	movntdq	%xmm1, 0x10(%rdi)
	movntdq	%xmm2, 0x20(%rdi)
	movntdq	%xmm3, 0x30(%rdi)
	movntdq	%xmm4, 0x40(%rdi)
	movntdq	%xmm5, 0x50(%rdi)
	movntdq	%xmm6, 0x60(%rdi)
	movntdq	%xmm7, 0x70(%rdi)
	lea	0x80(%rsi), %rsi
	lea	0x80(%rdi), %rdi
	jae	L(gobble_mem_fwd_loop)
	sfence				/* make NT stores globally visible */
	cmp	$0x80, %rcx
	jb	L(gobble_mem_fwd_end)
	add	$0x80, %rdx
L(ll_cache_copy_fwd):
	add	%rcx, %rdx
L(ll_cache_copy_fwd_start):
	sub	$0x80, %rdx
L(gobble_ll_loop_fwd):
	/* Cache-resident portion: regular stores + NTA prefetch.  */
	prefetchnta 0x1c0(%rsi)
	prefetchnta 0x280(%rsi)
	prefetchnta 0x1c0(%rdi)
	prefetchnta 0x280(%rdi)
	sub	$0x80, %rdx
	movdqu	(%rsi), %xmm0
	movdqu	0x10(%rsi), %xmm1
	movdqu	0x20(%rsi), %xmm2
	movdqu	0x30(%rsi), %xmm3
	movdqu	0x40(%rsi), %xmm4
	movdqu	0x50(%rsi), %xmm5
	movdqu	0x60(%rsi), %xmm6
	movdqu	0x70(%rsi), %xmm7
	movdqa	%xmm0, (%rdi)
	movdqa	%xmm1, 0x10(%rdi)
	movdqa	%xmm2, 0x20(%rdi)
	movdqa	%xmm3, 0x30(%rdi)
	movdqa	%xmm4, 0x40(%rdi)
	movdqa	%xmm5, 0x50(%rdi)
	movdqa	%xmm6, 0x60(%rdi)
	movdqa	%xmm7, 0x70(%rdi)
	lea	0x80(%rsi), %rsi
	lea	0x80(%rdi), %rdi
	jae	L(gobble_ll_loop_fwd)
L(gobble_mem_fwd_end):
	add	$0x80, %rdx		/* rdx = remaining tail bytes */
	add	%rdx, %rsi
	add	%rdx, %rdi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4)
	ALIGN (4)
L(gobble_mem_bwd):
	/* Huge backward copy; re-derives the saved tail vector and r8
	   because it is entered before copy_backward saved them.  */
	add	%rdx, %rsi
	add	%rdx, %rdi
	movdqu	-16(%rsi), %xmm0	/* save last 16 src bytes (tail) */
	lea	-16(%rdi), %r8		/* r8 = where the tail store goes */
	mov	%rdi, %r9
	and	$-16, %rdi		/* align end-of-dst down to 16 */
	sub	%rdi, %r9
	sub	%r9, %rsi
	sub	%r9, %rdx
#ifdef SHARED_CACHE_SIZE_HALF
	mov	$SHARED_CACHE_SIZE_HALF, %rcx
#else
	mov	__x86_64_shared_cache_size_half(%rip), %rcx
#endif
#ifdef USE_AS_MEMMOVE
	mov	%rdi, %r9
	sub	%rsi, %r9		/* r9 = dst - src (overlap gap) */
	cmp	%rdx, %r9
	jae	L(memmove_is_memcpy_bwd)
	cmp	%rcx, %r9
	jbe	L(ll_cache_copy_bwd_start)
L(memmove_is_memcpy_bwd):
#endif
	cmp	%rcx, %rdx
	ja	L(bigger)
	mov	%rdx, %rcx
L(bigger):
	sub	%rcx, %rdx		/* rdx = NT portion, rcx = cached */
	cmp	$0x1000, %rdx
	jbe	L(ll_cache_copy)
	mov	%rcx, %r9
	shl	$3, %r9
	cmp	%r9, %rdx
	jbe	L(2steps_copy)
	add	%rcx, %rdx
	xor	%rcx, %rcx
L(2steps_copy):
	sub	$0x80, %rdx
L(gobble_mem_bwd_loop):
	sub	$0x80, %rdx
	prefetcht0 -0x200(%rsi)
	prefetcht0 -0x300(%rsi)
	movdqu	-0x10(%rsi), %xmm1
	movdqu	-0x20(%rsi), %xmm2
	movdqu	-0x30(%rsi), %xmm3
	movdqu	-0x40(%rsi), %xmm4
	movdqu	-0x50(%rsi), %xmm5
	movdqu	-0x60(%rsi), %xmm6
	movdqu	-0x70(%rsi), %xmm7
	movdqu	-0x80(%rsi), %xmm8
	lfence				/* as in original (see fwd loop) */
	movntdq	%xmm1, -0x10(%rdi)
	movntdq	%xmm2, -0x20(%rdi)
	movntdq	%xmm3, -0x30(%rdi)
	movntdq	%xmm4, -0x40(%rdi)
	movntdq	%xmm5, -0x50(%rdi)
	movntdq	%xmm6, -0x60(%rdi)
	movntdq	%xmm7, -0x70(%rdi)
	movntdq	%xmm8, -0x80(%rdi)
	lea	-0x80(%rsi), %rsi
	lea	-0x80(%rdi), %rdi
	jae	L(gobble_mem_bwd_loop)
	sfence				/* make NT stores globally visible */
	cmp	$0x80, %rcx
	jb	L(gobble_mem_bwd_end)
	add	$0x80, %rdx
L(ll_cache_copy):
	add	%rcx, %rdx
L(ll_cache_copy_bwd_start):
	sub	$0x80, %rdx
L(gobble_ll_loop):
	prefetchnta -0x1c0(%rsi)
	prefetchnta -0x280(%rsi)
	prefetchnta -0x1c0(%rdi)
	prefetchnta -0x280(%rdi)
	sub	$0x80, %rdx
	movdqu	-0x10(%rsi), %xmm1
	movdqu	-0x20(%rsi), %xmm2
	movdqu	-0x30(%rsi), %xmm3
	movdqu	-0x40(%rsi), %xmm4
	movdqu	-0x50(%rsi), %xmm5
	movdqu	-0x60(%rsi), %xmm6
	movdqu	-0x70(%rsi), %xmm7
	movdqu	-0x80(%rsi), %xmm8
	movdqa	%xmm1, -0x10(%rdi)
	movdqa	%xmm2, -0x20(%rdi)
	movdqa	%xmm3, -0x30(%rdi)
	movdqa	%xmm4, -0x40(%rdi)
	movdqa	%xmm5, -0x50(%rdi)
	movdqa	%xmm6, -0x60(%rdi)
	movdqa	%xmm7, -0x70(%rdi)
	movdqa	%xmm8, -0x80(%rdi)
	lea	-0x80(%rsi), %rsi
	lea	-0x80(%rdi), %rdi
	jae	L(gobble_ll_loop)
L(gobble_mem_bwd_end):
	movdqu	%xmm0, (%r8)		/* store saved unaligned tail */
	add	$0x80, %rdx
	sub	%rdx, %rsi
	sub	%rdx, %rdi
	BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4)
END (MEMCPY)
|