/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>

#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
	.section .altinstr_replacement, "ax", @progbits
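	/*
	 * A minimal sketch of such a REP MOVSQ replacement body, assuming
	 * the .Lmemcpy_c/.Lmemcpy_e labels referenced by the alternative
	 * entry at the end of this file bracket it: copy the bulk as
	 * 8-byte words, then the remaining 0-7 bytes with REP MOVSB.
	 */
.Lmemcpy_c:
	movq %rdi, %rax			/* memcpy returns the destination */

	movl %edx, %ecx
	shrl $3, %ecx			/* qword count */
	andl $7, %edx			/* leftover byte count */
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

	/*
	 * The unrolled variant itself would then start along these lines
	 * (ENTRY/CFI_* come from the headers included above; the body is
	 * sketched from the comments that follow):
	 */
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax			/* preserve the return value */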
	/*
	 * Use a 32-bit CMP here to avoid long NOP padding.
	 */
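	/*
	 * Sketch of the check this refers to: anything below 32 bytes
	 * goes straight to the tail code (threshold inferred from the
	 * 32-byte copy loops below; .Lhandle_tail label name assumed).
	 */
	cmp $0x20, %edx
	jb .Lhandle_tail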
	/*
	 * We check whether a memory false dependence could occur,
	 * then jump to the corresponding copy mode.
	 */
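	/*
	 * Sketch, assuming the .Lcopy_backward label of the backward
	 * path below: when the source's low address bits sit below the
	 * destination's, copy backward so later loads do not falsely
	 * alias earlier stores.
	 */
	cmp %dil, %sil
	jl .Lcopy_backward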
	/*
	 * Move in blocks of 4x8 bytes:
	 */
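	/*
	 * Sketch of the forward loop the comment above describes (label
	 * matches the jae below; register choice assumed to mirror the
	 * backward loop): bias the count once, then copy 32 bytes per
	 * iteration, all loads before all stores.
	 */
	subl $0x20, %edx
.Lcopy_forward_loop:
	subq $0x20, %rdx		/* CF from this subtract drives the jae below */

	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi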
	jae .Lcopy_forward_loop
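	/*
	 * Forward-copy epilogue and backward-copy entry (sketch; the
	 * label is the target of the false-dependence check above):
	 * undo the count bias and finish any remainder in the tail code.
	 */
	addq $0x20, %rdx
	jmp .Lhandle_tail

.Lcopy_backward: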
	/*
	 * Advance the copy position to the tail.
	 */
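	/*
	 * Sketch: point rsi and rdi one past the end of the copy and
	 * bias the count for the backward loop below.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx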
	/*
	 * At most 3 ALU operations can issue in one cycle,
	 * so append the alignment NOPs within the same 16-byte chunk.
	 */
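	/*
	 * Top of the backward loop (sketch; the loop is closed by the
	 * jae below): copy 32 bytes per iteration from the highest
	 * address down, all loads before all stores.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx		/* CF from this subtract drives the jae below */
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)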
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop
	/*
	 * Move the copy position back to the head.
	 */
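	/*
	 * Sketch: undo the bias, rewind rsi/rdi to the start of the
	 * remaining 0-31 bytes, then dispatch on the leftover size
	 * (.Lhandle_tail/.Lless_16bytes label names assumed).
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi

.Lhandle_tail:
	cmpq $16, %rdx
	jb .Lless_16bytes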
	/*
	 * Move data from 16 bytes to 31 bytes: copy the first and the
	 * last 16 bytes so their overlap covers every length in between;
	 * the smaller tail cases below use the same trick.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
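	/*
	 * Sketch of the hand-off to the next smaller case (label names
	 * assumed): return here, then check whether fewer than 8 bytes
	 * remain.
	 */
	retq

	.p2align 4
.Lless_16bytes:
	cmpq $8, %rdx
	jb .Lless_8bytes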
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
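	/*
	 * Sketch of the next hand-off (label names assumed): return,
	 * then check for fewer than 4 bytes.
	 */
	retq

	.p2align 4
.Lless_8bytes:
	cmpq $4, %rdx
	jb .Lless_3bytes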
	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
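	/*
	 * Sketch of the final hand-off (label names assumed): return,
	 * then bail out straight away if nothing is left at all.
	 */
	retq

	.p2align 4
.Lless_3bytes:
	cmpl $0, %edx
	je .Lend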
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
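	/*
	 * A minimal sketch of the byte loop for this case, closing the
	 * function (.Lloop_1/.Lend label names assumed; CFI_ENDPROC and
	 * ENDPROC balance the entry sketched near the top):
	 */
.Lloop_1:
	movb (%rsi), %r8b
	movb %r8b, (%rdi)
	incq %rdi
	incq %rsi
	decl %edx
	jnz .Lloop_1

.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)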
	/*
	 * Some CPUs run faster using the string copy instructions.
	 * It is also a lot simpler. Use this when possible:
	 */

	.section .altinstructions, "a"
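	/*
	 * Sketch of the entry fields preceding the feature word, assuming
	 * the classic alt_instr layout of this era: the address of the
	 * original instruction, then the address of the replacement.
	 */
	.align 8
	.quad memcpy			/* original instruction */
	.quad .Lmemcpy_c		/* replacement */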
	.word X86_FEATURE_REP_GOOD
	/*
	 * Replace only the beginning: memcpy is itself used to apply the
	 * alternatives, so it would be silly to overwrite itself with
	 * NOPs - a reboot would be the only outcome.
	 */
	.byte .Lmemcpy_e - .Lmemcpy_c	/* length of the region to patch */
	.byte .Lmemcpy_e - .Lmemcpy_c	/* length of the replacement */
	.previous