2 * Normally compiler builtins are used, but sometimes the compiler calls
3 * out-of-line code. Based on asm-i386/string.h.
5 * This assembly file was rewritten from the memmove_64.c file.
6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
8 #include <linux/linkage.h>
9 #include <asm/cpufeatures.h>
10 #include <asm/alternative-asm.h>
15 * Implement memmove(). This can handle overlap between src and dst.
30 /* Handle 32 or more bytes in a loop */
35 /* Decide forward/backward copy mode */
37 jge .Lmemmove_begin_forward
43 .Lmemmove_begin_forward:
44 ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
47 * The movsq instruction has a high startup latency,
48 * so we handle small sizes using general-purpose registers.
53 * The movsq instruction is only good for the aligned case.
61 * We gobble 32 bytes forward in each loop.
80 * Handle data forward by movsq.
85 movq -8(%rsi, %rdx), %r11
86 lea -8(%rdi, %rdx), %r10
91 .Lmemmove_end_forward:
94 * Handle data backward by movsq.
101 leaq -8(%rsi, %rdx), %rsi
102 leaq -8(%rdi, %rdx), %rdi
111 * Start to prepare for backward copy.
121 * Calculate copy position to tail.
127 * We gobble 32 bytes backward in each loop.
131 movq -1*8(%rsi), %r11
132 movq -2*8(%rsi), %r10
135 leaq -4*8(%rsi), %rsi
137 movq %r11, -1*8(%rdi)
138 movq %r10, -2*8(%rdi)
141 leaq -4*8(%rdi), %rdi
144 * Calculate copy position to head.
153 * Move data from 16 bytes to 31 bytes.
157 movq -2*8(%rsi, %rdx), %r9
158 movq -1*8(%rsi, %rdx), %r8
161 movq %r9, -2*8(%rdi, %rdx)
162 movq %r8, -1*8(%rdi, %rdx)
169 * Move data from 8 bytes to 15 bytes.
172 movq -1*8(%rsi, %rdx), %r10
174 movq %r10, -1*8(%rdi, %rdx)
180 * Move data from 4 bytes to 7 bytes.
183 movl -4(%rsi, %rdx), %r10d
185 movl %r10d, -4(%rdi, %rdx)
191 * Move data from 2 bytes to 3 bytes.
194 movw -2(%rsi, %rdx), %r10w
196 movw %r10w, -2(%rdi, %rdx)
202 * Move a single byte of data.