/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
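/*
 * The user-range check above mirrors access_ok(): the size in %rdx is
 * added to the user pointer so the carry flag catches an address-space
 * wrap, and the comparison against thread_info's addr_limit rejects
 * pointers past the end of the user address range.
 */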
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
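/*
 * The ALTERNATIVE_2 in both entry points is resolved at boot by the
 * alternatives patching code. On a CPU with X86_FEATURE_ERMS the first
 * instruction of the body effectively becomes
 *
 *	jmp copy_user_enhanced_fast_string
 *
 * on a CPU with only X86_FEATURE_REP_GOOD it becomes
 *
 *	jmp copy_user_generic_string
 *
 * and otherwise the default
 *
 *	jmp copy_user_generic_unrolled
 *
 * is left in place.
 */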
	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	movl %edx,%ecx
	xorl %eax,%eax
	rep stosb
bad_to_user:
	movl %edx,%eax
	ret
ENDPROC(bad_from_user)
	.previous
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient micro
 * code for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
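/*
 * Shape of the routine below, as a sketch: the main loop moves 64 bytes
 * per iteration through registers %r8-%r11 (labels 1-16), a second loop
 * then copies any remaining whole quadwords, and a final loop copies
 * the last few bytes one at a time. On a fault, the label of the
 * faulting instruction selects a fixup that works out how many bytes
 * were still outstanding before handing off to copy_user_handle_tail.
 */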
ENTRY(copy_user_generic_unrolled)
	cmpl $8,%edx
	jb 20f	/* less than 8 bytes, go to byte copy loop */
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail	/* ecx is zerorest also */
	.previous
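/*
 * Each fixup above converts whatever is left of the current chunk back
 * into a byte count in %edx (64 bytes per outstanding unrolled
 * iteration, 8 per outstanding quadword, 1 per byte) before jumping to
 * copy_user_handle_tail, which retries the remaining bytes one at a
 * time and returns the number that could not be copied in %eax.
 */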
	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
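/*
 * Each _ASM_EXTABLE(from,to) entry records an exception-table pair: if
 * the instruction at local label "from" faults, the page-fault handler
 * resumes execution at fixup label "to" instead of oopsing, which is
 * how a faulting user access turns into a nonzero "bytes not copied"
 * return value.
 */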
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD
 * have errata for rep movsq with counts above 4GB; anyone who wants
 * to lift this limit should keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
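/*
 * The core of the string variant, as a sketch: whole quadwords are
 * copied with rep movsq and the remaining 0-7 bytes with rep movsb,
 * so the count handed to movsq never exceeds count/8 and stays far
 * below the 4GB errata threshold for the page-sized copies the kernel
 * actually issues.
 *
 *	movl %edx,%ecx
 *	shrl $3,%ecx		# quadword count
 *	andl $7,%edx		# leftover bytes
 *	rep movsq
 *	movl %edx,%ecx
 *	rep movsb
 */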
ENTRY(copy_user_generic_string)
	cmpl $8,%edx
	jb 2f	/* less than 8 bytes, go to byte copy loop */
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
ENDPROC(copy_user_generic_string)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
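/*
 * With ERMS the whole copy collapses to a single byte-granular string
 * move; the microcode chooses the optimal internal chunking. As a
 * sketch:
 *
 *	movl %edx,%ecx
 *	rep movsb
 *	xorl %eax,%eax		# 0 bytes left uncopied
 */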
ENTRY(copy_user_enhanced_fast_string)
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
ENDPROC(copy_user_enhanced_fast_string)
/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force the destination out of the cache for better
 * performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
 */
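/*
 * Overall flow below, as a sketch: sizes below 8 bytes go straight to
 * the 4-byte/byte paths; otherwise the destination is first aligned to
 * 8 bytes using ordinary cached stores, the bulk is copied with
 * non-temporal movnti stores (64 bytes per loop iteration, then 8 bytes
 * at a time), and the trailing 4-byte and byte remainders use cached
 * stores again. For example, an 8-byte-aligned 100-byte copy splits
 * into one 64-byte chunk, four 8-byte chunks, one 4-byte chunk and no
 * byte tail.
 */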
ENTRY(__copy_user_nocache)
	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop
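/*
 * movnti performs a non-temporal store: the data is written to memory
 * through write-combining buffers without being allocated in the cache,
 * which is why an sfence is issued once copying is finished so the
 * stores become globally visible in order.
 */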
	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */
	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop
	/* If no byte left, we're done */
.L_4b_nocache_copy_entry:

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop
	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	sfence
	ret
	.section .fixup,"ax"
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous
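/*
 * As in the cached variants, each fixup label above rescales the
 * remaining chunk count in %rcx back into a byte count in %rdx (64
 * bytes per 4x8-byte iteration, 8 per quadword, 4 per dword) and lets
 * copy_user_handle_tail retry the rest byte by byte; the sfence makes
 * sure the earlier non-temporal stores are ordered before that tail
 * copy runs.
 */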
	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)