 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 * Functions to copy from and to user space.
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient micro
 * code for rep movsq.
 *
 * Input:  rdi destination, rsi source, rdx count
 * Output: eax uncopied bytes or 0 if successful.
ENTRY(copy_user_generic_unrolled)
	jb 20f	/* less than 8 bytes, go to byte copy loop */
	jz .L_copy_short_string
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
40:	leal (%rdx,%rcx,8),%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
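/*
 * Caller-side sketch (illustrative C only, not part of this file): the
 * register convention documented above maps to a plain three-argument
 * call whose return value is the number of bytes left uncopied.  The
 * prototype and the handle_short_copy() helper are assumptions made for
 * illustration, not kernel declarations.
 *
 *	unsigned long copy_user_generic_unrolled(void *to, const void *from,
 *						 unsigned len);
 *
 *	unsigned long left = copy_user_generic_unrolled(dst, src, len);
 *	if (left)
 *		handle_short_copy(len - left);	// only len - left bytes landed
 */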
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more than 4GB would also be dangerous: both Intel and AMD
 * have errata for rep movsq with counts above 4GB.  Anyone lifting
 * this limit should keep those errata in mind.
 *
 * Input:  rdi destination, rsi source, rdx count
 * Output: eax uncopied bytes or 0 if successful.
ENTRY(copy_user_generic_string)
	jb 2f	/* less than 8 bytes, go to byte copy loop */
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
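/*
 * Shape of the copy this routine performs, as an illustrative C sketch
 * (hypothetical code; it ignores the user-access fault handling that the
 * real implementation gets from its exception table entries):
 *
 *	static void string_copy(char *dst, const char *src, unsigned len)
 *	{
 *		size_t quads = len >> 3;	// done 8 bytes at a time (rep movsq)
 *		size_t bytes = len & 7;		// trailing remainder (rep movsb)
 *
 *		memcpy(dst, src, quads * 8);
 *		memcpy(dst + quads * 8, src + quads * 8, bytes);
 *	}
 */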
 * Some CPUs support the enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when the feature
 * is available.
 *
 * Input:  rdi destination, rsi source, rdx count
 * Output: eax uncopied bytes or 0 if successful.
ENTRY(copy_user_enhanced_fast_string)
	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
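/*
 * With enhanced REP MOVSB (ERMS), one 'rep movsb' is fast for the whole
 * length, so no quadword/byte split is needed.  A minimal C-level sketch
 * of that idea (illustrative only; it lacks the user-space fault handling
 * this file provides):
 *
 *	static inline void rep_movsb(void *dst, const void *src, size_t len)
 *	{
 *		asm volatile("rep movsb"
 *			     : "+D" (dst), "+S" (src), "+c" (len)
 *			     :
 *			     : "memory");
 *	}
 */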
 * __copy_user_nocache - Uncached memory copy with exception handling.
 * This keeps the destination data out of the cache (non-temporal
 * stores) for better performance on large copies.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
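/*
 * The alignment rule above, as an illustrative C helper (hypothetical,
 * not kernel code): it answers whether the non-temporal (movnti) path
 * can be used for a chunk, or whether the copy has to fall back to
 * ordinary cached accesses.
 *
 *	static inline int can_use_movnti(unsigned long dst, size_t size)
 *	{
 *		if (size >= 8)
 *			return (dst & 7) == 0;	// need 8-byte alignment
 *		if (size == 4)
 *			return (dst & 3) == 0;	// need 4-byte alignment
 *		return 0;			// byte-wise cached copy
 *	}
 */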
ENTRY(__copy_user_nocache)
	/* If size is less than 8 bytes, go to 4-byte copy */
	jb .L_4b_nocache_copy_entry
	/* If destination is not 8-byte aligned, "cache" copy to align it */
	/* Set 4x8-byte copy count and remainder */
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */
	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	jnz .L_4x8b_nocache_copy_loop
	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */
	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
21:	movnti %r8,(%rdi)
	jnz .L_8b_nocache_copy_loop
	/* If no bytes are left, we're done */
.L_4b_nocache_copy_entry:
	/* If destination is not 4-byte aligned, go to byte copy: */
	jnz .L_1b_cache_copy_entry
	/* Set 4-byte copy count (1 or 0) and remainder */
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
	/* Perform 4-byte nocache copy: */
31:	movnti %r8d,(%rdi)
	/* If no bytes left, we're done: */
	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
.L_1b_cache_copy_loop:
	jnz .L_1b_cache_copy_loop
	/* Finished copying; fence the prior stores */
	jmp .L_fixup_handle_tail
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_handle_tail:
	jmp copy_user_handle_tail
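/*
 * Fault recovery in C terms (illustrative, hypothetical variable names):
 * each fixup converts its loop state back into "bytes not yet copied"
 * before falling through to copy_user_handle_tail, which retries the
 * remainder byte by byte and returns whatever still could not be copied.
 * With rcx holding the chunks left and rdx the byte remainder:
 *
 *	remaining = trailing_bytes + chunks_left * 8;	// lea (%rdx,%rcx,8),%rdx
 *	remaining = trailing_bytes + chunks_left * 4;	// lea (%rdx,%rcx,4),%rdx
 */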
	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)