/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
	.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	jz	102f				/* already aligned */
103:	addl	%ecx,%edx			/* add back the alignment bytes not yet copied */
	jmp	.Lcopy_user_handle_tail

	_ASM_EXTABLE_UA(100b, 103b)
	_ASM_EXTABLE_UA(101b, 103b)
	.endm
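/*
 * For illustration only: ALIGN_DESTINATION byte-copies until the
 * destination is 8-byte aligned, so for a destination ending in ...5 it
 * moves 3 bytes before the main loop starts.  Rough C sketch with the
 * fault fixup omitted (dst/src/len mirror rdi/rsi/rdx):
 *
 *	unsigned long align = (unsigned long)dst & 7;
 *	if (align) {
 *		unsigned long head = 8 - align;	// bytes needed to reach alignment
 *		len -= head;
 *		while (head--)
 *			*dst++ = *src++;	// a fault here restores the remaining
 *						// count and jumps to the tail handler
 *	}
 */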
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_unrolled)
	jb	20f	/* less than 8 bytes, go to byte copy loop */
	jz	.L_copy_short_string
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
40:	leal (%rdx,%rcx,8),%edx
60:	jmp .Lcopy_user_handle_tail	/* edx holds the remaining byte count */
	_ASM_EXTABLE_UA(1b, 30b)
	_ASM_EXTABLE_UA(2b, 30b)
	_ASM_EXTABLE_UA(3b, 30b)
	_ASM_EXTABLE_UA(4b, 30b)
	_ASM_EXTABLE_UA(5b, 30b)
	_ASM_EXTABLE_UA(6b, 30b)
	_ASM_EXTABLE_UA(7b, 30b)
	_ASM_EXTABLE_UA(8b, 30b)
	_ASM_EXTABLE_UA(9b, 30b)
	_ASM_EXTABLE_UA(10b, 30b)
	_ASM_EXTABLE_UA(11b, 30b)
	_ASM_EXTABLE_UA(12b, 30b)
	_ASM_EXTABLE_UA(13b, 30b)
	_ASM_EXTABLE_UA(14b, 30b)
	_ASM_EXTABLE_UA(15b, 30b)
	_ASM_EXTABLE_UA(16b, 30b)
	_ASM_EXTABLE_UA(18b, 40b)
	_ASM_EXTABLE_UA(19b, 40b)
	_ASM_EXTABLE_UA(21b, 50b)
	_ASM_EXTABLE_UA(22b, 50b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
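/*
 * For illustration only: each _ASM_EXTABLE_UA(from, to) entry above makes a
 * user-access fault at the instruction labelled 'from' resume at the fixup
 * labelled 'to'.  The copy itself is roughly equivalent to the C sketch
 * below (hypothetical helper, fault handling omitted); rdi/rsi/rdx map to
 * dst/src/len and the uncopied byte count is returned in eax.
 *
 *	unsigned long copy_unrolled(char *dst, const char *src, unsigned long len)
 *	{
 *		while (len >= 64) {		// 8 quadwords per iteration (labels 1-16)
 *			for (int i = 0; i < 8; i++)
 *				((unsigned long *)dst)[i] = ((const unsigned long *)src)[i];
 *			dst += 64; src += 64; len -= 64;
 *		}
 *		while (len >= 8) {		// .L_copy_short_string: one quadword at a time
 *			*(unsigned long *)dst = *(const unsigned long *)src;
 *			dst += 8; src += 8; len -= 8;
 *		}
 *		while (len--)			// label 20: byte loop
 *			*dst++ = *src++;
 *		return 0;			// on a fault the fixups return the bytes left
 *	}
 */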
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD have
 * errata for rep movsq with counts above 4GB. If someone feels the need
 * to lift this limit, please keep those errata in mind.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_string)
	jb	2f	/* less than 8 bytes, go to byte copy loop */
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* edx holds the remaining byte count */
	jmp	.Lcopy_user_handle_tail
	_ASM_EXTABLE_UA(1b, 11b)
	_ASM_EXTABLE_UA(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
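/*
 * For illustration only: the string variant boils down to the C sketch
 * below (hypothetical helper, fault handling omitted).  A rep movsq moves
 * len/8 quadwords, then rep movsb moves the len%8 leftover bytes; the
 * counts are kept in 32-bit registers, hence the 4GB limit noted above.
 *
 *	unsigned long copy_string(char *dst, const char *src, unsigned len)
 *	{
 *		unsigned quads = len >> 3;	// rep movsq count
 *		unsigned rest  = len & 7;	// rep movsb count
 *
 *		while (quads--) {		// 1: rep movsq
 *			*(unsigned long *)dst = *(const unsigned long *)src;
 *			dst += 8; src += 8;
 *		}
 *		while (rest--)			// 3: rep movsb
 *			*dst++ = *src++;
 *		return 0;			// uncopied bytes on fault
 *	}
 */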
/*
 * Some CPUs support enhanced REP MOVSB/STOSB (ERMS) instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when the feature
 * is enabled.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_enhanced_fast_string)
	jb	.L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
12:	movl %ecx,%edx		/* edx holds the remaining byte count */
	jmp	.Lcopy_user_handle_tail
	_ASM_EXTABLE_UA(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
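/*
 * Note on how a variant gets picked: callers do not choose between the
 * three copy routines by hand.  In kernels of this vintage, the C helper
 * copy_user_generic() (asm/uaccess_64.h) uses the alternatives mechanism
 * to patch in the best call based on X86_FEATURE_REP_GOOD and
 * X86_FEATURE_ERMS.  Simplified sketch, not the actual implementation;
 * the cpu_has_* flags below are hypothetical:
 *
 *	unsigned long copy_user_generic(void *to, const void *from, unsigned len)
 *	{
 *		if (cpu_has_erms)		// X86_FEATURE_ERMS
 *			return copy_user_enhanced_fast_string(to, from, len);
 *		if (cpu_has_rep_good)		// X86_FEATURE_REP_GOOD
 *			return copy_user_generic_string(to, from, len);
 *		return copy_user_generic_unrolled(to, from, len);
 *	}
 */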
/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * there is no need to optimize tail handling.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
	_ASM_EXTABLE_UA(1b, 2b)
SYM_CODE_END(.Lcopy_user_handle_tail)
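/*
 * For illustration only: the tail handler is a byte-granular retry.  It
 * copies until it either finishes or faults again (label 1 is the faulting
 * copy, label 2 the recovery point) and returns how many bytes were left
 * uncopied.  C sketch with a hypothetical helper name:
 *
 *	unsigned long handle_tail(char *dst, const char *src, unsigned len)
 *	{
 *		unsigned long left = len;
 *
 *		while (left) {			// byte-at-a-time retry
 *			*dst++ = *src++;	// a fault here ends the copy via
 *			left--;			// the exception table entry above
 *		}
 *		return left;			// 0 on success, bytes left on fault
 *	}
 */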
/*
 * copy_user_nocache - Uncached memory copy with exception handling.
 * This will force the destination out of the cache for more performance.
 *
 * Note: A cached memory copy is used when the destination or size is not
 * naturally aligned. That is, the non-temporal path requires:
 *  - 8-byte alignment when size is 8 bytes or larger.
 *  - 4-byte alignment when size is 4 bytes.
 */
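/*
 * For illustration only: the alignment requirements in the note above can
 * be summarized by the predicate below; buffers that fail it are (partly or
 * fully) copied through the cache rather than with non-temporal movnti
 * stores.  Hypothetical helper name:
 *
 *	int nocache_copy_allowed(const void *dst, unsigned long size)
 *	{
 *		if (size >= 8)			// 8-byte movnti stores need
 *			return !((unsigned long)dst & 7);	// 8-byte alignment
 *		if (size == 4)			// a single 4-byte movnti store
 *			return !((unsigned long)dst & 3);	// needs 4-byte alignment
 *		return 0;			// everything else: cached copy
 *	}
 */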
SYM_FUNC_START(__copy_user_nocache)
	/* If size is less than 8 bytes, go to 4-byte copy */
	jb	.L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */

	/* Set 4x8-byte copy count and remainder */
	jz	.L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	jnz	.L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	jz	.L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
21:	movnti %r8,(%rdi)
	jnz	.L_8b_nocache_copy_loop
	/* If no bytes are left, we're done */
.L_4b_nocache_copy_entry:

	/* If destination is not 4-byte aligned, go to byte copy: */
	jnz	.L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	jz	.L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
31:	movnti %r8d,(%rdi)

	/* If no bytes are left, we're done: */

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
.L_1b_cache_copy_loop:
	jnz	.L_1b_cache_copy_loop

	/* Finished copying; fence the prior non-temporal stores */
	jmp	.L_fixup_handle_tail
.L_fixup_8b_copy:
	lea	(%rdx,%rcx,8),%rdx
	jmp	.L_fixup_handle_tail
.L_fixup_4b_copy:
	lea	(%rdx,%rcx,4),%rdx
	jmp	.L_fixup_handle_tail
.L_fixup_handle_tail:
	sfence
	jmp	.Lcopy_user_handle_tail
	_ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_UA(20b, .L_fixup_8b_copy)
	_ASM_EXTABLE_UA(21b, .L_fixup_8b_copy)
	_ASM_EXTABLE_UA(30b, .L_fixup_4b_copy)
	_ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
	_ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
	_ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
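/*
 * For illustration only: the overall structure of __copy_user_nocache in C,
 * with ordinary stores standing in for the movnti instructions and all
 * fault fixups left out.  Hypothetical helper, not kernel code:
 *
 *	unsigned long copy_nocache_sketch(char *dst, const char *src, unsigned long len)
 *	{
 *		if (len >= 8) {
 *			while ((unsigned long)dst & 7) {	// ALIGN_DESTINATION:
 *				*dst++ = *src++;		// cached head copy
 *				len--;
 *			}
 *			while (len >= 64) {			// 4x8-byte movnti loop
 *				for (int i = 0; i < 8; i++)
 *					((unsigned long *)dst)[i] = ((const unsigned long *)src)[i];
 *				dst += 64; src += 64; len -= 64;
 *			}
 *			while (len >= 8) {			// 8-byte movnti loop
 *				*(unsigned long *)dst = *(const unsigned long *)src;
 *				dst += 8; src += 8; len -= 8;
 *			}
 *		}
 *		if (len >= 4 && !((unsigned long)dst & 3)) {	// single 4-byte movnti
 *			*(unsigned int *)dst = *(const unsigned int *)src;
 *			dst += 4; src += 4; len -= 4;
 *		}
 *		while (len--)					// cached byte loop
 *			*dst++ = *src++;
 *		// the real code executes sfence here to order the movnti stores
 *		return 0;					// uncopied bytes on fault
 *	}
 */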