/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
	.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
	/* copy one byte at a time until the destination is 8-byte aligned */
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
	.endm
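/*
 * Worked example (editor's sketch, not from the original source): for
 * %rdi = 0x1003 and %rdx = 64, the masking above gives ecx = 3, then
 * ecx = 8 - 3 = 5, so the 100:/101: loop copies five bytes, %rdi
 * reaches the aligned 0x1008, and %rdx drops to 59 for the main loop.
 */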
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_to_user)
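/*
 * Caller's view (editor's sketch in C, assuming the usual uaccess
 * contract): the copy helpers return the number of bytes NOT copied,
 * so zero means success.
 *
 *	if (copy_to_user(user_buf, kernel_buf, len))
 *		return -EFAULT;
 */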
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_from_user)
	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous
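/*
 * Editor's note: when the source range fails the limit check,
 * bad_from_user zeroes the entire destination buffer (the rep stosb
 * above) before returning the full count, so stale kernel memory is
 * never handed back to the caller.
 */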
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient micro
 * code for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
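/*
 * Rough C equivalent of the structure below (editor's sketch, ignoring
 * alignment and fault handling):
 *
 *	while (len >= 64) { copy 8 quadwords; src += 64; dst += 64; len -= 64; }
 *	while (len >= 8)  { copy 1 quadword;  src += 8;  dst += 8;  len -= 8;  }
 *	while (len)       { copy 1 byte;      src++;     dst++;     len--;     }
 */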
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx		/* 64-byte iterations left, in bytes ... */
	addl %ecx,%edx		/* ... plus the not-yet-copied tail */
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx	/* remaining = tail + 8*(quadwords left) */
	jmp 60f
50:	movl %ecx,%edx		/* remaining bytes of the byte loop */
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)
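/*
 * Editor's note: each fixup label above (30:/40:/50:) converts the
 * loop counter back into a byte count in %edx before jumping to
 * copy_user_handle_tail, which retries byte-by-byte and returns the
 * number of bytes still uncopied in %eax.
 */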
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
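/*
 * Worked example (editor's sketch): for rdx = 20 the code below moves
 * ecx = 20 >> 3 = 2 quadwords with rep movsq, then the remaining
 * 20 & 7 = 4 bytes with rep movsb.
 */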
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
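/*
 * Editor's note: enhanced REP MOVSB/STOSB support is advertised by the
 * ERMS CPUID bit (CPUID.(EAX=07H,ECX=0H):EBX bit 9), which the kernel
 * exposes as X86_FEATURE_ERMS; a single rep movsb is then expected to
 * perform well across copy sizes, so no unrolling is needed below.
 */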
ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)