/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
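
/*
 * ALTERNATIVE_JUMP emits a 5-byte near jump to \orig and records the
 * site in .altinstructions; at boot the alternatives patching code
 * (apply_alternatives() in arch/x86/kernel/alternative.c) rewrites
 * the jump to target \alt instead on CPUs that advertise \feature.
 */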
	.macro ALTERNATIVE_JUMP feature,orig,alt
0:
	.byte 0xe9			/* 32bit jump */
	.long \orig-1f			/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt-1b			/* offset */   /* or alternatively to alt */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad 0b			/* instruction to patch */
	.quad 2b			/* replacement jump */
	.byte \feature			/* when feature is set */
	.byte 5				/* old instruction length */
	.byte 5				/* replacement length */
	.previous
	.endm
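
/*
 * ALIGN_DESTINATION byte-copies until %rdi is 8-byte aligned so that
 * the quadword loops below always store to an aligned destination.  A
 * fault inside the alignment loop is routed to the 103: fixup, which
 * adds the bytes still owed (%ecx) back into the count before handing
 * off to copy_user_handle_tail.
 */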
	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 100b,103b
	.quad 101b,103b
	.previous
#endif
	.endm
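
/*
 * The user-range checks below compute the end address of the access:
 * jc catches wraparound of the 64-bit add, and jae rejects anything
 * at or beyond the task's addr_limit (TI_addr_limit is the offset of
 * thread_info->addr_limit generated in asm-offsets).
 */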
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)
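
/*
 * copy_user_generic and __copy_from_user_inatomic skip the limit
 * check; their callers are expected to have validated the range
 * already (e.g. via access_ok()).
 */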

ENTRY(copy_user_generic)
	CFI_STARTPROC
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(copy_user_generic)

ENTRY(__copy_from_user_inatomic)
	CFI_STARTPROC
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(__copy_from_user_inatomic)
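
/*
 * A failed copy_from_user must zero the whole destination buffer so
 * that no stale kernel data can leak to the caller; a failed
 * copy_to_user just reports the full count as uncopied.
 */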
	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient micro
 * code for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
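/*
 * Structure: a 64-bytes-per-iteration unrolled loop (labels 1-16),
 * then an 8-byte loop (18/19), then a byte loop (21/22).  Every load
 * and store has an __ex_table entry, so a fault resumes in the fixup
 * (30/40/50) that recomputes the remaining byte count for
 * copy_user_handle_tail.
 */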
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f				/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx			/* faulted in the 64-byte loop */
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx		/* faulted in the 8-byte loop */
	jmp 60f
50:	movl %ecx,%edx			/* faulted in the byte loop */
60:	jmp copy_user_handle_tail	/* ecx is zerorest also */
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,30b
	.quad 2b,30b
	.quad 3b,30b
	.quad 4b,30b
	.quad 5b,30b
	.quad 6b,30b
	.quad 7b,30b
	.quad 8b,30b
	.quad 9b,30b
	.quad 10b,30b
	.quad 11b,30b
	.quad 12b,30b
	.quad 13b,30b
	.quad 14b,30b
	.quad 15b,30b
	.quad 16b,30b
	.quad 18b,40b
	.quad 19b,40b
	.quad 21b,50b
	.quad 22b,50b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please consider the errata first.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
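/*
 * Fault handling: if rep movsq faults, %rcx holds the number of
 * quadwords not yet copied, so fixup 11: rebuilds the remaining byte
 * count as %rdx + 8*%rcx; a fault in rep movsb lands in 12: with the
 * byte count already in %ecx.
 */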
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 4f
	cmpl $8,%edx
	jb 2f				/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
4:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
11:	lea (%rdx,%rcx,8),%rcx
12:	movl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,11b
	.quad 3b,12b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)