/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
9 #include <linux/highmem.h>
10 #include <linux/blkdev.h>
11 #include <linux/module.h>
12 #include <linux/backing-dev.h>
13 #include <asm/uaccess.h>
/*
 * Decide whether a rep;movsl-style copy is acceptable for this
 * (src, dst, len) combination.  On the Intel CPUs covered by
 * CONFIG_X86_INTEL_USERCOPY, movsl is slow when the two addresses are
 * unfavourably aligned relative to each other; movsl_mask.mask holds the
 * alignment bits that matter (NOTE(review): mask semantics inferred from
 * usage — confirm against the code that initializes movsl_mask).
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/* Only long copies matter; misaligned pairs use the unrolled copy. */
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
/* Pointer-friendly wrapper: casts the addresses to unsigned long. */
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
/*
 * Copy a null terminated string from userspace.
 */
/*
 * __do_strncpy_from_user(dst, src, count, res):
 * inline-asm core shared by strncpy_from_user()/__strncpy_from_user().
 * Copies bytes from user @src to kernel @dst until a NUL byte (the
 * "testb %%al,%%al" check below) or @count bytes; on a faulting user
 * access the fixup path stores -EFAULT (the "i"(-EFAULT) operand) into
 * @res via the "=d"(res) output.
 * NOTE(review): this extraction is truncated — the lodsb/stosb copy
 * loop, the .fixup body and the __ex_table entries between the visible
 * lines are missing from this view; consult the complete file before
 * editing the asm.
 */
31 #define __do_strncpy_from_user(dst,src,count,res) \
33 int __d0, __d1, __d2; \
35 __asm__ __volatile__( \
40 " testb %%al,%%al\n" \
46 ".section .fixup,\"ax\"\n" \
50 ".section __ex_table,\"a\"\n" \
54 : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
56 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
61 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
62 * @dst: Destination address, in kernel space. This buffer must be at
63 * least @count bytes long.
64 * @src: Source address, in user space.
65 * @count: Maximum number of bytes to copy, including the trailing NUL.
67 * Copies a NUL-terminated string from userspace to kernel space.
68 * Caller must check the specified block with access_ok() before calling
71 * On success, returns the length of the string (not including the trailing
74 * If access to userspace fails, returns -EFAULT (some data may have been
77 * If @count is smaller than the length of the string, copies @count bytes
81 __strncpy_from_user(char *dst
, const char __user
*src
, long count
)
84 __do_strncpy_from_user(dst
, src
, count
, res
);
87 EXPORT_SYMBOL(__strncpy_from_user
);
90 * strncpy_from_user: - Copy a NUL terminated string from userspace.
91 * @dst: Destination address, in kernel space. This buffer must be at
92 * least @count bytes long.
93 * @src: Source address, in user space.
94 * @count: Maximum number of bytes to copy, including the trailing NUL.
96 * Copies a NUL-terminated string from userspace to kernel space.
98 * On success, returns the length of the string (not including the trailing
101 * If access to userspace fails, returns -EFAULT (some data may have been
104 * If @count is smaller than the length of the string, copies @count bytes
105 * and returns @count.
108 strncpy_from_user(char *dst
, const char __user
*src
, long count
)
111 if (access_ok(VERIFY_READ
, src
, 1))
112 __do_strncpy_from_user(dst
, src
, count
, res
);
115 EXPORT_SYMBOL(strncpy_from_user
);
/*
 * __do_clear_user(addr, size): inline-asm core of clear_user().
 * Zeroes @size bytes at user address @addr: per the input operands,
 * eax = 0, ecx = size/4 longs are stored first, then the size & 3 tail
 * bytes.  The .fixup at label "3:" rebuilds the outstanding byte count
 * from the remaining long count ("lea 0(%2,%0,4),%0"), so after a fault
 * @size holds the number of bytes NOT cleared.
 * NOTE(review): truncated extraction — the rep;stos store sequence and
 * the __ex_table entries are missing from this view.
 */
121 #define __do_clear_user(addr,size) \
125 __asm__ __volatile__( \
130 ".section .fixup,\"ax\"\n" \
131 "3: lea 0(%2,%0,4),%0\n" \
134 ".section __ex_table,\"a\"\n" \
139 : "=&c"(size), "=&D" (__d0) \
140 : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
144 * clear_user: - Zero a block of memory in user space.
145 * @to: Destination address, in user space.
146 * @n: Number of bytes to zero.
148 * Zero a block of memory in user space.
150 * Returns number of bytes that could not be cleared.
151 * On success, this will be zero.
154 clear_user(void __user
*to
, unsigned long n
)
157 if (access_ok(VERIFY_WRITE
, to
, n
))
158 __do_clear_user(to
, n
);
161 EXPORT_SYMBOL(clear_user
);
164 * __clear_user: - Zero a block of memory in user space, with less checking.
165 * @to: Destination address, in user space.
166 * @n: Number of bytes to zero.
168 * Zero a block of memory in user space. Caller must check
169 * the specified block with access_ok() before calling this function.
171 * Returns number of bytes that could not be cleared.
172 * On success, this will be zero.
175 __clear_user(void __user
*to
, unsigned long n
)
177 __do_clear_user(to
, n
);
180 EXPORT_SYMBOL(__clear_user
);
/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
193 long strnlen_user(const char __user
*s
, long n
)
195 unsigned long mask
= -__addr_ok(s
);
196 unsigned long res
, tmp
;
/*
 * mask is all-ones when __addr_ok(s) accepts the pointer and all-zeros
 * otherwise; it is fed to the asm as operand "3" — presumably combined
 * into the result so a bogus pointer yields 0 without an up-front check
 * (TODO confirm: the asm body is not visible here).  The fixup at "2:"
 * zeroes eax, producing the documented 0-on-exception return.
 * NOTE(review): the scan loop and most of the .fixup / __ex_table
 * sections are missing from this truncated extraction.
 */
200 __asm__
__volatile__(
209 ".section .fixup,\"ax\"\n"
210 "2: xorl %%eax,%%eax\n"
215 ".section __ex_table,\"a\"\n"
219 :"=r" (n
), "=D" (s
), "=a" (res
), "=c" (tmp
)
220 :"0" (n
), "1" (s
), "2" (0), "3" (mask
)
224 EXPORT_SYMBOL(strnlen_user
);
226 #ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * __copy_user_intel(to, from, size): unrolled kernel->user copy used on
 * Intel CPUs where rep;movsl performs poorly for unfavourably aligned
 * buffers (see movsl_is_ok()).  The main body, labels "3:".."34:",
 * moves 64 bytes per iteration through the eax/edx register pair; the
 * loads at "1:"/"2:" touch the source 32 and 64 bytes ahead, which
 * appears to be a software prefetch.  Faults go through the
 * .fixup/__ex_table machinery; fixup "101:" rebuilds the outstanding
 * byte count ("lea 0(%%eax,%0,4),%0") that is returned as "bytes not
 * copied".
 * NOTE(review): truncated extraction — the loop control around
 * "35:"/"36:", the tail copy, most .fixup code, every __ex_table entry,
 * and the declarations of d0/d1 and the return statement are not
 * visible here.  Do not modify without the complete file.
 */
228 __copy_user_intel(void __user
*to
, const void *from
, unsigned long size
)
231 __asm__
__volatile__(
233 "1: movl 32(%4), %%eax\n"
236 "2: movl 64(%4), %%eax\n"
238 "3: movl 0(%4), %%eax\n"
239 "4: movl 4(%4), %%edx\n"
240 "5: movl %%eax, 0(%3)\n"
241 "6: movl %%edx, 4(%3)\n"
242 "7: movl 8(%4), %%eax\n"
243 "8: movl 12(%4),%%edx\n"
244 "9: movl %%eax, 8(%3)\n"
245 "10: movl %%edx, 12(%3)\n"
246 "11: movl 16(%4), %%eax\n"
247 "12: movl 20(%4), %%edx\n"
248 "13: movl %%eax, 16(%3)\n"
249 "14: movl %%edx, 20(%3)\n"
250 "15: movl 24(%4), %%eax\n"
251 "16: movl 28(%4), %%edx\n"
252 "17: movl %%eax, 24(%3)\n"
253 "18: movl %%edx, 28(%3)\n"
254 "19: movl 32(%4), %%eax\n"
255 "20: movl 36(%4), %%edx\n"
256 "21: movl %%eax, 32(%3)\n"
257 "22: movl %%edx, 36(%3)\n"
258 "23: movl 40(%4), %%eax\n"
259 "24: movl 44(%4), %%edx\n"
260 "25: movl %%eax, 40(%3)\n"
261 "26: movl %%edx, 44(%3)\n"
262 "27: movl 48(%4), %%eax\n"
263 "28: movl 52(%4), %%edx\n"
264 "29: movl %%eax, 48(%3)\n"
265 "30: movl %%edx, 52(%3)\n"
266 "31: movl 56(%4), %%eax\n"
267 "32: movl 60(%4), %%edx\n"
268 "33: movl %%eax, 56(%3)\n"
269 "34: movl %%edx, 60(%3)\n"
275 "35: movl %0, %%eax\n"
280 "36: movl %%eax, %0\n"
283 ".section .fixup,\"ax\"\n"
284 "101: lea 0(%%eax,%0,4),%0\n"
287 ".section __ex_table,\"a\"\n"
328 : "=&c"(size
), "=&D" (d0
), "=&S" (d1
)
329 : "1"(to
), "2"(from
), "0"(size
)
330 : "eax", "edx", "memory");
/*
 * __copy_user_zeroing_intel(to, from, size): unrolled user->kernel copy
 * for Intel CPUs, 64 bytes per iteration ("2:".."91:" load/store pairs
 * through eax/edx).  "Zeroing" variant: after a fault the fixup at "9:"
 * recomputes the remaining byte count and the "xorl %%eax,%%eax" below
 * begins zero-filling — presumably padding the rest of the kernel
 * buffer, per the copy_from_user contract (TODO confirm: the fill loop
 * itself is not visible in this extraction).
 * NOTE(review): truncated extraction — loop control, the tail copy at
 * "5:", most .fixup code, all __ex_table entries, and the d0/d1
 * declarations and return statement are missing from this view.
 */
335 __copy_user_zeroing_intel(void *to
, const void __user
*from
, unsigned long size
)
338 __asm__
__volatile__(
340 "0: movl 32(%4), %%eax\n"
343 "1: movl 64(%4), %%eax\n"
345 "2: movl 0(%4), %%eax\n"
346 "21: movl 4(%4), %%edx\n"
347 " movl %%eax, 0(%3)\n"
348 " movl %%edx, 4(%3)\n"
349 "3: movl 8(%4), %%eax\n"
350 "31: movl 12(%4),%%edx\n"
351 " movl %%eax, 8(%3)\n"
352 " movl %%edx, 12(%3)\n"
353 "4: movl 16(%4), %%eax\n"
354 "41: movl 20(%4), %%edx\n"
355 " movl %%eax, 16(%3)\n"
356 " movl %%edx, 20(%3)\n"
357 "10: movl 24(%4), %%eax\n"
358 "51: movl 28(%4), %%edx\n"
359 " movl %%eax, 24(%3)\n"
360 " movl %%edx, 28(%3)\n"
361 "11: movl 32(%4), %%eax\n"
362 "61: movl 36(%4), %%edx\n"
363 " movl %%eax, 32(%3)\n"
364 " movl %%edx, 36(%3)\n"
365 "12: movl 40(%4), %%eax\n"
366 "71: movl 44(%4), %%edx\n"
367 " movl %%eax, 40(%3)\n"
368 " movl %%edx, 44(%3)\n"
369 "13: movl 48(%4), %%eax\n"
370 "81: movl 52(%4), %%edx\n"
371 " movl %%eax, 48(%3)\n"
372 " movl %%edx, 52(%3)\n"
373 "14: movl 56(%4), %%eax\n"
374 "91: movl 60(%4), %%edx\n"
375 " movl %%eax, 56(%3)\n"
376 " movl %%edx, 60(%3)\n"
382 "5: movl %0, %%eax\n"
390 ".section .fixup,\"ax\"\n"
391 "9: lea 0(%%eax,%0,4),%0\n"
394 " xorl %%eax,%%eax\n"
400 ".section __ex_table,\"a\"\n"
423 : "=&c"(size
), "=&D" (d0
), "=&S" (d1
)
424 : "1"(to
), "2"(from
), "0"(size
)
425 : "eax", "edx", "memory");
/*
 * Non Temporal Hint version of __copy_user_zeroing_intel.  It is cache
 * aware: the stores use movnti, the SSE2 non-temporal store, so the
 * copied data bypasses the cache instead of evicting useful lines.
 * Originally contributed by hyoshiok@miraclelinux.com.
 * NOTE(review): truncated extraction — loop control, the tail copy at
 * "5:", most of the .fixup body after "9:"/"xorl", all __ex_table
 * entries, and the d0/d1 declarations and return statement are missing
 * from this view.  Callers gate this on cpu_has_xmm2 (movnti requires
 * SSE2).
 */
434 static unsigned long __copy_user_zeroing_intel_nocache(void *to
,
435 const void __user
*from
, unsigned long size
)
439 __asm__
__volatile__(
441 "0: movl 32(%4), %%eax\n"
444 "1: movl 64(%4), %%eax\n"
446 "2: movl 0(%4), %%eax\n"
447 "21: movl 4(%4), %%edx\n"
448 " movnti %%eax, 0(%3)\n"
449 " movnti %%edx, 4(%3)\n"
450 "3: movl 8(%4), %%eax\n"
451 "31: movl 12(%4),%%edx\n"
452 " movnti %%eax, 8(%3)\n"
453 " movnti %%edx, 12(%3)\n"
454 "4: movl 16(%4), %%eax\n"
455 "41: movl 20(%4), %%edx\n"
456 " movnti %%eax, 16(%3)\n"
457 " movnti %%edx, 20(%3)\n"
458 "10: movl 24(%4), %%eax\n"
459 "51: movl 28(%4), %%edx\n"
460 " movnti %%eax, 24(%3)\n"
461 " movnti %%edx, 28(%3)\n"
462 "11: movl 32(%4), %%eax\n"
463 "61: movl 36(%4), %%edx\n"
464 " movnti %%eax, 32(%3)\n"
465 " movnti %%edx, 36(%3)\n"
466 "12: movl 40(%4), %%eax\n"
467 "71: movl 44(%4), %%edx\n"
468 " movnti %%eax, 40(%3)\n"
469 " movnti %%edx, 44(%3)\n"
470 "13: movl 48(%4), %%eax\n"
471 "81: movl 52(%4), %%edx\n"
472 " movnti %%eax, 48(%3)\n"
473 " movnti %%edx, 52(%3)\n"
474 "14: movl 56(%4), %%eax\n"
475 "91: movl 60(%4), %%edx\n"
476 " movnti %%eax, 56(%3)\n"
477 " movnti %%edx, 60(%3)\n"
484 "5: movl %0, %%eax\n"
492 ".section .fixup,\"ax\"\n"
493 "9: lea 0(%%eax,%0,4),%0\n"
496 " xorl %%eax,%%eax\n"
502 ".section __ex_table,\"a\"\n"
525 : "=&c"(size
), "=&D" (d0
), "=&S" (d1
)
526 : "1"(to
), "2"(from
), "0"(size
)
527 : "eax", "edx", "memory");
/*
 * __copy_user_intel_nocache(to, from, size): same non-temporal (movnti)
 * 64-byte unrolled copy as __copy_user_zeroing_intel_nocache, but the
 * "nozero" variant — note its .fixup has no "xorl %%eax,%%eax", so the
 * destination is NOT zero-padded after a fault; only the remaining byte
 * count is rebuilt at "9:" and returned.
 * NOTE(review): truncated extraction — loop control, the tail copy at
 * "5:", remaining .fixup code, all __ex_table entries, and the d0/d1
 * declarations and return statement are missing from this view.
 */
531 static unsigned long __copy_user_intel_nocache(void *to
,
532 const void __user
*from
, unsigned long size
)
536 __asm__
__volatile__(
538 "0: movl 32(%4), %%eax\n"
541 "1: movl 64(%4), %%eax\n"
543 "2: movl 0(%4), %%eax\n"
544 "21: movl 4(%4), %%edx\n"
545 " movnti %%eax, 0(%3)\n"
546 " movnti %%edx, 4(%3)\n"
547 "3: movl 8(%4), %%eax\n"
548 "31: movl 12(%4),%%edx\n"
549 " movnti %%eax, 8(%3)\n"
550 " movnti %%edx, 12(%3)\n"
551 "4: movl 16(%4), %%eax\n"
552 "41: movl 20(%4), %%edx\n"
553 " movnti %%eax, 16(%3)\n"
554 " movnti %%edx, 20(%3)\n"
555 "10: movl 24(%4), %%eax\n"
556 "51: movl 28(%4), %%edx\n"
557 " movnti %%eax, 24(%3)\n"
558 " movnti %%edx, 28(%3)\n"
559 "11: movl 32(%4), %%eax\n"
560 "61: movl 36(%4), %%edx\n"
561 " movnti %%eax, 32(%3)\n"
562 " movnti %%edx, 36(%3)\n"
563 "12: movl 40(%4), %%eax\n"
564 "71: movl 44(%4), %%edx\n"
565 " movnti %%eax, 40(%3)\n"
566 " movnti %%edx, 44(%3)\n"
567 "13: movl 48(%4), %%eax\n"
568 "81: movl 52(%4), %%edx\n"
569 " movnti %%eax, 48(%3)\n"
570 " movnti %%edx, 52(%3)\n"
571 "14: movl 56(%4), %%eax\n"
572 "91: movl 60(%4), %%edx\n"
573 " movnti %%eax, 56(%3)\n"
574 " movnti %%edx, 60(%3)\n"
581 "5: movl %0, %%eax\n"
589 ".section .fixup,\"ax\"\n"
590 "9: lea 0(%%eax,%0,4),%0\n"
593 ".section __ex_table,\"a\"\n"
616 : "=&c"(size
), "=&D" (d0
), "=&S" (d1
)
617 : "1"(to
), "2"(from
), "0"(size
)
618 : "eax", "edx", "memory");
/*
 * !CONFIG_X86_INTEL_USERCOPY branch: leave these declared but undefined.
 * There should not be any references to them in that configuration; the
 * generic __copy_user/__copy_user_zeroing macros are used instead.
 * NOTE(review): the "#else" that should precede these prototypes appears
 * to be missing from this truncated extraction — as shown, they would
 * land in the same branch as the definitions above.
 */
628 unsigned long __copy_user_zeroing_intel(void *to
, const void __user
*from
,
630 unsigned long __copy_user_intel(void __user
*to
, const void *from
,
632 unsigned long __copy_user_zeroing_intel_nocache(void *to
,
633 const void __user
*from
, unsigned long size
);
634 #endif /* CONFIG_X86_INTEL_USERCOPY */
/*
 * __copy_user(to, from, size): generic arbitrary sized copy, used when
 * movsl_is_ok() says plain string moves are fine.  Updates @size in
 * place to the number of bytes not copied; the fixup at "3:" rebuilds
 * that byte count from the remaining long count ("lea 0(%3,%0,4),%0").
 * No zero-fill on fault — this is the "nozero" building block.
 * NOTE(review): truncated extraction — the rep;movs body, remaining
 * .fixup code and the __ex_table entries are missing from this view.
 */
637 #define __copy_user(to,from,size) \
639 int __d0, __d1, __d2; \
640 __asm__ __volatile__( \
656 ".section .fixup,\"ax\"\n" \
659 "3: lea 0(%3,%0,4),%0\n" \
662 ".section __ex_table,\"a\"\n" \
668 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
669 : "3"(size), "0"(size), "1"(to), "2"(from) \
/*
 * __copy_user_zeroing(to, from, size): like __copy_user(), but on a
 * faulting user read the fixup additionally zeroes ("xorl %%eax,%%eax"
 * begins the fill) the un-copied remainder of the kernel destination,
 * per the copy_from_user contract.  @size is updated in place to the
 * number of bytes not copied.
 * NOTE(review): truncated extraction — the rep;movs body, the zero-fill
 * loop and the __ex_table entries are missing from this view.
 */
673 #define __copy_user_zeroing(to,from,size) \
675 int __d0, __d1, __d2; \
676 __asm__ __volatile__( \
692 ".section .fixup,\"ax\"\n" \
695 "3: lea 0(%3,%0,4),%0\n" \
698 " xorl %%eax,%%eax\n" \
704 ".section __ex_table,\"a\"\n" \
710 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
711 : "3"(size), "0"(size), "1"(to), "2"(from) \
/*
 * __copy_to_user_ll(to, from, n): low-level kernel->user copy.
 * Returns the number of bytes that could not be copied (0 on success).
 *
 * On CPUs without a working WP bit (!CONFIG_X86_WP_WORKS_OK and
 * boot_cpu_data.wp_works_ok == 0) a supervisor-mode write would not
 * fault on a read-only user page, so the user copy cannot rely on the
 * normal fault/fixup path.  Instead each page is pinned with
 * get_user_pages() (write=1 forces any COW break), mapped with
 * kmap_atomic(), memcpy'd PAGE_SIZE-offset bytes at a time, marked
 * dirty, and unpinned.  On -ENOMEM for init, mmap_sem is dropped and
 * congestion_wait() throttles before retrying.
 *
 * Otherwise the copy goes through __copy_user() when movsl_is_ok()
 * accepts the alignment, else through the unrolled __copy_user_intel().
 *
 * NOTE(review): this extraction is heavily truncated — the loop/retry
 * control flow, error-return paths, the else keywords, and the final
 * return are missing between the visible lines; also "¤t" below is
 * an HTML-entity mangling of "&current" ("&curren;" -> "¤").  Consult
 * the complete file before editing.
 */
715 unsigned long __copy_to_user_ll(void __user
*to
, const void *from
,
718 BUG_ON((long) n
< 0);
719 #ifndef CONFIG_X86_WP_WORKS_OK
720 if (unlikely(boot_cpu_data
.wp_works_ok
== 0) &&
721 ((unsigned long )to
) < TASK_SIZE
) {
723 * CPU does not honor the WP bit when writing
724 * from supervisory mode, and due to preemption or SMP,
725 * the page tables can change at any time.
726 * Do it manually. Manfred <manfred@colorfullife.com>
729 unsigned long offset
= ((unsigned long)to
)%PAGE_SIZE
;
730 unsigned long len
= PAGE_SIZE
- offset
;
739 down_read(¤t
->mm
->mmap_sem
);
740 retval
= get_user_pages(current
, current
->mm
,
741 (unsigned long )to
, 1, 1, 0, &pg
, NULL
);
743 if (retval
== -ENOMEM
&& is_init(current
)) {
744 up_read(¤t
->mm
->mmap_sem
);
745 congestion_wait(WRITE
, HZ
/50);
750 up_read(¤t
->mm
->mmap_sem
);
754 maddr
= kmap_atomic(pg
, KM_USER0
);
755 memcpy(maddr
+ offset
, from
, len
);
756 kunmap_atomic(maddr
, KM_USER0
);
757 set_page_dirty_lock(pg
);
759 up_read(¤t
->mm
->mmap_sem
);
768 if (movsl_is_ok(to
, from
, n
))
769 __copy_user(to
, from
, n
);
771 n
= __copy_user_intel(to
, from
, n
);
774 EXPORT_SYMBOL(__copy_to_user_ll
);
776 unsigned long __copy_from_user_ll(void *to
, const void __user
*from
,
780 if (movsl_is_ok(to
, from
, n
))
781 __copy_user_zeroing(to
, from
, n
);
783 n
= __copy_user_zeroing_intel(to
, from
, n
);
786 EXPORT_SYMBOL(__copy_from_user_ll
);
788 unsigned long __copy_from_user_ll_nozero(void *to
, const void __user
*from
,
792 if (movsl_is_ok(to
, from
, n
))
793 __copy_user(to
, from
, n
);
795 n
= __copy_user_intel((void __user
*)to
,
796 (const void *)from
, n
);
799 EXPORT_SYMBOL(__copy_from_user_ll_nozero
);
801 unsigned long __copy_from_user_ll_nocache(void *to
, const void __user
*from
,
805 #ifdef CONFIG_X86_INTEL_USERCOPY
806 if ( n
> 64 && cpu_has_xmm2
)
807 n
= __copy_user_zeroing_intel_nocache(to
, from
, n
);
809 __copy_user_zeroing(to
, from
, n
);
811 __copy_user_zeroing(to
, from
, n
);
816 unsigned long __copy_from_user_ll_nocache_nozero(void *to
, const void __user
*from
,
820 #ifdef CONFIG_X86_INTEL_USERCOPY
821 if ( n
> 64 && cpu_has_xmm2
)
822 n
= __copy_user_intel_nocache(to
, from
, n
);
824 __copy_user(to
, from
, n
);
826 __copy_user(to
, from
, n
);
832 * copy_to_user: - Copy a block of data into user space.
833 * @to: Destination address, in user space.
834 * @from: Source address, in kernel space.
835 * @n: Number of bytes to copy.
837 * Context: User context only. This function may sleep.
839 * Copy data from kernel space to user space.
841 * Returns number of bytes that could not be copied.
842 * On success, this will be zero.
845 copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
847 BUG_ON((long) n
< 0);
848 if (access_ok(VERIFY_WRITE
, to
, n
))
849 n
= __copy_to_user(to
, from
, n
);
852 EXPORT_SYMBOL(copy_to_user
);
855 * copy_from_user: - Copy a block of data from user space.
856 * @to: Destination address, in kernel space.
857 * @from: Source address, in user space.
858 * @n: Number of bytes to copy.
860 * Context: User context only. This function may sleep.
862 * Copy data from user space to kernel space.
864 * Returns number of bytes that could not be copied.
865 * On success, this will be zero.
867 * If some data could not be copied, this function will pad the copied
868 * data to the requested size using zero bytes.
871 copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
873 BUG_ON((long) n
< 0);
874 if (access_ok(VERIFY_READ
, from
, n
))
875 n
= __copy_from_user(to
, from
, n
);
880 EXPORT_SYMBOL(copy_from_user
);