Avoid beyond bounds copy while caching ACL
[zen-stable.git] / arch/sparc/lib/user_fixup.c
blob ac96ae23670900ebdcf13f1cd65319e494bb48e9

/* user_fixup.c: Fix up user copy faults.
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/uaccess.h>

/* Calculating the exact fault address when using
 * block loads and stores can be very complicated.
 *
 * Instead of trying to be clever and handling all
 * of the cases, just fix things up simply here.
 */

static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
{
	unsigned long fault_addr = current_thread_info()->fault_address;
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;
	} else {
		*offset = fault_addr - start;
		size = end - fault_addr;
	}
	return size;
}
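
/* Added illustrative note, not part of the original file: for a copy
 * of 0x100 bytes starting at 0x1000 that faults at 0x1040,
 * compute_size() sets *offset to 0x40 and returns 0xc0, i.e. the
 * number of bytes from the fault address to the end of the region.
 */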

unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
{
	unsigned long offset;

	size = compute_size((unsigned long) from, size, &offset);
	if (likely(size))
		memset(to + offset, 0, size);

	return size;
}
EXPORT_SYMBOL(copy_from_user_fixup);
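
/* Added note, not part of the original file: a fault while copying to
 * userspace leaves nothing to clean up in the kernel source buffer,
 * so copy_to_user_fixup() below only reports how many bytes were left
 * uncopied and discards the offset computed by compute_size().
 */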
unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
{
	unsigned long offset;

	return compute_size((unsigned long) to, size, &offset);
}
EXPORT_SYMBOL(copy_to_user_fixup);

unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
{
	unsigned long fault_addr = current_thread_info()->fault_address;
	unsigned long start = (unsigned long) to;
	unsigned long end = start + size;

	if (fault_addr >= start && fault_addr < end)
		return end - fault_addr;

	start = (unsigned long) from;
	end = start + size;
	if (fault_addr >= start && fault_addr < end)
		return end - fault_addr;

	return size;
}
EXPORT_SYMBOL(copy_in_user_fixup);
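
For context, here is a minimal sketch of how a caller ends up relying on the convention these fixups implement. In the sparc64 uaccess code of this era, the copy_from_user() wrapper falls back to copy_from_user_fixup() when the low-level copy routine reports a fault; the fixup zeroes the uncopied tail of the destination buffer and returns the number of bytes still outstanding, which generic code then treats as an -EFAULT condition. The function example_fetch_from_user() below is hypothetical and not part of the kernel tree.

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical caller: a nonzero return from copy_from_user() means
 * some bytes could not be copied (the fixup has already zeroed the
 * corresponding tail of kbuf), so report -EFAULT.
 */
static int example_fetch_from_user(void *kbuf, const void __user *ubuf,
				   unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}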