/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <kern_util.h>
17 pte_t
*virt_to_pte(struct mm_struct
*mm
, unsigned long addr
)
26 pgd
= pgd_offset(mm
, addr
);
27 if (!pgd_present(*pgd
))
30 pud
= pud_offset(pgd
, addr
);
31 if (!pud_present(*pud
))
34 pmd
= pmd_offset(pud
, addr
);
35 if (!pmd_present(*pmd
))
38 return pte_offset_kernel(pmd
, addr
);
41 static pte_t
*maybe_map(unsigned long virt
, int is_write
)
43 pte_t
*pte
= virt_to_pte(current
->mm
, virt
);
46 if ((pte
== NULL
) || !pte_present(*pte
) ||
47 (is_write
&& !pte_write(*pte
))) {
48 err
= handle_page_fault(virt
, 0, is_write
, 1, &dummy_code
);
51 pte
= virt_to_pte(current
->mm
, virt
);
53 if (!pte_present(*pte
))
59 static int do_op_one_page(unsigned long addr
, int len
, int is_write
,
60 int (*op
)(unsigned long addr
, int len
, void *arg
), void *arg
)
67 pte
= maybe_map(addr
, is_write
);
71 page
= pte_page(*pte
);
72 addr
= (unsigned long) kmap_atomic(page
) +
75 current
->thread
.fault_catcher
= &buf
;
77 faulted
= UML_SETJMP(&buf
);
79 n
= (*op
)(addr
, len
, arg
);
83 current
->thread
.fault_catcher
= NULL
;
85 kunmap_atomic((void *)addr
);
90 static long buffer_op(unsigned long addr
, int len
, int is_write
,
91 int (*op
)(unsigned long, int, void *), void *arg
)
95 size
= min(PAGE_ALIGN(addr
) - addr
, (unsigned long) len
);
98 n
= do_op_one_page(addr
, size
, is_write
, op
, arg
);
100 remain
= (n
< 0 ? remain
: 0);
109 while (addr
< ((addr
+ remain
) & PAGE_MASK
)) {
110 n
= do_op_one_page(addr
, PAGE_SIZE
, is_write
, op
, arg
);
112 remain
= (n
< 0 ? remain
: 0);
122 n
= do_op_one_page(addr
, remain
, is_write
, op
, arg
);
124 remain
= (n
< 0 ? remain
: 0);
/*
 * buffer_op() callback: copy @len bytes from the (kernel-mapped) user
 * chunk at @from to the destination cursor stored in *@arg, then
 * advance the cursor.  Always returns 0 (continue).
 *
 * Reconstructed: the garbled original dropped the cursor advance and
 * the return statement.
 */
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}
142 unsigned long raw_copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
144 if (uaccess_kernel()) {
145 memcpy(to
, (__force
void*)from
, n
);
149 return buffer_op((unsigned long) from
, n
, 0, copy_chunk_from_user
, &to
);
151 EXPORT_SYMBOL(raw_copy_from_user
);
/*
 * buffer_op() callback: copy @len bytes from the source cursor stored
 * in *@arg to the (kernel-mapped) user chunk at @to, then advance the
 * cursor.  Always returns 0 (continue).
 *
 * Reconstructed: the garbled original dropped the cursor advance and
 * the return statement.
 */
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
	unsigned long *from_ptr = arg, from = *from_ptr;

	memcpy((void *) to, (void *) from, len);
	*from_ptr += len;
	return 0;
}
162 unsigned long raw_copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
164 if (uaccess_kernel()) {
165 memcpy((__force
void *) to
, from
, n
);
169 return buffer_op((unsigned long) to
, n
, 1, copy_chunk_to_user
, &from
);
171 EXPORT_SYMBOL(raw_copy_to_user
);
/*
 * buffer_op() callback: strncpy up to @len bytes of the user chunk at
 * @from into the destination cursor stored in *@arg, advancing the
 * cursor by the string length actually found.
 *
 * Returns 1 when a NUL terminator was seen inside this chunk (stop
 * the walk), 0 to continue with the next chunk.
 *
 * Reconstructed: the garbled original dropped the n declaration, the
 * cursor advance, and the early-stop return logic.
 */
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
	char **to_ptr = arg, *to = *to_ptr;
	int n;

	strncpy(to, (void *) from, len);
	n = strnlen(to, len);
	*to_ptr += n;

	if (n < len)
		return 1;
	return 0;
}
187 long __strncpy_from_user(char *dst
, const char __user
*src
, long count
)
192 if (uaccess_kernel()) {
193 strncpy(dst
, (__force
void *) src
, count
);
194 return strnlen(dst
, count
);
197 n
= buffer_op((unsigned long) src
, count
, 0, strncpy_chunk_from_user
,
201 return strnlen(dst
, count
);
203 EXPORT_SYMBOL(__strncpy_from_user
);
/*
 * buffer_op() callback: zero @len bytes of the (kernel-mapped) user
 * chunk at @addr.  @unused keeps the op() signature uniform.
 * Always returns 0 (continue).
 *
 * Reconstructed: the garbled original dropped the return statement.
 */
static int clear_chunk(unsigned long addr, int len, void *unused)
{
	memset((void *) addr, 0, len);
	return 0;
}
211 unsigned long __clear_user(void __user
*mem
, unsigned long len
)
213 if (uaccess_kernel()) {
214 memset((__force
void*)mem
, 0, len
);
218 return buffer_op((unsigned long) mem
, len
, 1, clear_chunk
, NULL
);
220 EXPORT_SYMBOL(__clear_user
);
/*
 * buffer_op() callback: measure the string in the user chunk at @str
 * (at most @len bytes) and accumulate the length into *@arg.
 *
 * Returns 1 when the NUL terminator was found inside this chunk (stop
 * the walk), 0 to continue with the next chunk.
 *
 * Reconstructed: the garbled original dropped the accumulation and
 * the early-stop return logic.
 */
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
	int *len_ptr = arg, n;

	n = strnlen((void *) str, len);
	*len_ptr += n;

	if (n < len)
		return 1;
	return 0;
}
234 long __strnlen_user(const void __user
*str
, long len
)
238 if (uaccess_kernel())
239 return strnlen((__force
char*)str
, len
) + 1;
241 n
= buffer_op((unsigned long) str
, len
, 0, strnlen_chunk
, &count
);
246 EXPORT_SYMBOL(__strnlen_user
);