// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <kern_util.h>
#include <os.h>
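
/*
 * Walk the page tables of @mm and return a pointer to the PTE mapping
 * @addr, or NULL if any level of the walk is not present.
 */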
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (mm == NULL)
                return NULL;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;

        return pte_offset_kernel(pmd, addr);
}
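
/*
 * Look up the PTE for @virt in the current process, faulting the page in
 * (writably if @is_write is set) when it is not yet mapped with the
 * required permissions.  Returns NULL if the page cannot be made present.
 */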
static pte_t *maybe_map(unsigned long virt, int is_write)
{
        pte_t *pte = virt_to_pte(current->mm, virt);
        int err, dummy_code;

        if ((pte == NULL) || !pte_present(*pte) ||
            (is_write && !pte_write(*pte))) {
                err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
                if (err)
                        return NULL;
                pte = virt_to_pte(current->mm, virt);
        }
        if (!pte_present(*pte))
                pte = NULL;

        return pte;
}
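
/*
 * Run @op on a region known to lie within a single page.  The page is
 * located via its PTE and addressed through the kernel mapping
 * (page_address() on 64-bit UML, kmap_atomic() otherwise).
 */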
static int do_op_one_page(unsigned long addr, int len, int is_write,
                 int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
        struct page *page;
        pte_t *pte;
        int n;

        pte = maybe_map(addr, is_write);
        if (pte == NULL)
                return -1;

        page = pte_page(*pte);
#ifdef CONFIG_64BIT
        pagefault_disable();
        addr = (unsigned long) page_address(page) +
                (addr & ~PAGE_MASK);
#else
        addr = (unsigned long) kmap_atomic(page) +
                (addr & ~PAGE_MASK);
#endif
        n = (*op)(addr, len, arg);

#ifdef CONFIG_64BIT
        pagefault_enable();
#else
        kunmap_atomic((void *)addr);
#endif

        return n;
}
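
/*
 * Split an arbitrary buffer into page-sized chunks and apply @op to each:
 * the partial leading page, whole pages in the middle, and the partial
 * trailing page.  Returns 0 on success (or when @op stops the walk early),
 * otherwise the number of bytes left unprocessed.
 */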
static long buffer_op(unsigned long addr, int len, int is_write,
                      int (*op)(unsigned long, int, void *), void *arg)
{
        long size, remain, n;

        /* Leading partial page, up to the next page boundary. */
        size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
        remain = len;

        n = do_op_one_page(addr, size, is_write, op, arg);
        if (n != 0) {
                remain = (n < 0 ? remain : 0);
                goto out;
        }

        addr += size;
        remain -= size;
        if (remain == 0)
                goto out;

        /* Whole pages in the middle of the buffer. */
        while (addr < ((addr + remain) & PAGE_MASK)) {
                n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
                if (n != 0) {
                        remain = (n < 0 ? remain : 0);
                        goto out;
                }

                addr += PAGE_SIZE;
                remain -= PAGE_SIZE;
        }
        if (remain == 0)
                goto out;

        /* Trailing partial page. */
        n = do_op_one_page(addr, remain, is_write, op, arg);
        if (n != 0) {
                remain = (n < 0 ? remain : 0);
                goto out;
        }

        return 0;
 out:
        return remain;
}
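
/* Chunk-wise copy from user space, used by raw_copy_from_user() below. */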
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
        unsigned long *to_ptr = arg, to = *to_ptr;

        memcpy((void *) to, (void *) from, len);
        *to_ptr += len;
        return 0;
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (uaccess_kernel()) {
                memcpy(to, (__force void *) from, n);
                return 0;
        }

        return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
EXPORT_SYMBOL(raw_copy_from_user);
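
/* Chunk-wise copy to user space, used by raw_copy_to_user() below. */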
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
        unsigned long *from_ptr = arg, from = *from_ptr;

        memcpy((void *) to, (void *) from, len);
        *from_ptr += len;
        return 0;
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (uaccess_kernel()) {
                memcpy((__force void *) to, from, n);
                return 0;
        }

        return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
EXPORT_SYMBOL(raw_copy_to_user);
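
/*
 * Chunk helper for __strncpy_from_user(): copies up to @len bytes and
 * reports (by returning 1) that a terminating NUL was found, so that
 * buffer_op() can stop the page walk early.
 */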
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
        char **to_ptr = arg, *to = *to_ptr;
        int n;

        strncpy(to, (void *) from, len);
        n = strnlen(to, len);
        *to_ptr += n;

        /* A NUL within this chunk terminates the copy early. */
        if (n < len)
                return 1;
        return 0;
}
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
        long n;
        char *ptr = dst;

        if (uaccess_kernel()) {
                strncpy(dst, (__force void *) src, count);
                return strnlen(dst, count);
        }

        n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
                      &ptr);
        if (n != 0)
                return -EFAULT;
        return strnlen(dst, count);
}
EXPORT_SYMBOL(__strncpy_from_user);
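
/* Zero a user buffer page by page via buffer_op(). */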
static int clear_chunk(unsigned long addr, int len, void *unused)
{
        memset((void *) addr, 0, len);
        return 0;
}

unsigned long __clear_user(void __user *mem, unsigned long len)
{
        if (uaccess_kernel()) {
                memset((__force void *) mem, 0, len);
                return 0;
        }

        return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
EXPORT_SYMBOL(__clear_user);
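
/*
 * Chunk helper for __strnlen_user(): accumulates the string length and
 * returns 1 once the terminating NUL is seen, stopping the page walk.
 */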
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
        int *len_ptr = arg, n;

        n = strnlen((void *) str, len);
        *len_ptr += n;

        if (n < len)
                return 1;
        return 0;
}
long __strnlen_user(const void __user *str, long len)
{
        int count = 0, n;

        if (uaccess_kernel())
                return strnlen((__force char *) str, len) + 1;

        n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
        if (n == 0)
                return count + 1;
        return 0;
}
EXPORT_SYMBOL(__strnlen_user);