// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

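/*
 * Worked example (hypothetical values, illustration only): for a local
 * "char buf[16]", a request to copy 64 bytes from buf either overlaps
 * a neighboring frame (caught as BAD_STACK by arch_within_stack_frames()
 * on architectures that can walk frames), or, if buf + 64 runs past
 * stackend, fails the partial-overlap check above.
 */
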
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */

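/*
 * For illustration, a minimal sketch of creating a whitelisted cache
 * (the structure, field, and cache names here are hypothetical, not
 * part of this file):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, data),
 *				sizeof_field(struct foo, data), NULL);
 *
 * Copies are then only permitted within the "data" field of such
 * objects; a copy touching "lock" would trip the reports below.
 */
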
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

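/*
 * Example of the half-open interval semantics (hypothetical addresses):
 * overlaps(0x1000, 0x10, 0x1010, 0x2000) is false, since [0x1000,0x1010)
 * and [0x1010,0x2000) merely touch at the boundary, while
 * overlaps(0x1000, 0x11, 0x1010, 0x2000) is true.
 */
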
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the inverse of __va(). Such a
	 * secondary mapping can be detected and checked as well.
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

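/*
 * Worked example of the wrap check (hypothetical 64-bit values):
 * ptr == 0xfffffffffffffff8 with n == 16 gives ptr + (n - 1) == 0x7
 * after wrapping, which is less than ptr, so the copy is rejected
 * before a nonsensical range can be used.
 */
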
/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Accept if the range is entirely either Reserved (i.e. special or
	 * device memory) or CMA. Otherwise, reject, since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

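/*
 * Note on the compound-page exemption above: an allocation made with
 * __GFP_COMP (e.g. a kmalloc() large enough to fall through to the
 * page allocator) shares a single head page, so a copy that stays
 * inside such an allocation passes the endpage comparison even though
 * it crosses base-page boundaries.
 */
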
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by the stack (or stack frame, when available)
 * - fully within a SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

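/*
 * Sketch of how these checks are reached (simplified; names here are
 * illustrative): copy_to_user()/copy_from_user() funnel through
 * check_object_size(), which calls __check_object_size() for
 * non-compile-time-constant sizes. So, in a hypothetical driver:
 *
 *	char buf[16];
 *	...
 *	if (copy_to_user(ubuf, buf, len))	// an oversized "len" would
 *		return -EFAULT;			// trip the checks above
 */
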
static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

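/*
 * For example, booting with "hardened_usercopy=off" on the kernel
 * command line sets enable_checks to false, and the late initcall
 * below then enables the static branch that bypasses all of
 * __check_object_size().
 */
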
static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);