// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"
/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when can't frame-check exactly)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
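/*
 * Illustrative sketch (hypothetical caller, not part of this file): for a
 * local buffer on the current task's stack,
 *
 *	char buf[16];
 *	...
 *	copy_to_user(ubuf, buf, len);
 *
 * the object is GOOD_FRAME (or GOOD_STACK when the architecture cannot
 * walk stack frames) while buf + len stays inside the frame and stack,
 * and BAD_STACK once buf + len runs past the end of the stack (or past
 * the enclosing frame, on architectures that implement
 * arch_within_stack_frames()).
 */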
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
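/*
 * Illustrative sketch (hypothetical cache and struct names): a cache whose
 * objects expose only their "data" member to copy_{to,from}_user() could be
 * whitelisted at creation time with something like:
 *
 *	kmem_cache_create_usercopy("foo_cache", sizeof(struct foo), 0,
 *				   SLAB_HWCACHE_ALIGN,
 *				   offsetof(struct foo, data),
 *				   sizeof_field(struct foo, data),
 *				   NULL);
 *
 * Copies that stay inside [useroffset, useroffset + usersize) pass the slab
 * check in __check_heap_object(); anything else ends up aborting here.
 */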
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. It is usually
	 * when there is a separate linear physical memory mapping, in that
	 * __pa() is not just the reverse of __va(). This can be detected
	 * and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;

	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
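/*
 * Illustrative sketch (hypothetical caller, not part of this file): this is
 * reached from check_object_size() in the uaccess paths, e.g. a driver doing
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	...
 *	if (copy_to_user(ubuf, buf, len))
 *		return -EFAULT;
 *
 * will abort here if "len" runs past the end of the kmalloc allocation,
 * instead of silently exposing adjacent heap memory.
 */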
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		break;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
static bool enable_checks __initdata = true;
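/*
 * Note (editorial, sketching the behavior below): booting with
 * "hardened_usercopy=off" (or any value kstrtobool() treats as false)
 * disables all of the checks above by enabling the bypass static branch
 * at late_initcall time.
 */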
static int __init parse_hardened_usercopy(char *str)
{
	if (kstrtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 0;
}

late_initcall(set_hardened_usercopy);