// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *      NOT_STACK: not at all on the stack
 *      GOOD_FRAME: fully within a valid stack frame
 *      GOOD_STACK: fully on the stack (when can't do frame-checking)
 *      BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
        const void * const stack = task_stack_page(current);
        const void * const stackend = stack + THREAD_SIZE;
        int ret;

        /* Object is not on the stack at all. */
        if (obj + len <= stack || stackend <= obj)
                return NOT_STACK;

        /*
         * Reject: object partially overlaps the stack (passing the
         * check above means at least one end is within the stack,
         * so if this check fails, the other end is outside the stack).
         */
        if (obj < stack || stackend < obj + len)
                return BAD_STACK;

        /* Check if object is safely within a valid frame. */
        ret = arch_within_stack_frames(stack, stackend, obj, len);
        if (ret)
                return ret;

        return GOOD_STACK;
}
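/*
 * Illustrative sketch, not part of the original file: how a plain stack
 * buffer relates to the classification above. The helper name is
 * hypothetical and exists only for this example.
 */
static void __maybe_unused check_stack_object_example(void)
{
        char buf[16];

        /* buf lies entirely within the current task's stack... */
        WARN_ON(check_stack_object(buf, sizeof(buf)) == NOT_STACK);
        /* ...but a length running past the stack end must be rejected. */
        WARN_ON(check_stack_object(buf, THREAD_SIZE) != BAD_STACK);
}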
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
                   unsigned long offset, unsigned long len)
{
        WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
                  to_user ? "exposure" : "overwrite",
                  to_user ? "from" : "to",
                  name ? : "unknown?!",
                  detail ? " '" : "", detail ? : "", detail ? "'" : "",
                  offset, len);
}
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len)
{
        pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
                 to_user ? "exposure" : "overwrite",
                 to_user ? "from" : "to",
                 name ? : "unknown?!",
                 detail ? " '" : "", detail ? : "", detail ? "'" : "",
                 offset, len);

        /*
         * For greater effect, it would be nice to do do_group_exit(),
         * but BUG() actually hooks all the lock-breaking and per-arch
         * Oops code, so that is used here instead.
         */
        BUG();
}
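/*
 * Illustrative sketch, not part of the original file: creating a cache
 * with a usercopy whitelist, as described in the comment above the two
 * reporting functions. The cache name, object size, and whitelist range
 * are hypothetical.
 */
static __maybe_unused struct kmem_cache *example_cache_create(void)
{
        /* Only bytes [16, 16 + 32) of each 128-byte object may be copied. */
        return kmem_cache_create_usercopy("usercopy_example", 128, 0,
                                          SLAB_HWCACHE_ALIGN, 16, 32, NULL);
}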
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
                     unsigned long low, unsigned long high)
{
        const unsigned long check_low = ptr;
        unsigned long check_high = check_low + n;

        /* Does not overlap if entirely above or entirely below. */
        if (check_low >= high || check_high <= low)
                return false;

        return true;
}
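/*
 * Illustrative sketch, not part of the original file: both intervals are
 * half-open, so ranges that merely touch do not count as overlapping.
 */
static void __maybe_unused overlaps_example(void)
{
        /* [0x1000,0x1100) and [0x1100,0x2000) touch but do not overlap. */
        WARN_ON(overlaps(0x1000, 0x100, 0x1100, 0x2000));
        /* Extending the first range by one byte creates a real overlap. */
        WARN_ON(!overlaps(0x1000, 0x101, 0x1100, 0x2000));
}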
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
                                            unsigned long n, bool to_user)
{
        unsigned long textlow = (unsigned long)_stext;
        unsigned long texthigh = (unsigned long)_etext;
        unsigned long textlow_linear, texthigh_linear;

        if (overlaps(ptr, n, textlow, texthigh))
                usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

        /*
         * Some architectures have virtual memory mappings with a secondary
         * mapping of the kernel text, i.e. there is more than one virtual
         * kernel address that points to the kernel image. It is usually
         * when there is a separate linear physical memory mapping, in which
         * case __pa() is not just the reverse of __va(). This can be
         * detected and checked:
         */
        textlow_linear = (unsigned long)lm_alias(textlow);
        /* No different mapping: we're done. */
        if (textlow_linear == textlow)
                return;

        /* Check the secondary mapping... */
        texthigh_linear = (unsigned long)lm_alias(texthigh);
        if (overlaps(ptr, n, textlow_linear, texthigh_linear))
                usercopy_abort("linear kernel text", NULL, to_user,
                               ptr - textlow_linear, n);
}
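/*
 * Illustrative sketch, not part of the original file: on arm64, for
 * example, _stext is a kernel-image address while lm_alias(_stext) is the
 * linear-map alias of the same physical bytes, so both ranges must be
 * rejected above. The helper name is hypothetical.
 */
static void __maybe_unused kernel_text_alias_example(void)
{
        unsigned long text = (unsigned long)_stext;
        unsigned long alias = (unsigned long)lm_alias(_stext);

        /* If the two differ, the secondary-mapping check above is active. */
        if (alias != text)
                pr_info("kernel text also aliased at %lx\n", alias);
}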
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
                                       bool to_user)
{
        /* Reject if object wraps past end of memory. */
        if (ptr + n < ptr)
                usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

        /* Reject if NULL or ZERO-allocation. */
        if (ZERO_OR_NULL_PTR(ptr))
                usercopy_abort("null address", NULL, to_user, ptr, n);
}
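/*
 * Illustrative sketch, not part of the original file: the same wrap test
 * used above, shown in isolation. A pointer near the top of the address
 * space with a large length wraps past zero and must be rejected.
 */
static bool __maybe_unused wraps_address_space(unsigned long ptr, unsigned long n)
{
        /* e.g. ptr = ULONG_MAX - 7, n = 16: ptr + n wraps, so this is true. */
        return ptr + n < ptr;
}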
/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
                                   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
        const void *end = ptr + n - 1;
        struct page *endpage;
        bool is_reserved, is_cma;

        /*
         * Sometimes the kernel data regions are not marked Reserved (see
         * check below). And sometimes [_sdata,_edata) does not cover
         * rodata and/or bss, so check each range explicitly.
         */

        /* Allow reads of kernel rodata region (if not marked as Reserved). */
        if (ptr >= (const void *)__start_rodata &&
            end <= (const void *)__end_rodata) {
                if (!to_user)
                        usercopy_abort("rodata", NULL, to_user, 0, n);
                return;
        }

        /* Allow kernel data region (if not marked as Reserved). */
        if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
                return;

        /* Allow kernel bss region (if not marked as Reserved). */
        if (ptr >= (const void *)__bss_start &&
            end <= (const void *)__bss_stop)
                return;

        /* Is the object wholly within one base page? */
        if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
                   ((unsigned long)end & (unsigned long)PAGE_MASK)))
                return;

        /* Allow if fully inside the same compound (__GFP_COMP) page. */
        endpage = virt_to_head_page(end);
        if (likely(endpage == page))
                return;

        /*
         * Reject if range is entirely either Reserved (i.e. special or
         * device memory), or CMA. Otherwise, reject since the object spans
         * several independently allocated pages.
         */
        is_reserved = PageReserved(page);
        is_cma = is_migrate_cma_page(page);
        if (!is_reserved && !is_cma)
                usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

        for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
                page = virt_to_head_page(ptr);
                if (is_reserved && !PageReserved(page))
                        usercopy_abort("spans Reserved and non-Reserved pages",
                                       NULL, to_user, 0, n);
                if (is_cma && !is_migrate_cma_page(page))
                        usercopy_abort("spans CMA and non-CMA pages", NULL,
                                       to_user, 0, n);
        }
#endif
}
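/*
 * Illustrative sketch, not part of the original file: a compound
 * (__GFP_COMP) allocation spans several pages yet shares one head page,
 * so it passes the page-span check above. The helper name is hypothetical.
 */
static void __maybe_unused page_span_example(void)
{
        void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, 1);

        if (!buf)
                return;
        /* Both pages resolve to the same head page: no abort. */
        check_page_span(buf, 2 * PAGE_SIZE, virt_to_head_page(buf), true);
        free_pages((unsigned long)buf, 1);
}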
static inline void check_heap_object(const void *ptr, unsigned long n,
                                     bool to_user)
{
        struct page *page;

        if (!virt_addr_valid(ptr))
                return;

        page = virt_to_head_page(ptr);

        if (PageSlab(page)) {
                /* Check slab allocator for flags and size. */
                __check_heap_object(ptr, n, page, to_user);
        } else {
                /* Verify object does not incorrectly span multiple pages. */
                check_page_span(ptr, n, page, to_user);
        }
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
        if (static_branch_unlikely(&bypass_usercopy_checks))
                return;

        /* Skip all tests if size is zero. */
        if (!n)
                return;

        /* Check for invalid addresses. */
        check_bogus_address((const unsigned long)ptr, n, to_user);

        /* Check for bad stack object. */
        switch (check_stack_object(ptr, n)) {
        case NOT_STACK:
                /* Object is not touching the current process stack. */
                break;
        case GOOD_FRAME:
        case GOOD_STACK:
                /*
                 * Object is either in the correct frame (when it
                 * is possible to check) or just generally on the
                 * process stack (when frame checking not available).
                 */
                return;
        default:
                usercopy_abort("process stack", NULL, to_user, 0, n);
        }

        /* Check for bad heap object. */
        check_heap_object(ptr, n, to_user);

        /* Check for object in kernel to avoid text exposure. */
        check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
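/*
 * Illustrative sketch, not part of the original file: how callers reach
 * this check. A copy_to_user() with a non-compile-time-constant size
 * funnels through check_object_size() and into __check_object_size()
 * above. The function name is hypothetical, and <linux/uaccess.h> is
 * assumed to be available for copy_to_user().
 */
static int __maybe_unused example_expose(void __user *ubuf, const void *kbuf,
                                         unsigned long len)
{
        /* len is not constant here, so the hardened usercopy check runs. */
        return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}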
static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
        return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
        if (enable_checks == false)
                static_branch_enable(&bypass_usercopy_checks);
        return 1;
}

late_initcall(set_hardened_usercopy);
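/*
 * Usage note (illustrative, not part of the original file): booting with
 * "hardened_usercopy=off" on the kernel command line sets enable_checks
 * to false, and the late initcall above then flips the
 * bypass_usercopy_checks static branch so every __check_object_size()
 * call returns immediately.
 */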