// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
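
/*
 * Illustrative note: a copy with a compile-time-constant size, e.g.
 * copy_to_user(ptr, buf, 32), can be constant-folded and may skip the
 * runtime object-size checks entirely. Writing it as
 * copy_to_user(ptr, buf, unconst + 32) forces the non-const path:
 * "unconst" is volatile (so never optimized away) and always reads as 0
 * at runtime, leaving the actual copy size unchanged.
 */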
static const unsigned char test_text[] = "This is a test.\n";
/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}
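
/*
 * Why this works: because trick_compiler() is noinline and adds the
 * volatile "unconst" (always 0), the compiler cannot prove the returned
 * pointer refers to a local variable, so the callers below can hand out
 * addresses within their own soon-to-be-dead stack frames without
 * tripping the -Wreturn-local-addr diagnostic mentioned above.
 */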
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}
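
/*
 * The pointer returned above lands in the middle of this callee's
 * now-dead stack frame. do_usercopy_stack() uses it as the "distant
 * stack" source/destination: a valid stack address, but outside the
 * currently live frame, which is exactly what hardened usercopy's
 * stack frame checking should reject.
 */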
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack,
		good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack,
		bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}
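
/*
 * Usage summary for the stack tests driven above: STACK_FRAME_TO/FROM
 * run with bad_frame=true (a pointer into a dead callee frame), while
 * STACK_BEYOND runs with bad_frame=false (a pointer so close to the
 * end of the thread stack that a sizeof(good_stack) copy runs past
 * THREAD_SIZE).
 */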
/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_slab_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}
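
/*
 * Why the "bad" copies above must trip: test_kern_addr starts 16 bytes
 * into a 1024-byte kmalloc object, so copying the full "size" (1024)
 * would run 16 bytes past the end of the object. Hardened usercopy
 * validates the copy window against the whole slab object and should
 * BUG before the overflow happens; the size / 2 copy fits and must
 * succeed.
 */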
/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then the do_usercopy_slab_size() tests will pass too.
 */
static void do_usercopy_slab_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}
/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
	do_usercopy_slab_size(true);
}

static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
	do_usercopy_slab_size(false);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
	do_usercopy_slab_whitelist(true);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
	do_usercopy_slab_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}
static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}
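
/*
 * Rationale for the pair of copies above: exposing rodata (test_text)
 * to userspace is harmless and must succeed, but copying from the
 * kernel text segment (here, the address of vm_mmap() itself) would
 * leak executable code, and rejecting kernel text is one of the
 * explicit checks in hardened usercopy.
 */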
/*
 * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
 * a more complete test that would include copy_from_user() would risk
 * memory corruption. Just test copy_to_user() here, as that exercises
 * almost exactly the same code paths.
 */
static void do_usercopy_page_span(const char *name, void *kaddr)
{
	unsigned long uaddr;

	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (uaddr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	/* Initialize contents. */
	memset(kaddr, 0xAA, PAGE_SIZE);

	/* Bump the kaddr forward to detect a page-spanning overflow. */
	kaddr += PAGE_SIZE / 2;

	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr,
			 unconst + (PAGE_SIZE / 2))) {
		pr_err("copy_to_user() failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
		goto free_user;
	}

	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(uaddr, PAGE_SIZE);
}
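
/*
 * The arithmetic that makes the bad copy span pages: kaddr is bumped to
 * the middle of a PAGE_SIZE allocation, so a copy of PAGE_SIZE / 2 ends
 * exactly at the page boundary (good), while a copy of PAGE_SIZE runs
 * PAGE_SIZE / 2 bytes into whatever follows the allocation (bad).
 */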
static void lkdtm_USERCOPY_VMALLOC(void)
{
	void *addr;

	addr = vmalloc(PAGE_SIZE);
	if (!addr) {
		pr_err("vmalloc() failed!?\n");
		return;
	}
	do_usercopy_page_span("vmalloc", addr);
	vfree(addr);
}
static void lkdtm_USERCOPY_FOLIO(void)
{
	struct folio *folio;
	void *addr;

	/*
	 * FIXME: Folio checking currently misses 0-order allocations, so
	 * allocate and bump forward to the last page.
	 */
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio) {
		pr_err("folio_alloc() failed!?\n");
		return;
	}
	addr = folio_address(folio);
	if (addr) {
		do_usercopy_page_span("folio", addr + PAGE_SIZE);
	} else {
		pr_err("folio_address() failed?!\n");
	}
	folio_put(folio);
}
void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}
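
/*
 * Argument reminder for kmem_cache_create_usercopy() as used above:
 * (name, size, align, flags, useroffset, usersize, ctor). The
 * useroffset/usersize pair (cache_size / 4, cache_size / 16) defines
 * the whitelisted window that do_usercopy_slab_whitelist() probes.
 */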
void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}
static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_VMALLOC),
	CRASHTYPE(USERCOPY_FOLIO),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};