// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
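/*
 * Each recursive_loop() frame below holds a REC_STACK_SIZE buffer, so
 * (THREAD_SIZE / REC_STACK_SIZE) * 2 nested calls are sized to consume
 * roughly twice the available stack.
 */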
static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);
/*
 * Make sure the compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}
/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}
void lkdtm_PANIC(void)
{
	panic("dumptest");
}
static int warn_counter;

void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}
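/*
 * Recurse until the stack is exhausted. With CONFIG_VMAP_STACK, the
 * frames are small enough that the overflow should land in the stack's
 * guard page and oops cleanly rather than corrupting a neighbor.
 */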
void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}
/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing char array ...\n");
}
/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing union ...\n");
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	/* Read and write through a pointer that straddles the alignment. */
	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}
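/* Spin with preemption disabled; the soft-lockup watchdog should fire. */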
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
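/* Spin with IRQs disabled; the NMI-based hard-lockup detector should fire. */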
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
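/*
 * Sleep uninterruptibly and never wake up: khungtaskd
 * (CONFIG_DETECT_HUNG_TASK) should report this task once it has been
 * blocked longer than the hung task timeout.
 */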
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
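/*
 * With huge == INT_MAX - 2, the first small addition stays in range
 * while the following one wraps past INT_MAX, which signed-overflow
 * instrumentation (e.g. UBSAN) should report.
 */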
void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}
void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};
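/*
 * CONFIG_UBSAN_BOUNDS should instrument accesses to the fixed-size
 * "checked" array, but must leave the old-style 1-byte flex array in
 * "not_checked" uninstrumented.
 */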
void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
}
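/*
 * The list corruption tests below depend on CONFIG_DEBUG_LIST, whose
 * __list_add_valid()/__list_del_entry_valid() checks should catch the
 * corrupted pointers before they are written through.
 */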
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}
/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL);
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack!\n");
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack!\n");
}
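/*
 * x86 pins security-sensitive CR4 bits such as SMEP: a normal
 * native_write_cr4() that drops a pinned bit should be detected and
 * the bit restored, which is what this test exercises.
 */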
void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}
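/*
 * arm64 Pointer Authentication signs the return address on function
 * entry and authenticates it before returning, so changing the task's
 * PAC keys mid-function should make the return authentication fail.
 */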
#ifdef CONFIG_ARM64_PTR_AUTH
static noinline void change_pac_parameters(void)
{
	/* Reset the keys of current task */
	ptrauth_thread_init_kernel(current);
	ptrauth_thread_switch_kernel(current);
}

#define CORRUPT_PAC_ITERATE 10
noinline void lkdtm_CORRUPT_PAC(void)
{
	int i;

	if (!system_supports_address_auth()) {
		pr_err("FAIL: arm64 pointer authentication feature not present\n");
		return;
	}

	pr_info("Change the PAC parameters to force function return failure\n");
	/*
	 * The PAC is a hash computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC has relatively few
	 * bits, collisions are possible, so iterate a few times to reduce
	 * the probability that a collision hides the corruption.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
}
#else /* !CONFIG_ARM64_PTR_AUTH */
noinline void lkdtm_CORRUPT_PAC(void)
{
	pr_err("FAIL: arm64 pointer authentication config disabled\n");
}
#endif