// SPDX-License-Identifier: GPL-2.0
/*
 * This code fills the used part of the kernel stack with a poison value
 * before returning to userspace. It's part of the STACKLEAK feature
 * ported from grsecurity/PaX.
 *
 * Author: Alexander Popov <alex.popov@linux.com>
 *
 * STACKLEAK reduces the information which kernel stack leak bugs can
 * reveal and blocks some uninitialized stack variable attacks.
 */

#include <linux/stackleak.h>
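
/*
 * Note: STACKLEAK_POISON and STACKLEAK_SEARCH_DEPTH used below are
 * defined in <linux/stackleak.h>; the 'lowest_stack' (and, with
 * CONFIG_STACKLEAK_METRICS, 'prev_lowest_stack') fields referenced via
 * 'current' are members of 'struct task_struct' provided when the
 * STACKLEAK plugin support is enabled.
 */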

#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
#include <linux/sysctl.h>

static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);

int stack_erasing_sysctl(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;
	int state = !static_branch_unlikely(&stack_erasing_bypass);
	int prev_state = state;

	/* Let proc_dointvec_minmax() act on a local copy of the state */
	table->data = &state;
	table->maxlen = sizeof(int);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	state = !!state;
	if (ret || !write || state == prev_state)
		return ret;

	if (state)
		static_branch_disable(&stack_erasing_bypass);
	else
		static_branch_enable(&stack_erasing_bypass);

	pr_warn("stackleak: kernel stack erasing is %s\n",
		state ? "enabled" : "disabled");
	return ret;
}

#define skip_erasing()	static_branch_unlikely(&stack_erasing_bypass)
#else
#define skip_erasing()	false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
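
/*
 * For reference, a minimal sketch of how the handler above is typically
 * wired up (the array name here is illustrative; the real registration
 * lives in the kernel's sysctl tables and its exact fields may differ):
 *
 *	static struct ctl_table stackleak_sysctl_sketch[] = {
 *		{
 *			.procname	= "stack_erasing",
 *			.data		= NULL,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0600,
 *			.proc_handler	= stack_erasing_sysctl,
 *		},
 *		{ }
 *	};
 *
 * With such an entry under "kernel/", stack erasing can then be toggled
 * at runtime via /proc/sys/kernel/stack_erasing.
 */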

asmlinkage void stackleak_erase(void)
{
	/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
	unsigned long kstack_ptr = current->lowest_stack;
	unsigned long boundary = (unsigned long)end_of_stack(current);
	unsigned int poison_count = 0;
	const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);

	if (skip_erasing())
		return;

	/* Check that 'lowest_stack' value is sane */
	if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
		kstack_ptr = boundary;
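
	/*
	 * Note on the search strategy below: a single poison-valued word is
	 * not enough evidence, since a live stack variable may hold the
	 * poison value by chance. The loop therefore keeps scanning down
	 * until it sees more than 'depth' consecutive poison words (or hits
	 * the stack end).
	 */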

	/* Search for the poison value in the kernel stack */
	while (kstack_ptr > boundary && poison_count <= depth) {
		if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
			poison_count++;
		else
			poison_count = 0;

		kstack_ptr -= sizeof(unsigned long);
	}

	/*
	 * One 'long int' at the bottom of the thread stack is reserved and
	 * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
	 */
	if (kstack_ptr == boundary)
		kstack_ptr += sizeof(unsigned long);
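
	/*
	 * With CONFIG_STACKLEAK_METRICS=y, the 'prev_lowest_stack' value
	 * recorded below is meant to be exported to userspace (at the time
	 * of writing, via /proc/<pid>/stack_depth) so that the actual stack
	 * usage of a task can be inspected.
	 */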
#ifdef CONFIG_STACKLEAK_METRICS
	current->prev_lowest_stack = kstack_ptr;
#endif

	/*
	 * Now write the poison value to the kernel stack. Start from
	 * 'kstack_ptr' and move up till the new 'boundary'. We assume that
	 * the stack pointer doesn't change when we write poison.
	 */
	if (on_thread_stack())
		boundary = current_stack_pointer;
	else
		boundary = current_top_of_stack();

	while (kstack_ptr < boundary) {
		*(unsigned long *)kstack_ptr = STACKLEAK_POISON;
		kstack_ptr += sizeof(unsigned long);
	}

	/* Reset the 'lowest_stack' value for the next syscall */
	current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
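
/*
 * Note: this file only provides the erasing routine; it is the
 * architecture's exit-to-userspace path that is expected to call
 * stackleak_erase() (on x86, for instance, via the STACKLEAK_ERASE
 * macro in the entry code) once the syscall work is done.
 */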

void __used stackleak_track_stack(void)
{
	/*
	 * N.B. stackleak_erase() fills the kernel stack with the poison value,
	 * which has the register width. That code assumes that the value
	 * of 'lowest_stack' is aligned on the register width boundary.
	 *
	 * That is true for x86 and x86_64 because of the kernel stack
	 * alignment on these platforms (for details, see 'cc_stack_align' in
	 * arch/x86/Makefile). Take care of that when you port STACKLEAK to
	 * new platforms.
	 */
	unsigned long sp = (unsigned long)&sp;

	/*
	 * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
	 * STACKLEAK_SEARCH_DEPTH makes the poison search in
	 * stackleak_erase() unreliable. Let's prevent that.
	 */
	BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);

	if (sp < current->lowest_stack &&
	    sp >= (unsigned long)task_stack_page(current) +
					sizeof(unsigned long)) {
		current->lowest_stack = sp;
	}
}
EXPORT_SYMBOL(stackleak_track_stack);
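
/*
 * Note: stackleak_track_stack() is normally not called directly; the
 * STACKLEAK gcc plugin (scripts/gcc-plugins/stackleak_plugin.c) inserts
 * calls to it into functions whose stack frame is at least
 * CONFIG_STACKLEAK_TRACK_MIN_SIZE bytes, which is how 'lowest_stack'
 * tracks the deepest stack usage between erasings.
 */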