drivers/misc/lkdtm_bugs.c

/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
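
/*
 * Usage note: each lkdtm_* test below is normally triggered by name
 * through LKDTM's debugfs interface, e.g.:
 *
 *	echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Most of these tests intentionally crash or wedge the kernel, so they
 * should only be run in a disposable VM.
 */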

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
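
/*
 * For example, with THREAD_SIZE = 16384 and CONFIG_FRAME_WARN = 2048,
 * REC_STACK_SIZE is 1024 and REC_NUM_DEFAULT is (16384 / 1024) * 2 = 32,
 * so recursive_loop() below consumes at least 32 * 1024 = 32768 bytes of
 * stack: roughly twice THREAD_SIZE, comfortably past the end of the stack.
 */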

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

void lkdtm_WARNING(void)
{
	WARN_ON(1);
}

void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
	for (;;)
		;
}

void lkdtm_OVERFLOW(void)
{
	(void) recursive_loop(recur_count);
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing char array ...\n");
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing union ...\n");
}
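
/*
 * In both cases above, the 64-byte memset() in __lkdtm_CORRUPT_STACK()
 * writes well past the small local variable, which should clobber the
 * canary the compiler placed between the locals and the saved return
 * address, so the corruption is reported on function return when the
 * kernel is built with a stack protector.
 */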

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}
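
/*
 * Note that "data + 1" is one byte past a 4-byte-aligned address, so the
 * u32 load and store above are misaligned. Architectures like x86 handle
 * this in hardware; others may fix it up in a trap handler or fault
 * outright, which is what this test is probing for.
 */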

void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
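
/*
 * The two loops above target different watchdogs: SOFTLOCKUP keeps the
 * CPU busy with preemption disabled so the soft lockup detector should
 * report a CPU that has not scheduled for too long, while HARDLOCKUP
 * also disables interrupts so only the NMI-based hard lockup detector
 * (if configured) can still notice the stuck CPU.
 */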

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
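
/*
 * The first call leaves lock_me_up held; since kernel spinlocks are not
 * recursive, a second call from the same CPU should spin forever (or be
 * flagged immediately when lockdep or CONFIG_DEBUG_SPINLOCK is enabled).
 */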

void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
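
/*
 * A task sleeping in TASK_UNINTERRUPTIBLE with nothing to wake it is
 * exactly what the hung task detector (CONFIG_DETECT_HUNG_TASK) looks
 * for: khungtaskd should report this task once it has been blocked
 * longer than hung_task_timeout_secs (120 seconds by default).
 */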

void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
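
/*
 * With CONFIG_DEBUG_LIST enabled, list_add() calls __list_add_valid(),
 * which checks (roughly) that next->prev == prev and prev->next == next
 * before linking in the new node. The corrupted test_head.next above
 * fails those consistency checks, so the write to "target" should be
 * rejected and reported rather than performed.
 */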

void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}

/* Test whether an unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL, current);
}
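
/*
 * set_fs(KERNEL_DS) raises the thread's addr_limit, so access_ok() would
 * accept kernel addresses from user-supplied pointers. Architectures that
 * verify the address limit on return to user space should detect the
 * unbalanced set_fs() here; the SIGKILL is a backstop so the task never
 * keeps running with KERNEL_DS either way.
 */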

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack!\n");
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack!\n");
}
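
/*
 * Both guard-page tests rely on CONFIG_VMAP_STACK placing the thread
 * stack in vmalloc space, where the allocation is bracketed by unmapped
 * guard pages. Reading one byte below or above the stack should
 * therefore fault immediately; if the pr_err() above is ever reached,
 * the guard page is missing and a stack overflow could silently corrupt
 * adjacent memory.
 */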