// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for hw_breakpoint constraints accounting logic.
 *
 * Copyright (C) 2022, Google LLC.
 */

#include <kunit/test.h>
#include <linux/cpumask.h>
#include <linux/hw_breakpoint.h>
#include <linux/kthread.h>
#include <linux/perf_event.h>
#include <asm/hw_breakpoint.h>
#define TEST_REQUIRES_BP_SLOTS(test, slots)						\
	do {										\
		if ((slots) > get_test_bp_slots()) {					\
			kunit_skip((test), "Requires breakpoint slots: %d > %d", slots,	\
				   get_test_bp_slots());				\
		}									\
	} while (0)

#define TEST_EXPECT_NOSPC(expr) KUNIT_EXPECT_EQ(test, -ENOSPC, PTR_ERR(expr))

#define MAX_TEST_BREAKPOINTS 512
static char break_vars[MAX_TEST_BREAKPOINTS];
static struct perf_event *test_bps[MAX_TEST_BREAKPOINTS];
static struct task_struct *__other_task;
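/*
 * Register a data breakpoint on &break_vars[idx]. Per the perf API, cpu == -1
 * means "any CPU" (a task-target breakpoint that follows the task), and
 * tsk == NULL means the breakpoint is pinned to the given CPU.
 */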
static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)
{
	struct perf_event_attr attr = {};

	if (WARN_ON(idx < 0 || idx >= MAX_TEST_BREAKPOINTS))
		return NULL;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&break_vars[idx];
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_RW;
	return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);
}
static void unregister_test_bp(struct perf_event **bp)
{
	if (WARN_ON(IS_ERR(*bp)))
		return;
	if (WARN_ON(!*bp))
		return;
	unregister_hw_breakpoint(*bp);
	*bp = NULL;
}
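/*
 * Number of TYPE_DATA breakpoint slots; cached in a static, since the
 * hardware slot count does not change while the test runs.
 */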
static int get_test_bp_slots(void)
{
	static int slots;

	if (!slots)
		slots = hw_breakpoint_slots(TYPE_DATA);

	return slots;
}
static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)
{
	struct perf_event *bp = register_test_bp(cpu, tsk, *id);

	KUNIT_ASSERT_NOT_NULL(test, bp);
	KUNIT_ASSERT_FALSE(test, IS_ERR(bp));
	KUNIT_ASSERT_NULL(test, test_bps[*id]);
	test_bps[(*id)++] = bp;
}
/*
 * Fills up the given @cpu/@tsk with breakpoints, only leaving @skip slots free.
 *
 * Returns true if this can be called again, continuing at @id.
 */
static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)
{
	for (int i = 0; i < get_test_bp_slots() - skip; ++i)
		fill_one_bp_slot(test, id, cpu, tsk);

	return *id + get_test_bp_slots() <= MAX_TEST_BREAKPOINTS;
}
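/*
 * A second task to target breakpoints at; created lazily on first use, cached
 * in __other_task, and stopped again in test_exit().
 */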
static int dummy_kthread(void *arg)
{
	return 0;
}

static struct task_struct *get_other_task(struct kunit *test)
{
	struct task_struct *tsk;

	if (__other_task)
		return __other_task;

	tsk = kthread_create(dummy_kthread, NULL, "hw_breakpoint_dummy_task");
	KUNIT_ASSERT_FALSE(test, IS_ERR(tsk));
	__other_task = tsk;
	return __other_task;
}
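/* Return the num'th online CPU; test_init() guarantees at least 2 are online. */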
static int get_test_cpu(int num)
{
	int cpu;

	WARN_ON(num < 0);

	for_each_online_cpu(cpu) {
		if (num-- <= 0)
			break;
	}

	return cpu;
}
/* ===== Test cases ===== */
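/*
 * Common pattern below: fill all breakpoint slots for some combination of
 * CPU and task targets, then verify that any further registration fails
 * with -ENOSPC.
 */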
static void test_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), NULL, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}
static void test_many_cpus(struct kunit *test)
{
	int idx = 0;
	int cpu;

	/* Test that CPUs are independent. */
	for_each_online_cpu(cpu) {
		bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);

		TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
		if (!do_continue)
			break;
	}
}
static void test_one_task_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, -1, current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one and adding back CPU-target should work. */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}
static void test_two_tasks_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	/* Test that tasks are independent. */
	fill_bp_slots(test, &idx, -1, current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one from first task and adding back CPU-target should not work. */
	unregister_test_bp(&test_bps[0]);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}
static void test_one_task_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/*
	 * Remove one and adding back CPU-target should work; this case is
	 * special vs. above because the task's constraints are CPU-dependent.
	 */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}
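/*
 * One task with a mix of one CPU-target and several task-target (all-CPU)
 * breakpoints, including the transition back to pure CPU-target accounting.
 */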
static void test_one_task_mixed(struct kunit *test)
{
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_bp_slots(test, &idx, -1, current, 1);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* Transition from CPU-dependent pinned count to CPU-independent. */
	unregister_test_bp(&test_bps[0]);
	unregister_test_bp(&test_bps[1]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}
static void test_two_tasks_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, get_test_cpu(0), get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Can still create breakpoints on some other CPU. */
	fill_bp_slots(test, &idx, get_test_cpu(1), NULL, 0);
}
static void test_two_tasks_on_one_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Cannot create breakpoints on some other CPU either. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}
static void test_task_on_all_and_one_cpu(struct kunit *test)
{
	int tsk_on_cpu_idx, cpu_idx;
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_bp_slots(test, &idx, -1, current, 2);
	/* Transitioning from only all CPU breakpoints to mixed. */
	tsk_on_cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* We should still be able to use up another CPU's slots. */
	cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(1), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));

	/* Transitioning back to task target on all CPUs. */
	unregister_test_bp(&test_bps[tsk_on_cpu_idx]);
	/* Still have a CPU target breakpoint in get_test_cpu(1). */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Remove it and try again. */
	unregister_test_bp(&test_bps[cpu_idx]);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}
static struct kunit_case hw_breakpoint_test_cases[] = {
	KUNIT_CASE(test_one_cpu),
	KUNIT_CASE(test_many_cpus),
	KUNIT_CASE(test_one_task_on_all_cpus),
	KUNIT_CASE(test_two_tasks_on_all_cpus),
	KUNIT_CASE(test_one_task_on_one_cpu),
	KUNIT_CASE(test_one_task_mixed),
	KUNIT_CASE(test_two_tasks_on_one_cpu),
	KUNIT_CASE(test_two_tasks_on_one_all_cpus),
	KUNIT_CASE(test_task_on_all_and_one_cpu),
	{},
};
static int test_init(struct kunit *test)
{
	/* Most test cases want 2 distinct CPUs. */
	if (num_online_cpus() < 2)
		kunit_skip(test, "not enough cpus");

	/* Want the system to not use breakpoints elsewhere. */
	if (hw_breakpoint_is_used())
		kunit_skip(test, "hw breakpoint already in use");

	return 0;
}
static void test_exit(struct kunit *test)
{
	for (int i = 0; i < MAX_TEST_BREAKPOINTS; ++i) {
		if (test_bps[i])
			unregister_test_bp(&test_bps[i]);
	}

	if (__other_task) {
		kthread_stop(__other_task);
		__other_task = NULL;
	}

	/* Verify that internal state agrees that no breakpoints are in use. */
	KUNIT_EXPECT_FALSE(test, hw_breakpoint_is_used());
}
static struct kunit_suite hw_breakpoint_test_suite = {
	.name = "hw_breakpoint",
	.test_cases = hw_breakpoint_test_cases,
	.init = test_init,
	.exit = test_exit,
};

kunit_test_suites(&hw_breakpoint_test_suite);

MODULE_AUTHOR("Marco Elver <elver@google.com>");
MODULE_LICENSE("GPL");