// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

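/*
 * A minimal user-space sketch of the protocol above, adapted from the
 * example in Documentation/dev-tools/kcov.rst (COVER_SIZE is an arbitrary
 * buffer size chosen for illustration):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	read(-1, NULL, 0);		// the syscall under test
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 */
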
static notrace bool check_kcov_mode(enum kcov_mode needed_mode,
				    struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	/* Subtract the KASLR offset so that PCs are stable across boots. */
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

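/*
 * Resulting buffer layout in KCOV_MODE_TRACE_PC (descriptive note, derived
 * from the code above): area[0] holds the number of PCs recorded so far and
 * area[1..area[0]] hold the canonicalized return addresses, so user space
 * reads area[0] and then that many PCs.
 */
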
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

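/*
 * Resulting buffer layout in KCOV_MODE_TRACE_CMP (descriptive note, derived
 * from write_comp_data() above): area[0] holds the record count and each
 * record n occupies KCOV_WORDS_PER_CMP consecutive u64 words:
 *
 *	area[1 + 4*n + 0]: type (operand size and KCOV_CMP_CONST flag)
 *	area[1 + 4*n + 1]: arg1
 *	area[1 + 4*n + 2]: arg2
 *	area[1 + 4*n + 3]: PC of the comparison
 */
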
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

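/*
 * For switch statements the compiler emits a call with a table describing
 * the case values. Layout assumed from the code below: cases[0] is the
 * number of case values, cases[1] is the operand size in bits, and the
 * case values themselves start at cases[2].
 */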
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	/* See comment in check_kcov_mode(). */
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
	return 0;
}

device_initcall(kcov_init);