#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time allowed)
 * A sketch of the corresponding user-space sequence follows the struct below.
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

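/*
 * A minimal user-space sketch of the sequence above (illustrative only,
 * not part of this file's build; error handling omitted, and COVER_SIZE
 * is an arbitrary value chosen here, counted in unsigned longs):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;				// reset recorded-PC count
 *	// ... issue the syscall under test ...
 *	unsigned long n = cover[0];		// number of PCs recorded
 *	ioctl(fd, KCOV_DISABLE, 0);
 *	close(fd);
 */
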
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts, there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

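/*
 * Resulting buffer layout, for illustration: area[0] holds the count n
 * and area[1]..area[n] hold the recorded return addresses (PCs):
 *
 *	area: [ n | pc_1 | pc_2 | ... | pc_n | unused up to size-1 ]
 *
 * so user space (using the mmap()ed buffer from the sketch above) can
 * dump the trace with something like:
 *
 *	for (unsigned long i = 1; i <= cover[0]; i++)
 *		printf("0x%lx\n", cover[i]);
 */
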
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);