kernel/kcov.c
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>
/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task a time allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
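
/*
 * Example (user space): how the state machine above is driven end to end.
 * This is an illustrative sketch adapted from the kcov documentation, not
 * part of this file; COVER_SIZE is an arbitrary buffer size in longs, error
 * handling is omitted, and the KCOV_* constants come from the uapi header
 * <linux/kcov.h>.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kcov.h>
 *
 *	#define COVER_SIZE	(64 << 10)
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *		unsigned long *cover, n, i;
 *
 *		ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		ioctl(fd, KCOV_ENABLE, 0);
 *		__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *		read(-1, NULL, 0);		// the syscall under test
 *		n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *		for (i = 0; i < n; i++)
 *			printf("0x%lx\n", cover[i + 1]);
 *		ioctl(fd, KCOV_DISABLE, 0);
 *		munmap(cover, COVER_SIZE * sizeof(unsigned long));
 *		close(fd);
 *		return 0;
 *	}
 */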

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || !in_task())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif
		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts, there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
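
/*
 * How this gets called: with CONFIG_KCOV the kernel is built with
 * -fsanitize-coverage=trace-pc, so the compiler inserts a call to
 * __sanitizer_cov_trace_pc() at the start of every basic block.
 * Conceptually (illustrative only; actual code generation differs):
 *
 *	int foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();		// function entry
 *		if (x) {
 *			__sanitizer_cov_trace_pc();	// taken branch
 *			return 1;
 *		}
 *		__sanitizer_cov_trace_pc();		// fall-through branch
 *		return 0;
 *	}
 */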

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just so we don't leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	/*
	 * Allocate before taking the lock; if this is not the first mmap()
	 * of the descriptor, the unused copy is freed at "exit" below.
	 */
	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		/* First mmap(): publish the area and map its pages. */
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE; after that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
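
/*
 * For reference, the commands handled above are defined in the uapi header
 * (include/uapi/linux/kcov.h); at the time of writing they were defined as:
 *
 *	#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
 *	#define KCOV_ENABLE		_IO('c', 100)
 *	#define KCOV_DISABLE		_IO('c', 101)
 */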

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);