/* kernel/kcsan/debugfs.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN debugfs interface.
 *
 * Copyright (C) 2019, Google LLC.
 */
8 #define pr_fmt(fmt) "kcsan: " fmt
10 #include <linux/atomic.h>
11 #include <linux/bsearch.h>
12 #include <linux/bug.h>
13 #include <linux/debugfs.h>
14 #include <linux/init.h>
15 #include <linux/kallsyms.h>
16 #include <linux/sched.h>
17 #include <linux/seq_file.h>
18 #include <linux/slab.h>
19 #include <linux/sort.h>
20 #include <linux/string.h>
21 #include <linux/uaccess.h>
23 #include "kcsan.h"
25 atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
26 static const char *const counter_names[] = {
27 [KCSAN_COUNTER_USED_WATCHPOINTS] = "used_watchpoints",
28 [KCSAN_COUNTER_SETUP_WATCHPOINTS] = "setup_watchpoints",
29 [KCSAN_COUNTER_DATA_RACES] = "data_races",
30 [KCSAN_COUNTER_ASSERT_FAILURES] = "assert_failures",
31 [KCSAN_COUNTER_NO_CAPACITY] = "no_capacity",
32 [KCSAN_COUNTER_REPORT_RACES] = "report_races",
33 [KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN] = "races_unknown_origin",
34 [KCSAN_COUNTER_UNENCODABLE_ACCESSES] = "unencodable_accesses",
35 [KCSAN_COUNTER_ENCODING_FALSE_POSITIVES] = "encoding_false_positives",
37 static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
40 * Addresses for filtering functions from reporting. This list can be used as a
41 * whitelist or blacklist.
43 static struct {
44 unsigned long *addrs; /* array of addresses */
45 size_t size; /* current size */
46 int used; /* number of elements used */
47 bool sorted; /* if elements are sorted */
48 bool whitelist; /* if list is a blacklist or whitelist */
49 } report_filterlist;
50 static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
53 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
54 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
55 * debugfs file. This will not generate any conflicts, and tests fast-path only.
57 static noinline void microbenchmark(unsigned long iters)
59 const struct kcsan_ctx ctx_save = current->kcsan_ctx;
60 const bool was_enabled = READ_ONCE(kcsan_enabled);
61 u64 cycles;
63 /* We may have been called from an atomic region; reset context. */
64 memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
66 * Disable to benchmark fast-path for all accesses, and (expected
67 * negligible) call into slow-path, but never set up watchpoints.
69 WRITE_ONCE(kcsan_enabled, false);
71 pr_info("%s begin | iters: %lu\n", __func__, iters);
73 cycles = get_cycles();
74 while (iters--) {
75 unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
76 int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
77 (!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
78 __kcsan_check_access((void *)addr, sizeof(long), type);
80 cycles = get_cycles() - cycles;
82 pr_info("%s end | cycles: %llu\n", __func__, cycles);
84 WRITE_ONCE(kcsan_enabled, was_enabled);
85 /* restore context */
86 current->kcsan_ctx = ctx_save;
/*
 * Comparator for sort() and bsearch() over the filterlist: orders addresses
 * ascending. Note: the parameter names were previously swapped (rhs first);
 * the first argument is the left-hand side of the comparison.
 */
static int cmp_filterlist_addrs(const void *lhs, const void *rhs)
{
	const unsigned long a = *(const unsigned long *)lhs;
	const unsigned long b = *(const unsigned long *)rhs;

	return a < b ? -1 : a == b ? 0 : 1;
}
97 bool kcsan_skip_report_debugfs(unsigned long func_addr)
99 unsigned long symbolsize, offset;
100 unsigned long flags;
101 bool ret = false;
103 if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
104 return false;
105 func_addr -= offset; /* Get function start */
107 raw_spin_lock_irqsave(&report_filterlist_lock, flags);
108 if (report_filterlist.used == 0)
109 goto out;
111 /* Sort array if it is unsorted, and then do a binary search. */
112 if (!report_filterlist.sorted) {
113 sort(report_filterlist.addrs, report_filterlist.used,
114 sizeof(unsigned long), cmp_filterlist_addrs, NULL);
115 report_filterlist.sorted = true;
117 ret = !!bsearch(&func_addr, report_filterlist.addrs,
118 report_filterlist.used, sizeof(unsigned long),
119 cmp_filterlist_addrs);
120 if (report_filterlist.whitelist)
121 ret = !ret;
123 out:
124 raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
125 return ret;
128 static void set_report_filterlist_whitelist(bool whitelist)
130 unsigned long flags;
132 raw_spin_lock_irqsave(&report_filterlist_lock, flags);
133 report_filterlist.whitelist = whitelist;
134 raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
137 /* Returns 0 on success, error-code otherwise. */
138 static ssize_t insert_report_filterlist(const char *func)
140 unsigned long flags;
141 unsigned long addr = kallsyms_lookup_name(func);
142 unsigned long *delay_free = NULL;
143 unsigned long *new_addrs = NULL;
144 size_t new_size = 0;
145 ssize_t ret = 0;
147 if (!addr) {
148 pr_err("could not find function: '%s'\n", func);
149 return -ENOENT;
152 retry_alloc:
154 * Check if we need an allocation, and re-validate under the lock. Since
155 * the report_filterlist_lock is a raw, cannot allocate under the lock.
157 if (data_race(report_filterlist.used == report_filterlist.size)) {
158 new_size = (report_filterlist.size ?: 4) * 2;
159 delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
160 if (!new_addrs)
161 return -ENOMEM;
164 raw_spin_lock_irqsave(&report_filterlist_lock, flags);
165 if (report_filterlist.used == report_filterlist.size) {
166 /* Check we pre-allocated enough, and retry if not. */
167 if (report_filterlist.used >= new_size) {
168 raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
169 kfree(new_addrs); /* kfree(NULL) is safe */
170 delay_free = new_addrs = NULL;
171 goto retry_alloc;
174 if (report_filterlist.used)
175 memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long));
176 delay_free = report_filterlist.addrs; /* free the old list */
177 report_filterlist.addrs = new_addrs; /* switch to the new list */
178 report_filterlist.size = new_size;
181 /* Note: deduplicating should be done in userspace. */
182 report_filterlist.addrs[report_filterlist.used++] = addr;
183 report_filterlist.sorted = false;
185 raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
187 kfree(delay_free);
188 return ret;
191 static int show_info(struct seq_file *file, void *v)
193 int i;
194 unsigned long flags;
196 /* show stats */
197 seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
198 for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
199 seq_printf(file, "%s: %ld\n", counter_names[i],
200 atomic_long_read(&kcsan_counters[i]));
203 /* show filter functions, and filter type */
204 raw_spin_lock_irqsave(&report_filterlist_lock, flags);
205 seq_printf(file, "\n%s functions: %s\n",
206 report_filterlist.whitelist ? "whitelisted" : "blacklisted",
207 report_filterlist.used == 0 ? "none" : "");
208 for (i = 0; i < report_filterlist.used; ++i)
209 seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
210 raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
212 return 0;
215 static int debugfs_open(struct inode *inode, struct file *file)
217 return single_open(file, show_info, NULL);
220 static ssize_t
221 debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
223 char kbuf[KSYM_NAME_LEN];
224 char *arg;
225 const size_t read_len = min(count, sizeof(kbuf) - 1);
227 if (copy_from_user(kbuf, buf, read_len))
228 return -EFAULT;
229 kbuf[read_len] = '\0';
230 arg = strstrip(kbuf);
232 if (!strcmp(arg, "on")) {
233 WRITE_ONCE(kcsan_enabled, true);
234 } else if (!strcmp(arg, "off")) {
235 WRITE_ONCE(kcsan_enabled, false);
236 } else if (str_has_prefix(arg, "microbench=")) {
237 unsigned long iters;
239 if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
240 return -EINVAL;
241 microbenchmark(iters);
242 } else if (!strcmp(arg, "whitelist")) {
243 set_report_filterlist_whitelist(true);
244 } else if (!strcmp(arg, "blacklist")) {
245 set_report_filterlist_whitelist(false);
246 } else if (arg[0] == '!') {
247 ssize_t ret = insert_report_filterlist(&arg[1]);
249 if (ret < 0)
250 return ret;
251 } else {
252 return -EINVAL;
255 return count;
258 static const struct file_operations debugfs_ops =
260 .read = seq_read,
261 .open = debugfs_open,
262 .write = debugfs_write,
263 .release = single_release
266 static int __init kcsan_debugfs_init(void)
268 debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
269 return 0;
272 late_initcall(kcsan_debugfs_init);