arch/x86/mm/kmmio.c

// SPDX-License-Identifier: GPL-2.0
/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr; /* the requested address */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem:
 * could use the existing prio tree code, or possibly better implementations:
 * - The Interval Skip List: A Data Structure for Finding All Intervals That
 *   Overlap a Point (might be simple)
 * - Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */

/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
	struct list_head *head;
	struct kmmio_fault_page *f;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);
	head = kmmio_page_list(addr);
	list_for_each_entry_rcu(f, head, list) {
		if (f->addr == addr)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmd_t new_pmd;
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v;
		new_pmd = pmd_mknotpresent(*pmd);
	} else {
		/* Presume this has been called with clear==true previously */
		new_pmd = __pmd(*old);
	}
	set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v;
		/* Nothing should care about address */
		pte_clear(&init_mm, 0, pte);
	} else {
		/* Presume this has been called with clear==true previously */
		set_pte_atomic(pte, __pte(*old));
	}
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one_kernel(f->addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. The RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly. Furthermore, the caller must
 * guarantee that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming, on the other hand, is allowed, and may occur when a
 * fault and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
			   f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also, prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing within
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */

/*
 * Interrupts are disabled on entry, as the page fault is handled through
 * an interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);
	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting the already in-flight context.
			 * This should not happen; let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry, as trap 1 (the debug exception) is an
 * interrupt gate, and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

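/*
 * Illustrative usage sketch, not part of this file: how a client such as
 * mmiotrace might arm a probe over an ioremapped region. The names below
 * (my_pre, my_post, my_probe, mmio_base, mmio_len) are hypothetical; only
 * struct kmmio_probe, the handler prototypes and register_kmmio_probe()
 * come from <linux/mmiotrace.h>.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *			   unsigned long addr)
 *	{
 *		// called from kmmio_handler() before the access is re-executed
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *			    struct pt_regs *regs)
 *	{
 *		// called from post_kmmio_handler() after the single step
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr		= (unsigned long)mmio_base,
 *		.len		= mmio_len,
 *		.pre_handler	= my_pre,
 *		.post_handler	= my_post,
 *	};
 *
 *	if (register_kmmio_probe(&my_probe))
 *		pr_err("could not arm kmmio probe\n");
 */
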
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to call synchronize_rcu() before you can
 * be sure that the callbacks will not be called anymore. Only after that
 * may you actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(addr, &l);
	if (!pte)
		return;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. An RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and would conclude
	 * that the fault is not a kmmio fault, when it actually is. This
	 * would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

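/*
 * Illustrative teardown sketch, not part of this file, pairing with the
 * registration sketch above (my_probe is the same hypothetical probe).
 * The probe must not be freed or reused until a grace period has elapsed,
 * because its handlers may still be running on another CPU:
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();
 *	// only now may my_probe and any data its handlers use be released
 */
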
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed to by arg->err)
			 * to denote completion of processing.
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}