arch/x86/mm/kmemcheck/kmemcheck.c
/**
 * kmemcheck - a heavyweight memory checker for the linux kernel
 * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
 * (With a lot of help from Ingo Molnar and Pekka Enberg.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2) as
 * published by the Free Software Foundation.
 */
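
/*
 * How it works, in short: pages tracked by kmemcheck are kept
 * non-present in the page tables, so every access to them raises a
 * page fault. The fault handler (kmemcheck_fault()) decodes the
 * faulting instruction, tests or updates the shadow memory that
 * records which bytes are initialized, temporarily maps the page
 * back in and single-steps the instruction; the resulting debug trap
 * (kmemcheck_trap()) hides the page again.
 */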

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/kmemcheck.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "error.h"
#include "opcode.h"
#include "pte.h"
#include "selftest.h"
#include "shadow.h"

#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 0
#endif

#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 1
#endif

#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
# define KMEMCHECK_ENABLED 2
#endif
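
/*
 * Runtime switch: 0 = disabled, 1 = enabled, 2 = one-shot mode, in
 * which kmemcheck disables itself after reporting the first error
 * (see the "kmemcheck_enabled == 2" checks below).
 */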
int kmemcheck_enabled = KMEMCHECK_ENABLED;

int __init kmemcheck_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * Limit SMP to use a single CPU. We rely on the fact that this code
	 * runs before SMP is set up.
	 */
	if (setup_max_cpus > 1) {
		printk(KERN_INFO
			"kmemcheck: Limiting number of CPUs to 1.\n");
		setup_max_cpus = 1;
	}
#endif

	if (!kmemcheck_selftest()) {
		printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
		kmemcheck_enabled = 0;
		return -EINVAL;
	}

	printk(KERN_INFO "kmemcheck: Initialized\n");
	return 0;
}

early_initcall(kmemcheck_init);

/*
 * We need to parse the kmemcheck= option before any memory is allocated.
 */
static int __init param_kmemcheck(char *str)
{
	if (!str)
		return -EINVAL;

	sscanf(str, "%d", &kmemcheck_enabled);
	return 0;
}

early_param("kmemcheck", param_kmemcheck);
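
/* Example: booting with "kmemcheck=2" selects one-shot mode. */

/*
 * Tracked pages are kept non-present so that every access faults into
 * kmemcheck. "Showing" an address temporarily sets _PAGE_PRESENT so
 * that the faulting instruction can execute; "hiding" clears it again.
 */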
int kmemcheck_show_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

int kmemcheck_hide_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

struct kmemcheck_context {
	bool busy;
	int balance;

	/*
	 * There can be at most two memory operands to an instruction, but
	 * each address can cross a page boundary -- so we may need up to
	 * four addresses that must be hidden/revealed for each fault.
	 */
	unsigned long addr[4];
	unsigned long n_addrs;
	unsigned long flags;

	/* Data size of the instruction that caused a fault. */
	unsigned int size;
};

static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
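
/*
 * "balance" counts shows minus hides for the current fault; anything
 * other than 0 or 1 is treated as a bug. A positive balance means a
 * page is currently shown and we are waiting for the single-step trap.
 */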
bool kmemcheck_active(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	return data->balance > 0;
}

/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
	data->addr[data->n_addrs++] = addr;
}

static unsigned int kmemcheck_show_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_show_addr(data->addr[i]);

	return n;
}

static unsigned int kmemcheck_hide_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_hide_addr(data->addr[i]);

	return n;
}

/*
 * Called from the #PF handler.
 */
void kmemcheck_show(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 0)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->balance = 0;
		return;
	}

	/*
	 * None of the addresses actually belonged to kmemcheck. Note that
	 * this is not an error.
	 */
	if (kmemcheck_show_all() == 0)
		return;

	++data->balance;

	/*
	 * The IF needs to be cleared as well, so that the faulting
	 * instruction can run "uninterrupted". Otherwise, we might take
	 * an interrupt and start executing that before we've had a chance
	 * to hide the page again.
	 *
	 * NOTE: In the rare case of multiple faults, we must not override
	 * the original flags:
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		data->flags = regs->flags;
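
	/*
	 * TF makes the CPU raise a debug trap right after the faulting
	 * instruction has executed; kmemcheck_hide() runs from that trap.
	 */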
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
}

/*
 * Called from the #DB handler.
 */
void kmemcheck_hide(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	int n;

	BUG_ON(!irqs_disabled());

	if (data->balance == 0)
		return;

	if (unlikely(data->balance != 1)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->n_addrs = 0;
		data->balance = 0;

		if (!(data->flags & X86_EFLAGS_TF))
			regs->flags &= ~X86_EFLAGS_TF;
		if (data->flags & X86_EFLAGS_IF)
			regs->flags |= X86_EFLAGS_IF;
		return;
	}
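
	/*
	 * If kmemcheck was disabled in the meantime (e.g. by one-shot
	 * mode after the first report), leave the pages present so that
	 * they stop faulting.
	 */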
	if (kmemcheck_enabled)
		n = kmemcheck_hide_all();
	else
		n = kmemcheck_show_all();

	if (n == 0)
		return;

	--data->balance;

	data->n_addrs = 0;

	if (!(data->flags & X86_EFLAGS_TF))
		regs->flags &= ~X86_EFLAGS_TF;
	if (data->flags & X86_EFLAGS_IF)
		regs->flags |= X86_EFLAGS_IF;
}
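
/*
 * _PAGE_HIDDEN is the software PTE bit that marks a page as tracked
 * by kmemcheck; kmemcheck_pte_lookup() only matches PTEs that carry it.
 */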
void kmemcheck_show_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}

bool kmemcheck_page_is_tracked(struct page *p)
{
	/* This will also check the "hidden" flag of the PTE. */
	return kmemcheck_pte_lookup((unsigned long) page_address(p));
}

void kmemcheck_hide_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}
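
/*
 * Every tracked page has a shadow page that records, byte for byte,
 * whether the corresponding data byte has been initialized (see
 * shadow.h for the possible states).
 */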
/* Access may NOT cross page boundary */
static void kmemcheck_read_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;
	enum kmemcheck_shadow status;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;

	/* Don't warn about it again. */
	kmemcheck_shadow_set(shadow, size);
}

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	enum kmemcheck_shadow status;
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return true;

	status = kmemcheck_shadow_test(shadow, size);

	return status == KMEMCHECK_SHADOW_INITIALIZED;
}

/* Access may cross page boundary */
static void kmemcheck_read(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_read_strict(regs, addr, size);
		return;
	}

	/*
	 * What we do is basically to split the access across the
	 * two pages and handle each part separately. Yes, this means
	 * that we may now see reads that are 3 + 5 bytes, for
	 * example (and if both are uninitialized, there will be two
	 * reports), but it makes the code a lot simpler.
	 */
	kmemcheck_read_strict(regs, addr, next_page - addr);
	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
}

static void kmemcheck_write_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	kmemcheck_shadow_set(shadow, size);
}

static void kmemcheck_write(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_write_strict(regs, addr, size);
		return;
	}

	/* See comment in kmemcheck_read(). */
	kmemcheck_write_strict(regs, addr, next_page - addr);
	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
}

/*
 * Copying is hard. We have two addresses, each of which may be split across
 * a page (and each page will have different shadow addresses).
 */
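/*
 * Note that the shadow bytes travel with the data: the destination's
 * shadow inherits the source's initialization state, and a copy of
 * uninitialized data is reported once, against the source address.
 */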
static void kmemcheck_copy(struct pt_regs *regs,
	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
{
	uint8_t shadow[8];
	enum kmemcheck_shadow status;

	unsigned long page;
	unsigned long next_addr;
	unsigned long next_page;

	uint8_t *x;
	unsigned int i;
	unsigned int n;

	BUG_ON(size > sizeof(shadow));

	page = src_addr & PAGE_MASK;
	next_addr = src_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < size; ++i)
				shadow[i] = x[i];
		} else {
			for (i = 0; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	} else {
		n = next_page - src_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < n; ++i)
				shadow[i] = x[i];
		} else {
			/* Not tracked */
			for (i = 0; i < n; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i)
				shadow[i] = x[i - n];
		} else {
			/* Not tracked */
			for (i = n; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	}

	page = dst_addr & PAGE_MASK;
	next_addr = dst_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < size; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	} else {
		n = next_page - dst_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < n; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i) {
				x[i - n] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	}

	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, src_addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;
}

enum kmemcheck_method {
	KMEMCHECK_READ,
	KMEMCHECK_WRITE,
};
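
/*
 * Called for a fault on a tracked page: decode the instruction at
 * regs->ip to find the access size and any string-instruction
 * operands, then test or update the shadow accordingly.
 */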
static void kmemcheck_access(struct pt_regs *regs,
	unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
	const uint8_t *insn;
	const uint8_t *insn_primary;
	unsigned int size;

	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	/* Recursive fault -- ouch. */
	if (data->busy) {
		kmemcheck_show_addr(fallback_address);
		kmemcheck_error_save_bug(regs);
		return;
	}

	data->busy = true;

	insn = (const uint8_t *) regs->ip;
	insn_primary = kmemcheck_opcode_get_primary(insn);

	kmemcheck_opcode_decode(insn, &size);

	switch (insn_primary[0]) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
		/* AND, OR, XOR */
		/*
		 * Unfortunately, these instructions have to be excluded from
		 * our regular checking since they access only some (and not
		 * all) bits. This clears out "bogus" bitfield-access warnings.
		 */
	case 0x80:
	case 0x81:
	case 0x82:
	case 0x83:
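		/*
		 * For these "group 1" opcodes, the operation is selected by
		 * the reg field (bits 3-5) of the following ModRM byte.
		 */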
		switch ((insn_primary[1] >> 3) & 7) {
			/* OR */
		case 1:
			/* AND */
		case 4:
			/* XOR */
		case 6:
			kmemcheck_write(regs, fallback_address, size);
			goto out;

			/* ADD */
		case 0:
			/* ADC */
		case 2:
			/* SBB */
		case 3:
			/* SUB */
		case 5:
			/* CMP */
		case 7:
			break;
		}
		break;
#endif

		/* MOVS, MOVSB, MOVSW, MOVSD */
	case 0xa4:
	case 0xa5:
		/*
		 * These instructions are special because they take two
		 * addresses, but we only get one page fault.
		 */
		kmemcheck_copy(regs, regs->si, regs->di, size);
		goto out;

		/* CMPS, CMPSB, CMPSW, CMPSD */
	case 0xa6:
	case 0xa7:
		kmemcheck_read(regs, regs->si, size);
		kmemcheck_read(regs, regs->di, size);
		goto out;
	}

	/*
	 * If the opcode isn't special in any way, we use the data from the
	 * page fault handler to determine the address and type of memory
	 * access.
	 */
	switch (fallback_method) {
	case KMEMCHECK_READ:
		kmemcheck_read(regs, fallback_address, size);
		goto out;
	case KMEMCHECK_WRITE:
		kmemcheck_write(regs, fallback_address, size);
		goto out;
	}

out:
	data->busy = false;
}

bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
	unsigned long error_code)
{
	pte_t *pte;

	/*
	 * XXX: Is it safe to assume that memory accesses from virtual 86
	 * mode or non-kernel code segments will _never_ access kernel
	 * memory (e.g. tracked pages)? For now, we need this to avoid
	 * invoking kmemcheck for PnP BIOS calls.
	 */
	if (regs->flags & X86_VM_MASK)
		return false;
	if (regs->cs != __KERNEL_CS)
		return false;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return false;
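
	/* Bit 1 of the page-fault error code is set for write accesses. */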
	if (error_code & 2)
		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
	else
		kmemcheck_access(regs, address, KMEMCHECK_READ);

	kmemcheck_show(regs);
	return true;
}

bool kmemcheck_trap(struct pt_regs *regs)
{
	if (!kmemcheck_active(regs))
		return false;

	/* We're done. */
	kmemcheck_hide(regs);
	return true;
}