/*
 * kmemcheck - a heavyweight memory checker for the linux kernel
 * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
 * (With a lot of help from Ingo Molnar and Pekka Enberg.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2) as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/kmemcheck.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "error.h"
#include "opcode.h"
#include "pte.h"
#include "selftest.h"
#include "shadow.h"

#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 0
#endif

#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 1
#endif

#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
# define KMEMCHECK_ENABLED 2
#endif

int kmemcheck_enabled = KMEMCHECK_ENABLED;
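
/*
 * Runtime state of the checker, matching the KMEMCHECK_ENABLED defaults
 * above: 0 = disabled, 1 = enabled, 2 = one-shot mode, i.e. kmemcheck
 * disables itself again after saving its first error report (see e.g.
 * kmemcheck_read_strict()). The value can be set at boot time with the
 * kmemcheck= parameter parsed by param_kmemcheck() below.
 */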

int __init kmemcheck_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * Limit SMP to use a single CPU. We rely on the fact that this code
	 * runs before SMP is set up.
	 */
	if (setup_max_cpus > 1) {
		printk(KERN_INFO
			"kmemcheck: Limiting number of CPUs to 1.\n");
		setup_max_cpus = 1;
	}
#endif

	if (!kmemcheck_selftest()) {
		printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
		kmemcheck_enabled = 0;
		return -EINVAL;
	}

	printk(KERN_INFO "kmemcheck: Initialized\n");

	return 0;
}

early_initcall(kmemcheck_init);

/*
 * We need to parse the kmemcheck= option before any memory is allocated.
 */
static int __init param_kmemcheck(char *str)
{
	int val;
	int ret;

	if (!str)
		return -EINVAL;

	ret = kstrtoint(str, 0, &val);
	if (ret)
		return ret;
	kmemcheck_enabled = val;
	return 0;
}

early_param("kmemcheck", param_kmemcheck);

int kmemcheck_show_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

int kmemcheck_hide_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

struct kmemcheck_context {
	bool busy;
	int balance;

	/*
	 * There can be at most two memory operands to an instruction, but
	 * each address can cross a page boundary -- so we may need up to
	 * four addresses that must be hidden/revealed for each fault.
	 */
	unsigned long addr[4];
	unsigned long n_addrs;
	unsigned long flags;

	/* Data size of the instruction that caused a fault. */
	unsigned int size;
};
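
/*
 * Worst case for addr[4] above: a string move has one source and one
 * destination operand, and when both straddle a page boundary a single
 * fault can involve four distinct pages, each of which has to be shown
 * and later hidden again.
 */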

static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);

bool kmemcheck_active(struct pt_regs *regs)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

	return data->balance > 0;
}

/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
	data->addr[data->n_addrs++] = addr;
}

static unsigned int kmemcheck_show_all(void)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_show_addr(data->addr[i]);

	return n;
}

static unsigned int kmemcheck_hide_all(void)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_hide_addr(data->addr[i]);

	return n;
}
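
/*
 * Life cycle of a tracked access: the fault on a hidden page ends up in
 * kmemcheck_fault(), which decodes the instruction and checks/updates the
 * shadow memory, then calls kmemcheck_show() to map the page(s) back in
 * with single-stepping (TF) enabled. The faulting instruction is
 * re-executed, the resulting debug trap reaches kmemcheck_trap(), and
 * kmemcheck_hide() hides the page(s) again.
 */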

/*
 * Called from the #PF handler.
 */
void kmemcheck_show(struct pt_regs *regs)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 0)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->balance = 0;
		return;
	}

	/*
	 * None of the addresses actually belonged to kmemcheck. Note that
	 * this is not an error.
	 */
	if (kmemcheck_show_all() == 0)
		return;

	++data->balance;

	/*
	 * The IF needs to be cleared as well, so that the faulting
	 * instruction can run "uninterrupted". Otherwise, we might take
	 * an interrupt and start executing that before we've had a chance
	 * to hide the page again.
	 *
	 * NOTE: In the rare case of multiple faults, we must not override
	 * the original flags:
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		data->flags = regs->flags;

	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
}

/*
 * Called from the #DB handler.
 */
void kmemcheck_hide(struct pt_regs *regs)
{
	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
	int n;

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 1)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->n_addrs = 0;
		data->balance = 0;

		if (!(data->flags & X86_EFLAGS_TF))
			regs->flags &= ~X86_EFLAGS_TF;
		if (data->flags & X86_EFLAGS_IF)
			regs->flags |= X86_EFLAGS_IF;
		return;
	}

	if (kmemcheck_enabled)
		n = kmemcheck_hide_all();
	else
		n = kmemcheck_show_all();

	if (n == 0)
		return;

	--data->balance;

	data->n_addrs = 0;

	if (!(data->flags & X86_EFLAGS_TF))
		regs->flags &= ~X86_EFLAGS_TF;
	if (data->flags & X86_EFLAGS_IF)
		regs->flags |= X86_EFLAGS_IF;
}

void kmemcheck_show_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}

bool kmemcheck_page_is_tracked(struct page *p)
{
	/* This will also check the "hidden" flag of the PTE. */
	return kmemcheck_pte_lookup((unsigned long) page_address(p));
}

void kmemcheck_hide_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}
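
/*
 * Note on the PTE bits toggled above: clearing _PAGE_PRESENT is what makes
 * every access to a tracked page fault into kmemcheck_fault(), while
 * _PAGE_HIDDEN marks the PTE as belonging to kmemcheck, so that
 * kmemcheck_pte_lookup() (and thus kmemcheck_page_is_tracked()) can tell
 * tracked pages apart from pages that are non-present for other reasons.
 */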

/* Access may NOT cross page boundary */
static void kmemcheck_read_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;
	enum kmemcheck_shadow status;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;

	/* Don't warn about it again. */
	kmemcheck_shadow_set(shadow, size);
}

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	enum kmemcheck_shadow status;
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return true;

	status = kmemcheck_shadow_test_all(shadow, size);

	return status == KMEMCHECK_SHADOW_INITIALIZED;
}

/* Access may cross page boundary */
static void kmemcheck_read(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_read_strict(regs, addr, size);
		return;
	}

	/*
	 * What we do is basically to split the access across the
	 * two pages and handle each part separately. Yes, this means
	 * that we may now see reads that are 3 + 5 bytes, for
	 * example (and if both are uninitialized, there will be two
	 * reports), but it makes the code a lot simpler.
	 */
	kmemcheck_read_strict(regs, addr, next_page - addr);
	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
}
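
/*
 * Writes are handled differently from reads: a write to tracked memory
 * never produces a report by itself; it only marks the written bytes'
 * shadow as initialized (kmemcheck_shadow_set()), so that subsequent reads
 * of those bytes are considered fine.
 */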

static void kmemcheck_write_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	kmemcheck_shadow_set(shadow, size);
}

static void kmemcheck_write(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_write_strict(regs, addr, size);
		return;
	}

	/* See comment in kmemcheck_read(). */
	kmemcheck_write_strict(regs, addr, next_page - addr);
	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
}

/*
 * Copying is hard. We have two addresses, each of which may be split across
 * a page (and each page will have different shadow addresses).
 */
static void kmemcheck_copy(struct pt_regs *regs,
	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
{
	uint8_t shadow[8];
	enum kmemcheck_shadow status;

	unsigned long page;
	unsigned long next_addr;
	unsigned long next_page;

	uint8_t *x;
	unsigned int i;
	unsigned int n;

	BUG_ON(size > sizeof(shadow));

	page = src_addr & PAGE_MASK;
	next_addr = src_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < size; ++i)
				shadow[i] = x[i];
		} else {
			for (i = 0; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	} else {
		n = next_page - src_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < n; ++i)
				shadow[i] = x[i];
		} else {
			/* Not tracked */
			for (i = 0; i < n; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i)
				shadow[i] = x[i - n];
		} else {
			/* Not tracked */
			for (i = n; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	}

	page = dst_addr & PAGE_MASK;
	next_addr = dst_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < size; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	} else {
		n = next_page - dst_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < n; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i) {
				x[i - n] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	}

	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, src_addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;
}
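
/*
 * Note that kmemcheck_copy() propagates shadow state instead of warning
 * about every copy of uninitialized bytes: when the destination is tracked,
 * the source's shadow is copied into the destination's shadow and no report
 * is generated. A report is only saved when uninitialized data escapes to an
 * untracked destination, in which case the final kmemcheck_shadow_test()
 * still sees the original source shadow.
 */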

enum kmemcheck_method {
	KMEMCHECK_READ,
	KMEMCHECK_WRITE,
};

static void kmemcheck_access(struct pt_regs *regs,
	unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
	const uint8_t *insn;
	const uint8_t *insn_primary;
	unsigned int size;

	struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

	/* Recursive fault -- ouch. */
	if (data->busy) {
		kmemcheck_show_addr(fallback_address);
		kmemcheck_error_save_bug(regs);
		return;
	}

	data->busy = true;

	insn = (const uint8_t *) regs->ip;
	insn_primary = kmemcheck_opcode_get_primary(insn);

	kmemcheck_opcode_decode(insn, &size);

	switch (insn_primary[0]) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
		/* AND, OR, XOR */
		/*
		 * Unfortunately, these instructions have to be excluded from
		 * our regular checking since they access only some (and not
		 * all) bits. This clears out "bogus" bitfield-access warnings.
		 */
	case 0x80:
	case 0x81:
	case 0x82:
	case 0x83:
		switch ((insn_primary[1] >> 3) & 7) {
			/* OR */
		case 1:
			/* AND */
		case 4:
			/* XOR */
		case 6:
			kmemcheck_write(regs, fallback_address, size);
			goto out;

			/* ADD */
		case 0:
			/* ADC */
		case 2:
			/* SBB */
		case 3:
			/* SUB */
		case 5:
			/* CMP */
		case 7:
			break;
		}
		break;
#endif

		/* MOVS, MOVSB, MOVSW, MOVSD */
	case 0xa4:
	case 0xa5:
		/*
		 * These instructions are special because they take two
		 * addresses, but we only get one page fault.
		 */
		kmemcheck_copy(regs, regs->si, regs->di, size);
		goto out;

		/* CMPS, CMPSB, CMPSW, CMPSD */
	case 0xa6:
	case 0xa7:
		kmemcheck_read(regs, regs->si, size);
		kmemcheck_read(regs, regs->di, size);
		goto out;
	}

	/*
	 * If the opcode isn't special in any way, we use the data from the
	 * page fault handler to determine the address and type of memory
	 * access.
	 */
	switch (fallback_method) {
	case KMEMCHECK_READ:
		kmemcheck_read(regs, fallback_address, size);
		goto out;
	case KMEMCHECK_WRITE:
		kmemcheck_write(regs, fallback_address, size);
		goto out;
	}

out:
	data->busy = false;
}

bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
	unsigned long error_code)
{
	pte_t *pte;

	/*
	 * XXX: Is it safe to assume that memory accesses from virtual 86
	 * mode or non-kernel code segments will _never_ access kernel
	 * memory (e.g. tracked pages)? For now, we need this to avoid
	 * invoking kmemcheck for PnP BIOS calls.
	 */
	if (regs->flags & X86_VM_MASK)
		return false;
	if (regs->cs != __KERNEL_CS)
		return false;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return false;

	WARN_ON_ONCE(in_nmi());

	/* Bit 1 of the page fault error code is set for write accesses. */
	if (error_code & 2)
		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
	else
		kmemcheck_access(regs, address, KMEMCHECK_READ);

	kmemcheck_show(regs);
	return true;
}

bool kmemcheck_trap(struct pt_regs *regs)
{
	if (!kmemcheck_active(regs))
		return false;

	/* We're done. */
	kmemcheck_hide(regs);
	return true;
}