/*
 * kmemcheck - a heavyweight memory checker for the linux kernel
 * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
 * (With a lot of help from Ingo Molnar and Pekka Enberg.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2) as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/kmemcheck.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "error.h"
#include "opcode.h"
#include "pte.h"
#include "selftest.h"
#include "shadow.h"
#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
#  define KMEMCHECK_ENABLED 0
#endif

#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
#  define KMEMCHECK_ENABLED 1
#endif

#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
#  define KMEMCHECK_ENABLED 2
#endif

int kmemcheck_enabled = KMEMCHECK_ENABLED;
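/*
 * Note: kmemcheck_enabled doubles as a runtime mode selector: 0 = disabled,
 * 1 = enabled, 2 = one-shot (disable again after the first report) -- see
 * the "kmemcheck_enabled == 2" checks further down.
 */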
int __init kmemcheck_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * Limit SMP to use a single CPU. We rely on the fact that this code
	 * runs before SMP is set up.
	 */
	if (setup_max_cpus > 1) {
		printk(KERN_INFO
			"kmemcheck: Limiting number of CPUs to 1.\n");
		setup_max_cpus = 1;
	}
#endif

	if (!kmemcheck_selftest()) {
		printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
		kmemcheck_enabled = 0;
		return -EINVAL;
	}

	printk(KERN_INFO "kmemcheck: Initialized\n");
	return 0;
}

early_initcall(kmemcheck_init);
/*
 * We need to parse the kmemcheck= option before any memory is allocated.
 */
static int __init param_kmemcheck(char *str)
{
	if (!str)
		return -EINVAL;

	sscanf(str, "%d", &kmemcheck_enabled);
	return 0;
}

early_param("kmemcheck", param_kmemcheck);
int kmemcheck_show_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}
int kmemcheck_hide_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}
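/*
 * The two helpers above are the heart of the mechanism: tracked pages are
 * normally kept non-present so that every access faults. The #PF handler
 * temporarily sets _PAGE_PRESENT for the faulting address, the instruction
 * is single-stepped, and the #DB handler clears the bit again.
 */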
struct kmemcheck_context {
	bool busy;
	int balance;

	/*
	 * There can be at most two memory operands to an instruction, but
	 * each address can cross a page boundary -- so we may need up to
	 * four addresses that must be hidden/revealed for each fault.
	 */
	unsigned long addr[4];
	unsigned long n_addrs;
	unsigned long flags;

	/* Data size of the instruction that caused a fault. */
	unsigned int size;
};

static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
bool kmemcheck_active(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	return data->balance > 0;
}
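/*
 * data->balance counts outstanding show/hide pairs: kmemcheck_show() bumps
 * it after revealing the pages needed by a faulting instruction, and
 * kmemcheck_hide() drops it once the single-step trap has re-hidden them.
 */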
/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
	data->addr[data->n_addrs++] = addr;
}
static unsigned int kmemcheck_show_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_show_addr(data->addr[i]);

	return n;
}
static unsigned int kmemcheck_hide_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_hide_addr(data->addr[i]);

	return n;
}
/*
 * Called from the #PF handler.
 */
void kmemcheck_show(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 0)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->balance = 0;
		return;
	}

	/*
	 * None of the addresses actually belonged to kmemcheck. Note that
	 * this is not an error.
	 */
	if (kmemcheck_show_all() == 0)
		return;

	++data->balance;

	/*
	 * The IF needs to be cleared as well, so that the faulting
	 * instruction can run "uninterrupted". Otherwise, we might take
	 * an interrupt and start executing that before we've had a chance
	 * to hide the page again.
	 *
	 * NOTE: In the rare case of multiple faults, we must not override
	 * the original flags:
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		data->flags = regs->flags;

	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
}
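/*
 * Setting TF above is what makes kmemcheck_hide() run: the CPU raises #DB
 * after the faulting instruction has executed exactly once against the
 * now-present page(s).
 */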
/*
 * Called from the #DB handler.
 */
void kmemcheck_hide(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	int n;

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 1)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->n_addrs = 0;
		data->balance = 0;

		if (!(data->flags & X86_EFLAGS_TF))
			regs->flags &= ~X86_EFLAGS_TF;
		if (data->flags & X86_EFLAGS_IF)
			regs->flags |= X86_EFLAGS_IF;
		return;
	}

	if (kmemcheck_enabled)
		n = kmemcheck_hide_all();
	else
		n = kmemcheck_show_all();

	if (n == 0)
		return;

	--data->balance;

	data->n_addrs = 0;

	if (!(data->flags & X86_EFLAGS_TF))
		regs->flags &= ~X86_EFLAGS_TF;
	if (data->flags & X86_EFLAGS_IF)
		regs->flags |= X86_EFLAGS_IF;
}
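/*
 * Note the kmemcheck_enabled test above: if checking has been switched off
 * at runtime (e.g. by one-shot mode), the pages are deliberately left
 * present rather than re-hidden, so no further faults are taken on them.
 */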
void kmemcheck_show_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}
bool kmemcheck_page_is_tracked(struct page *p)
{
	/* This will also check the "hidden" flag of the PTE. */
	return kmemcheck_pte_lookup((unsigned long) page_address(p));
}
void kmemcheck_hide_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}
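/*
 * _PAGE_HIDDEN is what marks a PTE as belonging to kmemcheck; it is
 * presumably what kmemcheck_pte_lookup() keys off so that ordinary
 * not-present faults are not mistaken for kmemcheck-tracked pages.
 */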
/* Access may NOT cross page boundary */
static void kmemcheck_read_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;
	enum kmemcheck_shadow status;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;

	/* Don't warn about it again. */
	kmemcheck_shadow_set(shadow, size);
}
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	enum kmemcheck_shadow status;
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return true;

	status = kmemcheck_shadow_test_all(shadow, size);

	return status == KMEMCHECK_SHADOW_INITIALIZED;
}
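/*
 * Note that an object without shadow memory (i.e. one that does not live in
 * a tracked page) is reported as initialized here; only tracked allocations
 * can be queried meaningfully.
 */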
/* Access may cross page boundary */
static void kmemcheck_read(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_read_strict(regs, addr, size);
		return;
	}

	/*
	 * What we do is basically to split the access across the
	 * two pages and handle each part separately. Yes, this means
	 * that we may now see reads that are 3 + 5 bytes, for
	 * example (and if both are uninitialized, there will be two
	 * reports), but it makes the code a lot simpler.
	 */
	kmemcheck_read_strict(regs, addr, next_page - addr);
	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
}
static void kmemcheck_write_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	kmemcheck_shadow_set(shadow, size);
}
static void kmemcheck_write(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_write_strict(regs, addr, size);
		return;
	}

	/* See comment in kmemcheck_read(). */
	kmemcheck_write_strict(regs, addr, next_page - addr);
	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
}
/*
 * Copying is hard. We have two addresses, each of which may be split across
 * a page (and each page will have different shadow addresses).
 */
static void kmemcheck_copy(struct pt_regs *regs,
	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
{
	uint8_t shadow[8];
	enum kmemcheck_shadow status;

	unsigned long page;
	unsigned long next_addr;
	unsigned long next_page;

	uint8_t *x;
	unsigned int i;
	unsigned int n;

	BUG_ON(size > sizeof(shadow));

	page = src_addr & PAGE_MASK;
	next_addr = src_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < size; ++i)
				shadow[i] = x[i];
		} else {
			for (i = 0; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	} else {
		n = next_page - src_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < n; ++i)
				shadow[i] = x[i];
		} else {
			/* Not tracked */
			for (i = 0; i < n; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i)
				shadow[i] = x[i - n];
		} else {
			/* Not tracked */
			for (i = n; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	}

	page = dst_addr & PAGE_MASK;
	next_addr = dst_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < size; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	} else {
		n = next_page - dst_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < n; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i) {
				x[i - n] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	}

	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, src_addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;
}
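/*
 * Net effect of the above: wherever the destination is tracked, the
 * source's shadow bytes are propagated to it (and nothing is reported
 * here); only bytes whose destination is untracked keep their shadow value
 * in the local buffer and are tested at the end, so uninitialized data gets
 * reported when it escapes the tracked pages.
 */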
enum kmemcheck_method {
	KMEMCHECK_READ,
	KMEMCHECK_WRITE,
};

static void kmemcheck_access(struct pt_regs *regs,
	unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
	const uint8_t *insn;
	const uint8_t *insn_primary;
	unsigned int size;

	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	/* Recursive fault -- ouch. */
	if (data->busy) {
		kmemcheck_show_addr(fallback_address);
		kmemcheck_error_save_bug(regs);
		return;
	}

	data->busy = true;

	insn = (const uint8_t *) regs->ip;
	insn_primary = kmemcheck_opcode_get_primary(insn);

	kmemcheck_opcode_decode(insn, &size);

	switch (insn_primary[0]) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
		/* AND, OR, XOR */
		/*
		 * Unfortunately, these instructions have to be excluded from
		 * our regular checking since they access only some (and not
		 * all) bits. This clears out "bogus" bitfield-access warnings.
		 */
	case 0x80:
	case 0x81:
	case 0x82:
	case 0x83:
		switch ((insn_primary[1] >> 3) & 7) {
			/* OR */
		case 1:
			/* AND */
		case 4:
			/* XOR */
		case 6:
			kmemcheck_write(regs, fallback_address, size);
			goto out;

			/* ADD */
		case 0:
			/* ADC */
		case 2:
			/* SBB */
		case 3:
			/* SUB */
		case 5:
			/* CMP */
		case 7:
			break;
		}
		break;
#endif

		/* MOVS, MOVSB, MOVSW, MOVSD */
	case 0xa4:
	case 0xa5:
		/*
		 * These instructions are special because they take two
		 * addresses, but we only get one page fault.
		 */
		kmemcheck_copy(regs, regs->si, regs->di, size);
		goto out;

		/* CMPS, CMPSB, CMPSW, CMPSD */
	case 0xa6:
	case 0xa7:
		kmemcheck_read(regs, regs->si, size);
		kmemcheck_read(regs, regs->di, size);
		goto out;
	}

	/*
	 * If the opcode isn't special in any way, we use the data from the
	 * page fault handler to determine the address and type of memory
	 * access.
	 */
	switch (fallback_method) {
	case KMEMCHECK_READ:
		kmemcheck_read(regs, fallback_address, size);
		goto out;

	case KMEMCHECK_WRITE:
		kmemcheck_write(regs, fallback_address, size);
		goto out;
	}

out:
	data->busy = false;
}
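/*
 * The data->busy flag above is what catches recursion: if decoding or
 * checking the instruction itself faults on a tracked page, the nested
 * invocation bails out early, reveals the page and records a bug report
 * instead of recursing indefinitely.
 */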
bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
	unsigned long error_code)
{
	pte_t *pte;

	/*
	 * XXX: Is it safe to assume that memory accesses from virtual 86
	 * mode or non-kernel code segments will _never_ access kernel
	 * memory (e.g. tracked pages)? For now, we need this to avoid
	 * invoking kmemcheck for PnP BIOS calls.
	 */
	if (regs->flags & X86_VM_MASK)
		return false;
	if (regs->cs != __KERNEL_CS)
		return false;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return false;

	WARN_ON_ONCE(in_nmi());

	if (error_code & 2)
		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
	else
		kmemcheck_access(regs, address, KMEMCHECK_READ);

	kmemcheck_show(regs);
	return true;
}
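/*
 * Bit 1 of the x86 page-fault error code is set for write accesses, which
 * is what the "error_code & 2" test above keys off.
 */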
bool kmemcheck_trap(struct pt_regs *regs)
{
	if (!kmemcheck_active(regs))
		return false;

	/* We're done. */
	kmemcheck_hide(regs);
	return true;
}