/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

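/*
 * Illustrative sketch of the lookup/lifetime pattern described above, as
 * implemented by find_and_get_object() below; shown here only for
 * exposition:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(under kmemleak_lock)
 *	if (object && !get_object(object))	(use_count already 0)
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		(may schedule RCU freeing)
 */
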
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN,
	KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	int min_count;			/* minimum reference count */
	const void *ptr;		/* allocated/freed memory block */
	union {
		size_t size;		/* memory block size */
		unsigned long excess_ref; /* surplus reference passing */
	};
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

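/*
 * Worked example of the encoding above (illustrative values only): an
 * object with min_count == 1 and count == 0 is white (suspected leak); the
 * same object once two pointers to it are found during a scan (count == 2)
 * is gray; an object painted with min_count == KMEMLEAK_BLACK (-1) matches
 * neither predicate and is simply ignored.
 */
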
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

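/*
 * For reference, a report produced by the seq_printf() calls above looks
 * roughly like this (the address, pid, jiffies and symbols below are made
 * up for illustration):
 *
 *	unreferenced object 0xffff880019da1000 (size 64):
 *	  comm "modprobe", pid 390, jiffies 4294953608 (age 20.048s)
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace:
 *	    [<ffffffff811d4a45>] kmem_cache_alloc+0x125/0x270
 *	    ...
 */
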
/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

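/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * custom pool allocator handing out blocks that kmemleak cannot see via
 * the slab hooks would register and unregister them explicitly. The
 * my_pool_alloc()/my_pool_carve() names below are made up:
 *
 *	void *my_pool_alloc(struct my_pool *pool, size_t size)
 *	{
 *		void *p = my_pool_carve(pool, size);
 *		if (p)
 *			kmemleak_alloc(p, size, 1, GFP_KERNEL);
 *		return p;
 *	}
 *
 * with a matching kmemleak_free(p) in the pool's free path. min_count == 1
 * means the block is reported unless at least one pointer to it is found.
 */
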
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	} else if (kmemleak_early_log) {
		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
		/* reusing early_log.size for storing area->addr */
		log_early(KMEMLEAK_SET_EXCESS_REF,
			  area, (unsigned long)area->addr, 0);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

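/*
 * Why min_count == 2 plus an excess_ref above (illustrative reasoning):
 * every vmalloc'ed block is reachable through both the caller's pointer
 * and the vm_struct's area->addr field, so one reference is always found
 * and must not count towards "referenced". Conversely, code such as
 * free_thread_stack() may keep only the vm_struct pointer; the excess_ref
 * link makes a surplus reference to the vm_struct also mark area->addr as
 * referenced, avoiding a false positive.
 */
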
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak while still being scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

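/*
 * Illustrative use of the two annotations above (hypothetical caller): a
 * driver keeping its only reference to an allocation in a hardware
 * register, a physical address or similarly opaque storage would silence
 * the false positive with:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	kmemleak_not_leak(obj);		(obj is still scanned for pointers)
 *
 * whereas kmemleak_ignore(obj) is the stronger form for blocks known to
 * contain no pointers at all, e.g. buffers holding raw device data.
 */
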
/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

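/*
 * Illustrative sketch (hypothetical structure, assuming 'next' and 'data'
 * are laid out adjacently): if only those two members of a large object can
 * hold pointers, the rest being a byte buffer whose contents may alias
 * valid addresses, the caller can restrict scanning to the pointer-bearing
 * region:
 *
 *	struct big_obj {
 *		struct big_obj *next;
 *		void *data;
 *		u8 payload[4096];
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(&obj->next, 2 * sizeof(void *), GFP_KERNEL);
 */
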
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			spin_unlock(&object->lock);
		}
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);
	scan_large_block(__start_ro_after_init, __end_ro_after_init);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 *
 * (See the usage sketch after kmemleak_fops below.)
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

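/*
 * Usage sketch for the debugfs interface above (shell commands, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# trigger an immediate scan and read the report
 *	echo scan > /sys/kernel/debug/kmemleak
 *	cat /sys/kernel/debug/kmemleak
 *
 *	# mark the current suspects as seen, then dump a single object
 *	echo clear > /sys/kernel/debug/kmemleak
 *	echo dump=0xffff880019da1000 > /sys/kernel/debug/kmemleak
 *
 * The address in the dump= example is made up; use one printed in a report.
 */
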
static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and, if no leaks were found, free the
 * kmemleak internal objects (otherwise kmemleak still holds useful
 * information on the memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	/*
	 * Once the scan thread has stopped, it is safe to no longer track
	 * object freeing. Ordering of the scan thread stopping and the memory
	 * accesses below is guaranteed by the kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

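/*
 * Example kernel command lines for the parameter above:
 *
 *	kmemleak=off	disable kmemleak at boot
 *	kmemleak=on	keep kmemleak enabled even when the kernel was built
 *			with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see
 *			kmemleak_init() below)
 */
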
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);