lib/debugobjects.c
1 /*
2 * Generic infrastructure for lifetime debugging of objects.
4 * Started by Thomas Gleixner
6 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 8 * For licensing details see kernel-base/COPYING
9 */
11 #define pr_fmt(fmt) "ODEBUG: " fmt
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
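/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * "foo_timer" subsystem would describe its objects to this tracker with a
 * struct debug_obj_descr and optional fixup callbacks. All foo_* names
 * below are made up for illustration only.
 *
 *	static struct debug_obj_descr foo_timer_debug_descr;
 *
 *	static bool foo_timer_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo_timer *t = addr;
 *
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			foo_timer_cancel(t);
 *			debug_object_free(t, &foo_timer_debug_descr);
 *			return true;
 *		}
 *		return false;
 *	}
 *
 *	static struct debug_obj_descr foo_timer_debug_descr = {
 *		.name		= "foo_timer",
 *		.fixup_free	= foo_timer_fixup_free,
 *	};
 */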
23 #define ODEBUG_HASH_BITS 14
24 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
26 #define ODEBUG_POOL_SIZE 1024
27 #define ODEBUG_POOL_MIN_LEVEL 256
29 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
33 struct debug_bucket {
34 struct hlist_head list;
35 raw_spinlock_t lock;
38 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
40 static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
42 static DEFINE_RAW_SPINLOCK(pool_lock);
44 static HLIST_HEAD(obj_pool);
45 static HLIST_HEAD(obj_to_free);
47 static int obj_pool_min_free = ODEBUG_POOL_SIZE;
48 static int obj_pool_free = ODEBUG_POOL_SIZE;
49 static int obj_pool_used;
50 static int obj_pool_max_used;
51 /* The number of objs on the global free list */
52 static int obj_nr_tofree;
53 static struct kmem_cache *obj_cache;
55 static int debug_objects_maxchain __read_mostly;
56 static int __maybe_unused debug_objects_maxchecked __read_mostly;
57 static int debug_objects_fixups __read_mostly;
58 static int debug_objects_warnings __read_mostly;
59 static int debug_objects_enabled __read_mostly
60 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
61 static int debug_objects_pool_size __read_mostly
62 = ODEBUG_POOL_SIZE;
63 static int debug_objects_pool_min_level __read_mostly
64 = ODEBUG_POOL_MIN_LEVEL;
65 static struct debug_obj_descr *descr_test __read_mostly;
 68 * Track the number of kmem_cache_alloc()/free() calls done.
70 static int debug_objects_allocated;
71 static int debug_objects_freed;
73 static void free_obj_work(struct work_struct *work);
74 static DECLARE_WORK(debug_obj_work, free_obj_work);
76 static int __init enable_object_debug(char *str)
78 debug_objects_enabled = 1;
79 return 0;
82 static int __init disable_object_debug(char *str)
84 debug_objects_enabled = 0;
85 return 0;
88 early_param("debug_objects", enable_object_debug);
89 early_param("no_debug_objects", disable_object_debug);
91 static const char *obj_states[ODEBUG_STATE_MAX] = {
92 [ODEBUG_STATE_NONE] = "none",
93 [ODEBUG_STATE_INIT] = "initialized",
94 [ODEBUG_STATE_INACTIVE] = "inactive",
95 [ODEBUG_STATE_ACTIVE] = "active",
96 [ODEBUG_STATE_DESTROYED] = "destroyed",
97 [ODEBUG_STATE_NOTAVAILABLE] = "not available",
100 static void fill_pool(void)
102 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
103 struct debug_obj *new, *obj;
104 unsigned long flags;
106 if (likely(obj_pool_free >= debug_objects_pool_min_level))
107 return;
110 * Reuse objs from the global free list; they will be reinitialized
111 * when allocating.
113 while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
114 raw_spin_lock_irqsave(&pool_lock, flags);
116 * Recheck with the lock held as the worker thread might have
117 * won the race and freed the global free list already.
119 if (obj_nr_tofree) {
120 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
121 hlist_del(&obj->node);
122 obj_nr_tofree--;
123 hlist_add_head(&obj->node, &obj_pool);
124 obj_pool_free++;
126 raw_spin_unlock_irqrestore(&pool_lock, flags);
129 if (unlikely(!obj_cache))
130 return;
132 while (obj_pool_free < debug_objects_pool_min_level) {
134 new = kmem_cache_zalloc(obj_cache, gfp);
135 if (!new)
136 return;
138 kmemleak_ignore(new);
139 raw_spin_lock_irqsave(&pool_lock, flags);
140 hlist_add_head(&new->node, &obj_pool);
141 debug_objects_allocated++;
142 obj_pool_free++;
143 raw_spin_unlock_irqrestore(&pool_lock, flags);
 148 * Look up an object in the hash bucket.
150 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
152 struct debug_obj *obj;
153 int cnt = 0;
155 hlist_for_each_entry(obj, &b->list, node) {
156 cnt++;
157 if (obj->object == addr)
158 return obj;
160 if (cnt > debug_objects_maxchain)
161 debug_objects_maxchain = cnt;
163 return NULL;
167 * Allocate a new object. If the pool is empty, switch off the debugger.
168 * Must be called with interrupts disabled.
170 static struct debug_obj *
171 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
173 struct debug_obj *obj = NULL;
175 raw_spin_lock(&pool_lock);
176 if (obj_pool.first) {
177 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
179 obj->object = addr;
180 obj->descr = descr;
181 obj->state = ODEBUG_STATE_NONE;
182 obj->astate = 0;
183 hlist_del(&obj->node);
185 hlist_add_head(&obj->node, &b->list);
187 obj_pool_used++;
188 if (obj_pool_used > obj_pool_max_used)
189 obj_pool_max_used = obj_pool_used;
191 obj_pool_free--;
192 if (obj_pool_free < obj_pool_min_free)
193 obj_pool_min_free = obj_pool_free;
195 raw_spin_unlock(&pool_lock);
197 return obj;
201 * workqueue function to free objects.
203 * To reduce contention on the global pool_lock, the actual freeing of
204 * debug objects will be delayed if the pool_lock is busy.
206 static void free_obj_work(struct work_struct *work)
208 struct hlist_node *tmp;
209 struct debug_obj *obj;
210 unsigned long flags;
211 HLIST_HEAD(tofree);
213 if (!raw_spin_trylock_irqsave(&pool_lock, flags))
214 return;
 217 * The objs on the pool list might be allocated before the work is
 218 * run, so recheck whether the pool list is full or not; if not, refill
 219 * the pool list from the global free list.
221 while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
222 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
223 hlist_del(&obj->node);
224 hlist_add_head(&obj->node, &obj_pool);
225 obj_pool_free++;
226 obj_nr_tofree--;
230 * Pool list is already full and there are still objs on the free
 231 * list. Move the remaining free objs to a temporary list so the
 232 * memory can be freed outside the region where pool_lock is held.
234 if (obj_nr_tofree) {
235 hlist_move_list(&obj_to_free, &tofree);
236 debug_objects_freed += obj_nr_tofree;
237 obj_nr_tofree = 0;
239 raw_spin_unlock_irqrestore(&pool_lock, flags);
241 hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
242 hlist_del(&obj->node);
243 kmem_cache_free(obj_cache, obj);
247 static bool __free_object(struct debug_obj *obj)
249 unsigned long flags;
250 bool work;
252 raw_spin_lock_irqsave(&pool_lock, flags);
253 work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
254 obj_pool_used--;
256 if (work) {
257 obj_nr_tofree++;
258 hlist_add_head(&obj->node, &obj_to_free);
259 } else {
260 obj_pool_free++;
261 hlist_add_head(&obj->node, &obj_pool);
263 raw_spin_unlock_irqrestore(&pool_lock, flags);
264 return work;
268 * Put the object back into the pool and schedule work to free objects
269 * if necessary.
271 static void free_object(struct debug_obj *obj)
273 if (__free_object(obj))
274 schedule_work(&debug_obj_work);
 278 * We ran out of memory. That means we probably have tons of objects
279 * allocated.
281 static void debug_objects_oom(void)
283 struct debug_bucket *db = obj_hash;
284 struct hlist_node *tmp;
285 HLIST_HEAD(freelist);
286 struct debug_obj *obj;
287 unsigned long flags;
288 int i;
290 pr_warn("Out of memory. ODEBUG disabled\n");
292 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
293 raw_spin_lock_irqsave(&db->lock, flags);
294 hlist_move_list(&db->list, &freelist);
295 raw_spin_unlock_irqrestore(&db->lock, flags);
297 /* Now free them */
298 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
299 hlist_del(&obj->node);
300 free_object(obj);
306 * We use the pfn of the address for the hash. That way we can check
307 * for freed objects simply by checking the affected bucket.
309 static struct debug_bucket *get_bucket(unsigned long addr)
311 unsigned long hash;
313 hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
314 return &obj_hash[hash];
317 static void debug_print_object(struct debug_obj *obj, char *msg)
319 struct debug_obj_descr *descr = obj->descr;
320 static int limit;
322 if (limit < 5 && descr != descr_test) {
323 void *hint = descr->debug_hint ?
324 descr->debug_hint(obj->object) : NULL;
325 limit++;
326 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
327 "object type: %s hint: %pS\n",
328 msg, obj_states[obj->state], obj->astate,
329 descr->name, hint);
331 debug_objects_warnings++;
335 * Try to repair the damage, so we have a better chance to get useful
336 * debug output.
338 static bool
339 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
340 void * addr, enum debug_obj_state state)
342 if (fixup && fixup(addr, state)) {
343 debug_objects_fixups++;
344 return true;
346 return false;
349 static void debug_object_is_on_stack(void *addr, int onstack)
351 int is_on_stack;
352 static int limit;
354 if (limit > 4)
355 return;
357 is_on_stack = object_is_on_stack(addr);
358 if (is_on_stack == onstack)
359 return;
361 limit++;
362 if (is_on_stack)
363 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
364 task_stack_page(current));
365 else
366 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
367 task_stack_page(current));
369 WARN_ON(1);
372 static void
373 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
375 enum debug_obj_state state;
376 struct debug_bucket *db;
377 struct debug_obj *obj;
378 unsigned long flags;
380 fill_pool();
382 db = get_bucket((unsigned long) addr);
384 raw_spin_lock_irqsave(&db->lock, flags);
386 obj = lookup_object(addr, db);
387 if (!obj) {
388 obj = alloc_object(addr, db, descr);
389 if (!obj) {
390 debug_objects_enabled = 0;
391 raw_spin_unlock_irqrestore(&db->lock, flags);
392 debug_objects_oom();
393 return;
395 debug_object_is_on_stack(addr, onstack);
398 switch (obj->state) {
399 case ODEBUG_STATE_NONE:
400 case ODEBUG_STATE_INIT:
401 case ODEBUG_STATE_INACTIVE:
402 obj->state = ODEBUG_STATE_INIT;
403 break;
405 case ODEBUG_STATE_ACTIVE:
406 debug_print_object(obj, "init");
407 state = obj->state;
408 raw_spin_unlock_irqrestore(&db->lock, flags);
409 debug_object_fixup(descr->fixup_init, addr, state);
410 return;
412 case ODEBUG_STATE_DESTROYED:
413 debug_print_object(obj, "init");
414 break;
415 default:
416 break;
419 raw_spin_unlock_irqrestore(&db->lock, flags);
423 * debug_object_init - debug checks when an object is initialized
424 * @addr: address of the object
425 * @descr: pointer to an object specific debug description structure
427 void debug_object_init(void *addr, struct debug_obj_descr *descr)
429 if (!debug_objects_enabled)
430 return;
432 __debug_object_init(addr, descr, 0);
434 EXPORT_SYMBOL_GPL(debug_object_init);
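/*
 * Illustrative usage, an assumption rather than code from this file: a
 * subsystem would call debug_object_init() from its own object setup
 * helper, before the object can become active. The foo_* names are
 * hypothetical.
 *
 *	void foo_timer_setup(struct foo_timer *t)
 *	{
 *		debug_object_init(t, &foo_timer_debug_descr);
 *		...
 *	}
 */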
437 * debug_object_init_on_stack - debug checks when an object on stack is
438 * initialized
439 * @addr: address of the object
440 * @descr: pointer to an object specific debug description structure
442 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
444 if (!debug_objects_enabled)
445 return;
447 __debug_object_init(addr, descr, 1);
449 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
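/*
 * Illustrative usage, an assumption: objects living on the kernel stack
 * must be initialized with the _on_stack variant and must be removed from
 * the tracker (e.g. via debug_object_free()) before the owning function
 * returns, otherwise a stale entry is left behind. Hypothetical sketch:
 *
 *	void foo_wait_for_completion(void)
 *	{
 *		struct foo_waiter w;
 *
 *		debug_object_init_on_stack(&w, &foo_waiter_debug_descr);
 *		...
 *		debug_object_free(&w, &foo_waiter_debug_descr);
 *	}
 */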
452 * debug_object_activate - debug checks when an object is activated
453 * @addr: address of the object
454 * @descr: pointer to an object specific debug description structure
455 * Returns 0 for success, -EINVAL for check failed.
457 int debug_object_activate(void *addr, struct debug_obj_descr *descr)
459 enum debug_obj_state state;
460 struct debug_bucket *db;
461 struct debug_obj *obj;
462 unsigned long flags;
463 int ret;
464 struct debug_obj o = { .object = addr,
465 .state = ODEBUG_STATE_NOTAVAILABLE,
466 .descr = descr };
468 if (!debug_objects_enabled)
469 return 0;
471 db = get_bucket((unsigned long) addr);
473 raw_spin_lock_irqsave(&db->lock, flags);
475 obj = lookup_object(addr, db);
476 if (obj) {
477 switch (obj->state) {
478 case ODEBUG_STATE_INIT:
479 case ODEBUG_STATE_INACTIVE:
480 obj->state = ODEBUG_STATE_ACTIVE;
481 ret = 0;
482 break;
484 case ODEBUG_STATE_ACTIVE:
485 debug_print_object(obj, "activate");
486 state = obj->state;
487 raw_spin_unlock_irqrestore(&db->lock, flags);
488 ret = debug_object_fixup(descr->fixup_activate, addr, state);
489 return ret ? 0 : -EINVAL;
491 case ODEBUG_STATE_DESTROYED:
492 debug_print_object(obj, "activate");
493 ret = -EINVAL;
494 break;
495 default:
496 ret = 0;
497 break;
499 raw_spin_unlock_irqrestore(&db->lock, flags);
500 return ret;
503 raw_spin_unlock_irqrestore(&db->lock, flags);
505 * We are here when a static object is activated. We
506 * let the type specific code confirm whether this is
 507 * true or not. If true, we just make sure that the
508 * static object is tracked in the object tracker. If
509 * not, this must be a bug, so we try to fix it up.
511 if (descr->is_static_object && descr->is_static_object(addr)) {
512 /* track this static object */
513 debug_object_init(addr, descr);
514 debug_object_activate(addr, descr);
515 } else {
516 debug_print_object(&o, "activate");
517 ret = debug_object_fixup(descr->fixup_activate, addr,
518 ODEBUG_STATE_NOTAVAILABLE);
519 return ret ? 0 : -EINVAL;
521 return 0;
523 EXPORT_SYMBOL_GPL(debug_object_activate);
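/*
 * Illustrative usage, an assumption: since the return value is 0 on
 * success and -EINVAL when the sanity check failed, callers that want to
 * refuse activating a broken object can do e.g.:
 *
 *	int foo_timer_start(struct foo_timer *t)
 *	{
 *		if (debug_object_activate(t, &foo_timer_debug_descr))
 *			return -EINVAL;
 *		...
 *		return 0;
 *	}
 */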
526 * debug_object_deactivate - debug checks when an object is deactivated
527 * @addr: address of the object
528 * @descr: pointer to an object specific debug description structure
530 void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
532 struct debug_bucket *db;
533 struct debug_obj *obj;
534 unsigned long flags;
536 if (!debug_objects_enabled)
537 return;
539 db = get_bucket((unsigned long) addr);
541 raw_spin_lock_irqsave(&db->lock, flags);
543 obj = lookup_object(addr, db);
544 if (obj) {
545 switch (obj->state) {
546 case ODEBUG_STATE_INIT:
547 case ODEBUG_STATE_INACTIVE:
548 case ODEBUG_STATE_ACTIVE:
549 if (!obj->astate)
550 obj->state = ODEBUG_STATE_INACTIVE;
551 else
552 debug_print_object(obj, "deactivate");
553 break;
555 case ODEBUG_STATE_DESTROYED:
556 debug_print_object(obj, "deactivate");
557 break;
558 default:
559 break;
561 } else {
562 struct debug_obj o = { .object = addr,
563 .state = ODEBUG_STATE_NOTAVAILABLE,
564 .descr = descr };
566 debug_print_object(&o, "deactivate");
569 raw_spin_unlock_irqrestore(&db->lock, flags);
571 EXPORT_SYMBOL_GPL(debug_object_deactivate);
574 * debug_object_destroy - debug checks when an object is destroyed
575 * @addr: address of the object
576 * @descr: pointer to an object specific debug description structure
578 void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
580 enum debug_obj_state state;
581 struct debug_bucket *db;
582 struct debug_obj *obj;
583 unsigned long flags;
585 if (!debug_objects_enabled)
586 return;
588 db = get_bucket((unsigned long) addr);
590 raw_spin_lock_irqsave(&db->lock, flags);
592 obj = lookup_object(addr, db);
593 if (!obj)
594 goto out_unlock;
596 switch (obj->state) {
597 case ODEBUG_STATE_NONE:
598 case ODEBUG_STATE_INIT:
599 case ODEBUG_STATE_INACTIVE:
600 obj->state = ODEBUG_STATE_DESTROYED;
601 break;
602 case ODEBUG_STATE_ACTIVE:
603 debug_print_object(obj, "destroy");
604 state = obj->state;
605 raw_spin_unlock_irqrestore(&db->lock, flags);
606 debug_object_fixup(descr->fixup_destroy, addr, state);
607 return;
609 case ODEBUG_STATE_DESTROYED:
610 debug_print_object(obj, "destroy");
611 break;
612 default:
613 break;
615 out_unlock:
616 raw_spin_unlock_irqrestore(&db->lock, flags);
618 EXPORT_SYMBOL_GPL(debug_object_destroy);
621 * debug_object_free - debug checks when an object is freed
622 * @addr: address of the object
623 * @descr: pointer to an object specific debug description structure
625 void debug_object_free(void *addr, struct debug_obj_descr *descr)
627 enum debug_obj_state state;
628 struct debug_bucket *db;
629 struct debug_obj *obj;
630 unsigned long flags;
632 if (!debug_objects_enabled)
633 return;
635 db = get_bucket((unsigned long) addr);
637 raw_spin_lock_irqsave(&db->lock, flags);
639 obj = lookup_object(addr, db);
640 if (!obj)
641 goto out_unlock;
643 switch (obj->state) {
644 case ODEBUG_STATE_ACTIVE:
645 debug_print_object(obj, "free");
646 state = obj->state;
647 raw_spin_unlock_irqrestore(&db->lock, flags);
648 debug_object_fixup(descr->fixup_free, addr, state);
649 return;
650 default:
651 hlist_del(&obj->node);
652 raw_spin_unlock_irqrestore(&db->lock, flags);
653 free_object(obj);
654 return;
656 out_unlock:
657 raw_spin_unlock_irqrestore(&db->lock, flags);
659 EXPORT_SYMBOL_GPL(debug_object_free);
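/*
 * Illustrative usage, an assumption: debug_object_free() is typically the
 * last tracker call in a teardown path, issued right before the real
 * object is handed back to its allocator:
 *
 *	void foo_timer_destroy(struct foo_timer *t)
 *	{
 *		foo_timer_cancel(t);
 *		debug_object_free(t, &foo_timer_debug_descr);
 *		kfree(t);
 *	}
 */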
662 * debug_object_assert_init - debug checks when object should be init-ed
663 * @addr: address of the object
664 * @descr: pointer to an object specific debug description structure
666 void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
668 struct debug_bucket *db;
669 struct debug_obj *obj;
670 unsigned long flags;
672 if (!debug_objects_enabled)
673 return;
675 db = get_bucket((unsigned long) addr);
677 raw_spin_lock_irqsave(&db->lock, flags);
679 obj = lookup_object(addr, db);
680 if (!obj) {
681 struct debug_obj o = { .object = addr,
682 .state = ODEBUG_STATE_NOTAVAILABLE,
683 .descr = descr };
685 raw_spin_unlock_irqrestore(&db->lock, flags);
687 * Maybe the object is static, and we let the type specific
688 * code confirm. Track this static object if true, else invoke
689 * fixup.
691 if (descr->is_static_object && descr->is_static_object(addr)) {
692 /* Track this static object */
693 debug_object_init(addr, descr);
694 } else {
695 debug_print_object(&o, "assert_init");
696 debug_object_fixup(descr->fixup_assert_init, addr,
697 ODEBUG_STATE_NOTAVAILABLE);
699 return;
702 raw_spin_unlock_irqrestore(&db->lock, flags);
704 EXPORT_SYMBOL_GPL(debug_object_assert_init);
707 * debug_object_active_state - debug checks object usage state machine
708 * @addr: address of the object
709 * @descr: pointer to an object specific debug description structure
710 * @expect: expected state
711 * @next: state to move to if expected state is found
713 void
714 debug_object_active_state(void *addr, struct debug_obj_descr *descr,
715 unsigned int expect, unsigned int next)
717 struct debug_bucket *db;
718 struct debug_obj *obj;
719 unsigned long flags;
721 if (!debug_objects_enabled)
722 return;
724 db = get_bucket((unsigned long) addr);
726 raw_spin_lock_irqsave(&db->lock, flags);
728 obj = lookup_object(addr, db);
729 if (obj) {
730 switch (obj->state) {
731 case ODEBUG_STATE_ACTIVE:
732 if (obj->astate == expect)
733 obj->astate = next;
734 else
735 debug_print_object(obj, "active_state");
736 break;
738 default:
739 debug_print_object(obj, "active_state");
740 break;
742 } else {
743 struct debug_obj o = { .object = addr,
744 .state = ODEBUG_STATE_NOTAVAILABLE,
745 .descr = descr };
747 debug_print_object(&o, "active_state");
750 raw_spin_unlock_irqrestore(&db->lock, flags);
752 EXPORT_SYMBOL_GPL(debug_object_active_state);
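/*
 * Illustrative usage, an assumption: a subsystem that keeps its own
 * sub-states of ODEBUG_STATE_ACTIVE in obj->astate can enforce a legal
 * transition with made-up values FOO_ASTATE_QUEUED/FOO_ASTATE_RUNNING:
 *
 *	debug_object_active_state(w, &foo_work_debug_descr,
 *				  FOO_ASTATE_QUEUED, FOO_ASTATE_RUNNING);
 */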
754 #ifdef CONFIG_DEBUG_OBJECTS_FREE
755 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
757 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
758 struct debug_obj_descr *descr;
759 enum debug_obj_state state;
760 struct debug_bucket *db;
761 struct hlist_node *tmp;
762 struct debug_obj *obj;
763 int cnt, objs_checked = 0;
764 bool work = false;
766 saddr = (unsigned long) address;
767 eaddr = saddr + size;
768 paddr = saddr & ODEBUG_CHUNK_MASK;
769 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
770 chunks >>= ODEBUG_CHUNK_SHIFT;
772 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
773 db = get_bucket(paddr);
775 repeat:
776 cnt = 0;
777 raw_spin_lock_irqsave(&db->lock, flags);
778 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
779 cnt++;
780 oaddr = (unsigned long) obj->object;
781 if (oaddr < saddr || oaddr >= eaddr)
782 continue;
784 switch (obj->state) {
785 case ODEBUG_STATE_ACTIVE:
786 debug_print_object(obj, "free");
787 descr = obj->descr;
788 state = obj->state;
789 raw_spin_unlock_irqrestore(&db->lock, flags);
790 debug_object_fixup(descr->fixup_free,
791 (void *) oaddr, state);
792 goto repeat;
793 default:
794 hlist_del(&obj->node);
795 work |= __free_object(obj);
796 break;
799 raw_spin_unlock_irqrestore(&db->lock, flags);
801 if (cnt > debug_objects_maxchain)
802 debug_objects_maxchain = cnt;
804 objs_checked += cnt;
807 if (objs_checked > debug_objects_maxchecked)
808 debug_objects_maxchecked = objs_checked;
810 /* Schedule work to actually kmem_cache_free() objects */
811 if (work)
812 schedule_work(&debug_obj_work);
815 void debug_check_no_obj_freed(const void *address, unsigned long size)
817 if (debug_objects_enabled)
818 __debug_check_no_obj_freed(address, size);
820 #endif
822 #ifdef CONFIG_DEBUG_FS
824 static int debug_stats_show(struct seq_file *m, void *v)
826 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
827 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
828 seq_printf(m, "warnings :%d\n", debug_objects_warnings);
829 seq_printf(m, "fixups :%d\n", debug_objects_fixups);
830 seq_printf(m, "pool_free :%d\n", obj_pool_free);
831 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
832 seq_printf(m, "pool_used :%d\n", obj_pool_used);
833 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
834 seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
835 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
836 seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
837 return 0;
840 static int debug_stats_open(struct inode *inode, struct file *filp)
842 return single_open(filp, debug_stats_show, NULL);
845 static const struct file_operations debug_stats_fops = {
846 .open = debug_stats_open,
847 .read = seq_read,
848 .llseek = seq_lseek,
849 .release = single_release,
852 static int __init debug_objects_init_debugfs(void)
854 struct dentry *dbgdir, *dbgstats;
856 if (!debug_objects_enabled)
857 return 0;
859 dbgdir = debugfs_create_dir("debug_objects", NULL);
860 if (!dbgdir)
861 return -ENOMEM;
863 dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
864 &debug_stats_fops);
865 if (!dbgstats)
866 goto err;
868 return 0;
870 err:
871 debugfs_remove(dbgdir);
873 return -ENOMEM;
875 __initcall(debug_objects_init_debugfs);
877 #else
878 static inline void debug_objects_init_debugfs(void) { }
879 #endif
881 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
883 /* Random data structure for the self test */
884 struct self_test {
885 unsigned long dummy1[6];
886 int static_init;
887 unsigned long dummy2[3];
890 static __initdata struct debug_obj_descr descr_type_test;
892 static bool __init is_static_object(void *addr)
894 struct self_test *obj = addr;
896 return obj->static_init;
900 * fixup_init is called when:
901 * - an active object is initialized
903 static bool __init fixup_init(void *addr, enum debug_obj_state state)
905 struct self_test *obj = addr;
907 switch (state) {
908 case ODEBUG_STATE_ACTIVE:
909 debug_object_deactivate(obj, &descr_type_test);
910 debug_object_init(obj, &descr_type_test);
911 return true;
912 default:
913 return false;
918 * fixup_activate is called when:
919 * - an active object is activated
920 * - an unknown non-static object is activated
922 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
924 struct self_test *obj = addr;
926 switch (state) {
927 case ODEBUG_STATE_NOTAVAILABLE:
928 return true;
929 case ODEBUG_STATE_ACTIVE:
930 debug_object_deactivate(obj, &descr_type_test);
931 debug_object_activate(obj, &descr_type_test);
932 return true;
934 default:
935 return false;
940 * fixup_destroy is called when:
941 * - an active object is destroyed
943 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
945 struct self_test *obj = addr;
947 switch (state) {
948 case ODEBUG_STATE_ACTIVE:
949 debug_object_deactivate(obj, &descr_type_test);
950 debug_object_destroy(obj, &descr_type_test);
951 return true;
952 default:
953 return false;
958 * fixup_free is called when:
959 * - an active object is freed
961 static bool __init fixup_free(void *addr, enum debug_obj_state state)
963 struct self_test *obj = addr;
965 switch (state) {
966 case ODEBUG_STATE_ACTIVE:
967 debug_object_deactivate(obj, &descr_type_test);
968 debug_object_free(obj, &descr_type_test);
969 return true;
970 default:
971 return false;
975 static int __init
976 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
978 struct debug_bucket *db;
979 struct debug_obj *obj;
980 unsigned long flags;
981 int res = -EINVAL;
983 db = get_bucket((unsigned long) addr);
985 raw_spin_lock_irqsave(&db->lock, flags);
987 obj = lookup_object(addr, db);
988 if (!obj && state != ODEBUG_STATE_NONE) {
989 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
990 goto out;
992 if (obj && obj->state != state) {
993 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
994 obj->state, state);
995 goto out;
997 if (fixups != debug_objects_fixups) {
998 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
999 fixups, debug_objects_fixups);
1000 goto out;
1002 if (warnings != debug_objects_warnings) {
1003 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1004 warnings, debug_objects_warnings);
1005 goto out;
1007 res = 0;
1008 out:
1009 raw_spin_unlock_irqrestore(&db->lock, flags);
1010 if (res)
1011 debug_objects_enabled = 0;
1012 return res;
1015 static __initdata struct debug_obj_descr descr_type_test = {
1016 .name = "selftest",
1017 .is_static_object = is_static_object,
1018 .fixup_init = fixup_init,
1019 .fixup_activate = fixup_activate,
1020 .fixup_destroy = fixup_destroy,
1021 .fixup_free = fixup_free,
1024 static __initdata struct self_test obj = { .static_init = 0 };
1026 static void __init debug_objects_selftest(void)
1028 int fixups, oldfixups, warnings, oldwarnings;
1029 unsigned long flags;
1031 local_irq_save(flags);
1033 fixups = oldfixups = debug_objects_fixups;
1034 warnings = oldwarnings = debug_objects_warnings;
1035 descr_test = &descr_type_test;
1037 debug_object_init(&obj, &descr_type_test);
1038 if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1039 goto out;
1040 debug_object_activate(&obj, &descr_type_test);
1041 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1042 goto out;
1043 debug_object_activate(&obj, &descr_type_test);
1044 if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1045 goto out;
1046 debug_object_deactivate(&obj, &descr_type_test);
1047 if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1048 goto out;
1049 debug_object_destroy(&obj, &descr_type_test);
1050 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1051 goto out;
1052 debug_object_init(&obj, &descr_type_test);
1053 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1054 goto out;
1055 debug_object_activate(&obj, &descr_type_test);
1056 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1057 goto out;
1058 debug_object_deactivate(&obj, &descr_type_test);
1059 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1060 goto out;
1061 debug_object_free(&obj, &descr_type_test);
1062 if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1063 goto out;
1065 obj.static_init = 1;
1066 debug_object_activate(&obj, &descr_type_test);
1067 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1068 goto out;
1069 debug_object_init(&obj, &descr_type_test);
1070 if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1071 goto out;
1072 debug_object_free(&obj, &descr_type_test);
1073 if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1074 goto out;
1076 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1077 debug_object_init(&obj, &descr_type_test);
1078 if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1079 goto out;
1080 debug_object_activate(&obj, &descr_type_test);
1081 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1082 goto out;
1083 __debug_check_no_obj_freed(&obj, sizeof(obj));
1084 if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1085 goto out;
1086 #endif
1087 pr_info("selftest passed\n");
1089 out:
1090 debug_objects_fixups = oldfixups;
1091 debug_objects_warnings = oldwarnings;
1092 descr_test = NULL;
1094 local_irq_restore(flags);
1096 #else
1097 static inline void debug_objects_selftest(void) { }
1098 #endif
1101 * Called during early boot to initialize the hash buckets and link
 1102 * the static object pool objects into the pool list. After this call
1103 * the object tracker is fully operational.
1105 void __init debug_objects_early_init(void)
1107 int i;
1109 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1110 raw_spin_lock_init(&obj_hash[i].lock);
1112 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1113 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1117 * Convert the statically allocated objects to dynamic ones:
1119 static int __init debug_objects_replace_static_objects(void)
1121 struct debug_bucket *db = obj_hash;
1122 struct hlist_node *tmp;
1123 struct debug_obj *obj, *new;
1124 HLIST_HEAD(objects);
1125 int i, cnt = 0;
1127 for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1128 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1129 if (!obj)
1130 goto free;
1131 kmemleak_ignore(obj);
1132 hlist_add_head(&obj->node, &objects);
1136 * When debug_objects_mem_init() is called we know that only
1137 * one CPU is up, so disabling interrupts is enough
1138 * protection. This avoids the lockdep hell of lock ordering.
1140 local_irq_disable();
1142 /* Remove the statically allocated objects from the pool */
1143 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1144 hlist_del(&obj->node);
1145 /* Move the allocated objects to the pool */
1146 hlist_move_list(&objects, &obj_pool);
1148 /* Replace the active object references */
1149 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1150 hlist_move_list(&db->list, &objects);
1152 hlist_for_each_entry(obj, &objects, node) {
1153 new = hlist_entry(obj_pool.first, typeof(*obj), node);
1154 hlist_del(&new->node);
1155 /* copy object data */
1156 *new = *obj;
1157 hlist_add_head(&new->node, &db->list);
1158 cnt++;
1161 local_irq_enable();
1163 pr_debug("%d of %d active objects replaced\n",
1164 cnt, obj_pool_used);
1165 return 0;
1166 free:
1167 hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1168 hlist_del(&obj->node);
1169 kmem_cache_free(obj_cache, obj);
1171 return -ENOMEM;
 1175 * Called after the kmem_caches are functional to set up a dedicated
 1176 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 1177 * prevents the debug code from being called on kmem_cache_free() for the
 1178 * debug tracker objects, which avoids recursive calls.
1180 void __init debug_objects_mem_init(void)
1182 if (!debug_objects_enabled)
1183 return;
1185 obj_cache = kmem_cache_create("debug_objects_cache",
1186 sizeof (struct debug_obj), 0,
1187 SLAB_DEBUG_OBJECTS, NULL);
1189 if (!obj_cache || debug_objects_replace_static_objects()) {
1190 debug_objects_enabled = 0;
1191 kmem_cache_destroy(obj_cache);
1192 pr_warn("out of memory.\n");
1193 } else
1194 debug_objects_selftest();
1197 * Increase the thresholds for allocating and freeing objects
1198 * according to the number of possible CPUs available in the system.
1200 debug_objects_pool_size += num_possible_cpus() * 32;
1201 debug_objects_pool_min_level += num_possible_cpus() * 4;