/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test __read_mostly;
/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		if (obj_nr_tofree) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill the
	 * pool list from the global free list.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_nr_tofree--;
	}

	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		obj_nr_tofree = 0;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
static bool __free_object(struct debug_obj *obj)
{
	unsigned long flags;
	bool work;

	raw_spin_lock_irqsave(&pool_lock, flags);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
	obj_pool_used--;
	if (work) {
		obj_nr_tofree++;
		hlist_add_head(&obj->node, &obj_to_free);
	} else {
		obj_pool_free++;
		hlist_add_head(&obj->node, &obj_pool);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return work;
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	if (__free_object(obj))
		schedule_work(&debug_obj_work);
}
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
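/*
 * Worked example (illustrative sketch, not part of the tracker itself):
 * with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT, all objects that live in the same
 * page hash to the same bucket, so checking a freed memory range only needs
 * to scan the buckets of the pages covered by that range. Roughly:
 *
 *	void *addr = some_object;			// hypothetical object
 *	unsigned long chunk = (unsigned long)addr >> ODEBUG_CHUNK_SHIFT;
 *	unsigned long idx   = hash_long(chunk, ODEBUG_HASH_BITS);
 *	struct debug_bucket *b = &obj_hash[idx];	// same result as get_bucket()
 */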
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
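/*
 * A fixup callback supplied via struct debug_obj_descr receives the object
 * address and the state the tracker found it in, and returns true if it
 * repaired the situation. A minimal sketch (hypothetical "foo" user and
 * helpers, not part of this file):
 *
 *	static bool foo_fixup_init(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo *f = addr;
 *
 *		switch (state) {
 *		case ODEBUG_STATE_ACTIVE:
 *			// stop the still-active object, then re-init it
 *			foo_stop(f);
 *			debug_object_init(f, &foo_debug_descr);
 *			return true;
 *		default:
 *			return false;
 *		}
 *	}
 */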
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));
	WARN_ON(1);
}
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
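/*
 * Usage sketch (hypothetical "foo" subsystem; foo_debug_descr and foo_init()
 * would be defined by that subsystem, not by this file): the descriptor is
 * set up once and passed to every debug_object_*() call for that type.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		// ... normal initialization of *f ...
 *	}
 */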
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
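/*
 * For objects that live on the stack, the caller uses the _on_stack variant
 * so the annotation check in debug_object_is_on_stack() does not warn.
 * Sketch (hypothetical names, assuming the descriptor from the sketch above):
 *
 *	void foo_init_on_stack(struct foo *f)
 *	{
 *		debug_object_init_on_stack(f, &foo_debug_descr);
 *		// the object must be untracked (debug_object_free()) before
 *		// the stack frame it lives in goes away
 *	}
 */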
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. If true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
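/*
 * Callers can use the return value to refuse to activate an object whose
 * state could not be fixed up. Sketch (hypothetical names):
 *
 *	int foo_start(struct foo *f)
 *	{
 *		if (debug_object_activate(f, &foo_debug_descr))
 *			return -EINVAL;	// check failed and fixup did not help
 *		// ... arm/queue/start the object ...
 *		return 0;
 *	}
 */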
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
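/*
 * debug_object_free() untracks the object right before its memory goes away,
 * so a still-active object is caught (and possibly fixed up) before the
 * memory is reused. Sketch (hypothetical names):
 *
 *	void foo_destroy(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *		kfree(f);
 *	}
 */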
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
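/*
 * debug_object_active_state() lets a user attach a small sub-state machine
 * to an ACTIVE object via obj->astate. Sketch (hypothetical state constants
 * and names; a real user defines its own):
 *
 *	#define FOO_QUEUED	1
 *	#define FOO_RUNNING	2
 *
 *	// only legal to start running if the object is currently queued;
 *	// any other astate triggers a warning via debug_print_object()
 *	debug_object_active_state(f, &foo_debug_descr, FOO_QUEUED, FOO_RUNNING);
 */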
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;
	bool work = false;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				work |= __free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (work)
		schedule_work(&debug_obj_work);
}
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
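/*
 * This hook is meant to be called from the memory allocator's free paths
 * with the address and size of the region being released, e.g. (sketch,
 * hypothetical caller):
 *
 *	debug_check_no_obj_freed(ptr, size);
 *	// ... actually return the memory to the allocator ...
 *
 * Any tracked object inside [ptr, ptr + size) is reported (and fixed up if
 * still active) before the memory can be reused.
 */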
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
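/*
 * The counters above are exported via debugfs; assuming debugfs is mounted
 * at the usual location, they can be inspected with:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *
 * which prints one "name :value" line per seq_printf() above.
 */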
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU is
	 * up and interrupts have been disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
}
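/*
 * Example of the resulting thresholds (plain arithmetic from the two lines
 * above): on a system with 64 possible CPUs the pool may grow to
 * 1024 + 64 * 32 = 3072 objects and is refilled once it drops below
 * 256 + 64 * 4 = 512 objects.
 */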