// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE       16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE        (64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL   (ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE (8 * ODEBUG_BATCH_SIZE)

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX    (1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)
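
/*
 * Worked example for the defaults above (illustrative only; a sketch
 * assuming ODEBUG_BATCH_SIZE == 16):
 *
 *      ODEBUG_POOL_SIZE        = 64 * 16   = 1024 objects (64 batches)
 *      ODEBUG_POOL_MIN_LEVEL   = 1024 / 4  = 256 objects
 *      ODEBUG_POOL_PERCPU_SIZE = 8 * 16    = 128 objects per CPU
 *      ODEBUG_FREE_WORK_MAX    = 1024 / 16 = 64 batches = 1024 objects per run
 *      ODEBUG_FREE_WORK_DELAY  = HZ / 10 jiffies, i.e. roughly 100ms,
 *                                which gives the ~10k objects/s limit above.
 */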

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

struct pool_stats {
        unsigned int            cur_used;
        unsigned int            max_used;
        unsigned int            min_fill;
};

struct obj_pool {
        struct hlist_head       objects;
        unsigned int            cnt;
        unsigned int            min_cnt;
        unsigned int            max_cnt;
        struct pool_stats       stats;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
        .max_cnt        = ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
        .min_cnt                = ODEBUG_POOL_MIN_LEVEL,
        .max_cnt                = ODEBUG_POOL_SIZE,
        .stats                  = {
                .min_fill       = ODEBUG_POOL_SIZE,
        },
};

static struct obj_pool pool_to_free = {
        .max_cnt        = UINT_MAX,
};

static HLIST_HEAD(pool_boot);

static unsigned long            avg_usage;
static bool                     obj_freeing;

static int __data_racy                  debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused   debug_objects_maxchecked __read_mostly;
static int __data_racy                  debug_objects_fixups __read_mostly;
static int __data_racy                  debug_objects_warnings __read_mostly;
static bool __data_racy                 debug_objects_enabled __read_mostly
                                        = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr     *descr_test __read_mostly;
static struct kmem_cache                *obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy          debug_objects_allocated;
static int __data_racy          debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = true;
        return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = false;
        return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
        return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
        return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
        return pool_count(pool) < pool->min_cnt / 2;
}

static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
        struct hlist_node *last, *next_batch, *first_batch;
        struct debug_obj *obj;

        if (dst->cnt >= dst->max_cnt || !src->cnt)
                return false;

        first_batch = src->objects.first;
        obj = hlist_entry(first_batch, typeof(*obj), node);
        last = obj->batch_last;
        next_batch = last->next;

        /* Move the next batch to the front of the source pool */
        src->objects.first = next_batch;
        if (next_batch)
                next_batch->pprev = &src->objects.first;

        /* Add the extracted batch to the destination pool */
        last->next = dst->objects.first;
        if (last->next)
                last->next->pprev = &last->next;
        first_batch->pprev = &dst->objects.first;
        dst->objects.first = first_batch;

        WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
        WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
        return true;
}

static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
        struct hlist_node *last;
        struct debug_obj *obj;

        if (dst->cnt >= dst->max_cnt)
                return false;

        obj = hlist_entry(head->first, typeof(*obj), node);
        last = obj->batch_last;

        hlist_splice_init(head, last, &dst->objects);
        WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
        return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
        struct hlist_node *last, *next;
        struct debug_obj *obj;

        if (!src->cnt)
                return false;

        /* Move the complete list to the head */
        hlist_move_list(&src->objects, head);

        obj = hlist_entry(head->first, typeof(*obj), node);
        last = obj->batch_last;

        /* Disconnect the batch from the list */
        next = last->next;
        last->next = NULL;

        /* Move the node after last back to the source pool. */
        src->objects.first = next;
        if (next)
                next->pprev = &src->objects.first;

        WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
        return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj;

        if (unlikely(!list->first))
                return NULL;

        obj = hlist_entry(list->first, typeof(*obj), node);
        hlist_del(&obj->node);
        return obj;
}

static void pcpu_refill_stats(void)
{
        struct pool_stats *stats = &pool_global.stats;

        WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);

        if (stats->cur_used > stats->max_used)
                stats->max_used = stats->cur_used;

        if (pool_global.cnt < stats->min_fill)
                stats->min_fill = pool_global.cnt;
}

static struct debug_obj *pcpu_alloc(void)
{
        struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

        lockdep_assert_irqs_disabled();

        for (;;) {
                struct debug_obj *obj = __alloc_object(&pcp->objects);

                if (likely(obj)) {
                        pcp->cnt--;
                        /*
                         * If this emptied a batch try to refill from the
                         * free pool. Don't do that if this was the top-most
                         * batch as pcpu_free() expects the per CPU pool
                         * to be less than ODEBUG_POOL_PERCPU_SIZE.
                         */
                        if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
                                     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
                                /*
                                 * Don't try to allocate from the regular pool here
                                 * to not exhaust it prematurely.
                                 */
                                if (pool_count(&pool_to_free)) {
                                        guard(raw_spinlock)(&pool_lock);
                                        pool_move_batch(pcp, &pool_to_free);
                                        pcpu_refill_stats();
                                }
                        }
                        return obj;
                }

                guard(raw_spinlock)(&pool_lock);
                if (!pool_move_batch(pcp, &pool_to_free)) {
                        if (!pool_move_batch(pcp, &pool_global))
                                return NULL;
                }
                pcpu_refill_stats();
        }
}

static void pcpu_free(struct debug_obj *obj)
{
        struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
        struct debug_obj *first;

        lockdep_assert_irqs_disabled();

        if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
                obj->batch_last = &obj->node;
        } else {
                first = hlist_entry(pcp->objects.first, typeof(*first), node);
                obj->batch_last = first->batch_last;
        }
        hlist_add_head(&obj->node, &pcp->objects);
        pcp->cnt++;

        /* Pool full ? */
        if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
                return;

        /* Remove a batch from the per CPU pool */
        guard(raw_spinlock)(&pool_lock);
        /* Try to fit the batch into the pool_global first */
        if (!pool_move_batch(&pool_global, pcp))
                pool_move_batch(&pool_to_free, pcp);
        WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
}

static void free_object_list(struct hlist_head *head)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry_safe(obj, tmp, head, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
                cnt++;
        }
        debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
        static unsigned long state;

        /*
         * Reuse objs from the global obj_to_free list; they will be
         * reinitialized when allocating.
         */
        if (!pool_count(&pool_to_free))
                return;

        /*
         * Prevent the context from being scheduled or interrupted after
         * setting the state flag.
         */
        guard(irqsave)();

        /*
         * Avoid lock contention on &pool_lock and avoid making the cache
         * line exclusive by testing the bit before attempting to set it.
         */
        if (test_bit(0, &state) || test_and_set_bit(0, &state))
                return;

        /* Avoid taking the lock when there is no work to do */
        while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
                guard(raw_spinlock)(&pool_lock);
                /* Move a batch if possible */
                pool_move_batch(&pool_global, &pool_to_free);
        }
        clear_bit(0, &state);
}

static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
        struct hlist_node *last = NULL;
        struct debug_obj *obj;

        for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                obj = kmem_cache_zalloc(cache, gfp);
                if (!obj) {
                        free_object_list(head);
                        return false;
                }
                debug_objects_allocated++;

                if (!last)
                        last = &obj->node;
                obj->batch_last = last;

                hlist_add_head(&obj->node, head);
        }
        return true;
}

static void fill_pool(void)
{
        static atomic_t cpus_allocating;

        /*
         * Avoid allocation and lock contention when:
         *   - One other CPU is already allocating
         *   - the global pool has not reached the critical level yet
         */
        if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
                return;

        atomic_inc(&cpus_allocating);
        while (pool_should_refill(&pool_global)) {
                HLIST_HEAD(head);

                if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
                        break;

                guard(raw_spinlock_irqsave)(&pool_lock);
                if (!pool_push_batch(&pool_global, &head))
                        pool_push_batch(&pool_to_free, &head);
        }
        atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

static void calc_usage(void)
{
        static DEFINE_RAW_SPINLOCK(avg_lock);
        static unsigned long avg_period;
        unsigned long cur, now = jiffies;

        if (!time_after_eq(now, READ_ONCE(avg_period)))
                return;

        if (!raw_spin_trylock(&avg_lock))
                return;

        WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
        cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
        WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
        raw_spin_unlock(&avg_lock);
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
                                      const struct debug_obj_descr *descr)
{
        struct debug_obj *obj;

        calc_usage();

        if (static_branch_likely(&obj_cache_enabled))
                obj = pcpu_alloc();
        else
                obj = __alloc_object(&pool_boot);

        if (likely(obj)) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
        static unsigned long last_use_avg;
        unsigned long cur_used, last_used, delta;
        unsigned int max_free = 0;

        WRITE_ONCE(obj_freeing, false);

        /* Rate limit freeing based on current use average */
        cur_used = READ_ONCE(avg_usage);
        last_used = last_use_avg;
        last_use_avg = cur_used;

        if (!pool_count(&pool_to_free))
                return;

        if (cur_used <= last_used) {
                delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
                max_free = min(delta, ODEBUG_FREE_WORK_MAX);
        }

        for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
                HLIST_HEAD(tofree);

                /* Acquire and drop the lock for each batch */
                scoped_guard(raw_spinlock_irqsave, &pool_lock) {
                        if (!pool_to_free.cnt)
                                return;

                        /* Refill the global pool if possible */
                        if (pool_move_batch(&pool_global, &pool_to_free)) {
                                /* Don't free as there seems to be demand */
                                max_free = 0;
                        } else if (max_free) {
                                pool_pop_batch(&tofree, &pool_to_free);
                                max_free--;
                        } else {
                                return;
                        }
                }
                free_object_list(&tofree);
        }
}

static void __free_object(struct debug_obj *obj)
{
        guard(irqsave)();
        if (static_branch_likely(&obj_cache_enabled))
                pcpu_free(obj);
        else
                hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

static void put_objects(struct hlist_head *list)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;

        /*
         * Using free_object() puts the objects into reuse or schedules
         * them for freeing and it gets all the accounting correct.
         */
        hlist_for_each_entry_safe(obj, tmp, list, node) {
                hlist_del(&obj->node);
                free_object(obj);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
        /* Remote access is safe as the CPU is dead already */
        struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

        put_objects(&pcp->objects);
        pcp->cnt = 0;
        return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        HLIST_HEAD(freelist);

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                scoped_guard(raw_spinlock_irqsave, &db->lock)
                        hlist_move_list(&db->list, &freelist);

                put_objects(&freelist);
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
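
/*
 * Worked example (illustrative only): with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT
 * == 12, two addresses within the same 4k chunk, e.g. 0x1234567 and
 * 0x1234abc, both shift down to 0x1234 and therefore hash to the same
 * bucket. That property is what lets __debug_check_no_obj_freed() scan a
 * freed memory range chunk by chunk.
 */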

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        const struct debug_obj_descr *descr = obj->descr;
        static int limit;

        /*
         * Don't report if lookup_object_or_alloc() by the current thread
         * failed because lookup_object_or_alloc()/debug_objects_oom() by a
         * concurrent thread turned off debug_objects_enabled and cleared
         * the hash buckets.
         */
        if (!debug_objects_enabled)
                return;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object: %p object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        obj->object, descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                        task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                        task_stack_page(current));

        WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
                                                const struct debug_obj_descr *descr,
                                                bool onstack, bool alloc_ifstatic)
{
        struct debug_obj *obj = lookup_object(addr, b);
        enum debug_obj_state state = ODEBUG_STATE_NONE;

        if (likely(obj))
                return obj;

        /*
         * debug_object_init() unconditionally allocates untracked
         * objects. It does not matter whether it is a static object or
         * not.
         *
         * debug_object_assert_init() and debug_object_activate() allow
         * allocation only if the descriptor callback confirms that the
         * object is static and considered initialized. For non-static
         * objects the allocation needs to be done from the fixup callback.
         */
        if (unlikely(alloc_ifstatic)) {
                if (!descr->is_static_object || !descr->is_static_object(addr))
                        return ERR_PTR(-ENOENT);
                /* Statically allocated objects are considered initialized */
                state = ODEBUG_STATE_INIT;
        }

        obj = alloc_object(addr, b, descr);
        if (likely(obj)) {
                obj->state = state;
                debug_object_is_on_stack(addr, onstack);
                return obj;
        }

        /* Out of memory. Do the cleanup outside of the locked region */
        debug_objects_enabled = false;
        return NULL;
}

static void debug_objects_fill_pool(void)
{
        if (!static_branch_likely(&obj_cache_enabled))
                return;

        if (likely(!pool_should_refill(&pool_global)))
                return;

        /* Try reusing objects from obj_to_free_list */
        fill_pool_from_freelist();

        if (likely(!pool_should_refill(&pool_global)))
                return;

        /*
         * On RT enabled kernels the pool refill must happen in preemptible
         * context -- for !RT kernels we rely on the fact that spinlock_t and
         * raw_spinlock_t are basically the same type and this lock-type
         * inversion works just fine.
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
                /*
                 * Annotate away the spinlock_t inside raw_spinlock_t warning
                 * by temporarily raising the wait-type to WAIT_SLEEP, matching
                 * the preemptible() condition above.
                 */
                static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
                lock_map_acquire_try(&fill_pool_map);
                fill_pool();
                lock_map_release(&fill_pool_map);
        }
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
        struct debug_obj *obj, o;
        struct debug_bucket *db;
        unsigned long flags;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
        if (unlikely(!obj)) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_objects_oom();
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return;
        default:
                break;
        }

        o = *obj;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "init");

        if (o.state == ODEBUG_STATE_ACTIVE)
                debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return 0;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        if (unlikely(!obj)) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_objects_oom();
                return 0;
        } else if (likely(!IS_ERR(obj))) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                case ODEBUG_STATE_DESTROYED:
                        o = *obj;
                        break;
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        fallthrough;
                default:
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        return 0;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "activate");

        switch (o.state) {
        case ODEBUG_STATE_ACTIVE:
        case ODEBUG_STATE_NOTAVAILABLE:
                if (debug_object_fixup(descr->fixup_activate, addr, o.state))
                        return 0;
                fallthrough;
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_DESTROYED:
                        break;
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate)
                                break;
                        obj->state = ODEBUG_STATE_INACTIVE;
                        fallthrough;
                default:
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        return;
                }
                o = *obj;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj *obj, o;
        struct debug_bucket *db;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
        case ODEBUG_STATE_DESTROYED:
                break;
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                fallthrough;
        default:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        o = *obj;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "destroy");

        if (o.state == ODEBUG_STATE_ACTIVE)
                debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj *obj, o;
        struct debug_bucket *db;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                break;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }

        o = *obj;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "free");

        debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);
        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (likely(!IS_ERR_OR_NULL(obj)))
                return;

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return;
        }

        /* Object is neither tracked nor static. It's not initialized. */
        debug_print_object(&o, "assert_init");
        debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate != expect)
                                break;
                        obj->astate = next;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        return;
                default:
                        break;
                }
                o = *obj;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        int cnt, objs_checked = 0;
        struct debug_obj *obj, o;
        struct debug_bucket *db;
        struct hlist_node *tmp;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                o = *obj;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(&o, "free");
                                debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif
1135 static int debug_stats_show(struct seq_file
*m
, void *v
)
1137 unsigned int cpu
, pool_used
, pcp_free
= 0;
1140 * pool_global.stats.cur_used is the number of batches currently
1141 * handed out to per CPU pools. Convert it to number of objects
1142 * and subtract the number of free objects in the per CPU pools.
1143 * As this is lockless the number is an estimate.
1145 for_each_possible_cpu(cpu
)
1146 pcp_free
+= per_cpu(pool_pcpu
.cnt
, cpu
);
1148 pool_used
= READ_ONCE(pool_global
.stats
.cur_used
);
1149 pcp_free
= min(pool_used
, pcp_free
);
1150 pool_used
-= pcp_free
;
1152 seq_printf(m
, "max_chain : %d\n", debug_objects_maxchain
);
1153 seq_printf(m
, "max_checked : %d\n", debug_objects_maxchecked
);
1154 seq_printf(m
, "warnings : %d\n", debug_objects_warnings
);
1155 seq_printf(m
, "fixups : %d\n", debug_objects_fixups
);
1156 seq_printf(m
, "pool_free : %u\n", pool_count(&pool_global
) + pcp_free
);
1157 seq_printf(m
, "pool_pcp_free : %u\n", pcp_free
);
1158 seq_printf(m
, "pool_min_free : %u\n", data_race(pool_global
.stats
.min_fill
));
1159 seq_printf(m
, "pool_used : %u\n", pool_used
);
1160 seq_printf(m
, "pool_max_used : %u\n", data_race(pool_global
.stats
.max_used
));
1161 seq_printf(m
, "on_free_list : %u\n", pool_count(&pool_to_free
));
1162 seq_printf(m
, "objs_allocated: %d\n", debug_objects_allocated
);
1163 seq_printf(m
, "objs_freed : %d\n", debug_objects_freed
);
1166 DEFINE_SHOW_ATTRIBUTE(debug_stats
);
1168 static int __init
debug_objects_init_debugfs(void)
1170 struct dentry
*dbgdir
;
1172 if (!debug_objects_enabled
)
1175 dbgdir
= debugfs_create_dir("debug_objects", NULL
);
1177 debugfs_create_file("stats", 0444, dbgdir
, NULL
, &debug_stats_fops
);
1181 __initcall(debug_objects_init_debugfs
);
1184 static inline void debug_objects_init_debugfs(void) { }

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                     obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                     fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                     warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = false;
        return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
        return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif /* CONFIG_DEBUG_OBJECTS_SELFTEST */

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        /* Keep early boot simple and add everything to the boot list */
        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        HLIST_HEAD(objects);
        int i;

        for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
                if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
                        goto free;
                pool_push_batch(&pool_global, &objects);
        }

        /* Disconnect the boot pool. */
        pool_boot.first = NULL;

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        struct debug_obj *new = pcpu_alloc();

                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                }
        }
        return true;
free:
        /* Can't use free_object_list() as the cache is not populated yet */
        hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(cache, obj);
        }
        return false;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        struct kmem_cache *cache;
        int extras;

        if (!debug_objects_enabled)
                return;

        if (!debug_objects_selftest())
                return;

        cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
                                  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

        if (!cache || !debug_objects_replace_static_objects(cache)) {
                debug_objects_enabled = false;
                pr_warn("Out of memory.\n");
                return;
        }

        /*
         * Adjust the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the
         * system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        pool_global.max_cnt += extras;
        pool_global.min_cnt += extras;

        /* Everything worked. Expose the cache */
        obj_cache = cache;
        static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                                  object_cpu_offline);
#endif
}