// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM

static bool kmem_cache_is_duplicate_name(const char *name)
{
	struct kmem_cache *s;

	list_for_each_entry(s, &slab_caches, list) {
		if (!strcmp(s->name, name))
			return true;
	}

	return false;
}

static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	/* Duplicate names will confuse slabtop, et al */
	WARN(kmem_cache_is_duplicate_name(name),
	     "kmem_cache of name '%s' already exists\n", name);

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */

	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
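/*
 * Worked example (illustrative, assuming a 64-byte cache line): for a
 * 20-byte object created with SLAB_HWCACHE_ALIGN and align == 0, ralign
 * starts at 64, is halved to 32 (20 <= 32) and then stops (20 > 16), so
 * the object alignment becomes max(0, 32) = 32 bytes, subject to
 * arch_slab_minalign() and pointer-size rounding.
 */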
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}
struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || ctor)
		return NULL;

	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}
static struct kmem_cache *create_cache(const char *name,
				       unsigned int object_size,
				       struct kmem_cache_args *args,
				       slab_flags_t flags)
{
	struct kmem_cache *s;
	int err;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
	if (args->use_freeptr_offset &&
	    (args->freeptr_offset >= object_size ||
	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
	     !IS_ALIGNED(args->freeptr_offset, __alignof__(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	err = do_kmem_cache_create(s, name, object_size, args, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}
/**
 * __kmem_cache_create_args - Create a kmem cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
 * @flags: See the descriptions of individual flags. The common ones are listed
 *         in the description below.
 *
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
 * parameters.
 *
 * Commonly used @flags:
 *
 * &SLAB_ACCOUNT - Account allocations to memcg.
 *
 * &SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
 *
 * &SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
 *
 * &SLAB_TYPESAFE_BY_RCU - Slab page (not individual objects) freeing delayed
 * by a grace period - see the full description before using.
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags)
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, object_size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!args->usersize && args->useroffset) ||
	    WARN_ON(object_size < args->usersize ||
		    object_size - args->usersize < args->useroffset))
		args->usersize = args->useroffset = 0;

	s = __kmem_cache_alias(name, object_size, args->align, flags,
			       args->ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	args->align = calculate_alignment(flags, args->align, object_size);
	s = create_cache(cache_name, object_size, args, flags);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
			      __func__, name, err);
		else
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(__kmem_cache_create_args);
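/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the kmem_cache_create() wrapper rather than calling
 * __kmem_cache_create_args() directly. "struct foo" and "foo_cachep" are
 * hypothetical placeholders.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct list_head link;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 */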
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
/**
 * kmem_buckets_create - Create a set of caches that handle dynamic sized
 *			 allocations via kmem_buckets_alloc()
 * @name: A prefix string which is used in /proc/slabinfo to identify this
 *	  cache. The individual caches will have their sizes as the suffix.
 * @flags: SLAB flags (see kmem_cache_create() for details).
 * @useroffset: Starting offset within an allocation that may be copied
 *		to/from userspace.
 * @usersize: How many bytes, starting at @useroffset, may be copied
 *	      to/from userspace.
 * @ctor: A constructor for the objects, run when new allocations are made.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure. When
 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
 * (i.e. callers only need to check for NULL on failure.)
 */
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{
	unsigned long mask = 0;
	unsigned int idx;
	kmem_buckets *b;

	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);

	/*
	 * When the separate buckets API is not built in, just return
	 * a non-NULL value for the kmem_buckets pointer, which will be
	 * unused when performing allocations.
	 */
	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
		return ZERO_SIZE_PTR;

	if (WARN_ON(!kmem_buckets_cache))
		return NULL;

	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
	if (WARN_ON(!b))
		return NULL;

	flags |= SLAB_NO_MERGE;

	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
		char *short_size, *cache_name;
		unsigned int cache_useroffset, cache_usersize;
		unsigned int size, aligned_idx;

		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
			continue;

		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;

		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
		if (WARN_ON(!short_size))
			goto fail;

		if (useroffset >= size) {
			cache_useroffset = 0;
			cache_usersize = 0;
		} else {
			cache_useroffset = useroffset;
			cache_usersize = min(size - cache_useroffset, usersize);
		}

		aligned_idx = __kmalloc_index(size, false);
		if (!(*b)[aligned_idx]) {
			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
			if (WARN_ON(!cache_name))
				goto fail;
			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
					0, flags, cache_useroffset,
					cache_usersize, ctor);
			kfree(cache_name);
			if (WARN_ON(!(*b)[aligned_idx]))
				goto fail;
			set_bit(aligned_idx, &mask);
		}
		if (idx != aligned_idx)
			(*b)[idx] = (*b)[aligned_idx];
	}

	return b;

fail:
	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
		kmem_cache_destroy((*b)[idx]);
	kmem_cache_free(kmem_buckets_cache, b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
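/*
 * Illustrative usage sketch (not part of this file): a subsystem can give
 * its variable-sized allocations a dedicated set of buckets instead of the
 * shared kmalloc caches. "msg_buckets" is a hypothetical identifier and the
 * kmem_buckets_alloc() call is assumed to take the bucket set, size and gfp
 * flags as declared in <linux/slab.h>.
 *
 *	static kmem_buckets *msg_buckets __ro_after_init;
 *
 *	msg_buckets = kmem_buckets_create("msg", SLAB_ACCOUNT, 0, UINT_MAX, NULL);
 *	if (!msg_buckets)
 *		return -ENOMEM;
 *
 *	buf = kmem_buckets_alloc(msg_buckets, len, GFP_KERNEL);
 */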
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);
	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
	else
		slab_kmem_cache_release(s);
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	/* in-flight kfree_rcu()'s may include objects from our cache */
	kvfree_rcu_barrier();

	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
		/*
		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
		 * defer their freeing with call_rcu().
		 * Wait for such call_rcu() invocations here before actually
		 * destroying the cache.
		 *
		 * It doesn't matter that we haven't looked at the slab refcount
		 * yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
		 * the refcount should be 1 here.
		 */
		rcu_barrier();
	}

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount) {
		mutex_unlock(&slab_mutex);
		return;
	}

	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	err = __kmem_cache_shutdown(s);
	if (!slab_in_kunit_test())
		WARN(err, "%s %s: Slab cache still has objects when called from %pS",
		     __func__, s->name, (void *)_RET_IP_);

	list_del(&s->list);

	mutex_unlock(&slab_mutex);

	if (slab_state >= FULL)
		sysfs_slab_unlink(s);
	debugfs_slab_release(s);

	if (err)
		return;

	if (s->flags & SLAB_TYPESAFE_BY_RCU)
		rcu_barrier();

	kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
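/*
 * Illustrative usage sketch (not part of this file): a module exit path
 * destroys its cache exactly once, after every object has been freed.
 * "foo_cachep" is a hypothetical cache pointer.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cachep);
 *		foo_cachep = NULL;
 *	}
 */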
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);
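/*
 * Illustrative usage sketch (not part of this file): after freeing a large
 * burst of objects, a cache owner may ask the allocator to return empty
 * slabs to the page allocator. "foo_cachep" is hypothetical.
 *
 *	if (kmem_cache_shrink(foo_cachep))
 *		pr_debug("foo cache still holds populated slabs\n");
 */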
bool slab_is_available(void)
{
	return slab_state >= UP;
}
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}
/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
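/*
 * Illustrative usage sketch (not part of this file): because the output is
 * emitted with pr_cont(), the caller prints its own preamble first. "ptr"
 * is a hypothetical pointer being debugged.
 *
 *	pr_info("suspect object %px:", ptr);
 *	if (!kmem_dump_obj(ptr))
 *		pr_cont(" not a slab-managed address\n");
 */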
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;
	struct kmem_cache_args kmem_args = {};

	/*
	 * kmalloc caches guarantee alignment of at least the largest
	 * power-of-two divisor of the size. For power-of-two sizes,
	 * it is the size itself.
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
	kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	kmem_args.useroffset = useroffset;
	kmem_args.usersize = usersize;
#endif

	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
		      name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}
static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						       unsigned int size,
						       slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif
/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3, 4, 5, 5, 6, 6, 6, 6,		/* 8 .. 64 */
	1, 1, 1, 1, 7, 7, 7, 7,		/* 72 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2		/* 136 .. 192 */
};
size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
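/*
 * Illustrative usage sketch (not part of this file): a caller that wants to
 * use every byte its kmalloc bucket provides rounds the request up first so
 * the tracked capacity and the allocation size agree. "len" and "buf" are
 * hypothetical.
 *
 *	size_t alloc_len = kmalloc_size_roundup(len);
 *	void *buf = kmalloc(alloc_len, GFP_KERNEL);
 *
 * If the allocation succeeds, all alloc_len bytes may legitimately be used.
 */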
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)	KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)	KMA_RAND_2(sz)	.name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)	KMA_RAND_3(sz)	.name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)	KMA_RAND_4(sz)	.name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)	KMA_RAND_5(sz)	.name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)	KMA_RAND_6(sz)	.name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)	KMA_RAND_7(sz)	.name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)	KMA_RAND_8(sz)	.name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz)	KMA_RAND_9(sz)	.name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz)	KMA_RAND_10(sz)	.name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz)	KMA_RAND_11(sz)	.name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz)	KMA_RAND_12(sz)	.name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz)	KMA_RAND_13(sz)	.name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz)	KMA_RAND_14(sz)	.name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif // CONFIG_RANDOM_KMALLOC_CACHES

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.size = __size,						\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
}
/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};
/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		     !is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}
static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}
static void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		/* Caches that are NOT of the two-to-the-power-of size. */
		if (KMALLOC_MIN_SIZE <= 32)
			new_kmalloc_cache(1, type);
		if (KMALLOC_MIN_SIZE <= 64)
			new_kmalloc_cache(2, type);

		/* Caches that are of the two-to-the-power-of size. */
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
			new_kmalloc_cache(i, type);
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;

	if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
		kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
						       sizeof(kmem_buckets),
						       0, SLAB_NO_MERGE, NULL);
}
/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}
gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);

	return flags;
}
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}
static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}
static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we do not want to sleep
	 * in the OOM path. But without holding the mutex, traversing the
	 * list may introduce a risk of crash.
	 * Use mutex_trylock to protect the list traverse, dump nothing
	 * without acquiring the mutex.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
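/*
 * Illustrative usage sketch (not part of this file): buffers holding key
 * material are wiped before being returned to the allocator. "key" and
 * "key_len" are hypothetical.
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *
 *	... use the key ...
 *
 *	kfree_sensitive(key);
 *
 * The whole ksize()-sized buffer is zeroed with memzero_explicit() before
 * the memory is freed.
 */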
size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than when
	 * it's printed later when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);
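/*
 * Illustrative usage sketch (not part of this file): ksize() reports the
 * full bucket size backing an existing allocation, which may be larger than
 * what was requested. "p" is hypothetical.
 *
 *	void *p = kmalloc(30, GFP_KERNEL);
 *	size_t real = p ? ksize(p) : 0;
 *
 * On a common configuration where 30 bytes land in the kmalloc-32 cache,
 * "real" would typically be 32.
 */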
#ifdef CONFIG_BPF_SYSCALL
#include <linux/btf.h>

__bpf_kfunc_start_defs();

__bpf_kfunc struct kmem_cache *bpf_get_kmem_cache(u64 addr)
{
	struct slab *slab;

	if (!virt_addr_valid((void *)(long)addr))
		return NULL;

	slab = virt_to_slab((void *)(long)addr);
	return slab ? slab->slab_cache : NULL;
}

__bpf_kfunc_end_defs();
#endif /* CONFIG_BPF_SYSCALL */
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);