// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);
/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
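/*
 * Example (illustrative): merging can be controlled from the kernel
 * command line, e.g. "slab_nomerge" to keep every cache separate when
 * debugging suspected cross-cache corruption, or "slab_merge" to force
 * merging back on for a CONFIG_SLAB_MERGE_DEFAULT=n build.
 */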
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
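/*
 * Worked example (illustrative): with a 64-byte cache line, a 24-byte
 * object and SLAB_HWCACHE_ALIGN, ralign is halved 64 -> 32 (24 <= 32)
 * and the loop then stops (24 > 16), so the object is aligned to 32
 * bytes instead of wasting most of a cache line per object.
 */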
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}
struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}
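/*
 * Worked example (illustrative): a new 44-byte cache request is rounded
 * up to a 48-byte slot (with sizeof(void *) == 8). An existing cache
 * with s->size == 48 and matching SLAB_MERGE_SAME bits passes every
 * check above (48 - 48 < 8), so it is reused as an alias instead of
 * creating a new cache.
 */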
static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif
	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}
/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
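/*
 * Example usage (illustrative sketch, not part of this file): a cache
 * whose objects expose only one member to copy_{to,from}_user(). The
 * "struct foo" type and field names are assumptions for demonstration.
 *
 *	struct foo {
 *		u32 flags;
 *		char payload[64];	// only user-accessible region
 *	};
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *			0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, payload),
 *			sizeof_field(struct foo, payload), NULL);
 *
 * With CONFIG_HARDENED_USERCOPY, user copies into these objects are then
 * restricted to the declared payload window.
 */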
/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
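/*
 * Example usage (illustrative sketch): full lifecycle of a private
 * cache; "struct foo" and foo_ctor are assumptions for demonstration.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */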
#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	if (slab_state >= FULL) {
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
	} else {
		slab_kmem_cache_release(s);
	}
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}
static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}
void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
	int err = -EBUSY;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!err && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);
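/*
 * Example (illustrative): returning unused slabs to the page allocator
 * after a burst of allocations has died down:
 *
 *	if (kmem_cache_shrink(foo_cache))
 *		pr_debug("foo_cache still holds live objects\n");
 */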
bool slab_is_available(void)
{
	return slab_state >= UP;
}
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}
/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
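/*
 * Example (illustrative): annotating a suspicious pointer from a debug
 * path. Since kmem_dump_obj() uses pr_cont(), the caller prints the
 * preamble first:
 *
 *	pr_info("bad pointer %px:", ptr);
 *	if (!kmem_dump_obj(ptr))
 *		pr_cont(" (not a slab object)\n");
 */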
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}
static struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif
/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};
size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
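/*
 * Example (illustrative): sizing a buffer to the bucket kmalloc() would
 * actually return, so the slack becomes usable instead of wasted:
 *
 *	size_t want = 1000;
 *	size_t got = kmalloc_size_roundup(want);	// 1024 here
 *	char *buf = kmalloc(got, GFP_KERNEL);
 */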
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)	KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)	KMA_RAND_2(sz)	.name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)	KMA_RAND_3(sz)	.name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)	KMA_RAND_4(sz)	.name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)	KMA_RAND_5(sz)	.name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)	KMA_RAND_6(sz)	.name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)	KMA_RAND_7(sz)	.name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)	KMA_RAND_8(sz)	.name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz)	KMA_RAND_9(sz)	.name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz)	KMA_RAND_10(sz)	.name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz)	KMA_RAND_11(sz)	.name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz)	KMA_RAND_12(sz)	.name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz)	KMA_RAND_13(sz)	.name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz)	KMA_RAND_14(sz)	.name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif // CONFIG_RANDOM_KMALLOC_CACHES
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.size = __size,						\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
}
/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};
/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}
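/*
 * Worked example (illustrative): with KMALLOC_MIN_SIZE == 128, the 96-
 * and 192-byte caches cannot exist, so requests of 136..192 bytes are
 * redirected to index 8, i.e. the 256-byte cache.
 */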
static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type);
		}
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;
}
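/*
 * Note (illustrative): per kmalloc_info[], index 1 is the 96-byte cache
 * and index 2 the 192-byte one, so "kmalloc-96" is created right after
 * "kmalloc-64" (i == 6) and "kmalloc-192" right after "kmalloc-128"
 * (i == 7) in the loop above.
 */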
/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}
gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}
/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}
static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}
static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to sleep
	 * in the OOM path. But without holding the mutex, the traversal
	 * may race with cache destruction and crash.
	 * Use mutex_trylock to protect the list traversal and dump nothing
	 * if the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Check for double-free before calling ksize. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
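/*
 * Example (illustrative): growing a buffer while preserving contents;
 * on failure krealloc() returns NULL and leaves the old buffer intact:
 *
 *	tmp = krealloc(buf, 128, GFP_KERNEL);
 *	if (!tmp)
 *		kfree(buf);
 *	else
 *		buf = tmp;
 */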
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
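/*
 * Example (illustrative): freeing key material without leaving a copy
 * behind in the freed slab object:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);	// zeroes the whole allocation first
 */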
size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than when
	 * it's printed later when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);