// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);
/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
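/*
 * Worked example (illustrative, assuming a 64-byte cache line): for a
 * 24-byte object created with SLAB_HWCACHE_ALIGN and align == 8, ralign
 * starts at 64 and is halved while the object still fits in half of it
 * (24 <= 32 holds, 24 <= 16 does not), giving ralign == 32. The result
 * is max(8, 32) == 32, so objects pack two per cache line without any
 * object straddling a line boundary.
 */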
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
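/*
 * Illustrative example (hypothetical sizes): a request for a 28-byte
 * cache with default flags rounds up to 32 above, and can be satisfied
 * by an existing 32-byte cache whose SLAB_MERGE_SAME bits match, since
 * the wasted tail (32 - 32 == 0) is below sizeof(void *). A candidate
 * created with SLAB_POISON or a constructor would instead be rejected
 * by slab_unmergeable().
 */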
static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}
/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;
	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
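/*
 * Illustrative usage (not part of this file; "struct sig_ctx" and its
 * fields are hypothetical): only the key[] region may be copied to or
 * from userspace, so usercopy hardening rejects copies that touch the
 * surrounding kernel-private fields.
 *
 *	struct sig_ctx {
 *		spinlock_t lock;
 *		u8 key[64];
 *		struct list_head node;
 *	};
 *
 *	ctx_cache = kmem_cache_create_usercopy("sig_ctx",
 *			sizeof(struct sig_ctx), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct sig_ctx, key),
 *			sizeof_field(struct sig_ctx, key), NULL);
 */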
/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
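/*
 * Illustrative usage (hypothetical "struct foo" and cache pointer): a
 * typical caller creates the cache once at init time, then allocates
 * and frees objects from it.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */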
#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	sysfs_slab_unlink(s);
	sysfs_slab_release(s);
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_caches are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}
static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}
void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
	int refcnt;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	refcnt = --s->refcount;
	if (refcnt)
		goto out_unlock;

	WARN(shutdown_cache(s),
	     "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!refcnt && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);
bool slab_is_available(void)
{
	return slab_state >= UP;
}
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct folio *folio;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	folio = virt_to_folio(object);
	return folio_test_slab(folio);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}
/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	slab = virt_to_slab(object);
	if (WARN_ON_ONCE(!slab)) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
		pr_cont(" size %u", kp.kp_slab_cache->usersize);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}
struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
								usersize);
	kasan_cache_create_kmalloc(s);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);
/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}
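/*
 * Worked example (illustrative): a 24-byte request gives
 * size_index_elem(24) == (24 - 1) / 8 == 2, and size_index[2] == 5, so
 * the allocation is served from the 32-byte cache
 * (kmalloc_caches[type][5]). Sizes above 192 skip the table and use
 * fls() instead, e.g. fls(300 - 1) == 9 selects the 512-byte cache.
 */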
/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}
size_t kmalloc_size_roundup(size_t size)
{
	struct kmem_cache *c;

	/* Short-circuit the 0 size case. */
	if (unlikely(size == 0))
		return 0;
	/* Short-circuit saturated "too-large" case. */
	if (unlikely(size == SIZE_MAX))
		return SIZE_MAX;
	/* Above the smaller buckets, size is a multiple of page size. */
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return PAGE_SIZE << get_order(size);

	/* The flags don't matter since size_index is common to all. */
	c = kmalloc_slab(size, GFP_KERNEL);
	return c ? c->object_size : 0;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
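/*
 * Illustrative results, assuming default kmalloc geometry:
 * kmalloc_size_roundup(30) returns 32, kmalloc_size_roundup(1000)
 * returns 1024, and a request above KMALLOC_MAX_CACHE_SIZE rounds to a
 * whole number of pages. Callers can use the returned size to exploit
 * the full allocated area without tripping KASAN or FORTIFY_SOURCE.
 */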
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	.size = __size,						\
}
/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};
/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		kmalloc_caches[type][idx]->refcount = -1;
}
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;
}
void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	if (WARN_ON_ONCE(order == 0))
		pr_warn_once("object pointer: 0x%p\n", object);

	kmemleak_free(object);
	kasan_kfree_large(object);
	kmsan_kfree_large(object);

	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__free_pages(folio_page(folio, 0), order);
}
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);

static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc(caller, ret, size,
			      PAGE_SIZE << get_order(size), flags, node);
		return ret;
	}

	s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	ret = kasan_kmalloc(s, ret, size, flags);
	trace_kmalloc(caller, ret, size, s->size, flags, node);
	return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
/**
 * kfree - free previously allocated memory
 * @object: pointer returned by kmalloc.
 *
 * If @object is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	s = slab->slab_cache;
	__kmem_cache_free(s, (void *)object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);
/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

	return slab_ksize(folio_slab(folio)->slab_cache);
}
void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
					    size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_trace);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_node_trace);
#endif /* !CONFIG_SLOB */
gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}
/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ptr = NULL;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (page) {
		ptr = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}

	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	kmsan_kmalloc_large(ptr, size, flags);

	return ptr;
}
void *kmalloc_large(size_t size, gfp_t flags)
{
	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, NUMA_NO_NODE);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large);

void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	void *ret = __kmalloc_large_node(size, flags, node);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, node);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large_node);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
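/*
 * Illustrative trace of the shuffle above for count == 4 (the random
 * picks are made up): list starts as {0,1,2,3}; i == 3 draws rand 1 and
 * swaps to {0,3,2,1}; i == 2 draws rand 0 and swaps to {2,3,0,1};
 * i == 1 draws rand 1 and leaves {2,3,0,1}. Every permutation is
 * equally likely, which is what makes the freelist order unpredictable
 * to an attacker.
 */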
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}
static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Here acquiring slab_mutex is risky since we don't want to sleep
	 * in the OOM path. But, without the mutex held, traversing the list
	 * may introduce a risk of crash.
	 * Use mutex_trylock to protect the list traverse, and dump nothing
	 * without acquiring the mutex.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);
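/*
 * Hypothetical /proc/slabinfo line (all values are illustrative only,
 * mapping onto the header columns printed above):
 *
 *	kmalloc-64  1184  1216  64  64  1 : tunables 0 0 0 \
 *	    : slabdata  19  19  0
 *
 * i.e. 1184 of 1216 64-byte objects are active, 64 objects fit in each
 * one-page slab, and 19 slabs exist.
 */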
#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = kfence_ksize(p) ?: __ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
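/*
 * Illustrative usage (hypothetical "buf" and "new_len"): krealloc() may
 * move the allocation, so only the returned pointer is valid afterwards.
 * On failure the original buffer is left intact and still owned by the
 * caller:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	buf = new;
 */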
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
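/*
 * Illustrative usage (hypothetical key material): because the whole
 * allocation is zeroed, not just the requested length, this is suited
 * to freeing buffers that held secrets.
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);
 */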
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to first check that the pointer to the object is valid, and
	 * only then unpoison the memory. The report printed from ksize() is
	 * more useful than when it's printed later when the behaviour could
	 * be undefined due to a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	size = kfence_ksize(objp) ?: __ksize(objp);
	/*
	 * We assume that ksize callers could use whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);