/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#include "slab.h"

enum slab_state slab_state __read_only;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_PAX_MEMORY_SANITIZE
bool pax_sanitize_slab __read_only = true;
static int __init pax_sanitize_slab_setup(char *str)
{
	pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
	printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
	return 1;
}
__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
#endif

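/*
 * Usage note (illustrative, not part of the original file): this is a boot
 * parameter, so passing "pax_sanitize_slab=0" on the kernel command line
 * disables slab sanitization, while any non-zero value (or omitting the
 * parameter entirely) leaves it enabled, which is the default above.
 */
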
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
				   size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
		/*
		 * For simplicity, we won't check this in the list of memcg
		 * caches. We have control over memcg naming, and if there
		 * aren't duplicates in the global list, there won't be any
		 * duplicates in the memcg lists as well.
		 */
		if (!memcg && !strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
#endif
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
					  const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_size(s, num_memcgs);
		/*
		 * See comment in memcontrol.c, memcg_update_cache_size:
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

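/*
 * Worked example (illustrative, not from the original source): with
 * SLAB_HWCACHE_ALIGN, a 64-byte cache line and 20-byte objects, the loop
 * halves ralign from 64 to 32 (20 <= 32 holds, 20 <= 16 does not), so the
 * objects end up 32-byte aligned instead of being padded to a full cache
 * line.
 */
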
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
			size_t align, unsigned long flags, void (*ctor)(void *),
			struct kmem_cache *parent_cache)
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
		goto out_locked;

	/*
	 * Some allocators will constraint the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = calculate_alignment(flags, align, size);
		s->ctor = ctor;

		if (memcg_register_cache(memcg, s, parent_cache)) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		err = __kmem_cache_create(s, flags);
		if (!err) {
			atomic_set(&s->refcount, 1);
			list_add(&s->list, &slab_caches);
			memcg_cache_list_add(memcg, s);
		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else
		err = -ENOMEM;

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
}
EXPORT_SYMBOL(kmem_cache_create);

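/*
 * Illustrative usage sketch (not part of the original file): a typical
 * client creates a dedicated cache for one fixed-size object type at init
 * time and destroys it on unload.  The "foo" type and names below are
 * hypothetical and only serve as an example of the API above.
 */
#if 0
struct foo {
	int id;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* Every object must have been returned to the cache by now. */
	kmem_cache_destroy(foo_cachep);
}
#endif
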
void kmem_cache_destroy(struct kmem_cache *s)
{
	/* Destroy all the children caches if we aren't a memcg cache */
	kmem_cache_destroy_memcg_children(s);

	get_online_cpus();
	mutex_lock(&slab_mutex);
	if (atomic_dec_and_test(&s->refcount)) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			mutex_unlock(&slab_mutex);
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			memcg_release_cache(s);
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			mutex_unlock(&slab_mutex);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	} else {
		mutex_unlock(&slab_mutex);
	}
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	atomic_set(&s->refcount, -1);	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	atomic_set(&s->refcount, 1);
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

#ifdef CONFIG_PAX_USERCOPY_SLABS
struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_usercopy_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3, 4, 5, 5,	/* 8, 16, 24, 32 */
	6, 6, 6, 6,	/* 40, 48, 56, 64 */
	1, 1, 1, 1,	/* 72, 80, 88, 96 */
	7, 7, 7, 7,	/* 104, 112, 120, 128 */
	2, 2, 2, 2,	/* 136, 144, 152, 160 */
	2, 2, 2, 2	/* 168, 176, 184, 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

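/*
 * Worked example (illustrative, not from the original source): a 100-byte
 * request gives size_index_elem(100) = (100 - 1) / 8 = 12, and
 * size_index[12] is 7, i.e. the request is served from the 128-byte
 * kmalloc cache.
 */
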
/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (size > KMALLOC_MAX_SIZE) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];
#endif

#ifdef CONFIG_PAX_USERCOPY_SLABS
	if (unlikely((flags & GFP_USERCOPY)))
		return kmalloc_usercopy_caches[index];
#endif

	return kmalloc_caches[index];
}

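/*
 * Example (illustrative, not from the original source): kmalloc_slab(100,
 * GFP_KERNEL) resolves through size_index to kmalloc_caches[7] (the
 * 128-byte cache), while kmalloc_slab(100, GFP_DMA) returns the matching
 * entry of kmalloc_dma_caches when CONFIG_ZONE_DMA is enabled.
 */
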
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
						1 << i, SLAB_USERCOPY | flags);
		}

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif

#ifdef CONFIG_PAX_USERCOPY_SLABS
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "usercopy-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
				size, SLAB_USERCOPY | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_SLABINFO
void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#ifdef CONFIG_PAX_MEMORY_SANITIZE
	seq_puts(m, " : pax <sanitized> <not_sanitized>");
#endif
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (!is_root_cache(s))
		return 0;
	return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name num-active-objs total-objs object-size
 * num-active-slabs total-slabs num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */