mm/slab_common.c (blame at "mm/sl[aou]b: Extract a common function for kmem_cache_destroy")
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

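/*
 * Illustrative examples (hypothetical callers, not taken from this file):
 * with CONFIG_DEBUG_VM enabled, the sanity check above rejects obviously
 * bogus creation requests before any allocator work is done, e.g.
 *
 *	kmem_cache_create(NULL, 32, 0, 0, NULL);	(no name)
 *	kmem_cache_create("foo", 1, 0, 0, NULL);	(size smaller than a pointer)
 *
 * Both take the -EINVAL branch, so kmem_cache_create() below drops out
 * through out_locked and returns NULL.
 */
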
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	if (kmem_cache_sanity_check(name, size))
		goto out_locked;

	s = __kmem_cache_create(name, size, align, flags, ctor);
	if (!s)
		err = -ENOSYS; /* Until __kmem_cache_create returns code */

	/*
	 * Check if the slab has actually been created and if it was a
	 * real instantiation. Aliases do not belong on the list.
	 */
	if (s && s->refcount == 1)
		list_add(&s->list, &slab_caches);

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
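/*
 * A minimal usage sketch of the interface documented above. struct foo, the
 * "foo_cache" name, foo_ctor() and the foo_* helpers are hypothetical and
 * only illustrate the calling convention; kmem_cache_create(),
 * kmem_cache_alloc() and kmem_cache_free() are the real entry points.
 */
struct foo {
	int bar;
};

static struct kmem_cache *foo_cachep;

/* Runs for each object when the cache allocates a new page of objects. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	f->bar = 0;
}

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, foo_ctor);
	if (!foo_cachep)
		return -ENOMEM;		/* creation failed, see error path above */

	return 0;
}

/* Objects then come and go through the usual per-cache allocation calls. */
static struct foo *foo_alloc(void)
{
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}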

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			__kmem_cache_destroy(s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
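/*
 * Continuing the hypothetical foo_cachep sketch above: the usual teardown is
 * to free every remaining object and then drop the cache, which is the path
 * that takes the refcount to zero here. Had the cache been created with
 * SLAB_DESTROY_BY_RCU, the rcu_barrier() above ensures all deferred frees
 * have completed before the cache itself is destroyed.
 */
static void foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}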

int slab_is_available(void)
{
	return slab_state >= UP;
}