1da177e4
LT
1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 * (c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * (c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 * UNIX Internals: The New Frontiers by Uresh Vahalia
16 * Pub: Prentice Hall ISBN 0-13-101908-2
17 * or with a little more detail in;
18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 * Jeff Bonwick (Sun Microsystems).
20 * Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
29 * slabs and you must pass objects with the same initialization state to
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 * full slabs with 0 free objects
38 * partial slabs
39 * empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise from empty slabs or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array, most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
53 * The c_cpuarray may not be read with local interrupts enabled -
54 * it's changed by smp_call_function().
55 *
56 * SMP synchronization:
57 * constructors and destructors are called without any locking.
343e0d7a 58 * Several members in struct kmem_cache and struct slab never change, they
1da177e4
LT
59 * are accessed without any locking.
60 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 * and local interrupts are disabled so slab code is preempt-safe.
62 * The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97. Started multi-threading - markhe
fc0abb14 71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
1da177e4
LT
72 * The mutex is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 * At present, each engine can be growing a cache. This should be blocked.
77 *
e498be7d
CL
78 * 15 March 2005. NUMA slab allocator.
79 * Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com>
81 * Alok N Kataria <alokk@calsoftinc.com>
82 * Christoph Lameter <christoph@lameter.com>
83 *
84 * Modified the slab allocator to be node aware on NUMA systems.
85 * Each node has its own list of partial, free and full slabs.
86 * All object allocations for a node occur from node specific slab lists.
1da177e4
LT
87 */
88
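/*
 * Illustrative sketch (not part of the allocator itself) of the typical
 * usage pattern described above.  "struct foo", foo_ctor() and foo_cache
 * are hypothetical names:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *
 * Because the constructor only runs when a new slab is populated, f must be
 * returned in its constructed state: the next kmem_cache_alloc() may hand
 * the same object out again without running foo_ctor().
 */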
89#include <linux/config.h>
90#include <linux/slab.h>
91#include <linux/mm.h>
92#include <linux/swap.h>
93#include <linux/cache.h>
94#include <linux/interrupt.h>
95#include <linux/init.h>
96#include <linux/compiler.h>
97#include <linux/seq_file.h>
98#include <linux/notifier.h>
99#include <linux/kallsyms.h>
100#include <linux/cpu.h>
101#include <linux/sysctl.h>
102#include <linux/module.h>
103#include <linux/rcupdate.h>
543537bd 104#include <linux/string.h>
e498be7d 105#include <linux/nodemask.h>
dc85da15 106#include <linux/mempolicy.h>
fc0abb14 107#include <linux/mutex.h>
1da177e4
LT
108
109#include <asm/uaccess.h>
110#include <asm/cacheflush.h>
111#include <asm/tlbflush.h>
112#include <asm/page.h>
113
114/*
115 * DEBUG - 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
116 * SLAB_RED_ZONE & SLAB_POISON.
117 * 0 for faster, smaller code (especially in the critical paths).
118 *
119 * STATS - 1 to collect stats for /proc/slabinfo.
120 * 0 for faster, smaller code (especially in the critical paths).
121 *
122 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
123 */
124
125#ifdef CONFIG_DEBUG_SLAB
126#define DEBUG 1
127#define STATS 1
128#define FORCED_DEBUG 1
129#else
130#define DEBUG 0
131#define STATS 0
132#define FORCED_DEBUG 0
133#endif
134
1da177e4
LT
135/* Shouldn't this be in a header file somewhere? */
136#define BYTES_PER_WORD sizeof(void *)
137
138#ifndef cache_line_size
139#define cache_line_size() L1_CACHE_BYTES
140#endif
141
142#ifndef ARCH_KMALLOC_MINALIGN
143/*
144 * Enforce a minimum alignment for the kmalloc caches.
145 * Usually, the kmalloc caches are cache_line_size() aligned, except when
146 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
147 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
148 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
149 * Note that this flag disables some debug features.
150 */
151#define ARCH_KMALLOC_MINALIGN 0
152#endif
153
154#ifndef ARCH_SLAB_MINALIGN
155/*
156 * Enforce a minimum alignment for all caches.
157 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
158 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
159 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
160 * some debug features.
161 */
162#define ARCH_SLAB_MINALIGN 0
163#endif
164
165#ifndef ARCH_KMALLOC_FLAGS
166#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
167#endif
168
169/* Legal flag mask for kmem_cache_create(). */
170#if DEBUG
171# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
172 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
173 SLAB_NO_REAP | SLAB_CACHE_DMA | \
174 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
175 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
176 SLAB_DESTROY_BY_RCU)
177#else
178# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
179 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
180 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
181 SLAB_DESTROY_BY_RCU)
182#endif
183
184/*
185 * kmem_bufctl_t:
186 *
187 * Bufctls are used for linking objs within a slab
188 * into a list of linked offsets.
189 *
190 * This implementation relies on "struct page" for locating the cache &
191 * slab an object belongs to.
192 * This allows the bufctl structure to be small (one int), but limits
193 * the number of objects a slab (not a cache) can contain when off-slab
194 * bufctls are used. The limit is the size of the largest general cache
195 * that does not use off-slab slabs.
196 * For 32-bit archs with 4 kB pages, this is 56.
197 * This is not serious, as it is only for large objects, when it is unwise
198 * to have too many per slab.
199 * Note: This limit can be raised by introducing a general cache whose size
200 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
201 */
202
fa5b08d5 203typedef unsigned int kmem_bufctl_t;
1da177e4
LT
204#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
205#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
206#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
207
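/*
 * Illustrative sketch of how the bufctl free list is used (the real
 * helpers, slab_bufctl() and the object alloc/free paths, appear further
 * down in this file): the kmem_bufctl_t array sits directly behind
 * struct slab, entry i holds the index of the next free object after
 * object i, and slabp->free holds the index of the first free object.
 * Handing out one object is then roughly:
 *
 *	next = slabp->free;
 *	objp = slabp->s_mem + next * cachep->buffer_size;
 *	slabp->free = slab_bufctl(slabp)[next];
 *	slabp->inuse++;
 *
 * and freeing pushes the object's index back onto the same list.
 */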
208/* Max number of objs-per-slab for caches which use off-slab slabs.
209 * Needed to avoid a possible looping condition in cache_grow().
210 */
211static unsigned long offslab_limit;
212
213/*
214 * struct slab
215 *
216 * Manages the objs in a slab. Placed either at the beginning of mem allocated
217 * for a slab, or allocated from a general cache.
218 * Slabs are chained into three lists: fully used, partial, fully free slabs.
219 */
220struct slab {
b28a02de
PE
221 struct list_head list;
222 unsigned long colouroff;
223 void *s_mem; /* including colour offset */
224 unsigned int inuse; /* num of objs active in slab */
225 kmem_bufctl_t free;
226 unsigned short nodeid;
1da177e4
LT
227};
228
229/*
230 * struct slab_rcu
231 *
232 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
233 * arrange for kmem_freepages to be called via RCU. This is useful if
234 * we need to approach a kernel structure obliquely, from its address
235 * obtained without the usual locking. We can lock the structure to
236 * stabilize it and check it's still at the given address, only if we
237 * can be sure that the memory has not been meanwhile reused for some
238 * other kind of object (which our subsystem's lock might corrupt).
239 *
240 * rcu_read_lock before reading the address, then rcu_read_unlock after
241 * taking the spinlock within the structure expected at that address.
242 *
243 * We assume struct slab_rcu can overlay struct slab when destroying.
244 */
245struct slab_rcu {
b28a02de 246 struct rcu_head head;
343e0d7a 247 struct kmem_cache *cachep;
b28a02de 248 void *addr;
1da177e4
LT
249};
250
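/*
 * Illustrative sketch of the lookup pattern described above for a
 * SLAB_DESTROY_BY_RCU cache; lookup_foo() and the embedded lock are
 * hypothetical:
 *
 *	rcu_read_lock();
 *	obj = lookup_foo(key);
 *	if (obj) {
 *		spin_lock(&obj->lock);
 *		if (obj->key != key) {
 *			spin_unlock(&obj->lock);
 *			obj = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * The memory cannot be handed back to the page allocator while readers are
 * inside the RCU read-side section, but it may be reused for another object
 * of the same cache - hence the re-check under the object's own lock.
 */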
251/*
252 * struct array_cache
253 *
1da177e4
LT
254 * Purpose:
255 * - LIFO ordering, to hand out cache-warm objects from _alloc
256 * - reduce the number of linked list operations
257 * - reduce spinlock operations
258 *
259 * The limit is stored in the per-cpu structure to reduce the data cache
260 * footprint.
261 *
262 */
263struct array_cache {
264 unsigned int avail;
265 unsigned int limit;
266 unsigned int batchcount;
267 unsigned int touched;
e498be7d
CL
268 spinlock_t lock;
269 void *entry[0]; /*
270 * Must have this definition in here for the proper
271 * alignment of array_cache. Also simplifies accessing
272 * the entries.
273 * [0] is for gcc 2.95. It should really be [].
274 */
1da177e4
LT
275};
276
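/*
 * Illustrative sketch of the LIFO fast paths built on this structure (the
 * real versions, with refill/flush handling, live in the alloc/free code
 * further down); interrupts are disabled by the callers:
 *
 *	alloc:
 *		ac = cpu_cache_get(cachep);
 *		if (likely(ac->avail)) {
 *			ac->touched = 1;
 *			objp = ac->entry[--ac->avail];
 *		} else
 *			objp = cache_alloc_refill(cachep, flags);
 *
 *	free:
 *		if (likely(ac->avail < ac->limit))
 *			ac->entry[ac->avail++] = objp;
 *		else
 *			cache_flusharray(cachep, ac), then push objp
 */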
277/* bootstrap: The caches do not work without cpuarrays anymore,
278 * but the cpuarrays are allocated from the generic caches...
279 */
280#define BOOT_CPUCACHE_ENTRIES 1
281struct arraycache_init {
282 struct array_cache cache;
b28a02de 283 void *entries[BOOT_CPUCACHE_ENTRIES];
1da177e4
LT
284};
285
286/*
e498be7d 287 * The slab lists for all objects.
1da177e4
LT
288 */
289struct kmem_list3 {
b28a02de
PE
290 struct list_head slabs_partial; /* partial list first, better asm code */
291 struct list_head slabs_full;
292 struct list_head slabs_free;
293 unsigned long free_objects;
294 unsigned long next_reap;
295 int free_touched;
296 unsigned int free_limit;
2e1217cf 297 unsigned int colour_next; /* Per-node cache coloring */
b28a02de
PE
298 spinlock_t list_lock;
299 struct array_cache *shared; /* shared per node */
300 struct array_cache **alien; /* on other nodes */
1da177e4
LT
301};
302
e498be7d
CL
303/*
304 * Need this for bootstrapping a per node allocator.
305 */
306#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
307struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
308#define CACHE_CACHE 0
309#define SIZE_AC 1
310#define SIZE_L3 (1 + MAX_NUMNODES)
311
312/*
7243cc05 313 * This function must be completely optimized away if
e498be7d
CL
314 * a constant is passed to it. Mostly the same as
315 * what is in linux/slab.h except it returns an
316 * index.
317 */
7243cc05 318static __always_inline int index_of(const size_t size)
e498be7d 319{
5ec8a847
SR
320 extern void __bad_size(void);
321
e498be7d
CL
322 if (__builtin_constant_p(size)) {
323 int i = 0;
324
325#define CACHE(x) \
326 if (size <= x) \
327 return i; \
328 else \
329 i++;
330#include "linux/kmalloc_sizes.h"
331#undef CACHE
5ec8a847 332 __bad_size();
7243cc05 333 } else
5ec8a847 334 __bad_size();
e498be7d
CL
335 return 0;
336}
337
338#define INDEX_AC index_of(sizeof(struct arraycache_init))
339#define INDEX_L3 index_of(sizeof(struct kmem_list3))
1da177e4 340
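/*
 * Illustrative use of the above: index_of() turns a compile-time size into
 * an index into malloc_sizes[], so e.g.
 *
 *	malloc_sizes[INDEX_L3].cs_cachep
 *
 * is the general cache whose objects are big enough for a struct kmem_list3.
 * kmem_cache_init() below relies on this to pick the caches that back the
 * bootstrap array caches and per-node lists.
 */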
5295a74c 341static void kmem_list3_init(struct kmem_list3 *parent)
e498be7d
CL
342{
343 INIT_LIST_HEAD(&parent->slabs_full);
344 INIT_LIST_HEAD(&parent->slabs_partial);
345 INIT_LIST_HEAD(&parent->slabs_free);
346 parent->shared = NULL;
347 parent->alien = NULL;
2e1217cf 348 parent->colour_next = 0;
e498be7d
CL
349 spin_lock_init(&parent->list_lock);
350 parent->free_objects = 0;
351 parent->free_touched = 0;
352}
353
354#define MAKE_LIST(cachep, listp, slab, nodeid) \
355 do { \
356 INIT_LIST_HEAD(listp); \
357 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
358 } while (0)
359
360#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
361 do { \
362 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
363 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
364 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
365 } while (0)
1da177e4
LT
366
367/*
343e0d7a 368 * struct kmem_cache
1da177e4
LT
369 *
370 * manages a cache.
371 */
b28a02de 372
2109a2d1 373struct kmem_cache {
1da177e4 374/* 1) per-cpu data, touched during every alloc/free */
b28a02de
PE
375 struct array_cache *array[NR_CPUS];
376 unsigned int batchcount;
377 unsigned int limit;
378 unsigned int shared;
3dafccf2 379 unsigned int buffer_size;
e498be7d 380/* 2) touched by every alloc & free from the backend */
b28a02de
PE
381 struct kmem_list3 *nodelists[MAX_NUMNODES];
382 unsigned int flags; /* constant flags */
383 unsigned int num; /* # of objs per slab */
384 spinlock_t spinlock;
1da177e4
LT
385
386/* 3) cache_grow/shrink */
387 /* order of pgs per slab (2^n) */
b28a02de 388 unsigned int gfporder;
1da177e4
LT
389
390 /* force GFP flags, e.g. GFP_DMA */
b28a02de 391 gfp_t gfpflags;
1da177e4 392
b28a02de
PE
393 size_t colour; /* cache colouring range */
394 unsigned int colour_off; /* colour offset */
343e0d7a 395 struct kmem_cache *slabp_cache;
b28a02de
PE
396 unsigned int slab_size;
397 unsigned int dflags; /* dynamic flags */
1da177e4
LT
398
399 /* constructor func */
343e0d7a 400 void (*ctor) (void *, struct kmem_cache *, unsigned long);
1da177e4
LT
401
402 /* de-constructor func */
343e0d7a 403 void (*dtor) (void *, struct kmem_cache *, unsigned long);
1da177e4
LT
404
405/* 4) cache creation/removal */
b28a02de
PE
406 const char *name;
407 struct list_head next;
1da177e4
LT
408
409/* 5) statistics */
410#if STATS
b28a02de
PE
411 unsigned long num_active;
412 unsigned long num_allocations;
413 unsigned long high_mark;
414 unsigned long grown;
415 unsigned long reaped;
416 unsigned long errors;
417 unsigned long max_freeable;
418 unsigned long node_allocs;
419 unsigned long node_frees;
420 atomic_t allochit;
421 atomic_t allocmiss;
422 atomic_t freehit;
423 atomic_t freemiss;
1da177e4
LT
424#endif
425#if DEBUG
3dafccf2
MS
426 /*
427 * If debugging is enabled, then the allocator can add additional
428 * fields and/or padding to every object. buffer_size contains the total
429 * object size including these internal fields, the following two
430 * variables contain the offset to the user object and its size.
431 */
432 int obj_offset;
433 int obj_size;
1da177e4
LT
434#endif
435};
436
437#define CFLGS_OFF_SLAB (0x80000000UL)
438#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
439
440#define BATCHREFILL_LIMIT 16
441/* Optimization question: fewer reaps mean a lower
442 * probability of unnecessary cpucache drain/refill cycles.
443 *
dc6f3f27 444 * OTOH the cpuarrays can contain lots of objects,
1da177e4
LT
445 * which could lock up otherwise freeable slabs.
446 */
447#define REAPTIMEOUT_CPUC (2*HZ)
448#define REAPTIMEOUT_LIST3 (4*HZ)
449
450#if STATS
451#define STATS_INC_ACTIVE(x) ((x)->num_active++)
452#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
453#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
454#define STATS_INC_GROWN(x) ((x)->grown++)
455#define STATS_INC_REAPED(x) ((x)->reaped++)
456#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
457 (x)->high_mark = (x)->num_active; \
458 } while (0)
459#define STATS_INC_ERR(x) ((x)->errors++)
460#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
e498be7d 461#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
1da177e4
LT
462#define STATS_SET_FREEABLE(x, i) \
463 do { if ((x)->max_freeable < i) \
464 (x)->max_freeable = i; \
465 } while (0)
466
467#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
468#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
469#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
470#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
471#else
472#define STATS_INC_ACTIVE(x) do { } while (0)
473#define STATS_DEC_ACTIVE(x) do { } while (0)
474#define STATS_INC_ALLOCED(x) do { } while (0)
475#define STATS_INC_GROWN(x) do { } while (0)
476#define STATS_INC_REAPED(x) do { } while (0)
477#define STATS_SET_HIGH(x) do { } while (0)
478#define STATS_INC_ERR(x) do { } while (0)
479#define STATS_INC_NODEALLOCS(x) do { } while (0)
e498be7d 480#define STATS_INC_NODEFREES(x) do { } while (0)
1da177e4
LT
481#define STATS_SET_FREEABLE(x, i) \
482 do { } while (0)
483
484#define STATS_INC_ALLOCHIT(x) do { } while (0)
485#define STATS_INC_ALLOCMISS(x) do { } while (0)
486#define STATS_INC_FREEHIT(x) do { } while (0)
487#define STATS_INC_FREEMISS(x) do { } while (0)
488#endif
489
490#if DEBUG
491/* Magic nums for obj red zoning.
492 * Placed in the first word before and the first word after an obj.
493 */
494#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
495#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
496
497/* ...and for poisoning */
498#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
499#define POISON_FREE 0x6b /* for use-after-free poisoning */
500#define POISON_END 0xa5 /* end-byte of poisoning */
501
502/* memory layout of objects:
503 * 0 : objp
3dafccf2 504 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
1da177e4
LT
505 * the end of an object is aligned with the end of the real
506 * allocation. Catches writes behind the end of the allocation.
3dafccf2 507 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
1da177e4 508 * redzone word.
3dafccf2
MS
509 * cachep->obj_offset: The real object.
510 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
511 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
1da177e4 512 */
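/*
 * A worked example of the layout above, assuming a 64-bit arch
 * (BYTES_PER_WORD == 8), a 40-byte object, SLAB_RED_ZONE | SLAB_STORE_USER
 * and no extra alignment padding:
 *
 *	bytes  0.. 7	redzone word 1 (RED_INACTIVE or RED_ACTIVE)
 *	bytes  8..47	the object itself (obj_offset == 8, obj_size == 40)
 *	bytes 48..55	redzone word 2 (buffer_size - 2*BYTES_PER_WORD)
 *	bytes 56..63	last caller    (buffer_size - BYTES_PER_WORD)
 *
 * giving buffer_size == 64.
 */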
343e0d7a 513static int obj_offset(struct kmem_cache *cachep)
1da177e4 514{
3dafccf2 515 return cachep->obj_offset;
1da177e4
LT
516}
517
343e0d7a 518static int obj_size(struct kmem_cache *cachep)
1da177e4 519{
3dafccf2 520 return cachep->obj_size;
1da177e4
LT
521}
522
343e0d7a 523static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
1da177e4
LT
524{
525 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
3dafccf2 526 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
1da177e4
LT
527}
528
343e0d7a 529static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
1da177e4
LT
530{
531 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
532 if (cachep->flags & SLAB_STORE_USER)
3dafccf2 533 return (unsigned long *)(objp + cachep->buffer_size -
b28a02de 534 2 * BYTES_PER_WORD);
3dafccf2 535 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
1da177e4
LT
536}
537
343e0d7a 538static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1da177e4
LT
539{
540 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
3dafccf2 541 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
1da177e4
LT
542}
543
544#else
545
3dafccf2
MS
546#define obj_offset(x) 0
547#define obj_size(cachep) (cachep->buffer_size)
1da177e4
LT
548#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
549#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
550#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
551
552#endif
553
554/*
555 * Maximum size of an obj (in 2^order pages)
556 * and absolute limit for the gfp order.
557 */
558#if defined(CONFIG_LARGE_ALLOCS)
559#define MAX_OBJ_ORDER 13 /* up to 32Mb */
560#define MAX_GFP_ORDER 13 /* up to 32Mb */
561#elif defined(CONFIG_MMU)
562#define MAX_OBJ_ORDER 5 /* 32 pages */
563#define MAX_GFP_ORDER 5 /* 32 pages */
564#else
565#define MAX_OBJ_ORDER 8 /* up to 1Mb */
566#define MAX_GFP_ORDER 8 /* up to 1Mb */
567#endif
568
569/*
570 * Do not go above this order unless 0 objects fit into the slab.
571 */
572#define BREAK_GFP_ORDER_HI 1
573#define BREAK_GFP_ORDER_LO 0
574static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
575
065d41cb 576/* Functions for storing/retrieving the cachep and/or slab from the
1da177e4
LT
577 * global 'mem_map'. These are used to find the slab an obj belongs to.
578 * With kfree(), these are used to find the cache which an obj belongs to.
579 */
065d41cb
PE
580static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
581{
582 page->lru.next = (struct list_head *)cache;
583}
584
585static inline struct kmem_cache *page_get_cache(struct page *page)
586{
587 return (struct kmem_cache *)page->lru.next;
588}
589
590static inline void page_set_slab(struct page *page, struct slab *slab)
591{
592 page->lru.prev = (struct list_head *)slab;
593}
594
595static inline struct slab *page_get_slab(struct page *page)
596{
597 return (struct slab *)page->lru.prev;
598}
1da177e4 599
6ed5eb22
PE
600static inline struct kmem_cache *virt_to_cache(const void *obj)
601{
602 struct page *page = virt_to_page(obj);
603 return page_get_cache(page);
604}
605
606static inline struct slab *virt_to_slab(const void *obj)
607{
608 struct page *page = virt_to_page(obj);
609 return page_get_slab(page);
610}
611
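/*
 * Illustrative sketch: when a slab is grown, each of its pages is stamped
 * with page_set_cache()/page_set_slab(), so a bare object pointer can later
 * be mapped back to its cache and slab.  This is roughly what kfree() does
 * further down in this file:
 *
 *	struct kmem_cache *c = virt_to_cache(objp);
 *	__cache_free(c, (void *)objp);
 */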
1da177e4
LT
612/* These are the default caches for kmalloc. Custom caches can have other sizes. */
613struct cache_sizes malloc_sizes[] = {
614#define CACHE(x) { .cs_size = (x) },
615#include <linux/kmalloc_sizes.h>
616 CACHE(ULONG_MAX)
617#undef CACHE
618};
619EXPORT_SYMBOL(malloc_sizes);
620
621/* Must match cache_sizes above. Out of line to keep cache footprint low. */
622struct cache_names {
623 char *name;
624 char *name_dma;
625};
626
627static struct cache_names __initdata cache_names[] = {
628#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
629#include <linux/kmalloc_sizes.h>
b28a02de 630 {NULL,}
1da177e4
LT
631#undef CACHE
632};
633
634static struct arraycache_init initarray_cache __initdata =
b28a02de 635 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4 636static struct arraycache_init initarray_generic =
b28a02de 637 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4
LT
638
639/* internal cache of cache description objs */
343e0d7a 640static struct kmem_cache cache_cache = {
b28a02de
PE
641 .batchcount = 1,
642 .limit = BOOT_CPUCACHE_ENTRIES,
643 .shared = 1,
343e0d7a 644 .buffer_size = sizeof(struct kmem_cache),
b28a02de
PE
645 .flags = SLAB_NO_REAP,
646 .spinlock = SPIN_LOCK_UNLOCKED,
647 .name = "kmem_cache",
1da177e4 648#if DEBUG
343e0d7a 649 .obj_size = sizeof(struct kmem_cache),
1da177e4
LT
650#endif
651};
652
653/* Guard access to the cache-chain. */
fc0abb14 654static DEFINE_MUTEX(cache_chain_mutex);
1da177e4
LT
655static struct list_head cache_chain;
656
657/*
658 * vm_enough_memory() looks at this to determine how many
659 * slab-allocated pages are possibly freeable under pressure
660 *
661 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
662 */
663atomic_t slab_reclaim_pages;
1da177e4
LT
664
665/*
666 * chicken and egg problem: delay the per-cpu array allocation
667 * until the general caches are up.
668 */
669static enum {
670 NONE,
e498be7d
CL
671 PARTIAL_AC,
672 PARTIAL_L3,
1da177e4
LT
673 FULL
674} g_cpucache_up;
675
676static DEFINE_PER_CPU(struct work_struct, reap_work);
677
343e0d7a
PE
678static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
679static void enable_cpucache(struct kmem_cache *cachep);
b28a02de 680static void cache_reap(void *unused);
343e0d7a 681static int __node_shrink(struct kmem_cache *cachep, int node);
1da177e4 682
343e0d7a 683static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1da177e4
LT
684{
685 return cachep->array[smp_processor_id()];
686}
687
343e0d7a 688static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
1da177e4
LT
689{
690 struct cache_sizes *csizep = malloc_sizes;
691
692#if DEBUG
693 /* This happens if someone tries to call
b28a02de
PE
694 * kmem_cache_create(), or __kmalloc(), before
695 * the generic caches are initialized.
696 */
c7e43c78 697 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
1da177e4
LT
698#endif
699 while (size > csizep->cs_size)
700 csizep++;
701
702 /*
0abf40c1 703 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
1da177e4
LT
704 * has cs_{dma,}cachep==NULL. Thus no special case
705 * for large kmalloc calls required.
706 */
707 if (unlikely(gfpflags & GFP_DMA))
708 return csizep->cs_dmacachep;
709 return csizep->cs_cachep;
710}
711
343e0d7a 712struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
97e2bde4
MS
713{
714 return __find_general_cachep(size, gfpflags);
715}
716EXPORT_SYMBOL(kmem_find_general_cachep);
717
fbaccacf 718static size_t slab_mgmt_size(size_t nr_objs, size_t align)
1da177e4 719{
fbaccacf
SR
720 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
721}
1da177e4 722
fbaccacf
SR
723/* Calculate the number of objects and left-over bytes for a given
724 buffer size. */
725static void cache_estimate(unsigned long gfporder, size_t buffer_size,
726 size_t align, int flags, size_t *left_over,
727 unsigned int *num)
728{
729 int nr_objs;
730 size_t mgmt_size;
731 size_t slab_size = PAGE_SIZE << gfporder;
1da177e4 732
fbaccacf
SR
733 /*
734 * The slab management structure can be either off the slab or
735 * on it. For the latter case, the memory allocated for a
736 * slab is used for:
737 *
738 * - The struct slab
739 * - One kmem_bufctl_t for each object
740 * - Padding to respect alignment of @align
741 * - @buffer_size bytes for each object
742 *
743 * If the slab management structure is off the slab, then the
744 * alignment will already be calculated into the size. Because
745 * the slabs are all pages aligned, the objects will be at the
746 * correct alignment when allocated.
747 */
748 if (flags & CFLGS_OFF_SLAB) {
749 mgmt_size = 0;
750 nr_objs = slab_size / buffer_size;
751
752 if (nr_objs > SLAB_LIMIT)
753 nr_objs = SLAB_LIMIT;
754 } else {
755 /*
756 * Ignore padding for the initial guess. The padding
757 * is at most @align-1 bytes, and @buffer_size is at
758 * least @align. In the worst case, this result will
759 * be one greater than the number of objects that fit
760 * into the memory allocation when taking the padding
761 * into account.
762 */
763 nr_objs = (slab_size - sizeof(struct slab)) /
764 (buffer_size + sizeof(kmem_bufctl_t));
765
766 /*
767 * This calculated number will be either the right
768 * amount, or one greater than what we want.
769 */
770 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
771 > slab_size)
772 nr_objs--;
773
774 if (nr_objs > SLAB_LIMIT)
775 nr_objs = SLAB_LIMIT;
776
777 mgmt_size = slab_mgmt_size(nr_objs, align);
778 }
779 *num = nr_objs;
780 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
1da177e4
LT
781}
782
783#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
784
343e0d7a 785static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
1da177e4
LT
786{
787 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
b28a02de 788 function, cachep->name, msg);
1da177e4
LT
789 dump_stack();
790}
791
8fce4d8e
CL
792#ifdef CONFIG_NUMA
793/*
794 * Special reaping functions for NUMA systems called from cache_reap().
795 * These take care of doing round robin flushing of alien caches (containing
796 * objects freed on different nodes from which they were allocated) and the
797 * flushing of remote pcps by calling drain_node_pages.
798 */
799static DEFINE_PER_CPU(unsigned long, reap_node);
800
801static void init_reap_node(int cpu)
802{
803 int node;
804
805 node = next_node(cpu_to_node(cpu), node_online_map);
806 if (node == MAX_NUMNODES)
807 node = 0;
808
809 __get_cpu_var(reap_node) = node;
810}
811
812static void next_reap_node(void)
813{
814 int node = __get_cpu_var(reap_node);
815
816 /*
817 * Also drain per cpu pages on remote zones
818 */
819 if (node != numa_node_id())
820 drain_node_pages(node);
821
822 node = next_node(node, node_online_map);
823 if (unlikely(node >= MAX_NUMNODES))
824 node = first_node(node_online_map);
825 __get_cpu_var(reap_node) = node;
826}
827
828#else
829#define init_reap_node(cpu) do { } while (0)
830#define next_reap_node(void) do { } while (0)
831#endif
832
1da177e4
LT
833/*
834 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
835 * via the workqueue/eventd.
836 * Add the CPU number into the expiration time to minimize the possibility of
837 * the CPUs getting into lockstep and contending for the global cache chain
838 * lock.
839 */
840static void __devinit start_cpu_timer(int cpu)
841{
842 struct work_struct *reap_work = &per_cpu(reap_work, cpu);
843
844 /*
845 * When this gets called from do_initcalls via cpucache_init(),
846 * init_workqueues() has already run, so keventd will be setup
847 * at that time.
848 */
849 if (keventd_up() && reap_work->func == NULL) {
8fce4d8e 850 init_reap_node(cpu);
1da177e4
LT
851 INIT_WORK(reap_work, cache_reap, NULL);
852 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
853 }
854}
855
e498be7d 856static struct array_cache *alloc_arraycache(int node, int entries,
b28a02de 857 int batchcount)
1da177e4 858{
b28a02de 859 int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
1da177e4
LT
860 struct array_cache *nc = NULL;
861
e498be7d 862 nc = kmalloc_node(memsize, GFP_KERNEL, node);
1da177e4
LT
863 if (nc) {
864 nc->avail = 0;
865 nc->limit = entries;
866 nc->batchcount = batchcount;
867 nc->touched = 0;
e498be7d 868 spin_lock_init(&nc->lock);
1da177e4
LT
869 }
870 return nc;
871}
872
e498be7d 873#ifdef CONFIG_NUMA
343e0d7a 874static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
dc85da15 875
5295a74c 876static struct array_cache **alloc_alien_cache(int node, int limit)
e498be7d
CL
877{
878 struct array_cache **ac_ptr;
b28a02de 879 int memsize = sizeof(void *) * MAX_NUMNODES;
e498be7d
CL
880 int i;
881
882 if (limit > 1)
883 limit = 12;
884 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
885 if (ac_ptr) {
886 for_each_node(i) {
887 if (i == node || !node_online(i)) {
888 ac_ptr[i] = NULL;
889 continue;
890 }
891 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
892 if (!ac_ptr[i]) {
b28a02de 893 for (i--; i >= 0; i--)
e498be7d
CL
894 kfree(ac_ptr[i]);
895 kfree(ac_ptr);
896 return NULL;
897 }
898 }
899 }
900 return ac_ptr;
901}
902
5295a74c 903static void free_alien_cache(struct array_cache **ac_ptr)
e498be7d
CL
904{
905 int i;
906
907 if (!ac_ptr)
908 return;
909
910 for_each_node(i)
b28a02de 911 kfree(ac_ptr[i]);
e498be7d
CL
912
913 kfree(ac_ptr);
914}
915
343e0d7a 916static void __drain_alien_cache(struct kmem_cache *cachep,
5295a74c 917 struct array_cache *ac, int node)
e498be7d
CL
918{
919 struct kmem_list3 *rl3 = cachep->nodelists[node];
920
921 if (ac->avail) {
922 spin_lock(&rl3->list_lock);
ff69416e 923 free_block(cachep, ac->entry, ac->avail, node);
e498be7d
CL
924 ac->avail = 0;
925 spin_unlock(&rl3->list_lock);
926 }
927}
928
8fce4d8e
CL
929/*
930 * Called from cache_reap() to regularly drain alien caches round robin.
931 */
932static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
933{
934 int node = __get_cpu_var(reap_node);
935
936 if (l3->alien) {
937 struct array_cache *ac = l3->alien[node];
938 if (ac && ac->avail) {
939 spin_lock_irq(&ac->lock);
940 __drain_alien_cache(cachep, ac, node);
941 spin_unlock_irq(&ac->lock);
942 }
943 }
944}
945
4484ebf1 946static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
e498be7d 947{
b28a02de 948 int i = 0;
e498be7d
CL
949 struct array_cache *ac;
950 unsigned long flags;
951
952 for_each_online_node(i) {
4484ebf1 953 ac = alien[i];
e498be7d
CL
954 if (ac) {
955 spin_lock_irqsave(&ac->lock, flags);
956 __drain_alien_cache(cachep, ac, i);
957 spin_unlock_irqrestore(&ac->lock, flags);
958 }
959 }
960}
961#else
7a21ef6f 962
4484ebf1 963#define drain_alien_cache(cachep, alien) do { } while (0)
8fce4d8e 964#define reap_alien(cachep, l3) do { } while (0)
4484ebf1 965
7a21ef6f
LT
966static inline struct array_cache **alloc_alien_cache(int node, int limit)
967{
968 return (struct array_cache **) 0x01020304ul;
969}
970
4484ebf1
RT
971static inline void free_alien_cache(struct array_cache **ac_ptr)
972{
973}
7a21ef6f 974
e498be7d
CL
975#endif
976
1da177e4 977static int __devinit cpuup_callback(struct notifier_block *nfb,
b28a02de 978 unsigned long action, void *hcpu)
1da177e4
LT
979{
980 long cpu = (long)hcpu;
343e0d7a 981 struct kmem_cache *cachep;
e498be7d
CL
982 struct kmem_list3 *l3 = NULL;
983 int node = cpu_to_node(cpu);
984 int memsize = sizeof(struct kmem_list3);
1da177e4
LT
985
986 switch (action) {
987 case CPU_UP_PREPARE:
fc0abb14 988 mutex_lock(&cache_chain_mutex);
e498be7d
CL
989 /* we need to do this right in the beginning since
990 * the alloc_arraycache calls are going to use this list.
991 * kmalloc_node allows us to add the slab to the right
992 * kmem_list3 and not this cpu's kmem_list3
993 */
994
1da177e4 995 list_for_each_entry(cachep, &cache_chain, next) {
e498be7d
CL
996 /* set up the kmem_list3 for this node before we can
997 * begin anything. Make sure some other cpu on this
998 * node has not already allocated this
999 */
1000 if (!cachep->nodelists[node]) {
1001 if (!(l3 = kmalloc_node(memsize,
b28a02de 1002 GFP_KERNEL, node)))
e498be7d
CL
1003 goto bad;
1004 kmem_list3_init(l3);
1005 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
b28a02de 1006 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d 1007
4484ebf1
RT
1008 /*
1009 * The l3s don't come and go as CPUs come and
1010 * go. cache_chain_mutex is sufficient
1011 * protection here.
1012 */
e498be7d
CL
1013 cachep->nodelists[node] = l3;
1014 }
1da177e4 1015
e498be7d
CL
1016 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1017 cachep->nodelists[node]->free_limit =
b28a02de
PE
1018 (1 + nr_cpus_node(node)) *
1019 cachep->batchcount + cachep->num;
e498be7d
CL
1020 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1021 }
1022
1023 /* Now we can go ahead with allocating the shared arrays
b28a02de 1024 & array caches */
e498be7d 1025 list_for_each_entry(cachep, &cache_chain, next) {
cd105df4 1026 struct array_cache *nc;
4484ebf1
RT
1027 struct array_cache *shared;
1028 struct array_cache **alien;
cd105df4 1029
e498be7d 1030 nc = alloc_arraycache(node, cachep->limit,
4484ebf1 1031 cachep->batchcount);
1da177e4
LT
1032 if (!nc)
1033 goto bad;
4484ebf1
RT
1034 shared = alloc_arraycache(node,
1035 cachep->shared * cachep->batchcount,
1036 0xbaadf00d);
1037 if (!shared)
1038 goto bad;
7a21ef6f 1039
4484ebf1
RT
1040 alien = alloc_alien_cache(node, cachep->limit);
1041 if (!alien)
1042 goto bad;
1da177e4 1043 cachep->array[cpu] = nc;
1da177e4 1044
e498be7d
CL
1045 l3 = cachep->nodelists[node];
1046 BUG_ON(!l3);
e498be7d 1047
4484ebf1
RT
1048 spin_lock_irq(&l3->list_lock);
1049 if (!l3->shared) {
1050 /*
1051 * We are serialised from CPU_DEAD or
1052 * CPU_UP_CANCELLED by the cpucontrol lock
1053 */
1054 l3->shared = shared;
1055 shared = NULL;
e498be7d 1056 }
4484ebf1
RT
1057#ifdef CONFIG_NUMA
1058 if (!l3->alien) {
1059 l3->alien = alien;
1060 alien = NULL;
1061 }
1062#endif
1063 spin_unlock_irq(&l3->list_lock);
1064
1065 kfree(shared);
1066 free_alien_cache(alien);
1da177e4 1067 }
fc0abb14 1068 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1069 break;
1070 case CPU_ONLINE:
1071 start_cpu_timer(cpu);
1072 break;
1073#ifdef CONFIG_HOTPLUG_CPU
1074 case CPU_DEAD:
4484ebf1
RT
1075 /*
1076 * Even if all the cpus of a node are down, we don't free the
1077 * kmem_list3 of any cache. This to avoid a race between
1078 * cpu_down, and a kmalloc allocation from another cpu for
1079 * memory from the node of the cpu going down. The list3
1080 * structure is usually allocated from kmem_cache_create() and
1081 * gets destroyed at kmem_cache_destroy().
1082 */
1da177e4
LT
1083 /* fall thru */
1084 case CPU_UP_CANCELED:
fc0abb14 1085 mutex_lock(&cache_chain_mutex);
1da177e4
LT
1086
1087 list_for_each_entry(cachep, &cache_chain, next) {
1088 struct array_cache *nc;
4484ebf1
RT
1089 struct array_cache *shared;
1090 struct array_cache **alien;
e498be7d 1091 cpumask_t mask;
1da177e4 1092
e498be7d 1093 mask = node_to_cpumask(node);
1da177e4
LT
1094 /* cpu is dead; no one can alloc from it. */
1095 nc = cachep->array[cpu];
1096 cachep->array[cpu] = NULL;
e498be7d
CL
1097 l3 = cachep->nodelists[node];
1098
1099 if (!l3)
4484ebf1 1100 goto free_array_cache;
e498be7d 1101
ca3b9b91 1102 spin_lock_irq(&l3->list_lock);
e498be7d
CL
1103
1104 /* Free limit for this kmem_list3 */
1105 l3->free_limit -= cachep->batchcount;
1106 if (nc)
ff69416e 1107 free_block(cachep, nc->entry, nc->avail, node);
e498be7d
CL
1108
1109 if (!cpus_empty(mask)) {
ca3b9b91 1110 spin_unlock_irq(&l3->list_lock);
4484ebf1 1111 goto free_array_cache;
b28a02de 1112 }
e498be7d 1113
4484ebf1
RT
1114 shared = l3->shared;
1115 if (shared) {
e498be7d 1116 free_block(cachep, l3->shared->entry,
b28a02de 1117 l3->shared->avail, node);
e498be7d
CL
1118 l3->shared = NULL;
1119 }
e498be7d 1120
4484ebf1
RT
1121 alien = l3->alien;
1122 l3->alien = NULL;
1123
1124 spin_unlock_irq(&l3->list_lock);
1125
1126 kfree(shared);
1127 if (alien) {
1128 drain_alien_cache(cachep, alien);
1129 free_alien_cache(alien);
e498be7d 1130 }
4484ebf1 1131free_array_cache:
1da177e4
LT
1132 kfree(nc);
1133 }
4484ebf1
RT
1134 /*
1135 * In the previous loop, all the objects were freed to
1136 * the respective cache's slabs, now we can go ahead and
1137 * shrink each nodelist to its limit.
1138 */
1139 list_for_each_entry(cachep, &cache_chain, next) {
1140 l3 = cachep->nodelists[node];
1141 if (!l3)
1142 continue;
1143 spin_lock_irq(&l3->list_lock);
1144 /* free slabs belonging to this node */
1145 __node_shrink(cachep, node);
1146 spin_unlock_irq(&l3->list_lock);
1147 }
fc0abb14 1148 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1149 break;
1150#endif
1151 }
1152 return NOTIFY_OK;
b28a02de 1153 bad:
fc0abb14 1154 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1155 return NOTIFY_BAD;
1156}
1157
1158static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
1159
e498be7d
CL
1160/*
1161 * swap the static kmem_list3 with kmalloced memory
1162 */
343e0d7a 1163static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
e498be7d
CL
1164{
1165 struct kmem_list3 *ptr;
1166
1167 BUG_ON(cachep->nodelists[nodeid] != list);
1168 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1169 BUG_ON(!ptr);
1170
1171 local_irq_disable();
1172 memcpy(ptr, list, sizeof(struct kmem_list3));
1173 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1174 cachep->nodelists[nodeid] = ptr;
1175 local_irq_enable();
1176}
1177
1da177e4
LT
1178/* Initialisation.
1179 * Called after the gfp() functions have been enabled, and before smp_init().
1180 */
1181void __init kmem_cache_init(void)
1182{
1183 size_t left_over;
1184 struct cache_sizes *sizes;
1185 struct cache_names *names;
e498be7d 1186 int i;
07ed76b2 1187 int order;
e498be7d
CL
1188
1189 for (i = 0; i < NUM_INIT_LISTS; i++) {
1190 kmem_list3_init(&initkmem_list3[i]);
1191 if (i < MAX_NUMNODES)
1192 cache_cache.nodelists[i] = NULL;
1193 }
1da177e4
LT
1194
1195 /*
1196 * Fragmentation resistance on low memory - only use bigger
1197 * page orders on machines with more than 32MB of memory.
1198 */
1199 if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1200 slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1201
1da177e4
LT
1202 /* Bootstrap is tricky, because several objects are allocated
1203 * from caches that do not exist yet:
343e0d7a 1204 * 1) initialize the cache_cache cache: it contains the struct kmem_cache
1da177e4
LT
1205 * structures of all caches, except cache_cache itself: cache_cache
1206 * is statically allocated.
e498be7d
CL
1207 * Initially an __init data area is used for the head array and the
1208 * kmem_list3 structures, it's replaced with a kmalloc allocated
1209 * array at the end of the bootstrap.
1da177e4 1210 * 2) Create the first kmalloc cache.
343e0d7a 1211 * The struct kmem_cache for the new cache is allocated normally.
e498be7d
CL
1212 * An __init data area is used for the head array.
1213 * 3) Create the remaining kmalloc caches, with minimally sized
1214 * head arrays.
1da177e4
LT
1215 * 4) Replace the __init data head arrays for cache_cache and the first
1216 * kmalloc cache with kmalloc allocated arrays.
e498be7d
CL
1217 * 5) Replace the __init data for kmem_list3 for cache_cache and
1218 * the other cache's with kmalloc allocated memory.
1219 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1da177e4
LT
1220 */
1221
1222 /* 1) create the cache_cache */
1da177e4
LT
1223 INIT_LIST_HEAD(&cache_chain);
1224 list_add(&cache_cache.next, &cache_chain);
1225 cache_cache.colour_off = cache_line_size();
1226 cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
e498be7d 1227 cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
1da177e4 1228
3dafccf2 1229 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
1da177e4 1230
07ed76b2
JS
1231 for (order = 0; order < MAX_ORDER; order++) {
1232 cache_estimate(order, cache_cache.buffer_size,
1233 cache_line_size(), 0, &left_over, &cache_cache.num);
1234 if (cache_cache.num)
1235 break;
1236 }
1da177e4
LT
1237 if (!cache_cache.num)
1238 BUG();
07ed76b2 1239 cache_cache.gfporder = order;
b28a02de 1240 cache_cache.colour = left_over / cache_cache.colour_off;
b28a02de
PE
1241 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1242 sizeof(struct slab), cache_line_size());
1da177e4
LT
1243
1244 /* 2+3) create the kmalloc caches */
1245 sizes = malloc_sizes;
1246 names = cache_names;
1247
e498be7d
CL
1248 /* Initialize the caches that provide memory for the array cache
1249 * and the kmem_list3 structures first.
1250 * Without this, further allocations will bug
1251 */
1252
1253 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
b28a02de
PE
1254 sizes[INDEX_AC].cs_size,
1255 ARCH_KMALLOC_MINALIGN,
1256 (ARCH_KMALLOC_FLAGS |
1257 SLAB_PANIC), NULL, NULL);
e498be7d
CL
1258
1259 if (INDEX_AC != INDEX_L3)
1260 sizes[INDEX_L3].cs_cachep =
b28a02de
PE
1261 kmem_cache_create(names[INDEX_L3].name,
1262 sizes[INDEX_L3].cs_size,
1263 ARCH_KMALLOC_MINALIGN,
1264 (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
1265 NULL);
e498be7d 1266
1da177e4 1267 while (sizes->cs_size != ULONG_MAX) {
e498be7d
CL
1268 /*
1269 * For performance, all the general caches are L1 aligned.
1da177e4
LT
1270 * This should be particularly beneficial on SMP boxes, as it
1271 * eliminates "false sharing".
1272 * Note for systems short on memory removing the alignment will
e498be7d
CL
1273 * allow tighter packing of the smaller caches.
1274 */
b28a02de 1275 if (!sizes->cs_cachep)
e498be7d 1276 sizes->cs_cachep = kmem_cache_create(names->name,
b28a02de
PE
1277 sizes->cs_size,
1278 ARCH_KMALLOC_MINALIGN,
1279 (ARCH_KMALLOC_FLAGS
1280 | SLAB_PANIC),
1281 NULL, NULL);
1da177e4
LT
1282
1283 /* Inc off-slab bufctl limit until the ceiling is hit. */
1284 if (!(OFF_SLAB(sizes->cs_cachep))) {
b28a02de 1285 offslab_limit = sizes->cs_size - sizeof(struct slab);
1da177e4
LT
1286 offslab_limit /= sizeof(kmem_bufctl_t);
1287 }
1288
1289 sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
b28a02de
PE
1290 sizes->cs_size,
1291 ARCH_KMALLOC_MINALIGN,
1292 (ARCH_KMALLOC_FLAGS |
1293 SLAB_CACHE_DMA |
1294 SLAB_PANIC), NULL,
1295 NULL);
1da177e4
LT
1296
1297 sizes++;
1298 names++;
1299 }
1300 /* 4) Replace the bootstrap head arrays */
1301 {
b28a02de 1302 void *ptr;
e498be7d 1303
1da177e4 1304 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1305
1da177e4 1306 local_irq_disable();
9a2dba4b
PE
1307 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1308 memcpy(ptr, cpu_cache_get(&cache_cache),
b28a02de 1309 sizeof(struct arraycache_init));
1da177e4
LT
1310 cache_cache.array[smp_processor_id()] = ptr;
1311 local_irq_enable();
e498be7d 1312
1da177e4 1313 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1314
1da177e4 1315 local_irq_disable();
9a2dba4b 1316 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
b28a02de 1317 != &initarray_generic.cache);
9a2dba4b 1318 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
b28a02de 1319 sizeof(struct arraycache_init));
e498be7d 1320 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
b28a02de 1321 ptr;
1da177e4
LT
1322 local_irq_enable();
1323 }
e498be7d
CL
1324 /* 5) Replace the bootstrap kmem_list3's */
1325 {
1326 int node;
1327 /* Replace the static kmem_list3 structures for the boot cpu */
1328 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
b28a02de 1329 numa_node_id());
e498be7d
CL
1330
1331 for_each_online_node(node) {
1332 init_list(malloc_sizes[INDEX_AC].cs_cachep,
b28a02de 1333 &initkmem_list3[SIZE_AC + node], node);
e498be7d
CL
1334
1335 if (INDEX_AC != INDEX_L3) {
1336 init_list(malloc_sizes[INDEX_L3].cs_cachep,
b28a02de
PE
1337 &initkmem_list3[SIZE_L3 + node],
1338 node);
e498be7d
CL
1339 }
1340 }
1341 }
1da177e4 1342
e498be7d 1343 /* 6) resize the head arrays to their final sizes */
1da177e4 1344 {
343e0d7a 1345 struct kmem_cache *cachep;
fc0abb14 1346 mutex_lock(&cache_chain_mutex);
1da177e4 1347 list_for_each_entry(cachep, &cache_chain, next)
b28a02de 1348 enable_cpucache(cachep);
fc0abb14 1349 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1350 }
1351
1352 /* Done! */
1353 g_cpucache_up = FULL;
1354
1355 /* Register a cpu startup notifier callback
9a2dba4b 1356 * that initializes cpu_cache_get for all new cpus
1da177e4
LT
1357 */
1358 register_cpu_notifier(&cpucache_notifier);
1da177e4
LT
1359
1360 /* The reap timers are started later, with a module init call:
1361 * That part of the kernel is not yet operational.
1362 */
1363}
1364
1365static int __init cpucache_init(void)
1366{
1367 int cpu;
1368
1369 /*
1370 * Register the timers that return unneeded
1371 * pages to gfp.
1372 */
e498be7d 1373 for_each_online_cpu(cpu)
b28a02de 1374 start_cpu_timer(cpu);
1da177e4
LT
1375
1376 return 0;
1377}
1378
1379__initcall(cpucache_init);
1380
1381/*
1382 * Interface to system's page allocator. No need to hold the cache-lock.
1383 *
1384 * If we requested dmaable memory, we will get it. Even if we
1385 * did not request dmaable memory, we might get it, but that
1386 * would be relatively rare and ignorable.
1387 */
343e0d7a 1388static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4
LT
1389{
1390 struct page *page;
1391 void *addr;
1392 int i;
1393
1394 flags |= cachep->gfpflags;
50c85a19 1395 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1da177e4
LT
1396 if (!page)
1397 return NULL;
1398 addr = page_address(page);
1399
1400 i = (1 << cachep->gfporder);
1401 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1402 atomic_add(i, &slab_reclaim_pages);
1403 add_page_state(nr_slab, i);
1404 while (i--) {
1405 SetPageSlab(page);
1406 page++;
1407 }
1408 return addr;
1409}
1410
1411/*
1412 * Interface to system's page release.
1413 */
343e0d7a 1414static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1da177e4 1415{
b28a02de 1416 unsigned long i = (1 << cachep->gfporder);
1da177e4
LT
1417 struct page *page = virt_to_page(addr);
1418 const unsigned long nr_freed = i;
1419
1420 while (i--) {
1421 if (!TestClearPageSlab(page))
1422 BUG();
1423 page++;
1424 }
1425 sub_page_state(nr_slab, nr_freed);
1426 if (current->reclaim_state)
1427 current->reclaim_state->reclaimed_slab += nr_freed;
1428 free_pages((unsigned long)addr, cachep->gfporder);
b28a02de
PE
1429 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1430 atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
1da177e4
LT
1431}
1432
1433static void kmem_rcu_free(struct rcu_head *head)
1434{
b28a02de 1435 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
343e0d7a 1436 struct kmem_cache *cachep = slab_rcu->cachep;
1da177e4
LT
1437
1438 kmem_freepages(cachep, slab_rcu->addr);
1439 if (OFF_SLAB(cachep))
1440 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1441}
1442
1443#if DEBUG
1444
1445#ifdef CONFIG_DEBUG_PAGEALLOC
343e0d7a 1446static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
b28a02de 1447 unsigned long caller)
1da177e4 1448{
3dafccf2 1449 int size = obj_size(cachep);
1da177e4 1450
3dafccf2 1451 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1da177e4 1452
b28a02de 1453 if (size < 5 * sizeof(unsigned long))
1da177e4
LT
1454 return;
1455
b28a02de
PE
1456 *addr++ = 0x12345678;
1457 *addr++ = caller;
1458 *addr++ = smp_processor_id();
1459 size -= 3 * sizeof(unsigned long);
1da177e4
LT
1460 {
1461 unsigned long *sptr = &caller;
1462 unsigned long svalue;
1463
1464 while (!kstack_end(sptr)) {
1465 svalue = *sptr++;
1466 if (kernel_text_address(svalue)) {
b28a02de 1467 *addr++ = svalue;
1da177e4
LT
1468 size -= sizeof(unsigned long);
1469 if (size <= sizeof(unsigned long))
1470 break;
1471 }
1472 }
1473
1474 }
b28a02de 1475 *addr++ = 0x87654321;
1da177e4
LT
1476}
1477#endif
1478
343e0d7a 1479static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1da177e4 1480{
3dafccf2
MS
1481 int size = obj_size(cachep);
1482 addr = &((char *)addr)[obj_offset(cachep)];
1da177e4
LT
1483
1484 memset(addr, val, size);
b28a02de 1485 *(unsigned char *)(addr + size - 1) = POISON_END;
1da177e4
LT
1486}
1487
1488static void dump_line(char *data, int offset, int limit)
1489{
1490 int i;
1491 printk(KERN_ERR "%03x:", offset);
b28a02de
PE
1492 for (i = 0; i < limit; i++) {
1493 printk(" %02x", (unsigned char)data[offset + i]);
1da177e4
LT
1494 }
1495 printk("\n");
1496}
1497#endif
1498
1499#if DEBUG
1500
343e0d7a 1501static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1da177e4
LT
1502{
1503 int i, size;
1504 char *realobj;
1505
1506 if (cachep->flags & SLAB_RED_ZONE) {
1507 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
b28a02de
PE
1508 *dbg_redzone1(cachep, objp),
1509 *dbg_redzone2(cachep, objp));
1da177e4
LT
1510 }
1511
1512 if (cachep->flags & SLAB_STORE_USER) {
1513 printk(KERN_ERR "Last user: [<%p>]",
b28a02de 1514 *dbg_userword(cachep, objp));
1da177e4 1515 print_symbol("(%s)",
b28a02de 1516 (unsigned long)*dbg_userword(cachep, objp));
1da177e4
LT
1517 printk("\n");
1518 }
3dafccf2
MS
1519 realobj = (char *)objp + obj_offset(cachep);
1520 size = obj_size(cachep);
b28a02de 1521 for (i = 0; i < size && lines; i += 16, lines--) {
1da177e4
LT
1522 int limit;
1523 limit = 16;
b28a02de
PE
1524 if (i + limit > size)
1525 limit = size - i;
1da177e4
LT
1526 dump_line(realobj, i, limit);
1527 }
1528}
1529
343e0d7a 1530static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1da177e4
LT
1531{
1532 char *realobj;
1533 int size, i;
1534 int lines = 0;
1535
3dafccf2
MS
1536 realobj = (char *)objp + obj_offset(cachep);
1537 size = obj_size(cachep);
1da177e4 1538
b28a02de 1539 for (i = 0; i < size; i++) {
1da177e4 1540 char exp = POISON_FREE;
b28a02de 1541 if (i == size - 1)
1da177e4
LT
1542 exp = POISON_END;
1543 if (realobj[i] != exp) {
1544 int limit;
1545 /* Mismatch ! */
1546 /* Print header */
1547 if (lines == 0) {
b28a02de
PE
1548 printk(KERN_ERR
1549 "Slab corruption: start=%p, len=%d\n",
1550 realobj, size);
1da177e4
LT
1551 print_objinfo(cachep, objp, 0);
1552 }
1553 /* Hexdump the affected line */
b28a02de 1554 i = (i / 16) * 16;
1da177e4 1555 limit = 16;
b28a02de
PE
1556 if (i + limit > size)
1557 limit = size - i;
1da177e4
LT
1558 dump_line(realobj, i, limit);
1559 i += 16;
1560 lines++;
1561 /* Limit to 5 lines */
1562 if (lines > 5)
1563 break;
1564 }
1565 }
1566 if (lines != 0) {
1567 /* Print some data about the neighboring objects, if they
1568 * exist:
1569 */
6ed5eb22 1570 struct slab *slabp = virt_to_slab(objp);
1da177e4
LT
1571 int objnr;
1572
3dafccf2 1573 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
1da177e4 1574 if (objnr) {
3dafccf2
MS
1575 objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
1576 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1577 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
b28a02de 1578 realobj, size);
1da177e4
LT
1579 print_objinfo(cachep, objp, 2);
1580 }
b28a02de 1581 if (objnr + 1 < cachep->num) {
3dafccf2
MS
1582 objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
1583 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1584 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
b28a02de 1585 realobj, size);
1da177e4
LT
1586 print_objinfo(cachep, objp, 2);
1587 }
1588 }
1589}
1590#endif
1591
12dd36fa
MD
1592#if DEBUG
1593/**
1594 * slab_destroy_objs - call the registered destructor for each object in
1595 * a slab that is to be destroyed.
1da177e4 1596 */
343e0d7a 1597static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1da177e4 1598{
1da177e4
LT
1599 int i;
1600 for (i = 0; i < cachep->num; i++) {
3dafccf2 1601 void *objp = slabp->s_mem + cachep->buffer_size * i;
1da177e4
LT
1602
1603 if (cachep->flags & SLAB_POISON) {
1604#ifdef CONFIG_DEBUG_PAGEALLOC
3dafccf2 1605 if ((cachep->buffer_size % PAGE_SIZE) == 0
b28a02de
PE
1606 && OFF_SLAB(cachep))
1607 kernel_map_pages(virt_to_page(objp),
3dafccf2 1608 cachep->buffer_size / PAGE_SIZE,
b28a02de 1609 1);
1da177e4
LT
1610 else
1611 check_poison_obj(cachep, objp);
1612#else
1613 check_poison_obj(cachep, objp);
1614#endif
1615 }
1616 if (cachep->flags & SLAB_RED_ZONE) {
1617 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1618 slab_error(cachep, "start of a freed object "
b28a02de 1619 "was overwritten");
1da177e4
LT
1620 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1621 slab_error(cachep, "end of a freed object "
b28a02de 1622 "was overwritten");
1da177e4
LT
1623 }
1624 if (cachep->dtor && !(cachep->flags & SLAB_POISON))
3dafccf2 1625 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1da177e4 1626 }
12dd36fa 1627}
1da177e4 1628#else
343e0d7a 1629static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa 1630{
1da177e4
LT
1631 if (cachep->dtor) {
1632 int i;
1633 for (i = 0; i < cachep->num; i++) {
3dafccf2 1634 void *objp = slabp->s_mem + cachep->buffer_size * i;
b28a02de 1635 (cachep->dtor) (objp, cachep, 0);
1da177e4
LT
1636 }
1637 }
12dd36fa 1638}
1da177e4
LT
1639#endif
1640
12dd36fa
MD
1641/**
1642 * Destroy all the objs in a slab, and release the mem back to the system.
1643 * Before calling, the slab must have been unlinked from the cache.
1644 * The cache-lock is not held/needed.
1645 */
343e0d7a 1646static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa
MD
1647{
1648 void *addr = slabp->s_mem - slabp->colouroff;
1649
1650 slab_destroy_objs(cachep, slabp);
1da177e4
LT
1651 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1652 struct slab_rcu *slab_rcu;
1653
b28a02de 1654 slab_rcu = (struct slab_rcu *)slabp;
1da177e4
LT
1655 slab_rcu->cachep = cachep;
1656 slab_rcu->addr = addr;
1657 call_rcu(&slab_rcu->head, kmem_rcu_free);
1658 } else {
1659 kmem_freepages(cachep, addr);
1660 if (OFF_SLAB(cachep))
1661 kmem_cache_free(cachep->slabp_cache, slabp);
1662 }
1663}
1664
3dafccf2 1665/* For setting up all the kmem_list3s for a cache whose buffer_size is the same
e498be7d 1666 as the size of kmem_list3. */
343e0d7a 1667static void set_up_list3s(struct kmem_cache *cachep, int index)
e498be7d
CL
1668{
1669 int node;
1670
1671 for_each_online_node(node) {
b28a02de 1672 cachep->nodelists[node] = &initkmem_list3[index + node];
e498be7d 1673 cachep->nodelists[node]->next_reap = jiffies +
b28a02de
PE
1674 REAPTIMEOUT_LIST3 +
1675 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d
CL
1676 }
1677}
1678
4d268eba 1679/**
a70773dd
RD
1680 * calculate_slab_order - calculate size (page order) of slabs
1681 * @cachep: pointer to the cache that is being created
1682 * @size: size of objects to be created in this cache.
1683 * @align: required alignment for the objects.
1684 * @flags: slab allocation flags
1685 *
1686 * Also calculates the number of objects per slab.
4d268eba
PE
1687 *
1688 * This could be made much more intelligent. For now, try to avoid using
1689 * high order pages for slabs. When the gfp() functions are more friendly
1690 * towards high-order requests, this should be changed.
1691 */
ee13d785
RD
1692static inline size_t calculate_slab_order(struct kmem_cache *cachep,
1693 size_t size, size_t align, unsigned long flags)
4d268eba
PE
1694{
1695 size_t left_over = 0;
9888e6fa 1696 int gfporder;
4d268eba 1697
9888e6fa 1698 for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
4d268eba
PE
1699 unsigned int num;
1700 size_t remainder;
1701
9888e6fa 1702 cache_estimate(gfporder, size, align, flags, &remainder, &num);
4d268eba
PE
1703 if (!num)
1704 continue;
9888e6fa 1705
4d268eba 1706 /* More than offslab_limit objects will cause problems */
9888e6fa 1707 if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
4d268eba
PE
1708 break;
1709
9888e6fa 1710 /* Found something acceptable - save it away */
4d268eba 1711 cachep->num = num;
9888e6fa 1712 cachep->gfporder = gfporder;
4d268eba
PE
1713 left_over = remainder;
1714
f78bb8ad
LT
1715 /*
1716 * A VFS-reclaimable slab tends to have most allocations
1717 * as GFP_NOFS and we really don't want to have to be allocating
1718 * higher-order pages when we are unable to shrink dcache.
1719 */
1720 if (flags & SLAB_RECLAIM_ACCOUNT)
1721 break;
1722
4d268eba
PE
1723 /*
1724 * Large number of objects is good, but very large slabs are
1725 * currently bad for the gfp()s.
1726 */
9888e6fa 1727 if (gfporder >= slab_break_gfp_order)
4d268eba
PE
1728 break;
1729
9888e6fa
LT
1730 /*
1731 * Acceptable internal fragmentation?
1732 */
1733 if ((left_over * 8) <= (PAGE_SIZE << gfporder))
4d268eba
PE
1734 break;
1735 }
1736 return left_over;
1737}
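/*
 * Worked example of the loop above (illustrative only; it ignores the
 * slab-management overhead and assumes 4K pages).  For 1500-byte objects
 * at gfporder 0, two objects fit and roughly 1096 bytes are left over;
 * 1096 * 8 > 4096, so the fragmentation check fails and the next order is
 * tried.  At gfporder 1, five objects fit with roughly 692 bytes left
 * over, and 692 * 8 <= 8192, so order 1 is accepted with cachep->num = 5.
 */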
1738
1da177e4
LT
1739/**
1740 * kmem_cache_create - Create a cache.
1741 * @name: A string which is used in /proc/slabinfo to identify this cache.
1742 * @size: The size of objects to be created in this cache.
1743 * @align: The required alignment for the objects.
1744 * @flags: SLAB flags
1745 * @ctor: A constructor for the objects.
1746 * @dtor: A destructor for the objects.
1747 *
1748 * Returns a ptr to the cache on success, NULL on failure.
 1749 * Cannot be called within an interrupt, but can be interrupted.
1750 * The @ctor is run when new pages are allocated by the cache
1751 * and the @dtor is run before the pages are handed back.
1752 *
1753 * @name must be valid until the cache is destroyed. This implies that
1754 * the module calling this has to destroy the cache before getting
1755 * unloaded.
1756 *
1757 * The flags are
1758 *
1759 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1760 * to catch references to uninitialised memory.
1761 *
1762 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1763 * for buffer overruns.
1764 *
1765 * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
1766 * memory pressure.
1767 *
1768 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1769 * cacheline. This can be beneficial if you're counting cycles as closely
1770 * as davem.
1771 */
343e0d7a 1772struct kmem_cache *
1da177e4 1773kmem_cache_create (const char *name, size_t size, size_t align,
343e0d7a
PE
1774 unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
1775 void (*dtor)(void*, struct kmem_cache *, unsigned long))
1da177e4
LT
1776{
1777 size_t left_over, slab_size, ralign;
343e0d7a 1778 struct kmem_cache *cachep = NULL;
4f12bb4f 1779 struct list_head *p;
1da177e4
LT
1780
1781 /*
1782 * Sanity checks... these are all serious usage bugs.
1783 */
1784 if ((!name) ||
b28a02de
PE
1785 in_interrupt() ||
1786 (size < BYTES_PER_WORD) ||
1787 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
1788 printk(KERN_ERR "%s: Early error in slab %s\n",
1789 __FUNCTION__, name);
1790 BUG();
1791 }
1da177e4 1792
f0188f47
RT
1793 /*
1794 * Prevent CPUs from coming and going.
1795 * lock_cpu_hotplug() nests outside cache_chain_mutex
1796 */
1797 lock_cpu_hotplug();
1798
fc0abb14 1799 mutex_lock(&cache_chain_mutex);
4f12bb4f
AM
1800
1801 list_for_each(p, &cache_chain) {
343e0d7a 1802 struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
4f12bb4f
AM
1803 mm_segment_t old_fs = get_fs();
1804 char tmp;
1805 int res;
1806
1807 /*
1808 * This happens when the module gets unloaded and doesn't
1809 * destroy its slab cache and no-one else reuses the vmalloc
1810 * area of the module. Print a warning.
1811 */
1812 set_fs(KERNEL_DS);
1813 res = __get_user(tmp, pc->name);
1814 set_fs(old_fs);
1815 if (res) {
1816 printk("SLAB: cache with size %d has lost its name\n",
3dafccf2 1817 pc->buffer_size);
4f12bb4f
AM
1818 continue;
1819 }
1820
b28a02de 1821 if (!strcmp(pc->name, name)) {
4f12bb4f
AM
1822 printk("kmem_cache_create: duplicate cache %s\n", name);
1823 dump_stack();
1824 goto oops;
1825 }
1826 }
1827
1da177e4
LT
1828#if DEBUG
1829 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
1830 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
 1831 /* No constructor, but initial state check requested */
1832 printk(KERN_ERR "%s: No con, but init state check "
b28a02de 1833 "requested - %s\n", __FUNCTION__, name);
1da177e4
LT
1834 flags &= ~SLAB_DEBUG_INITIAL;
1835 }
1da177e4
LT
1836#if FORCED_DEBUG
1837 /*
1838 * Enable redzoning and last user accounting, except for caches with
1839 * large objects, if the increased size would increase the object size
1840 * above the next power of two: caches with object sizes just above a
1841 * power of two have a significant amount of internal fragmentation.
1842 */
b28a02de
PE
1843 if ((size < 4096
1844 || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
1845 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1da177e4
LT
1846 if (!(flags & SLAB_DESTROY_BY_RCU))
1847 flags |= SLAB_POISON;
1848#endif
1849 if (flags & SLAB_DESTROY_BY_RCU)
1850 BUG_ON(flags & SLAB_POISON);
1851#endif
1852 if (flags & SLAB_DESTROY_BY_RCU)
1853 BUG_ON(dtor);
1854
1855 /*
 1856 * Always check the flags; a caller might be expecting debug
1857 * support which isn't available.
1858 */
1859 if (flags & ~CREATE_MASK)
1860 BUG();
1861
1862 /* Check that size is in terms of words. This is needed to avoid
1863 * unaligned accesses for some archs when redzoning is used, and makes
1864 * sure any on-slab bufctl's are also correctly aligned.
1865 */
b28a02de
PE
1866 if (size & (BYTES_PER_WORD - 1)) {
1867 size += (BYTES_PER_WORD - 1);
1868 size &= ~(BYTES_PER_WORD - 1);
1da177e4
LT
1869 }
1870
 1871 /* Calculate the final buffer alignment: */
1872 /* 1) arch recommendation: can be overridden for debug */
1873 if (flags & SLAB_HWCACHE_ALIGN) {
1874 /* Default alignment: as specified by the arch code.
1875 * Except if an object is really small, then squeeze multiple
1876 * objects into one cacheline.
1877 */
1878 ralign = cache_line_size();
b28a02de 1879 while (size <= ralign / 2)
1da177e4
LT
1880 ralign /= 2;
1881 } else {
1882 ralign = BYTES_PER_WORD;
1883 }
1884 /* 2) arch mandated alignment: disables debug if necessary */
1885 if (ralign < ARCH_SLAB_MINALIGN) {
1886 ralign = ARCH_SLAB_MINALIGN;
1887 if (ralign > BYTES_PER_WORD)
b28a02de 1888 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1da177e4
LT
1889 }
1890 /* 3) caller mandated alignment: disables debug if necessary */
1891 if (ralign < align) {
1892 ralign = align;
1893 if (ralign > BYTES_PER_WORD)
b28a02de 1894 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1da177e4
LT
1895 }
1896 /* 4) Store it. Note that the debug code below can reduce
1897 * the alignment to BYTES_PER_WORD.
1898 */
1899 align = ralign;
1900
1901 /* Get cache's description obj. */
343e0d7a 1902 cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
1da177e4 1903 if (!cachep)
4f12bb4f 1904 goto oops;
343e0d7a 1905 memset(cachep, 0, sizeof(struct kmem_cache));
1da177e4
LT
1906
1907#if DEBUG
3dafccf2 1908 cachep->obj_size = size;
1da177e4
LT
1909
1910 if (flags & SLAB_RED_ZONE) {
1911 /* redzoning only works with word aligned caches */
1912 align = BYTES_PER_WORD;
1913
1914 /* add space for red zone words */
3dafccf2 1915 cachep->obj_offset += BYTES_PER_WORD;
b28a02de 1916 size += 2 * BYTES_PER_WORD;
1da177e4
LT
1917 }
1918 if (flags & SLAB_STORE_USER) {
1919 /* user store requires word alignment and
1920 * one word storage behind the end of the real
1921 * object.
1922 */
1923 align = BYTES_PER_WORD;
1924 size += BYTES_PER_WORD;
1925 }
1926#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
b28a02de 1927 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
3dafccf2
MS
1928 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
1929 cachep->obj_offset += PAGE_SIZE - size;
1da177e4
LT
1930 size = PAGE_SIZE;
1931 }
1932#endif
1933#endif
1934
1935 /* Determine if the slab management is 'on' or 'off' slab. */
b28a02de 1936 if (size >= (PAGE_SIZE >> 3))
1da177e4
LT
1937 /*
1938 * Size is large, assume best to place the slab management obj
1939 * off-slab (should allow better packing of objs).
1940 */
1941 flags |= CFLGS_OFF_SLAB;
1942
1943 size = ALIGN(size, align);
1944
f78bb8ad 1945 left_over = calculate_slab_order(cachep, size, align, flags);
1da177e4
LT
1946
1947 if (!cachep->num) {
1948 printk("kmem_cache_create: couldn't create cache %s.\n", name);
1949 kmem_cache_free(&cache_cache, cachep);
1950 cachep = NULL;
4f12bb4f 1951 goto oops;
1da177e4 1952 }
b28a02de
PE
1953 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
1954 + sizeof(struct slab), align);
1da177e4
LT
1955
1956 /*
1957 * If the slab has been placed off-slab, and we have enough space then
1958 * move it on-slab. This is at the expense of any extra colouring.
1959 */
1960 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
1961 flags &= ~CFLGS_OFF_SLAB;
1962 left_over -= slab_size;
1963 }
1964
1965 if (flags & CFLGS_OFF_SLAB) {
1966 /* really off slab. No need for manual alignment */
b28a02de
PE
1967 slab_size =
1968 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
1da177e4
LT
1969 }
1970
1971 cachep->colour_off = cache_line_size();
1972 /* Offset must be a multiple of the alignment. */
1973 if (cachep->colour_off < align)
1974 cachep->colour_off = align;
b28a02de 1975 cachep->colour = left_over / cachep->colour_off;
1da177e4
LT
1976 cachep->slab_size = slab_size;
1977 cachep->flags = flags;
1978 cachep->gfpflags = 0;
1979 if (flags & SLAB_CACHE_DMA)
1980 cachep->gfpflags |= GFP_DMA;
1981 spin_lock_init(&cachep->spinlock);
3dafccf2 1982 cachep->buffer_size = size;
1da177e4
LT
1983
1984 if (flags & CFLGS_OFF_SLAB)
b2d55073 1985 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
1da177e4
LT
1986 cachep->ctor = ctor;
1987 cachep->dtor = dtor;
1988 cachep->name = name;
1989
1da177e4
LT
1990
1991 if (g_cpucache_up == FULL) {
1992 enable_cpucache(cachep);
1993 } else {
1994 if (g_cpucache_up == NONE) {
1995 /* Note: the first kmem_cache_create must create
1996 * the cache that's used by kmalloc(24), otherwise
1997 * the creation of further caches will BUG().
1998 */
e498be7d 1999 cachep->array[smp_processor_id()] =
b28a02de 2000 &initarray_generic.cache;
e498be7d
CL
2001
2002 /* If the cache that's used by
2003 * kmalloc(sizeof(kmem_list3)) is the first cache,
2004 * then we need to set up all its list3s, otherwise
2005 * the creation of further caches will BUG().
2006 */
2007 set_up_list3s(cachep, SIZE_AC);
2008 if (INDEX_AC == INDEX_L3)
2009 g_cpucache_up = PARTIAL_L3;
2010 else
2011 g_cpucache_up = PARTIAL_AC;
1da177e4 2012 } else {
e498be7d 2013 cachep->array[smp_processor_id()] =
b28a02de 2014 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d
CL
2015
2016 if (g_cpucache_up == PARTIAL_AC) {
2017 set_up_list3s(cachep, SIZE_L3);
2018 g_cpucache_up = PARTIAL_L3;
2019 } else {
2020 int node;
2021 for_each_online_node(node) {
2022
2023 cachep->nodelists[node] =
b28a02de
PE
2024 kmalloc_node(sizeof
2025 (struct kmem_list3),
2026 GFP_KERNEL, node);
e498be7d 2027 BUG_ON(!cachep->nodelists[node]);
b28a02de
PE
2028 kmem_list3_init(cachep->
2029 nodelists[node]);
e498be7d
CL
2030 }
2031 }
1da177e4 2032 }
e498be7d 2033 cachep->nodelists[numa_node_id()]->next_reap =
b28a02de
PE
2034 jiffies + REAPTIMEOUT_LIST3 +
2035 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d 2036
9a2dba4b
PE
2037 BUG_ON(!cpu_cache_get(cachep));
2038 cpu_cache_get(cachep)->avail = 0;
2039 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2040 cpu_cache_get(cachep)->batchcount = 1;
2041 cpu_cache_get(cachep)->touched = 0;
1da177e4
LT
2042 cachep->batchcount = 1;
2043 cachep->limit = BOOT_CPUCACHE_ENTRIES;
b28a02de 2044 }
1da177e4 2045
1da177e4
LT
2046 /* cache setup completed, link it into the list */
2047 list_add(&cachep->next, &cache_chain);
b28a02de 2048 oops:
1da177e4
LT
2049 if (!cachep && (flags & SLAB_PANIC))
2050 panic("kmem_cache_create(): failed to create slab `%s'\n",
b28a02de 2051 name);
fc0abb14 2052 mutex_unlock(&cache_chain_mutex);
f0188f47 2053 unlock_cpu_hotplug();
1da177e4
LT
2054 return cachep;
2055}
2056EXPORT_SYMBOL(kmem_cache_create);
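/*
 * Usage sketch, for illustration only: the foo_* names below are invented
 * and not part of this file.  A typical caller creates its cache at module
 * init time with a constructor matching the signature documented above:
 *
 *	struct foo {
 *		int refcount;
 *		spinlock_t lock;
 *	};
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		struct foo *f = obj;
 *
 *		if (flags & SLAB_CTOR_CONSTRUCTOR) {
 *			f->refcount = 0;
 *			spin_lock_init(&f->lock);
 *		}
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 */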
2057
2058#if DEBUG
2059static void check_irq_off(void)
2060{
2061 BUG_ON(!irqs_disabled());
2062}
2063
2064static void check_irq_on(void)
2065{
2066 BUG_ON(irqs_disabled());
2067}
2068
343e0d7a 2069static void check_spinlock_acquired(struct kmem_cache *cachep)
1da177e4
LT
2070{
2071#ifdef CONFIG_SMP
2072 check_irq_off();
e498be7d 2073 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
1da177e4
LT
2074#endif
2075}
e498be7d 2076
343e0d7a 2077static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
e498be7d
CL
2078{
2079#ifdef CONFIG_SMP
2080 check_irq_off();
2081 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2082#endif
2083}
2084
1da177e4
LT
2085#else
2086#define check_irq_off() do { } while(0)
2087#define check_irq_on() do { } while(0)
2088#define check_spinlock_acquired(x) do { } while(0)
e498be7d 2089#define check_spinlock_acquired_node(x, y) do { } while(0)
1da177e4
LT
2090#endif
2091
2092/*
2093 * Waits for all CPUs to execute func().
2094 */
b28a02de 2095static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
1da177e4
LT
2096{
2097 check_irq_on();
2098 preempt_disable();
2099
2100 local_irq_disable();
2101 func(arg);
2102 local_irq_enable();
2103
2104 if (smp_call_function(func, arg, 1, 1))
2105 BUG();
2106
2107 preempt_enable();
2108}
2109
343e0d7a 2110static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
b28a02de 2111 int force, int node);
1da177e4
LT
2112
2113static void do_drain(void *arg)
2114{
343e0d7a 2115 struct kmem_cache *cachep = (struct kmem_cache *) arg;
1da177e4 2116 struct array_cache *ac;
ff69416e 2117 int node = numa_node_id();
1da177e4
LT
2118
2119 check_irq_off();
9a2dba4b 2120 ac = cpu_cache_get(cachep);
ff69416e
CL
2121 spin_lock(&cachep->nodelists[node]->list_lock);
2122 free_block(cachep, ac->entry, ac->avail, node);
2123 spin_unlock(&cachep->nodelists[node]->list_lock);
1da177e4
LT
2124 ac->avail = 0;
2125}
2126
343e0d7a 2127static void drain_cpu_caches(struct kmem_cache *cachep)
1da177e4 2128{
e498be7d
CL
2129 struct kmem_list3 *l3;
2130 int node;
2131
1da177e4
LT
2132 smp_call_function_all_cpus(do_drain, cachep);
2133 check_irq_on();
b28a02de 2134 for_each_online_node(node) {
e498be7d
CL
2135 l3 = cachep->nodelists[node];
2136 if (l3) {
ca3b9b91 2137 spin_lock_irq(&l3->list_lock);
e498be7d 2138 drain_array_locked(cachep, l3->shared, 1, node);
ca3b9b91 2139 spin_unlock_irq(&l3->list_lock);
e498be7d 2140 if (l3->alien)
4484ebf1 2141 drain_alien_cache(cachep, l3->alien);
e498be7d
CL
2142 }
2143 }
1da177e4
LT
2144}
2145
343e0d7a 2146static int __node_shrink(struct kmem_cache *cachep, int node)
1da177e4
LT
2147{
2148 struct slab *slabp;
e498be7d 2149 struct kmem_list3 *l3 = cachep->nodelists[node];
1da177e4
LT
2150 int ret;
2151
e498be7d 2152 for (;;) {
1da177e4
LT
2153 struct list_head *p;
2154
e498be7d
CL
2155 p = l3->slabs_free.prev;
2156 if (p == &l3->slabs_free)
1da177e4
LT
2157 break;
2158
e498be7d 2159 slabp = list_entry(l3->slabs_free.prev, struct slab, list);
1da177e4
LT
2160#if DEBUG
2161 if (slabp->inuse)
2162 BUG();
2163#endif
2164 list_del(&slabp->list);
2165
e498be7d
CL
2166 l3->free_objects -= cachep->num;
2167 spin_unlock_irq(&l3->list_lock);
1da177e4 2168 slab_destroy(cachep, slabp);
e498be7d 2169 spin_lock_irq(&l3->list_lock);
1da177e4 2170 }
b28a02de 2171 ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
1da177e4
LT
2172 return ret;
2173}
2174
343e0d7a 2175static int __cache_shrink(struct kmem_cache *cachep)
e498be7d
CL
2176{
2177 int ret = 0, i = 0;
2178 struct kmem_list3 *l3;
2179
2180 drain_cpu_caches(cachep);
2181
2182 check_irq_on();
2183 for_each_online_node(i) {
2184 l3 = cachep->nodelists[i];
2185 if (l3) {
2186 spin_lock_irq(&l3->list_lock);
2187 ret += __node_shrink(cachep, i);
2188 spin_unlock_irq(&l3->list_lock);
2189 }
2190 }
2191 return (ret ? 1 : 0);
2192}
2193
1da177e4
LT
2194/**
2195 * kmem_cache_shrink - Shrink a cache.
2196 * @cachep: The cache to shrink.
2197 *
2198 * Releases as many slabs as possible for a cache.
2199 * To help debugging, a zero exit status indicates all slabs were released.
2200 */
343e0d7a 2201int kmem_cache_shrink(struct kmem_cache *cachep)
1da177e4
LT
2202{
2203 if (!cachep || in_interrupt())
2204 BUG();
2205
2206 return __cache_shrink(cachep);
2207}
2208EXPORT_SYMBOL(kmem_cache_shrink);
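/*
 * Usage sketch, for illustration only (foo_cachep is the invented cache
 * from the kmem_cache_create() example above): a caller that wants to hand
 * freeable slabs back, e.g. from a low-memory path, can do
 *
 *	ret = kmem_cache_shrink(foo_cachep);
 *
 * and treat a non-zero ret as "some slabs still hold live objects".
 */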
2209
2210/**
2211 * kmem_cache_destroy - delete a cache
2212 * @cachep: the cache to destroy
2213 *
343e0d7a 2214 * Remove a struct kmem_cache object from the slab cache.
1da177e4
LT
2215 * Returns 0 on success.
2216 *
2217 * It is expected this function will be called by a module when it is
2218 * unloaded. This will remove the cache completely, and avoid a duplicate
2219 * cache being allocated each time a module is loaded and unloaded, if the
2220 * module doesn't have persistent in-kernel storage across loads and unloads.
2221 *
2222 * The cache must be empty before calling this function.
2223 *
 2224 * The caller must guarantee that no one will allocate memory from the cache
2225 * during the kmem_cache_destroy().
2226 */
343e0d7a 2227int kmem_cache_destroy(struct kmem_cache *cachep)
1da177e4
LT
2228{
2229 int i;
e498be7d 2230 struct kmem_list3 *l3;
1da177e4
LT
2231
2232 if (!cachep || in_interrupt())
2233 BUG();
2234
 2235 /* Don't let CPUs come and go */
2236 lock_cpu_hotplug();
2237
2238 /* Find the cache in the chain of caches. */
fc0abb14 2239 mutex_lock(&cache_chain_mutex);
1da177e4
LT
2240 /*
2241 * the chain is never empty, cache_cache is never destroyed
2242 */
2243 list_del(&cachep->next);
fc0abb14 2244 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
2245
2246 if (__cache_shrink(cachep)) {
2247 slab_error(cachep, "Can't free all objects");
fc0abb14 2248 mutex_lock(&cache_chain_mutex);
b28a02de 2249 list_add(&cachep->next, &cache_chain);
fc0abb14 2250 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
2251 unlock_cpu_hotplug();
2252 return 1;
2253 }
2254
2255 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
fbd568a3 2256 synchronize_rcu();
1da177e4 2257
e498be7d 2258 for_each_online_cpu(i)
b28a02de 2259 kfree(cachep->array[i]);
1da177e4
LT
2260
2261 /* NUMA: free the list3 structures */
e498be7d
CL
2262 for_each_online_node(i) {
2263 if ((l3 = cachep->nodelists[i])) {
2264 kfree(l3->shared);
2265 free_alien_cache(l3->alien);
2266 kfree(l3);
2267 }
2268 }
1da177e4
LT
2269 kmem_cache_free(&cache_cache, cachep);
2270
2271 unlock_cpu_hotplug();
2272
2273 return 0;
2274}
2275EXPORT_SYMBOL(kmem_cache_destroy);
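/*
 * Usage sketch, for illustration only (the foo_* names are invented): the
 * expected pattern is to destroy the cache from the module exit path, once
 * every object has been freed back to it:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		if (kmem_cache_destroy(foo_cachep))
 *			printk(KERN_ERR "foo: cache still has objects\n");
 *	}
 */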
2276
2277/* Get the memory for a slab management obj. */
343e0d7a 2278static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
b28a02de 2279 int colour_off, gfp_t local_flags)
1da177e4
LT
2280{
2281 struct slab *slabp;
b28a02de 2282
1da177e4
LT
2283 if (OFF_SLAB(cachep)) {
2284 /* Slab management obj is off-slab. */
2285 slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
2286 if (!slabp)
2287 return NULL;
2288 } else {
b28a02de 2289 slabp = objp + colour_off;
1da177e4
LT
2290 colour_off += cachep->slab_size;
2291 }
2292 slabp->inuse = 0;
2293 slabp->colouroff = colour_off;
b28a02de 2294 slabp->s_mem = objp + colour_off;
1da177e4
LT
2295
2296 return slabp;
2297}
2298
2299static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2300{
b28a02de 2301 return (kmem_bufctl_t *) (slabp + 1);
1da177e4
LT
2302}
2303
343e0d7a 2304static void cache_init_objs(struct kmem_cache *cachep,
b28a02de 2305 struct slab *slabp, unsigned long ctor_flags)
1da177e4
LT
2306{
2307 int i;
2308
2309 for (i = 0; i < cachep->num; i++) {
3dafccf2 2310 void *objp = slabp->s_mem + cachep->buffer_size * i;
1da177e4
LT
2311#if DEBUG
2312 /* need to poison the objs? */
2313 if (cachep->flags & SLAB_POISON)
2314 poison_obj(cachep, objp, POISON_FREE);
2315 if (cachep->flags & SLAB_STORE_USER)
2316 *dbg_userword(cachep, objp) = NULL;
2317
2318 if (cachep->flags & SLAB_RED_ZONE) {
2319 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2320 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2321 }
2322 /*
2323 * Constructors are not allowed to allocate memory from
2324 * the same cache which they are a constructor for.
2325 * Otherwise, deadlock. They must also be threaded.
2326 */
2327 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
3dafccf2 2328 cachep->ctor(objp + obj_offset(cachep), cachep,
b28a02de 2329 ctor_flags);
1da177e4
LT
2330
2331 if (cachep->flags & SLAB_RED_ZONE) {
2332 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2333 slab_error(cachep, "constructor overwrote the"
b28a02de 2334 " end of an object");
1da177e4
LT
2335 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2336 slab_error(cachep, "constructor overwrote the"
b28a02de 2337 " start of an object");
1da177e4 2338 }
3dafccf2 2339 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
b28a02de
PE
2340 && cachep->flags & SLAB_POISON)
2341 kernel_map_pages(virt_to_page(objp),
3dafccf2 2342 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2343#else
2344 if (cachep->ctor)
2345 cachep->ctor(objp, cachep, ctor_flags);
2346#endif
b28a02de 2347 slab_bufctl(slabp)[i] = i + 1;
1da177e4 2348 }
b28a02de 2349 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
1da177e4
LT
2350 slabp->free = 0;
2351}
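/*
 * Illustration of the free list built above: on a slab with
 * cachep->num == 4, cache_init_objs() leaves
 *
 *	slab_bufctl(slabp) = { 1, 2, 3, BUFCTL_END }
 *	slabp->free = 0
 *
 * i.e. object 0 is handed out first, and following bufctl[free] walks
 * 0 -> 1 -> 2 -> 3 until BUFCTL_END marks the list as exhausted.
 */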
2352
343e0d7a 2353static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
1da177e4
LT
2354{
2355 if (flags & SLAB_DMA) {
2356 if (!(cachep->gfpflags & GFP_DMA))
2357 BUG();
2358 } else {
2359 if (cachep->gfpflags & GFP_DMA)
2360 BUG();
2361 }
2362}
2363
343e0d7a 2364static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
78d382d7
MD
2365{
2366 void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
2367 kmem_bufctl_t next;
2368
2369 slabp->inuse++;
2370 next = slab_bufctl(slabp)[slabp->free];
2371#if DEBUG
2372 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2373 WARN_ON(slabp->nodeid != nodeid);
2374#endif
2375 slabp->free = next;
2376
2377 return objp;
2378}
2379
343e0d7a 2380static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
78d382d7
MD
2381 int nodeid)
2382{
2383 unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
2384
2385#if DEBUG
2386 /* Verify that the slab belongs to the intended node */
2387 WARN_ON(slabp->nodeid != nodeid);
2388
2389 if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
2390 printk(KERN_ERR "slab: double free detected in cache "
2391 "'%s', objp %p\n", cachep->name, objp);
2392 BUG();
2393 }
2394#endif
2395 slab_bufctl(slabp)[objnr] = slabp->free;
2396 slabp->free = objnr;
2397 slabp->inuse--;
2398}
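/*
 * Illustration: slab_get_obj() pops from the head of the bufctl free list
 * and slab_put_obj() pushes back onto it, so frees are LIFO.  Continuing
 * the 4-object example above, after allocating objects 0 and 1 and then
 * freeing object 0:
 *
 *	slab_bufctl(slabp)[0] = 2;	(the old slabp->free)
 *	slabp->free = 0;
 *
 * so object 0 is the next one handed out again while object 1 stays in use.
 */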
2399
343e0d7a 2400static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
1da177e4
LT
2401{
2402 int i;
2403 struct page *page;
2404
2405 /* Nasty!!!!!! I hope this is OK. */
2406 i = 1 << cachep->gfporder;
2407 page = virt_to_page(objp);
2408 do {
065d41cb
PE
2409 page_set_cache(page, cachep);
2410 page_set_slab(page, slabp);
1da177e4
LT
2411 page++;
2412 } while (--i);
2413}
2414
2415/*
2416 * Grow (by 1) the number of slabs within a cache. This is called by
2417 * kmem_cache_alloc() when there are no active objs left in a cache.
2418 */
343e0d7a 2419static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4 2420{
b28a02de
PE
2421 struct slab *slabp;
2422 void *objp;
2423 size_t offset;
2424 gfp_t local_flags;
2425 unsigned long ctor_flags;
e498be7d 2426 struct kmem_list3 *l3;
1da177e4
LT
2427
2428 /* Be lazy and only check for valid flags here,
b28a02de 2429 * keeping it out of the critical path in kmem_cache_alloc().
1da177e4 2430 */
b28a02de 2431 if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
1da177e4
LT
2432 BUG();
2433 if (flags & SLAB_NO_GROW)
2434 return 0;
2435
2436 ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2437 local_flags = (flags & SLAB_LEVEL_MASK);
2438 if (!(local_flags & __GFP_WAIT))
2439 /*
2440 * Not allowed to sleep. Need to tell a constructor about
2441 * this - it might need to know...
2442 */
2443 ctor_flags |= SLAB_CTOR_ATOMIC;
2444
2e1217cf 2445 /* Take the l3 list lock to change the colour_next on this node */
1da177e4 2446 check_irq_off();
2e1217cf
RT
2447 l3 = cachep->nodelists[nodeid];
2448 spin_lock(&l3->list_lock);
1da177e4
LT
2449
 2450 /* Get the colour for the slab, and calculate the next value. */
2e1217cf
RT
2451 offset = l3->colour_next;
2452 l3->colour_next++;
2453 if (l3->colour_next >= cachep->colour)
2454 l3->colour_next = 0;
2455 spin_unlock(&l3->list_lock);
1da177e4 2456
2e1217cf 2457 offset *= cachep->colour_off;
1da177e4
LT
2458
2459 if (local_flags & __GFP_WAIT)
2460 local_irq_enable();
2461
2462 /*
2463 * The test for missing atomic flag is performed here, rather than
2464 * the more obvious place, simply to reduce the critical path length
2465 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2466 * will eventually be caught here (where it matters).
2467 */
2468 kmem_flagcheck(cachep, flags);
2469
e498be7d
CL
2470 /* Get mem for the objs.
 2471 * Attempt to allocate a physical page from 'nodeid'.
2472 */
1da177e4
LT
2473 if (!(objp = kmem_getpages(cachep, flags, nodeid)))
2474 goto failed;
2475
2476 /* Get slab management. */
2477 if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
2478 goto opps1;
2479
e498be7d 2480 slabp->nodeid = nodeid;
1da177e4
LT
2481 set_slab_attr(cachep, slabp, objp);
2482
2483 cache_init_objs(cachep, slabp, ctor_flags);
2484
2485 if (local_flags & __GFP_WAIT)
2486 local_irq_disable();
2487 check_irq_off();
e498be7d 2488 spin_lock(&l3->list_lock);
1da177e4
LT
2489
2490 /* Make slab active. */
e498be7d 2491 list_add_tail(&slabp->list, &(l3->slabs_free));
1da177e4 2492 STATS_INC_GROWN(cachep);
e498be7d
CL
2493 l3->free_objects += cachep->num;
2494 spin_unlock(&l3->list_lock);
1da177e4 2495 return 1;
b28a02de 2496 opps1:
1da177e4 2497 kmem_freepages(cachep, objp);
b28a02de 2498 failed:
1da177e4
LT
2499 if (local_flags & __GFP_WAIT)
2500 local_irq_disable();
2501 return 0;
2502}
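/*
 * Illustration of the colouring above: assuming a 64-byte cache line
 * (colour_off == 64) and cachep->colour == 4, successive cache_grow()
 * calls place s_mem at byte offsets 0, 64, 128, 192, 0, 64, ... into the
 * freshly allocated pages, so equally numbered objects of different slabs
 * do not all land on the same cache lines.
 */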
2503
2504#if DEBUG
2505
2506/*
2507 * Perform extra freeing checks:
2508 * - detect bad pointers.
2509 * - POISON/RED_ZONE checking
2510 * - destructor calls, for caches with POISON+dtor
2511 */
2512static void kfree_debugcheck(const void *objp)
2513{
2514 struct page *page;
2515
2516 if (!virt_addr_valid(objp)) {
2517 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
b28a02de
PE
2518 (unsigned long)objp);
2519 BUG();
1da177e4
LT
2520 }
2521 page = virt_to_page(objp);
2522 if (!PageSlab(page)) {
b28a02de
PE
2523 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
2524 (unsigned long)objp);
1da177e4
LT
2525 BUG();
2526 }
2527}
2528
343e0d7a 2529static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
b28a02de 2530 void *caller)
1da177e4
LT
2531{
2532 struct page *page;
2533 unsigned int objnr;
2534 struct slab *slabp;
2535
3dafccf2 2536 objp -= obj_offset(cachep);
1da177e4
LT
2537 kfree_debugcheck(objp);
2538 page = virt_to_page(objp);
2539
065d41cb 2540 if (page_get_cache(page) != cachep) {
b28a02de
PE
2541 printk(KERN_ERR
2542 "mismatch in kmem_cache_free: expected cache %p, got %p\n",
2543 page_get_cache(page), cachep);
1da177e4 2544 printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
b28a02de
PE
2545 printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
2546 page_get_cache(page)->name);
1da177e4
LT
2547 WARN_ON(1);
2548 }
065d41cb 2549 slabp = page_get_slab(page);
1da177e4
LT
2550
2551 if (cachep->flags & SLAB_RED_ZONE) {
b28a02de
PE
2552 if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
2553 || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
2554 slab_error(cachep,
2555 "double free, or memory outside"
2556 " object was overwritten");
2557 printk(KERN_ERR
2558 "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
2559 objp, *dbg_redzone1(cachep, objp),
2560 *dbg_redzone2(cachep, objp));
1da177e4
LT
2561 }
2562 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2563 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2564 }
2565 if (cachep->flags & SLAB_STORE_USER)
2566 *dbg_userword(cachep, objp) = caller;
2567
3dafccf2 2568 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
1da177e4
LT
2569
2570 BUG_ON(objnr >= cachep->num);
3dafccf2 2571 BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
1da177e4
LT
2572
2573 if (cachep->flags & SLAB_DEBUG_INITIAL) {
2574 /* Need to call the slab's constructor so the
2575 * caller can perform a verify of its state (debugging).
2576 * Called without the cache-lock held.
2577 */
3dafccf2 2578 cachep->ctor(objp + obj_offset(cachep),
b28a02de 2579 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
1da177e4
LT
2580 }
2581 if (cachep->flags & SLAB_POISON && cachep->dtor) {
2582 /* we want to cache poison the object,
2583 * call the destruction callback
2584 */
3dafccf2 2585 cachep->dtor(objp + obj_offset(cachep), cachep, 0);
1da177e4
LT
2586 }
2587 if (cachep->flags & SLAB_POISON) {
2588#ifdef CONFIG_DEBUG_PAGEALLOC
3dafccf2 2589 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
1da177e4 2590 store_stackinfo(cachep, objp, (unsigned long)caller);
b28a02de 2591 kernel_map_pages(virt_to_page(objp),
3dafccf2 2592 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2593 } else {
2594 poison_obj(cachep, objp, POISON_FREE);
2595 }
2596#else
2597 poison_obj(cachep, objp, POISON_FREE);
2598#endif
2599 }
2600 return objp;
2601}
2602
343e0d7a 2603static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
1da177e4
LT
2604{
2605 kmem_bufctl_t i;
2606 int entries = 0;
b28a02de 2607
1da177e4
LT
2608 /* Check slab's freelist to see if this obj is there. */
2609 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2610 entries++;
2611 if (entries > cachep->num || i >= cachep->num)
2612 goto bad;
2613 }
2614 if (entries != cachep->num - slabp->inuse) {
b28a02de
PE
2615 bad:
2616 printk(KERN_ERR
2617 "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2618 cachep->name, cachep->num, slabp, slabp->inuse);
2619 for (i = 0;
264132bc 2620 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
b28a02de
PE
2621 i++) {
2622 if ((i % 16) == 0)
1da177e4 2623 printk("\n%03x:", i);
b28a02de 2624 printk(" %02x", ((unsigned char *)slabp)[i]);
1da177e4
LT
2625 }
2626 printk("\n");
2627 BUG();
2628 }
2629}
2630#else
2631#define kfree_debugcheck(x) do { } while(0)
2632#define cache_free_debugcheck(x,objp,z) (objp)
2633#define check_slabp(x,y) do { } while(0)
2634#endif
2635
343e0d7a 2636static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
1da177e4
LT
2637{
2638 int batchcount;
2639 struct kmem_list3 *l3;
2640 struct array_cache *ac;
2641
2642 check_irq_off();
9a2dba4b 2643 ac = cpu_cache_get(cachep);
b28a02de 2644 retry:
1da177e4
LT
2645 batchcount = ac->batchcount;
2646 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2647 /* if there was little recent activity on this
2648 * cache, then perform only a partial refill.
2649 * Otherwise we could generate refill bouncing.
2650 */
2651 batchcount = BATCHREFILL_LIMIT;
2652 }
e498be7d
CL
2653 l3 = cachep->nodelists[numa_node_id()];
2654
2655 BUG_ON(ac->avail > 0 || !l3);
2656 spin_lock(&l3->list_lock);
1da177e4 2657
1da177e4
LT
2658 if (l3->shared) {
2659 struct array_cache *shared_array = l3->shared;
2660 if (shared_array->avail) {
2661 if (batchcount > shared_array->avail)
2662 batchcount = shared_array->avail;
2663 shared_array->avail -= batchcount;
2664 ac->avail = batchcount;
e498be7d 2665 memcpy(ac->entry,
b28a02de
PE
2666 &(shared_array->entry[shared_array->avail]),
2667 sizeof(void *) * batchcount);
1da177e4
LT
2668 shared_array->touched = 1;
2669 goto alloc_done;
2670 }
2671 }
2672 while (batchcount > 0) {
2673 struct list_head *entry;
2674 struct slab *slabp;
2675 /* Get slab alloc is to come from. */
2676 entry = l3->slabs_partial.next;
2677 if (entry == &l3->slabs_partial) {
2678 l3->free_touched = 1;
2679 entry = l3->slabs_free.next;
2680 if (entry == &l3->slabs_free)
2681 goto must_grow;
2682 }
2683
2684 slabp = list_entry(entry, struct slab, list);
2685 check_slabp(cachep, slabp);
2686 check_spinlock_acquired(cachep);
2687 while (slabp->inuse < cachep->num && batchcount--) {
1da177e4
LT
2688 STATS_INC_ALLOCED(cachep);
2689 STATS_INC_ACTIVE(cachep);
2690 STATS_SET_HIGH(cachep);
2691
78d382d7
MD
2692 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2693 numa_node_id());
1da177e4
LT
2694 }
2695 check_slabp(cachep, slabp);
2696
2697 /* move slabp to correct slabp list: */
2698 list_del(&slabp->list);
2699 if (slabp->free == BUFCTL_END)
2700 list_add(&slabp->list, &l3->slabs_full);
2701 else
2702 list_add(&slabp->list, &l3->slabs_partial);
2703 }
2704
b28a02de 2705 must_grow:
1da177e4 2706 l3->free_objects -= ac->avail;
b28a02de 2707 alloc_done:
e498be7d 2708 spin_unlock(&l3->list_lock);
1da177e4
LT
2709
2710 if (unlikely(!ac->avail)) {
2711 int x;
e498be7d
CL
2712 x = cache_grow(cachep, flags, numa_node_id());
2713
1da177e4 2714 // cache_grow can reenable interrupts, then ac could change.
9a2dba4b 2715 ac = cpu_cache_get(cachep);
1da177e4
LT
2716 if (!x && ac->avail == 0) // no objects in sight? abort
2717 return NULL;
2718
b28a02de 2719 if (!ac->avail) // objects refilled by interrupt?
1da177e4
LT
2720 goto retry;
2721 }
2722 ac->touched = 1;
e498be7d 2723 return ac->entry[--ac->avail];
1da177e4
LT
2724}
2725
2726static inline void
343e0d7a 2727cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
1da177e4
LT
2728{
2729 might_sleep_if(flags & __GFP_WAIT);
2730#if DEBUG
2731 kmem_flagcheck(cachep, flags);
2732#endif
2733}
2734
2735#if DEBUG
343e0d7a 2736static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
b28a02de 2737 void *objp, void *caller)
1da177e4 2738{
b28a02de 2739 if (!objp)
1da177e4 2740 return objp;
b28a02de 2741 if (cachep->flags & SLAB_POISON) {
1da177e4 2742#ifdef CONFIG_DEBUG_PAGEALLOC
3dafccf2 2743 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
b28a02de 2744 kernel_map_pages(virt_to_page(objp),
3dafccf2 2745 cachep->buffer_size / PAGE_SIZE, 1);
1da177e4
LT
2746 else
2747 check_poison_obj(cachep, objp);
2748#else
2749 check_poison_obj(cachep, objp);
2750#endif
2751 poison_obj(cachep, objp, POISON_INUSE);
2752 }
2753 if (cachep->flags & SLAB_STORE_USER)
2754 *dbg_userword(cachep, objp) = caller;
2755
2756 if (cachep->flags & SLAB_RED_ZONE) {
b28a02de
PE
2757 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
2758 || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2759 slab_error(cachep,
2760 "double free, or memory outside"
2761 " object was overwritten");
2762 printk(KERN_ERR
2763 "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
2764 objp, *dbg_redzone1(cachep, objp),
2765 *dbg_redzone2(cachep, objp));
1da177e4
LT
2766 }
2767 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2768 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2769 }
3dafccf2 2770 objp += obj_offset(cachep);
1da177e4 2771 if (cachep->ctor && cachep->flags & SLAB_POISON) {
b28a02de 2772 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
1da177e4
LT
2773
2774 if (!(flags & __GFP_WAIT))
2775 ctor_flags |= SLAB_CTOR_ATOMIC;
2776
2777 cachep->ctor(objp, cachep, ctor_flags);
b28a02de 2778 }
1da177e4
LT
2779 return objp;
2780}
2781#else
2782#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2783#endif
2784
343e0d7a 2785static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2786{
b28a02de 2787 void *objp;
1da177e4
LT
2788 struct array_cache *ac;
2789
dc85da15 2790#ifdef CONFIG_NUMA
86c562a9 2791 if (unlikely(current->mempolicy && !in_interrupt())) {
dc85da15
CL
2792 int nid = slab_node(current->mempolicy);
2793
2794 if (nid != numa_node_id())
2795 return __cache_alloc_node(cachep, flags, nid);
2796 }
2797#endif
2798
5c382300 2799 check_irq_off();
9a2dba4b 2800 ac = cpu_cache_get(cachep);
1da177e4
LT
2801 if (likely(ac->avail)) {
2802 STATS_INC_ALLOCHIT(cachep);
2803 ac->touched = 1;
e498be7d 2804 objp = ac->entry[--ac->avail];
1da177e4
LT
2805 } else {
2806 STATS_INC_ALLOCMISS(cachep);
2807 objp = cache_alloc_refill(cachep, flags);
2808 }
5c382300
AK
2809 return objp;
2810}
2811
7fd6b141
PE
2812static __always_inline void *
2813__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
5c382300
AK
2814{
2815 unsigned long save_flags;
b28a02de 2816 void *objp;
5c382300
AK
2817
2818 cache_alloc_debugcheck_before(cachep, flags);
2819
2820 local_irq_save(save_flags);
2821 objp = ____cache_alloc(cachep, flags);
1da177e4 2822 local_irq_restore(save_flags);
34342e86 2823 objp = cache_alloc_debugcheck_after(cachep, flags, objp,
7fd6b141 2824 caller);
34342e86 2825 prefetchw(objp);
1da177e4
LT
2826 return objp;
2827}
2828
e498be7d
CL
2829#ifdef CONFIG_NUMA
2830/*
 2831 * An interface to enable slab creation on nodeid
1da177e4 2832 */
343e0d7a 2833static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
e498be7d
CL
2834{
2835 struct list_head *entry;
b28a02de
PE
2836 struct slab *slabp;
2837 struct kmem_list3 *l3;
2838 void *obj;
b28a02de
PE
2839 int x;
2840
2841 l3 = cachep->nodelists[nodeid];
2842 BUG_ON(!l3);
2843
2844 retry:
ca3b9b91 2845 check_irq_off();
b28a02de
PE
2846 spin_lock(&l3->list_lock);
2847 entry = l3->slabs_partial.next;
2848 if (entry == &l3->slabs_partial) {
2849 l3->free_touched = 1;
2850 entry = l3->slabs_free.next;
2851 if (entry == &l3->slabs_free)
2852 goto must_grow;
2853 }
2854
2855 slabp = list_entry(entry, struct slab, list);
2856 check_spinlock_acquired_node(cachep, nodeid);
2857 check_slabp(cachep, slabp);
2858
2859 STATS_INC_NODEALLOCS(cachep);
2860 STATS_INC_ACTIVE(cachep);
2861 STATS_SET_HIGH(cachep);
2862
2863 BUG_ON(slabp->inuse == cachep->num);
2864
78d382d7 2865 obj = slab_get_obj(cachep, slabp, nodeid);
b28a02de
PE
2866 check_slabp(cachep, slabp);
2867 l3->free_objects--;
2868 /* move slabp to correct slabp list: */
2869 list_del(&slabp->list);
2870
2871 if (slabp->free == BUFCTL_END) {
2872 list_add(&slabp->list, &l3->slabs_full);
2873 } else {
2874 list_add(&slabp->list, &l3->slabs_partial);
2875 }
e498be7d 2876
b28a02de
PE
2877 spin_unlock(&l3->list_lock);
2878 goto done;
e498be7d 2879
b28a02de
PE
2880 must_grow:
2881 spin_unlock(&l3->list_lock);
2882 x = cache_grow(cachep, flags, nodeid);
1da177e4 2883
b28a02de
PE
2884 if (!x)
2885 return NULL;
e498be7d 2886
b28a02de
PE
2887 goto retry;
2888 done:
2889 return obj;
e498be7d
CL
2890}
2891#endif
2892
2893/*
2894 * Caller needs to acquire correct kmem_list's list_lock
2895 */
343e0d7a 2896static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
b28a02de 2897 int node)
1da177e4
LT
2898{
2899 int i;
e498be7d 2900 struct kmem_list3 *l3;
1da177e4
LT
2901
2902 for (i = 0; i < nr_objects; i++) {
2903 void *objp = objpp[i];
2904 struct slab *slabp;
1da177e4 2905
6ed5eb22 2906 slabp = virt_to_slab(objp);
ff69416e 2907 l3 = cachep->nodelists[node];
1da177e4 2908 list_del(&slabp->list);
ff69416e 2909 check_spinlock_acquired_node(cachep, node);
1da177e4 2910 check_slabp(cachep, slabp);
78d382d7 2911 slab_put_obj(cachep, slabp, objp, node);
1da177e4 2912 STATS_DEC_ACTIVE(cachep);
e498be7d 2913 l3->free_objects++;
1da177e4
LT
2914 check_slabp(cachep, slabp);
2915
2916 /* fixup slab chains */
2917 if (slabp->inuse == 0) {
e498be7d
CL
2918 if (l3->free_objects > l3->free_limit) {
2919 l3->free_objects -= cachep->num;
1da177e4
LT
2920 slab_destroy(cachep, slabp);
2921 } else {
e498be7d 2922 list_add(&slabp->list, &l3->slabs_free);
1da177e4
LT
2923 }
2924 } else {
2925 /* Unconditionally move a slab to the end of the
2926 * partial list on free - maximum time for the
2927 * other objects to be freed, too.
2928 */
e498be7d 2929 list_add_tail(&slabp->list, &l3->slabs_partial);
1da177e4
LT
2930 }
2931 }
2932}
2933
343e0d7a 2934static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
1da177e4
LT
2935{
2936 int batchcount;
e498be7d 2937 struct kmem_list3 *l3;
ff69416e 2938 int node = numa_node_id();
1da177e4
LT
2939
2940 batchcount = ac->batchcount;
2941#if DEBUG
2942 BUG_ON(!batchcount || batchcount > ac->avail);
2943#endif
2944 check_irq_off();
ff69416e 2945 l3 = cachep->nodelists[node];
e498be7d
CL
2946 spin_lock(&l3->list_lock);
2947 if (l3->shared) {
2948 struct array_cache *shared_array = l3->shared;
b28a02de 2949 int max = shared_array->limit - shared_array->avail;
1da177e4
LT
2950 if (max) {
2951 if (batchcount > max)
2952 batchcount = max;
e498be7d 2953 memcpy(&(shared_array->entry[shared_array->avail]),
b28a02de 2954 ac->entry, sizeof(void *) * batchcount);
1da177e4
LT
2955 shared_array->avail += batchcount;
2956 goto free_done;
2957 }
2958 }
2959
ff69416e 2960 free_block(cachep, ac->entry, batchcount, node);
b28a02de 2961 free_done:
1da177e4
LT
2962#if STATS
2963 {
2964 int i = 0;
2965 struct list_head *p;
2966
e498be7d
CL
2967 p = l3->slabs_free.next;
2968 while (p != &(l3->slabs_free)) {
1da177e4
LT
2969 struct slab *slabp;
2970
2971 slabp = list_entry(p, struct slab, list);
2972 BUG_ON(slabp->inuse);
2973
2974 i++;
2975 p = p->next;
2976 }
2977 STATS_SET_FREEABLE(cachep, i);
2978 }
2979#endif
e498be7d 2980 spin_unlock(&l3->list_lock);
1da177e4 2981 ac->avail -= batchcount;
e498be7d 2982 memmove(ac->entry, &(ac->entry[batchcount]),
b28a02de 2983 sizeof(void *) * ac->avail);
1da177e4
LT
2984}
2985
2986/*
2987 * __cache_free
2988 * Release an obj back to its cache. If the obj has a constructed
2989 * state, it must be in this state _before_ it is released.
2990 *
2991 * Called with disabled ints.
2992 */
343e0d7a 2993static inline void __cache_free(struct kmem_cache *cachep, void *objp)
1da177e4 2994{
9a2dba4b 2995 struct array_cache *ac = cpu_cache_get(cachep);
1da177e4
LT
2996
2997 check_irq_off();
2998 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
2999
e498be7d
CL
 3000 /* Make sure we are not freeing an object from another
3001 * node to the array cache on this cpu.
3002 */
3003#ifdef CONFIG_NUMA
3004 {
3005 struct slab *slabp;
6ed5eb22 3006 slabp = virt_to_slab(objp);
e498be7d
CL
3007 if (unlikely(slabp->nodeid != numa_node_id())) {
3008 struct array_cache *alien = NULL;
3009 int nodeid = slabp->nodeid;
b28a02de
PE
3010 struct kmem_list3 *l3 =
3011 cachep->nodelists[numa_node_id()];
e498be7d
CL
3012
3013 STATS_INC_NODEFREES(cachep);
3014 if (l3->alien && l3->alien[nodeid]) {
3015 alien = l3->alien[nodeid];
3016 spin_lock(&alien->lock);
3017 if (unlikely(alien->avail == alien->limit))
3018 __drain_alien_cache(cachep,
b28a02de 3019 alien, nodeid);
e498be7d
CL
3020 alien->entry[alien->avail++] = objp;
3021 spin_unlock(&alien->lock);
3022 } else {
3023 spin_lock(&(cachep->nodelists[nodeid])->
b28a02de 3024 list_lock);
ff69416e 3025 free_block(cachep, &objp, 1, nodeid);
e498be7d 3026 spin_unlock(&(cachep->nodelists[nodeid])->
b28a02de 3027 list_lock);
e498be7d
CL
3028 }
3029 return;
3030 }
3031 }
3032#endif
1da177e4
LT
3033 if (likely(ac->avail < ac->limit)) {
3034 STATS_INC_FREEHIT(cachep);
e498be7d 3035 ac->entry[ac->avail++] = objp;
1da177e4
LT
3036 return;
3037 } else {
3038 STATS_INC_FREEMISS(cachep);
3039 cache_flusharray(cachep, ac);
e498be7d 3040 ac->entry[ac->avail++] = objp;
1da177e4
LT
3041 }
3042}
3043
3044/**
3045 * kmem_cache_alloc - Allocate an object
3046 * @cachep: The cache to allocate from.
3047 * @flags: See kmalloc().
3048 *
3049 * Allocate an object from this cache. The flags are only relevant
3050 * if the cache has no available objects.
3051 */
343e0d7a 3052void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3053{
7fd6b141 3054 return __cache_alloc(cachep, flags, __builtin_return_address(0));
1da177e4
LT
3055}
3056EXPORT_SYMBOL(kmem_cache_alloc);
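/*
 * Usage sketch, for illustration only (the foo_* names are invented): the
 * common allocate/free pair for a cache created earlier.
 *
 *	struct foo *f;
 *
 *	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *
 * GFP_ATOMIC replaces GFP_KERNEL when the caller must not sleep, e.g. in
 * interrupt context.
 */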
3057
3058/**
3059 * kmem_ptr_validate - check if an untrusted pointer might
3060 * be a slab entry.
3061 * @cachep: the cache we're checking against
3062 * @ptr: pointer to validate
3063 *
3064 * This verifies that the untrusted pointer looks sane:
3065 * it is _not_ a guarantee that the pointer is actually
3066 * part of the slab cache in question, but it at least
3067 * validates that the pointer can be dereferenced and
3068 * looks half-way sane.
3069 *
3070 * Currently only used for dentry validation.
3071 */
343e0d7a 3072int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
1da177e4 3073{
b28a02de 3074 unsigned long addr = (unsigned long)ptr;
1da177e4 3075 unsigned long min_addr = PAGE_OFFSET;
b28a02de 3076 unsigned long align_mask = BYTES_PER_WORD - 1;
3dafccf2 3077 unsigned long size = cachep->buffer_size;
1da177e4
LT
3078 struct page *page;
3079
3080 if (unlikely(addr < min_addr))
3081 goto out;
3082 if (unlikely(addr > (unsigned long)high_memory - size))
3083 goto out;
3084 if (unlikely(addr & align_mask))
3085 goto out;
3086 if (unlikely(!kern_addr_valid(addr)))
3087 goto out;
3088 if (unlikely(!kern_addr_valid(addr + size - 1)))
3089 goto out;
3090 page = virt_to_page(ptr);
3091 if (unlikely(!PageSlab(page)))
3092 goto out;
065d41cb 3093 if (unlikely(page_get_cache(page) != cachep))
1da177e4
LT
3094 goto out;
3095 return 1;
b28a02de 3096 out:
1da177e4
LT
3097 return 0;
3098}
3099
3100#ifdef CONFIG_NUMA
3101/**
3102 * kmem_cache_alloc_node - Allocate an object on the specified node
3103 * @cachep: The cache to allocate from.
3104 * @flags: See kmalloc().
3105 * @nodeid: node number of the target node.
3106 *
3107 * Identical to kmem_cache_alloc, except that this function is slow
 3108 * and can sleep. It will allocate memory on the given node, which
 3109 * can improve performance for cpu-bound structures.
e498be7d
CL
3110 * New and improved: it will now make sure that the object gets
3111 * put on the correct node list so that there is no false sharing.
1da177e4 3112 */
343e0d7a 3113void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4 3114{
e498be7d
CL
3115 unsigned long save_flags;
3116 void *ptr;
1da177e4 3117
e498be7d
CL
3118 cache_alloc_debugcheck_before(cachep, flags);
3119 local_irq_save(save_flags);
18f820f6
CL
3120
3121 if (nodeid == -1 || nodeid == numa_node_id() ||
3122 !cachep->nodelists[nodeid])
5c382300
AK
3123 ptr = ____cache_alloc(cachep, flags);
3124 else
3125 ptr = __cache_alloc_node(cachep, flags, nodeid);
e498be7d 3126 local_irq_restore(save_flags);
18f820f6
CL
3127
3128 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
3129 __builtin_return_address(0));
1da177e4 3130
e498be7d 3131 return ptr;
1da177e4
LT
3132}
3133EXPORT_SYMBOL(kmem_cache_alloc_node);
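/*
 * Usage sketch, for illustration only (ptrs[] and foo_cachep are invented
 * names): per-node data is typically allocated close to the CPUs that will
 * use it:
 *
 *	for_each_online_node(node) {
 *		ptrs[node] = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL,
 *						   node);
 *		if (!ptrs[node])
 *			goto out_free;
 *	}
 */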
3134
dd0fc66f 3135void *kmalloc_node(size_t size, gfp_t flags, int node)
97e2bde4 3136{
343e0d7a 3137 struct kmem_cache *cachep;
97e2bde4
MS
3138
3139 cachep = kmem_find_general_cachep(size, flags);
3140 if (unlikely(cachep == NULL))
3141 return NULL;
3142 return kmem_cache_alloc_node(cachep, flags, node);
3143}
3144EXPORT_SYMBOL(kmalloc_node);
1da177e4
LT
3145#endif
3146
3147/**
3148 * kmalloc - allocate memory
3149 * @size: how many bytes of memory are required.
3150 * @flags: the type of memory to allocate.
3151 *
3152 * kmalloc is the normal method of allocating memory
3153 * in the kernel.
3154 *
3155 * The @flags argument may be one of:
3156 *
3157 * %GFP_USER - Allocate memory on behalf of user. May sleep.
3158 *
3159 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
3160 *
3161 * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
3162 *
3163 * Additionally, the %GFP_DMA flag may be set to indicate the memory
3164 * must be suitable for DMA. This can mean different things on different
3165 * platforms. For example, on i386, it means that the memory must come
3166 * from the first 16MB.
3167 */
7fd6b141
PE
3168static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3169 void *caller)
1da177e4 3170{
343e0d7a 3171 struct kmem_cache *cachep;
1da177e4 3172
97e2bde4
MS
3173 /* If you want to save a few bytes .text space: replace
3174 * __ with kmem_.
3175 * Then kmalloc uses the uninlined functions instead of the inline
3176 * functions.
3177 */
3178 cachep = __find_general_cachep(size, flags);
dbdb9045
AM
3179 if (unlikely(cachep == NULL))
3180 return NULL;
7fd6b141
PE
3181 return __cache_alloc(cachep, flags, caller);
3182}
3183
3184#ifndef CONFIG_DEBUG_SLAB
3185
3186void *__kmalloc(size_t size, gfp_t flags)
3187{
3188 return __do_kmalloc(size, flags, NULL);
1da177e4
LT
3189}
3190EXPORT_SYMBOL(__kmalloc);
3191
7fd6b141
PE
3192#else
3193
3194void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3195{
3196 return __do_kmalloc(size, flags, caller);
3197}
3198EXPORT_SYMBOL(__kmalloc_track_caller);
3199
3200#endif
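/*
 * Usage sketch, for illustration only, matching the flag descriptions in
 * the comment above __do_kmalloc():
 *
 *	buf = kmalloc(len, GFP_KERNEL);			sleeping allowed
 *	hdr = kmalloc(sizeof(*hdr), GFP_ATOMIC);	interrupt context
 *	iobuf = kmalloc(256, GFP_KERNEL | GFP_DMA);	DMA-capable memory
 *
 * kfree() accepts NULL and does nothing with it, so error paths need not
 * test the pointer before freeing.
 */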
3201
1da177e4
LT
3202#ifdef CONFIG_SMP
3203/**
3204 * __alloc_percpu - allocate one copy of the object for every present
3205 * cpu in the system, zeroing them.
3206 * Objects should be dereferenced using the per_cpu_ptr macro only.
3207 *
3208 * @size: how many bytes of memory are required.
1da177e4 3209 */
f9f75005 3210void *__alloc_percpu(size_t size)
1da177e4
LT
3211{
3212 int i;
b28a02de 3213 struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
1da177e4
LT
3214
3215 if (!pdata)
3216 return NULL;
3217
e498be7d
CL
3218 /*
3219 * Cannot use for_each_online_cpu since a cpu may come online
3220 * and we have no way of figuring out how to fix the array
3221 * that we have allocated then....
3222 */
3223 for_each_cpu(i) {
3224 int node = cpu_to_node(i);
3225
3226 if (node_online(node))
3227 pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
3228 else
3229 pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
1da177e4
LT
3230
3231 if (!pdata->ptrs[i])
3232 goto unwind_oom;
3233 memset(pdata->ptrs[i], 0, size);
3234 }
3235
3236 /* Catch derefs w/o wrappers */
b28a02de 3237 return (void *)(~(unsigned long)pdata);
1da177e4 3238
b28a02de 3239 unwind_oom:
1da177e4
LT
3240 while (--i >= 0) {
3241 if (!cpu_possible(i))
3242 continue;
3243 kfree(pdata->ptrs[i]);
3244 }
3245 kfree(pdata);
3246 return NULL;
3247}
3248EXPORT_SYMBOL(__alloc_percpu);
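/*
 * Usage sketch, for illustration only, assuming the alloc_percpu() and
 * per_cpu_ptr() wrapper macros of this era rather than calling
 * __alloc_percpu() directly:
 *
 *	long *hits = alloc_percpu(long);
 *	long total = 0;
 *	int cpu;
 *
 *	if (!hits)
 *		return -ENOMEM;
 *	(*per_cpu_ptr(hits, get_cpu()))++;
 *	put_cpu();
 *	for_each_cpu(cpu)
 *		total += *per_cpu_ptr(hits, cpu);
 *	free_percpu(hits);
 */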
3249#endif
3250
3251/**
3252 * kmem_cache_free - Deallocate an object
3253 * @cachep: The cache the allocation was from.
3254 * @objp: The previously allocated object.
3255 *
3256 * Free an object which was previously allocated from this
3257 * cache.
3258 */
343e0d7a 3259void kmem_cache_free(struct kmem_cache *cachep, void *objp)
1da177e4
LT
3260{
3261 unsigned long flags;
3262
3263 local_irq_save(flags);
3264 __cache_free(cachep, objp);
3265 local_irq_restore(flags);
3266}
3267EXPORT_SYMBOL(kmem_cache_free);
3268
1da177e4
LT
3269/**
3270 * kfree - free previously allocated memory
3271 * @objp: pointer returned by kmalloc.
3272 *
80e93eff
PE
3273 * If @objp is NULL, no operation is performed.
3274 *
1da177e4
LT
3275 * Don't free memory not originally allocated by kmalloc()
3276 * or you will run into trouble.
3277 */
3278void kfree(const void *objp)
3279{
343e0d7a 3280 struct kmem_cache *c;
1da177e4
LT
3281 unsigned long flags;
3282
3283 if (unlikely(!objp))
3284 return;
3285 local_irq_save(flags);
3286 kfree_debugcheck(objp);
6ed5eb22 3287 c = virt_to_cache(objp);
3dafccf2 3288 mutex_debug_check_no_locks_freed(objp, obj_size(c));
b28a02de 3289 __cache_free(c, (void *)objp);
1da177e4
LT
3290 local_irq_restore(flags);
3291}
3292EXPORT_SYMBOL(kfree);
3293
3294#ifdef CONFIG_SMP
3295/**
3296 * free_percpu - free previously allocated percpu memory
3297 * @objp: pointer returned by alloc_percpu.
3298 *
 3299 * Don't free memory not originally allocated by alloc_percpu().
3300 * The complemented objp is to check for that.
3301 */
b28a02de 3302void free_percpu(const void *objp)
1da177e4
LT
3303{
3304 int i;
b28a02de 3305 struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
1da177e4 3306
e498be7d
CL
3307 /*
 3308 * We allocate for all cpus, so we cannot use for_each_online_cpu() here.
3309 */
3310 for_each_cpu(i)
b28a02de 3311 kfree(p->ptrs[i]);
1da177e4
LT
3312 kfree(p);
3313}
3314EXPORT_SYMBOL(free_percpu);
3315#endif
3316
343e0d7a 3317unsigned int kmem_cache_size(struct kmem_cache *cachep)
1da177e4 3318{
3dafccf2 3319 return obj_size(cachep);
1da177e4
LT
3320}
3321EXPORT_SYMBOL(kmem_cache_size);
3322
343e0d7a 3323const char *kmem_cache_name(struct kmem_cache *cachep)
1944972d
ACM
3324{
3325 return cachep->name;
3326}
3327EXPORT_SYMBOL_GPL(kmem_cache_name);
3328
e498be7d
CL
3329/*
3330 * This initializes kmem_list3 for all nodes.
3331 */
343e0d7a 3332static int alloc_kmemlist(struct kmem_cache *cachep)
e498be7d
CL
3333{
3334 int node;
3335 struct kmem_list3 *l3;
3336 int err = 0;
3337
3338 for_each_online_node(node) {
3339 struct array_cache *nc = NULL, *new;
3340 struct array_cache **new_alien = NULL;
3341#ifdef CONFIG_NUMA
3342 if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
3343 goto fail;
3344#endif
b28a02de
PE
3345 if (!(new = alloc_arraycache(node, (cachep->shared *
3346 cachep->batchcount),
3347 0xbaadf00d)))
e498be7d
CL
3348 goto fail;
3349 if ((l3 = cachep->nodelists[node])) {
3350
3351 spin_lock_irq(&l3->list_lock);
3352
3353 if ((nc = cachep->nodelists[node]->shared))
b28a02de 3354 free_block(cachep, nc->entry, nc->avail, node);
e498be7d
CL
3355
3356 l3->shared = new;
3357 if (!cachep->nodelists[node]->alien) {
3358 l3->alien = new_alien;
3359 new_alien = NULL;
3360 }
b28a02de
PE
3361 l3->free_limit = (1 + nr_cpus_node(node)) *
3362 cachep->batchcount + cachep->num;
e498be7d
CL
3363 spin_unlock_irq(&l3->list_lock);
3364 kfree(nc);
3365 free_alien_cache(new_alien);
3366 continue;
3367 }
3368 if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
b28a02de 3369 GFP_KERNEL, node)))
e498be7d
CL
3370 goto fail;
3371
3372 kmem_list3_init(l3);
3373 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
b28a02de 3374 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d
CL
3375 l3->shared = new;
3376 l3->alien = new_alien;
b28a02de
PE
3377 l3->free_limit = (1 + nr_cpus_node(node)) *
3378 cachep->batchcount + cachep->num;
e498be7d
CL
3379 cachep->nodelists[node] = l3;
3380 }
3381 return err;
b28a02de 3382 fail:
e498be7d
CL
3383 err = -ENOMEM;
3384 return err;
3385}
3386
1da177e4 3387struct ccupdate_struct {
343e0d7a 3388 struct kmem_cache *cachep;
1da177e4
LT
3389 struct array_cache *new[NR_CPUS];
3390};
3391
3392static void do_ccupdate_local(void *info)
3393{
3394 struct ccupdate_struct *new = (struct ccupdate_struct *)info;
3395 struct array_cache *old;
3396
3397 check_irq_off();
9a2dba4b 3398 old = cpu_cache_get(new->cachep);
e498be7d 3399
1da177e4
LT
3400 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3401 new->new[smp_processor_id()] = old;
3402}
3403
343e0d7a 3404static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
b28a02de 3405 int shared)
1da177e4
LT
3406{
3407 struct ccupdate_struct new;
e498be7d 3408 int i, err;
1da177e4 3409
b28a02de 3410 memset(&new.new, 0, sizeof(new.new));
e498be7d 3411 for_each_online_cpu(i) {
b28a02de
PE
3412 new.new[i] =
3413 alloc_arraycache(cpu_to_node(i), limit, batchcount);
e498be7d 3414 if (!new.new[i]) {
b28a02de
PE
3415 for (i--; i >= 0; i--)
3416 kfree(new.new[i]);
e498be7d 3417 return -ENOMEM;
1da177e4
LT
3418 }
3419 }
3420 new.cachep = cachep;
3421
3422 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
e498be7d 3423
1da177e4 3424 check_irq_on();
ca3b9b91 3425 spin_lock(&cachep->spinlock);
1da177e4
LT
3426 cachep->batchcount = batchcount;
3427 cachep->limit = limit;
e498be7d 3428 cachep->shared = shared;
ca3b9b91 3429 spin_unlock(&cachep->spinlock);
1da177e4 3430
e498be7d 3431 for_each_online_cpu(i) {
1da177e4
LT
3432 struct array_cache *ccold = new.new[i];
3433 if (!ccold)
3434 continue;
e498be7d 3435 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
ff69416e 3436 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
e498be7d 3437 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
1da177e4
LT
3438 kfree(ccold);
3439 }
1da177e4 3440
e498be7d
CL
3441 err = alloc_kmemlist(cachep);
3442 if (err) {
3443 printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
b28a02de 3444 cachep->name, -err);
e498be7d 3445 BUG();
1da177e4 3446 }
3447 return 0;
3448}
3449
343e0d7a 3450static void enable_cpucache(struct kmem_cache *cachep)
3451{
3452 int err;
3453 int limit, shared;
3454
3455 /* The head array serves three purposes:
3456 * - create a LIFO ordering, i.e. return objects that are cache-warm
3457 * - reduce the number of spinlock operations.
3458 * - reduce the number of linked list operations on the slab and
3459 * bufctl chains: array operations are cheaper.
 3460	 * The numbers are guesses; we should auto-tune them as described by
 3461	 * Bonwick.
3462 */
3dafccf2 3463 if (cachep->buffer_size > 131072)
1da177e4 3464 limit = 1;
3dafccf2 3465 else if (cachep->buffer_size > PAGE_SIZE)
1da177e4 3466 limit = 8;
3dafccf2 3467 else if (cachep->buffer_size > 1024)
1da177e4 3468 limit = 24;
3dafccf2 3469 else if (cachep->buffer_size > 256)
3470 limit = 54;
3471 else
3472 limit = 120;
3473
 3474	/* CPU-bound tasks (e.g. network routing) can exhibit CPU-bound
 3475	 * allocation behaviour: most allocs on one CPU, most frees on
 3476	 * another CPU. For these cases, efficient object passing between
 3477	 * CPUs is necessary. This is provided by a shared array. The array
3478 * replaces Bonwick's magazine layer.
3479 * On uniprocessor, it's functionally equivalent (but less efficient)
3480 * to a larger limit. Thus disabled by default.
3481 */
3482 shared = 0;
3483#ifdef CONFIG_SMP
3dafccf2 3484 if (cachep->buffer_size <= PAGE_SIZE)
3485 shared = 8;
3486#endif
3487
3488#if DEBUG
 3489	/* With debugging enabled, a large batchcount leads to excessively
 3490	 * long periods with local interrupts disabled. Limit the
 3491	 * batchcount.
3492 */
3493 if (limit > 32)
3494 limit = 32;
3495#endif
b28a02de 3496 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
3497 if (err)
3498 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
b28a02de 3499 cachep->name, -err);
3500}
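
Illustration only: the buffer-size heuristic of enable_cpucache() restated as a stand-alone user-space function, together with the derived batchcount of (limit + 1) / 2. EXAMPLE_PAGE_SIZE is a hypothetical stand-in for a 4K PAGE_SIZE; the sizes in main() are arbitrary sample values.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096

static int pick_limit(unsigned int buffer_size)
{
        if (buffer_size > 131072)
                return 1;
        if (buffer_size > EXAMPLE_PAGE_SIZE)
                return 8;
        if (buffer_size > 1024)
                return 24;
        if (buffer_size > 256)
                return 54;
        return 120;
}

int main(void)
{
        unsigned int sizes[] = { 64, 512, 2048, 8192, 262144 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int limit = pick_limit(sizes[i]);

                /* batchcount is half the limit, rounded up */
                printf("size %6u: limit %3d, batchcount %3d\n",
                       sizes[i], limit, (limit + 1) / 2);
        }
        return 0;
}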
3501
343e0d7a 3502static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
b28a02de 3503 int force, int node)
3504{
3505 int tofree;
3506
e498be7d 3507 check_spinlock_acquired_node(cachep, node);
3508 if (ac->touched && !force) {
3509 ac->touched = 0;
3510 } else if (ac->avail) {
b28a02de 3511 tofree = force ? ac->avail : (ac->limit + 4) / 5;
1da177e4 3512 if (tofree > ac->avail) {
b28a02de 3513 tofree = (ac->avail + 1) / 2;
1da177e4 3514 }
ff69416e 3515 free_block(cachep, ac->entry, tofree, node);
1da177e4 3516 ac->avail -= tofree;
e498be7d 3517 memmove(ac->entry, &(ac->entry[tofree]),
b28a02de 3518 sizeof(void *) * ac->avail);
3519 }
3520}
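
Illustration only: how many entries drain_array_locked() frees per pass, reproduced as a small user-space function. The limit/avail values are hypothetical; the arithmetic mirrors the code above, roughly a fifth of the limit per pass, capped at about half of what is actually available.

#include <stdio.h>

static int entries_to_free(int limit, int avail, int force)
{
        /* about a fifth of the limit per pass, everything when forced */
        int tofree = force ? avail : (limit + 4) / 5;

        /* never free more than roughly half of what is actually there */
        if (tofree > avail)
                tofree = (avail + 1) / 2;
        return tofree;
}

int main(void)
{
        printf("%d\n", entries_to_free(120, 100, 0)); /* 24 */
        printf("%d\n", entries_to_free(120, 10, 0));  /* 5 */
        printf("%d\n", entries_to_free(120, 10, 1));  /* 10 */
        return 0;
}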
3521
3522/**
3523 * cache_reap - Reclaim memory from caches.
1e5d5331 3524 * @unused: unused parameter
3525 *
3526 * Called from workqueue/eventd every few seconds.
3527 * Purpose:
3528 * - clear the per-cpu caches for this CPU.
3529 * - return freeable pages to the main free memory pool.
3530 *
fc0abb14 3531 * If we cannot acquire the cache chain mutex then just give up - we'll
3532 * try again on the next iteration.
3533 */
3534static void cache_reap(void *unused)
3535{
3536 struct list_head *walk;
e498be7d 3537 struct kmem_list3 *l3;
1da177e4 3538
fc0abb14 3539 if (!mutex_trylock(&cache_chain_mutex)) {
1da177e4 3540		/* Give up. Set up the next iteration. */
3541 schedule_delayed_work(&__get_cpu_var(reap_work),
3542 REAPTIMEOUT_CPUC);
3543 return;
3544 }
3545
3546 list_for_each(walk, &cache_chain) {
343e0d7a 3547 struct kmem_cache *searchp;
b28a02de 3548 struct list_head *p;
3549 int tofree;
3550 struct slab *slabp;
3551
343e0d7a 3552 searchp = list_entry(walk, struct kmem_cache, next);
3553
3554 if (searchp->flags & SLAB_NO_REAP)
3555 goto next;
3556
3557 check_irq_on();
3558
e498be7d 3559 l3 = searchp->nodelists[numa_node_id()];
8fce4d8e 3560 reap_alien(searchp, l3);
e498be7d 3561 spin_lock_irq(&l3->list_lock);
1da177e4 3562
9a2dba4b 3563 drain_array_locked(searchp, cpu_cache_get(searchp), 0,
b28a02de 3564 numa_node_id());
1da177e4 3565
e498be7d 3566 if (time_after(l3->next_reap, jiffies))
3567 goto next_unlock;
3568
e498be7d 3569 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
1da177e4 3570
3571 if (l3->shared)
3572 drain_array_locked(searchp, l3->shared, 0,
b28a02de 3573 numa_node_id());
1da177e4 3574
3575 if (l3->free_touched) {
3576 l3->free_touched = 0;
3577 goto next_unlock;
3578 }
3579
3580 tofree =
3581 (l3->free_limit + 5 * searchp->num -
3582 1) / (5 * searchp->num);
1da177e4 3583 do {
3584 p = l3->slabs_free.next;
3585 if (p == &(l3->slabs_free))
3586 break;
3587
3588 slabp = list_entry(p, struct slab, list);
3589 BUG_ON(slabp->inuse);
3590 list_del(&slabp->list);
3591 STATS_INC_REAPED(searchp);
3592
3593 /* Safe to drop the lock. The slab is no longer
3594 * linked to the cache.
 3595			 * searchp cannot disappear because we hold
 3596			 * cache_chain_mutex.
3597 */
3598 l3->free_objects -= searchp->num;
3599 spin_unlock_irq(&l3->list_lock);
1da177e4 3600 slab_destroy(searchp, slabp);
e498be7d 3601 spin_lock_irq(&l3->list_lock);
3602 } while (--tofree > 0);
3603 next_unlock:
e498be7d 3604 spin_unlock_irq(&l3->list_lock);
b28a02de 3605 next:
3606 cond_resched();
3607 }
3608 check_irq_on();
fc0abb14 3609 mutex_unlock(&cache_chain_mutex);
8fce4d8e 3610 next_reap_node();
1da177e4 3611	/* Set up the next iteration */
cd61ef62 3612 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
3613}
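
Illustration only: the per-pass slab budget computed in cache_reap() above, i.e. a ceiling division giving roughly a fifth of free_limit expressed in whole slabs. The free_limit and objects-per-slab values are hypothetical and continue the earlier example.

#include <stdio.h>

int main(void)
{
        int free_limit = 190;   /* hypothetical l3->free_limit */
        int num        = 30;    /* hypothetical objects per slab */

        /* ceil(free_limit / (5 * num)) completely free slabs per pass */
        int tofree = (free_limit + 5 * num - 1) / (5 * num);

        printf("destroy at most %d empty slab(s) this pass\n", tofree); /* 2 */
        return 0;
}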
3614
3615#ifdef CONFIG_PROC_FS
3616
85289f98 3617static void print_slabinfo_header(struct seq_file *m)
1da177e4 3618{
3619 /*
3620 * Output format version, so at least we can change it
3621 * without _too_ many complaints.
3622 */
1da177e4 3623#if STATS
85289f98 3624 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1da177e4 3625#else
85289f98 3626 seq_puts(m, "slabinfo - version: 2.1\n");
1da177e4 3627#endif
3628 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
3629 "<objperslab> <pagesperslab>");
3630 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
3631 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1da177e4 3632#if STATS
3633 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
3634 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
3635 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1da177e4 3636#endif
3637 seq_putc(m, '\n');
3638}
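
For reference, a single /proc/slabinfo line as produced by s_show() below then follows this header, field by field. The cache name is real but all counts here are made-up sample values (5448 slabs of 17 objects each gives 92616 total objects):

dentry_cache       87201  92616    232   17    1 : tunables  120   60    8 : slabdata   5448   5448      0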
3639
3640static void *s_start(struct seq_file *m, loff_t *pos)
3641{
3642 loff_t n = *pos;
3643 struct list_head *p;
3644
fc0abb14 3645 mutex_lock(&cache_chain_mutex);
3646 if (!n)
3647 print_slabinfo_header(m);
3648 p = cache_chain.next;
3649 while (n--) {
3650 p = p->next;
3651 if (p == &cache_chain)
3652 return NULL;
3653 }
343e0d7a 3654 return list_entry(p, struct kmem_cache, next);
3655}
3656
3657static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3658{
343e0d7a 3659 struct kmem_cache *cachep = p;
3660 ++*pos;
3661 return cachep->next.next == &cache_chain ? NULL
343e0d7a 3662 : list_entry(cachep->next.next, struct kmem_cache, next);
3663}
3664
3665static void s_stop(struct seq_file *m, void *p)
3666{
fc0abb14 3667 mutex_unlock(&cache_chain_mutex);
3668}
3669
3670static int s_show(struct seq_file *m, void *p)
3671{
343e0d7a 3672 struct kmem_cache *cachep = p;
1da177e4 3673 struct list_head *q;
3674 struct slab *slabp;
3675 unsigned long active_objs;
3676 unsigned long num_objs;
3677 unsigned long active_slabs = 0;
3678 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
e498be7d 3679 const char *name;
1da177e4 3680 char *error = NULL;
3681 int node;
3682 struct kmem_list3 *l3;
1da177e4 3683
ca3b9b91 3684 spin_lock(&cachep->spinlock);
3685 active_objs = 0;
3686 num_slabs = 0;
3687 for_each_online_node(node) {
3688 l3 = cachep->nodelists[node];
3689 if (!l3)
3690 continue;
3691
3692 check_irq_on();
3693 spin_lock_irq(&l3->list_lock);
e498be7d 3694
b28a02de 3695 list_for_each(q, &l3->slabs_full) {
3696 slabp = list_entry(q, struct slab, list);
3697 if (slabp->inuse != cachep->num && !error)
3698 error = "slabs_full accounting error";
3699 active_objs += cachep->num;
3700 active_slabs++;
3701 }
b28a02de 3702 list_for_each(q, &l3->slabs_partial) {
3703 slabp = list_entry(q, struct slab, list);
3704 if (slabp->inuse == cachep->num && !error)
3705 error = "slabs_partial inuse accounting error";
3706 if (!slabp->inuse && !error)
3707 error = "slabs_partial/inuse accounting error";
3708 active_objs += slabp->inuse;
3709 active_slabs++;
3710 }
b28a02de 3711 list_for_each(q, &l3->slabs_free) {
3712 slabp = list_entry(q, struct slab, list);
3713 if (slabp->inuse && !error)
3714 error = "slabs_free/inuse accounting error";
3715 num_slabs++;
3716 }
3717 free_objects += l3->free_objects;
3718 if (l3->shared)
3719 shared_avail += l3->shared->avail;
e498be7d 3720
ca3b9b91 3721 spin_unlock_irq(&l3->list_lock);
1da177e4 3722 }
3723 num_slabs += active_slabs;
3724 num_objs = num_slabs * cachep->num;
e498be7d 3725 if (num_objs - active_objs != free_objects && !error)
3726 error = "free_objects accounting error";
3727
b28a02de 3728 name = cachep->name;
3729 if (error)
3730 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3731
3732 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
3dafccf2 3733 name, active_objs, num_objs, cachep->buffer_size,
b28a02de 3734 cachep->num, (1 << cachep->gfporder));
1da177e4 3735 seq_printf(m, " : tunables %4u %4u %4u",
b28a02de 3736 cachep->limit, cachep->batchcount, cachep->shared);
e498be7d 3737 seq_printf(m, " : slabdata %6lu %6lu %6lu",
b28a02de 3738 active_slabs, num_slabs, shared_avail);
1da177e4 3739#if STATS
b28a02de 3740 { /* list3 stats */
3741 unsigned long high = cachep->high_mark;
3742 unsigned long allocs = cachep->num_allocations;
3743 unsigned long grown = cachep->grown;
3744 unsigned long reaped = cachep->reaped;
3745 unsigned long errors = cachep->errors;
3746 unsigned long max_freeable = cachep->max_freeable;
1da177e4 3747 unsigned long node_allocs = cachep->node_allocs;
e498be7d 3748 unsigned long node_frees = cachep->node_frees;
1da177e4 3749
e498be7d 3750 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
b28a02de 3751 %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
3752 }
3753 /* cpu stats */
3754 {
3755 unsigned long allochit = atomic_read(&cachep->allochit);
3756 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
3757 unsigned long freehit = atomic_read(&cachep->freehit);
3758 unsigned long freemiss = atomic_read(&cachep->freemiss);
3759
3760 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
b28a02de 3761 allochit, allocmiss, freehit, freemiss);
3762 }
3763#endif
3764 seq_putc(m, '\n');
ca3b9b91 3765 spin_unlock(&cachep->spinlock);
3766 return 0;
3767}
3768
3769/*
3770 * slabinfo_op - iterator that generates /proc/slabinfo
3771 *
3772 * Output layout:
3773 * cache-name
3774 * num-active-objs
3775 * total-objs
3776 * object size
 3777 * num-objs-per-slab
 3778 * num-pages-per-slab
 3779 * + tunables, slabdata (active and total slabs),
 3780 * and further values on SMP and with statistics enabled
3781 */
3782
3783struct seq_operations slabinfo_op = {
3784 .start = s_start,
3785 .next = s_next,
3786 .stop = s_stop,
3787 .show = s_show,
3788};
3789
3790#define MAX_SLABINFO_WRITE 128
3791/**
3792 * slabinfo_write - Tuning for the slab allocator
3793 * @file: unused
3794 * @buffer: user buffer
3795 * @count: data length
3796 * @ppos: unused
3797 */
3798ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3799 size_t count, loff_t *ppos)
1da177e4 3800{
b28a02de 3801 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
3802 int limit, batchcount, shared, res;
3803 struct list_head *p;
b28a02de 3804
3805 if (count > MAX_SLABINFO_WRITE)
3806 return -EINVAL;
3807 if (copy_from_user(&kbuf, buffer, count))
3808 return -EFAULT;
b28a02de 3809 kbuf[MAX_SLABINFO_WRITE] = '\0';
3810
3811 tmp = strchr(kbuf, ' ');
3812 if (!tmp)
3813 return -EINVAL;
3814 *tmp = '\0';
3815 tmp++;
3816 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
3817 return -EINVAL;
3818
3819 /* Find the cache in the chain of caches. */
fc0abb14 3820 mutex_lock(&cache_chain_mutex);
1da177e4 3821 res = -EINVAL;
b28a02de 3822 list_for_each(p, &cache_chain) {
3823 struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
3824 next);
3825
3826 if (!strcmp(cachep->name, kbuf)) {
3827 if (limit < 1 ||
3828 batchcount < 1 ||
b28a02de 3829 batchcount > limit || shared < 0) {
e498be7d 3830 res = 0;
1da177e4 3831 } else {
e498be7d 3832 res = do_tune_cpucache(cachep, limit,
b28a02de 3833 batchcount, shared);
3834 }
3835 break;
3836 }
3837 }
fc0abb14 3838 mutex_unlock(&cache_chain_mutex);
3839 if (res >= 0)
3840 res = count;
3841 return res;
3842}
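
A minimal user-space sketch of driving the tuning interface parsed by slabinfo_write() above. The written line has the form "cache-name limit batchcount shared"; the cache name and numbers here are hypothetical samples, and writing to /proc/slabinfo normally requires root.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* "<cache-name> <limit> <batchcount> <shared>" */
        const char *cmd = "dentry_cache 120 60 8\n";
        FILE *f = fopen("/proc/slabinfo", "w");

        if (!f) {
                perror("fopen /proc/slabinfo");
                return 1;
        }
        if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd)) {
                perror("fwrite");
                fclose(f);
                return 1;
        }
        fclose(f);
        return 0;
}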
3843#endif
3844
3845/**
3846 * ksize - get the actual amount of memory allocated for a given object
3847 * @objp: Pointer to the object
3848 *
3849 * kmalloc may internally round up allocations and return more memory
3850 * than requested. ksize() can be used to determine the actual amount of
3851 * memory allocated. The caller may use this additional memory, even though
3852 * a smaller amount of memory was initially specified with the kmalloc call.
3853 * The caller must guarantee that objp points to a valid object previously
3854 * allocated with either kmalloc() or kmem_cache_alloc(). The object
3855 * must not be freed during the duration of the call.
3856 */
3857unsigned int ksize(const void *objp)
3858{
3859 if (unlikely(objp == NULL))
3860 return 0;
1da177e4 3861
6ed5eb22 3862 return obj_size(virt_to_cache(objp));
1da177e4 3863}
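
A minimal in-kernel usage sketch, not part of slab.c: a hypothetical module that uses ksize() as documented above to discover how much of kmalloc's rounded-up allocation is actually usable. Function names and the request size are illustrative.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init ksize_demo_init(void)
{
        char *buf = kmalloc(100, GFP_KERNEL);
        unsigned int real;

        if (!buf)
                return -ENOMEM;

        /* kmalloc(100) is served from a larger generic cache; ksize()
         * reports the usable size of that object (e.g. 128 bytes) */
        real = ksize(buf);
        printk(KERN_INFO "asked for 100 bytes, may use %u\n", real);

        kfree(buf);
        return 0;
}

static void __exit ksize_demo_exit(void)
{
}

module_init(ksize_demo_init);
module_exit(ksize_demo_exit);
MODULE_LICENSE("GPL");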