// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
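/*
 * Illustrative example of the encoding described above (example values,
 * not tied to any particular configuration): a free block of 3 units
 * whose next free block lives at unit offset 10 within the page is
 * stored as s[0].units = 3, s[1].units = 10; a 1-unit free block with
 * the same successor is stored simply as s[0].units = -10.  See
 * set_slob() and slob_next() below.
 */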
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->slab_list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->slab_list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

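/*
 * Illustrative sizing (assuming a common 4K PAGE_SIZE): slobidx_t is
 * then an s16, so SLOB_UNIT is 2 bytes and a freshly allocated page
 * provides SLOB_UNITS(PAGE_SIZE) == 2048 units of free space.
 */
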
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	struct page *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
			    1 << order);
	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	struct page *sp = virt_to_page(b);

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;

	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
			    -(1 << order));
	__free_pages(sp, order);
}

/*
 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
 * @sp: Page to look in.
 * @size: Size of the allocation.
 * @align: Allocation alignment.
 * @align_offset: Offset in the allocated block that will be aligned.
 * @page_removed_from_list: Return parameter.
 *
 * Tries to find a chunk of memory at least @size bytes big within @sp.
 *
 * Return: Pointer to memory if allocated, %NULL otherwise.  If the
 *         allocation fills up @sp then the page is removed from the
 *         freelist, in this case @page_removed_from_list will be set to
 *         true (set to false otherwise).
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align,
			      int align_offset, bool *page_removed_from_list)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	*page_removed_from_list = false;
	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		/*
		 * 'aligned' will hold the address of the slob block so that the
		 * address 'aligned'+'align_offset' is aligned according to the
		 * 'align' parameter. This is for kmalloc() which prepends the
		 * allocated block with its size, so that the block itself is
		 * aligned when needed.
		 */
		if (align) {
			aligned = (slob_t *)
				(ALIGN((unsigned long)cur + align_offset, align)
				 - align_offset);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units) {
				clear_slob_page_free(sp);
				*page_removed_from_list = true;
			}
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
							int align_offset)
{
	struct page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	bool _unused;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, slab_list) {
		bool page_removed_from_list = false;
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
		if (!b)
			continue;

		/*
		 * If slob_page_alloc() removed sp from the list then we
		 * cannot call list functions on sp.  If so allocation
		 * did not fragment the page anyway so optimisation is
		 * unnecessary.
		 */
		if (!page_removed_from_list) {
			/*
			 * Improve fragment distribution and reduce our average
			 * search time by starting our next search here. (see
			 * Knuth vol 1, sec 2.5, pg 449)
			 */
			if (!list_is_first(&sp->slab_list, slob_list))
				list_rotate_to_front(&sp->slab_list, slob_list);
		}
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->slab_list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align, align_offset, &_unused);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely(gfp & __GFP_ZERO))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

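/*
 * Worked example of the kmalloc header scheme (illustrative values; the
 * actual minimum alignment is arch-dependent): with minalign == 8,
 * kmalloc(100) asks slob_alloc() for 108 bytes positioned so that the
 * returned object is 8-byte aligned, stores the requested size (100) in
 * the first 4 bytes of the block, and hands back block + 8.  kfree() and
 * __ksize() later read the size back from the header at block - 8.
 */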
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - minalign) {
		int align = minalign;

		/*
		 * For power of two sizes, guarantee natural alignment for
		 * kmalloc()'d objects.
		 */
		if (is_power_of_2(size))
			align = max(minalign, (int) size);

		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + minalign, gfp, align, node, minalign);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + minalign;

		trace_kmalloc_node(caller, ret,
				   size, size + minalign, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else {
		unsigned int order = compound_order(sp);
		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
				    -(1 << order));
		__free_pages(sp, order);
	}
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t __ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return page_size(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(__ksize);

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node, 0);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor) {
		WARN_ON_ONCE(flags & __GFP_ZERO);
		c->ctor(b);
	}

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

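/*
 * RCU callback for SLAB_TYPESAFE_BY_RCU caches: the struct slob_rcu
 * footer occupies the last sizeof(struct slob_rcu) bytes of the object
 * (see __kmem_cache_create() and kmem_cache_free()), so the start of the
 * object is recovered by stepping back size - sizeof(struct slob_rcu)
 * bytes from the footer before freeing.
 */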
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}