/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * in the array may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

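/*
 * Which node vmstat counter slab pages of this cache are accounted
 * against: NR_SLAB_RECLAIMABLE for caches created with
 * SLAB_RECLAIM_ACCOUNT, NR_SLAB_UNRECLAIMABLE for everything else.
 */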
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
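
/*
 * Minimal usage sketch (hypothetical caller, "root_cache" is just an
 * example variable); slab_mutex must be held while the children list
 * is walked:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("memcg cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */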

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it also returns true for tail pages of compound
 * slab pages, which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass both the PageHead() and
 * PageSlab() checks.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    (1 << order));
		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
		return 0;
	}

	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	css_put_many(&memcg->css, 1 << order);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
		memcg_kmem_uncharge_memcg(page, order, memcg);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

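/*
 * Resolve the kmem_cache an object was allocated from via the
 * slab_cache pointer of its head page. Warns once and returns NULL if
 * the object does not sit on a slab page.
 */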
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

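/*
 * Account a just-allocated slab page: root caches only bump the node
 * vmstat counter, non-root (memcg) caches are additionally charged to
 * their memory cgroup via memcg_charge_slab(). uncharge_slab_page()
 * below is the exact inverse, used when the page is freed.
 */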
static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}

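/*
 * Resolve the cache an object really belongs to. The cache passed in by
 * the caller is trusted unless memcg kmem, freelist hardening or
 * consistency checks are active; in that case the cache is looked up
 * from the object's page and a mismatch triggers a one-time warning.
 */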
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name);
	return cachep;
}

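/*
 * Usable size of an object slot. Without SLUB this is simply
 * object_size; with SLUB it is object_size when red-zoning, poisoning
 * or KASAN claim the space behind the object, s->inuse when a freelist
 * pointer or user tracking is stored there, and the whole slot
 * (s->size) otherwise.
 */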
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

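/*
 * Hook shared by the allocators and run before every allocation: masks
 * the gfp flags with gfp_allowed_mask, performs the fs_reclaim and
 * might_sleep annotations, gives fault injection a chance to fail the
 * allocation, and redirects accounted allocations to the per-memcg
 * cache. Returns NULL if the allocation should fail.
 */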
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

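/*
 * Counterpart hook run after a (bulk) allocation: lets KASAN
 * initialize/tag every object, informs kmemleak about the allocations,
 * and drops the reference on the per-memcg cache taken in
 * slab_pre_alloc_hook().
 */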
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

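/*
 * Minimal usage sketch (SLUB only, since nr_partial is a SLUB field):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */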
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

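/*
 * slab_want_init_on_alloc()/slab_want_init_on_free() decide whether the
 * allocator should zero an object when the init_on_alloc/init_on_free
 * static keys are enabled. Caches with a constructor never opt in, and
 * caches using RCU type-safety or poisoning only honour an explicit
 * __GFP_ZERO at allocation time.
 */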
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */