/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

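/*
 * Illustrative sketch (not part of the original header): the kind of check
 * callers make against slab_state before relying on the kmalloc array being
 * fully set up; slab_is_available() in the core slab code does essentially
 * this. The helper name here is hypothetical.
 */
static inline bool example_slab_is_up(void)
{
	return slab_state >= UP;
}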
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
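/*
 * Illustrative sketch (not part of the original header): resolving the
 * kmalloc cache that backs a given size, roughly what kmalloc() does
 * internally for sizes within KMALLOC_MAX_CACHE_SIZE. The helper name is
 * hypothetical; kmalloc_slab() returns ZERO_SIZE_PTR for size 0 and NULL
 * for oversized requests.
 */
static inline const char *example_kmalloc_cache_name(size_t size, gfp_t flags)
{
	struct kmem_cache *s = kmalloc_slab(size, flags);

	return ZERO_OR_NULL_PTR(s) ? NULL : s->name;
}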
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

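/*
 * Illustrative sketch (not part of the original header): the kind of flag
 * sanitisation kmem_cache_create() performs, rejecting flags outside
 * SLAB_FLAGS_PERMITTED and then masking with CACHE_CREATE_MASK so only
 * flags supported by the current configuration survive. The helper name is
 * hypothetical.
 */
static inline slab_flags_t example_sanitize_cache_flags(slab_flags_t flags)
{
	if (WARN_ON(flags & ~SLAB_FLAGS_PERMITTED))
		return 0;

	return flags & CACHE_CREATE_MASK;
}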
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

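/*
 * Illustrative sketch (not part of the original header): how a consumer of
 * get_slabinfo(), such as the /proc/slabinfo code, might report per-cache
 * utilisation. The helper name is hypothetical.
 */
static inline unsigned long example_cache_active_objs(struct kmem_cache *s)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);
	return sinfo.active_objs;
}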
/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

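/*
 * Illustrative sketch (not part of the original header): walking the
 * per-memcg children of a root cache with for_each_memcg_cache(). As the
 * comment above notes, slab_mutex must be held. The helper name is
 * hypothetical.
 */
static inline unsigned int example_count_memcg_children(struct kmem_cache *root)
{
	struct kmem_cache *c;
	unsigned int nr = 0;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, root)
		nr++;
	return nr;
}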
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to the cache name for memcg caches because the system
 * cannot have two caches with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Note that the PageSlab() check alone
 * isn't sufficient, as it also returns true for tail pages of compound
 * slab pages, which do not have the slab_cache pointer set.
 * So this function assumes that the page passes the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to the non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	unsigned int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    nr_pages);
		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
		return 0;
	}

	ret = memcg_kmem_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
	css_put_many(&memcg->css, nr_pages);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	unsigned int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
		memcg_kmem_uncharge(memcg, nr_pages);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -nr_pages);
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name);
	return cachep;
}

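/*
 * Illustrative sketch (not part of the original header): how a free path
 * might use cache_from_obj() to validate that an object really belongs to
 * the cache it is being freed to, bailing out when the lookup fails. The
 * helper name is hypothetical.
 */
static inline bool example_can_free_to_cache(struct kmem_cache *s, void *x)
{
	return cache_from_obj(s, x) != NULL;
}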
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						      gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

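/*
 * Illustrative sketch (not part of the original header): the rough shape of
 * an allocation path built around the two hooks above, similar in spirit to
 * slab_alloc() in SLAB and SLUB. example_object_alloc() is a hypothetical
 * allocator-specific backend, declared here only so the sketch is
 * self-contained.
 */
void *example_object_alloc(struct kmem_cache *s, gfp_t flags);

static inline void *example_slab_alloc(struct kmem_cache *s, gfp_t flags)
{
	void *obj;

	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return NULL;

	obj = example_object_alloc(s, flags);
	slab_post_alloc_hook(s, flags, 1, &obj);
	return obj;
}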
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

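/*
 * Illustrative sketch (not part of the original header): walking every node
 * of a cache with for_each_kmem_cache_node(). The helper name is
 * hypothetical, and the nr_partial field only exists for SLUB, hence the
 * guard.
 */
#ifdef CONFIG_SLUB
static inline unsigned long example_count_partial_slabs(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long total = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		total += n->nr_partial;
	return total;
}
#endif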
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
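/*
 * Illustrative sketch (not part of the original header): how a free path
 * might consume slab_want_init_on_free(), wiping the object before it goes
 * back on the freelist, similar in spirit to what SLAB and SLUB do. The
 * helper name is hypothetical.
 */
static inline void example_clear_on_free(struct kmem_cache *s, void *obj)
{
	if (unlikely(slab_want_init_on_free(s)))
		memset(obj, 0, s->object_size);
}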

#endif /* MM_SLAB_H */