/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);
/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
#endif
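
/*
 * Illustrative note (not part of the original header): the asserts above
 * are what keep the struct page / struct slab overlay honest. A
 * hypothetical new slab field that reuses a struct page field (names here
 * are made up) would need its own check while SLAB_MATCH is still defined:
 *
 *	SLAB_MATCH(private, some_slab_field);	// hypothetical
 *
 * so that any struct page layout change breaks the build here rather than
 * silently corrupting slab metadata at runtime.
 */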
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))
/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))
/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))
/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
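
/*
 * Usage sketch (illustrative only): code holding a slab that must call a
 * folio- or page-based mm interface converts explicitly instead of casting:
 *
 *	struct folio *folio = slab_folio(slab);
 *	struct page *page = slab_page(slab);
 *
 *	if (folio_test_slab(folio))
 *		slab = folio_slab(folio);
 *
 * Keeping the casts behind these helpers means the representation can
 * change later without touching the callers.
 */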
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}
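
/*
 * Usage sketch (illustrative only, based on how the allocation paths are
 * expected to use these helpers): an allocator propagates the pfmemalloc
 * state from the freshly allocated folio roughly like:
 *
 *	if (folio_is_pfmemalloc(folio))
 *		slab_set_pfmemalloc(folio_slab(folio));
 *
 * Consumers then call slab_test_pfmemalloc() to learn whether an object
 * came from the reserves. Note the state is stashed in the folio's
 * "active" flag, which slab pages do not otherwise use.
 */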
static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
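
/*
 * Worked example (illustrative only): given an object pointer, the helpers
 * above compose to recover the backing slab and its extent:
 *
 *	struct slab *slab = virt_to_slab(ptr);	// NULL if not a slab page
 *
 *	if (slab) {
 *		void *base = slab_address(slab);
 *		size_t span = slab_size(slab);	// PAGE_SIZE << order
 *
 *		WARN_ON(ptr < base || ptr >= base + span);
 *	}
 */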
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
					slab_flags_t flags,
					unsigned int useroffset,
					unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)
#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif
#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif
/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif
/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
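
/*
 * Usage sketch (illustrative only): callers pass one or more flags from
 * SLAB_DEBUG_FLAGS, since anything else trips the VM_WARN_ON_ONCE() above:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		... consult the stored alloc/free tracks ...
 *
 * When CONFIG_SLUB_DEBUG is off, __slub_debug_enabled() is constant false,
 * so debug-only paths guarded by this helper compile away entirely.
 */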
#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);
static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}
static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
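
/*
 * Worked example (illustrative numbers): for a cache with s->size == 256 on
 * a 64-bit kernel, each accounted object is charged
 * 256 + sizeof(struct obj_cgroup *) == 264 bytes, so a batch of 8 objects
 * charges 8 * 264 == 2112 bytes to the objcg rather than 2048.
 */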
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}
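
/*
 * Pairing sketch (illustrative only): allocation slow paths are expected to
 * bracket the actual allocation with the pre- and post-hooks:
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	if (!memcg_slab_pre_alloc_hook(s, lru, &objcg, size, flags))
 *		return NULL;			// charge refused
 *	... allocate objects into p[] ...
 *	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 *
 * The pre-hook charges up front; the post-hook attaches the objcg to each
 * object, or uncharges the slots that were not filled.
 */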
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}
static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}
static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
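
/*
 * Worked example (illustrative only): under SLUB, a cache created with
 * SLAB_STORE_USER but without red zoning or poisoning falls through to the
 * SLAB_TYPESAFE_BY_RCU/SLAB_STORE_USER branch above: the alloc/free tracks
 * sit behind the object, so only the first s->inuse bytes are usable and
 * that is what slab_ksize() reports. With no debug flags at all, the whole
 * s->size (object plus any padding) is fair game.
 */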
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}
static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For a kmalloc object, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone check
	 * is enabled for the extra space, don't zero it, as it will be
	 * redzoned soon. The redzone operation for this extra space could be
	 * seen as a replacement of current poisoning under certain debug
	 * options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
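
/*
 * Usage sketch (illustrative only): summing the partial lists of a cache
 * across all nodes with the iterator above:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;	// SLUB field, see above
 */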
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
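
/*
 * Behaviour sketch (illustrative only): with init_on_alloc enabled, a cache
 * without a constructor and without SLAB_TYPESAFE_BY_RCU/SLAB_POISON gets
 * unconditional zeroing:
 *
 *	slab_want_init_on_alloc(GFP_KERNEL, c)		    -> true
 *
 * whereas a SLAB_TYPESAFE_BY_RCU cache only zeroes when the caller asked:
 *
 *	slab_want_init_on_alloc(GFP_KERNEL | __GFP_ZERO, c) -> true
 *	slab_want_init_on_alloc(GFP_KERNEL, c)		    -> false
 *
 * slab_want_init_on_free() is analogous but never applies to such caches,
 * since zeroing on free could destroy objects that RCU readers may still
 * be accessing.
 */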
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif
#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif
#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */