NR_SLUB_STAT_ITEMS
};
-#ifndef CONFIG_SLUB_TINY
/*
* When changing the layout, make sure freelist and tid are still compatible
* with this_cpu_cmpxchg_double() alignment requirements.
unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
-#endif /* CONFIG_SLUB_TINY */
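With both CONFIG_SLUB_TINY guards gone, struct kmem_cache_cpu is built unconditionally. The comment above is about keeping freelist and tid adjacent and suitably aligned so the fast path can update both with a single double-word cmpxchg. A minimal sketch of that layout constraint, using an illustrative structure name that is not the kernel's:

#include <linux/compiler.h>

/* Illustrative only: freelist and tid are packed together and aligned
 * to two words so one double-word cmpxchg can update both at once. */
struct cpu_freelist_sketch {
	void **freelist;	/* next free object, NULL if none */
	unsigned long tid;	/* transaction id used to detect races */
} __aligned(2 * sizeof(void *));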
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
return freelist_ptr_decode(s, p, ptr_addr);
}
-#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
prefetchw(object + s->offset);
}
-#endif
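prefetch_freepointer() is now compiled in unconditionally. It works because a free object stores the address of the next free object at offset s->offset inside itself, so prefetchw() warms the cache line the next allocation will read and write. A simplified reader, ignoring the CONFIG_SLAB_FREELIST_HARDENED encoding that freelist_ptr_decode() handles; the helper name is made up:

/* Made-up simplified helper: read the next-free pointer stored at
 * s->offset inside a free object, with no hardening applied. */
static inline void *get_freepointer_plain(const struct kmem_cache *s,
					  void *object)
{
	return *(void **)(object + s->offset);
}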
/*
* When running under KMSAN, get_freepointer_safe() may return an uninitialized
return s->cpu_partial_slabs;
}
#else
+#ifdef SLAB_SUPPORTS_SYSFS
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}
+#endif
static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
-#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
void **freelist, void *nextfree)
{
return false;
}
-#endif
#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_SLAB_OBJ_EXT
return get_any_partial(s, pc);
}
-#ifndef CONFIG_SLUB_TINY
-
#ifdef CONFIG_PREEMPTION
/*
* Calculate the next globally unique transaction for disambiguation
return c->slab || slub_percpu_partial(c);
}
-#else /* CONFIG_SLUB_TINY */
-static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
-static inline bool has_cpu_slab(int cpu, struct kmem_cache *s) { return false; }
-static inline void flush_this_cpu_slab(struct kmem_cache *s) { }
-#endif /* CONFIG_SLUB_TINY */
-
static bool has_pcs_used(int cpu, struct kmem_cache *s)
{
struct slub_percpu_sheaves *pcs;
return true;
}
-#ifndef CONFIG_SLUB_TINY
static inline bool
__update_cpu_freelist_fast(struct kmem_cache *s,
void *freelist_old, void *freelist_new,
pc.orig_size = orig_size;
slab = get_partial(s, node, &pc);
if (slab) {
- if (kmem_cache_debug(s)) {
+ if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
freelist = pc.object;
/*
* For debug caches here we had to go through
stat(s, ALLOC_SLAB);
- if (kmem_cache_debug(s)) {
+ if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
if (unlikely(!freelist)) {
return object;
}
-#else /* CONFIG_SLUB_TINY */
-static void *__slab_alloc_node(struct kmem_cache *s,
- gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
-{
- struct partial_context pc;
- struct slab *slab;
- void *object;
-
- pc.flags = gfpflags;
- pc.orig_size = orig_size;
- slab = get_partial(s, node, &pc);
-
- if (slab)
- return pc.object;
-
- slab = new_slab(s, gfpflags, node);
- if (unlikely(!slab)) {
- slab_out_of_memory(s, gfpflags, node);
- return NULL;
- }
-
- object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
-
- return object;
-}
-#endif /* CONFIG_SLUB_TINY */
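The SLUB_TINY-only __slab_alloc_node() above is removed; tiny configurations now run the common function and are steered into the same single-object path as debug caches by the IS_ENABLED(CONFIG_SLUB_TINY) checks added above. IS_ENABLED() expands to a compile-time 0 or 1, so the unused branch is still discarded, but both branches stay visible to the compiler and are type-checked in every configuration. A generic illustration of the pattern; the function and messages are made up:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* Made-up example of the IS_ENABLED() pattern used by this patch: both
 * branches always compile, and the constant condition lets the compiler
 * drop the dead one, much as the old #ifdef did. */
static void alloc_path_example(struct kmem_cache *s)
{
	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s))
		pr_debug("taking the single-object slow path\n");
	else
		pr_debug("taking the per-cpu freelist fast path\n");
}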
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* it did local_lock_irqsave(&s->cpu_slab->lock, flags).
* In this case fast path with __update_cpu_freelist_fast() is not safe.
*/
-#ifndef CONFIG_SLUB_TINY
if (!in_nmi() || !local_lock_is_locked(&s->cpu_slab->lock))
-#endif
ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
if (PTR_ERR(ret) == -EBUSY) {
llist_for_each_safe(pos, t, llnode) {
struct slab *slab = container_of(pos, struct slab, llnode);
-#ifdef CONFIG_SLUB_TINY
- free_slab(slab->slab_cache, slab);
-#else
if (slab->frozen)
deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
else
free_slab(slab->slab_cache, slab);
-#endif
}
}
irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
}
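For context on the loop above: deferred freeing pushes work onto a per-CPU lockless list from contexts that cannot take the slab locks, and an irq_work callback drains that list later in a safe context; frozen slabs are CPU slabs and therefore go through deactivate_slab() rather than being freed outright. A rough sketch of the queueing side, using illustrative names that are not the kernel's:

#include <linux/llist.h>
#include <linux/irq_work.h>

/* Illustrative names only: push one node onto a per-CPU lockless list
 * and kick the irq_work when the list goes from empty to non-empty, so
 * the callback runs once per batch rather than once per item. */
struct defer_sketch {
	struct llist_head head;
	struct irq_work work;
};

static void defer_push(struct defer_sketch *df, struct llist_node *node)
{
	if (llist_add(node, &df->head))
		irq_work_queue(&df->work);
}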
-#ifndef CONFIG_SLUB_TINY
/*
* Fastpath with forced inlining to produce a kfree and kmem_cache_free that
* can perform fastpath freeing without additional function calls.
}
stat_add(s, FREE_FASTPATH, cnt);
}
-#else /* CONFIG_SLUB_TINY */
-static void do_slab_free(struct kmem_cache *s,
- struct slab *slab, void *head, void *tail,
- int cnt, unsigned long addr)
-{
- __slab_free(s, slab, head, tail, cnt, addr);
-}
-#endif /* CONFIG_SLUB_TINY */
static __fastpath_inline
void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
* since kasan quarantine takes locks and not supported from NMI.
*/
kasan_slab_free(s, x, false, false, /* skip quarantine */true);
-#ifndef CONFIG_SLUB_TINY
do_slab_free(s, slab, x, x, 0, _RET_IP_);
-#else
- defer_free(s, x);
-#endif
}
EXPORT_SYMBOL_GPL(kfree_nolock);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
-#ifndef CONFIG_SLUB_TINY
static inline
int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
return 0;
}
-#else /* CONFIG_SLUB_TINY */
-static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- void *object = kfence_alloc(s, s->object_size, flags);
-
- if (unlikely(object)) {
- p[i] = object;
- continue;
- }
-
- p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
- _RET_IP_, s->object_size);
- if (unlikely(!p[i]))
- goto error;
-
- maybe_wipe_obj_freeptr(s, p[i]);
- }
-
- return i;
-
-error:
- __kmem_cache_free_bulk(s, i, p);
- return 0;
-}
-#endif /* CONFIG_SLUB_TINY */
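The SLUB_TINY bulk-allocation fallback above is removed, so every configuration shares the single __kmem_cache_alloc_bulk() implementation. For reference, a made-up caller of the public bulk API: kmem_cache_alloc_bulk() returns the number of objects it filled in (0 on failure), kmem_cache_free_bulk() hands them back, and as noted below it must be called with interrupts enabled.

#include <linux/kernel.h>
#include <linux/slab.h>

/* Made-up caller: allocate up to 16 objects from a cache in one call,
 * use them, then return them in bulk. */
static int bulk_demo(struct kmem_cache *cache)
{
	void *objs[16];
	int n;

	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return -ENOMEM;

	/* ... use objs[0..n-1] ... */

	kmem_cache_free_bulk(cache, n, objs);
	return 0;
}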
/* Note that interrupts must be enabled when calling this function. */
int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
barn_init(barn);
}
-#ifndef CONFIG_SLUB_TINY
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
return 1;
}
-#else
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
-{
- return 1;
-}
-#endif /* CONFIG_SLUB_TINY */
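With the tiny stub removed, alloc_kmem_cache_cpus() always allocates the per-CPU structures. The core step, sketched with a hypothetical helper (the real function also performs the BUILD_BUG_ON shown above and further setup): the per-CPU area is aligned to two words so the freelist/tid pair can be updated with a double-word cmpxchg.

#include <linux/percpu.h>

/* Hypothetical sketch of the allocation step only: request double-word
 * alignment for the freelist/tid pair, and follow the caller's 1/0
 * success convention. */
static int alloc_cpu_structs_sketch(struct kmem_cache *s)
{
	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
				     2 * sizeof(void *));
	return s->cpu_slab ? 1 : 0;
}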
static int init_percpu_sheaves(struct kmem_cache *s)
{
cache_random_seq_destroy(s);
if (s->cpu_sheaves)
pcs_destroy(s);
-#ifndef CONFIG_SLUB_TINY
#ifdef CONFIG_PREEMPT_RT
if (s->cpu_slab)
lockdep_unregister_key(&s->lock_key);
#endif
free_percpu(s->cpu_slab);
-#endif
free_kmem_cache_nodes(s);
}
void __init kmem_cache_init_late(void)
{
-#ifndef CONFIG_SLUB_TINY
flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
WARN_ON(!flushwq);
-#endif
}
struct kmem_cache *