#endif
_SLAB_OBJECT_POISON,
_SLAB_CMPXCHG_DOUBLE,
-#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
-#endif
_SLAB_FLAGS_LAST_BIT
};
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Slab created using create_boot_cache */
-#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
-#else
-#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
-#endif
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
#endif
}
+static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
+					     struct kmem_cache_args *args)
+{
+ unsigned int capacity;
+ size_t size;
+
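+	/* Sheaves are not used with SLUB_TINY or on caches with debugging enabled */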
+	if (IS_ENABLED(CONFIG_SLUB_TINY) || (s->flags & SLAB_DEBUG_FLAGS))
+ return 0;
+
+ /*
+ * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
+ * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
+ * have sheaves to avoid recursion when sheaf allocation triggers
+ * kmemleak tracking.
+ */
+ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
+ return 0;
+
+	/*
+	 * For now, use a formula roughly similar to the one used for percpu
+	 * partial slabs (halved, as there are two percpu sheaves), which
+	 * should result in similar lock contention (barn or list_lock).
+	 */
+ if (s->size >= PAGE_SIZE)
+ capacity = 4;
+ else if (s->size >= 1024)
+ capacity = 12;
+ else if (s->size >= 256)
+ capacity = 26;
+ else
+ capacity = 60;
+
+	/* Grow the capacity so the sheaf exactly fills a kmalloc size bucket */
+ size = struct_size_t(struct slab_sheaf, objects, capacity);
+ size = kmalloc_size_roundup(size);
+ capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
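+	/*
+	 * Worked example (assuming a 64-bit kernel where the slab_sheaf
+	 * header occupies 32 bytes; the exact header size is an assumption
+	 * for illustration): a 300-byte object starts at capacity 26, so the
+	 * sheaf needs 32 + 26 * 8 = 240 bytes, kmalloc_size_roundup() bumps
+	 * that to the 256-byte bucket, and the final capacity becomes
+	 * (256 - 32) / 8 = 28.
+	 */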
+
+	/*
+	 * Respect an explicitly requested capacity, which is typically
+	 * motivated by the expected maximum size of kmem_cache_prefill_sheaf()
+	 * requests, so those don't end up using low-performance oversize
+	 * sheaves.
+	 */
+ return max(capacity, args->sheaf_capacity);
+}
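+
+/*
+ * Usage sketch (hypothetical caller; the cache name and obj_size below are
+ * illustrative only): a cache expecting kmem_cache_prefill_sheaf() requests
+ * of up to 96 objects can ask for that capacity explicitly, and the max()
+ * above then guarantees at least that value:
+ *
+ *	struct kmem_cache_args args = { .sheaf_capacity = 96 };
+ *
+ *	s = kmem_cache_create("example_cache", obj_size, &args, 0);
+ */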
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;
+ /* kmalloc caches need extra care to support sheaves */
+ if (!is_kmalloc_cache(s))
+ s->sheaf_capacity = calculate_sheaf_capacity(s, args);
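+	/* kmalloc caches thus keep a zero capacity and get no percpu sheaves */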
+
/*
* Determine the number of objects per slab
*/
set_cpu_partial(s);
- if (args->sheaf_capacity && !IS_ENABLED(CONFIG_SLUB_TINY)
- && !(s->flags & SLAB_DEBUG_FLAGS)) {
+ if (s->sheaf_capacity) {
s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
if (!s->cpu_sheaves) {
err = -ENOMEM;
goto out;
}
- // TODO: increase capacity to grow slab_sheaf up to next kmalloc size?
- s->sheaf_capacity = args->sheaf_capacity;
}
#ifdef CONFIG_NUMA