slab->obj_exts = 0;
}
+/*
+ * Calculate the allocation size for a slabobj_ext array.
+ *
+ * When memory allocation profiling is enabled, the obj_exts array
+ * could be allocated from the same slab cache it is allocated for.
+ * That slab could then never be freed, because it would always
+ * contain at least one allocated object (its own obj_exts array).
+ *
+ * To avoid this, bump the allocation size when the array would come
+ * from the same cache, forcing it into a different cache.
+ */
+static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
+ struct slab *slab, gfp_t gfp)
+{
+ size_t sz = sizeof(struct slabobj_ext) * slab->objects;
+ struct kmem_cache *obj_exts_cache;
+
+ /*
+ * Without memory allocation profiling there is nothing to do:
+ * slabobj_ext arrays for KMALLOC_CGROUP allocations are served
+ * from KMALLOC_NORMAL caches, so the array can never come from
+ * the cache it is allocated for.
+ */
+ if (!mem_alloc_profiling_enabled())
+ return sz;
+
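+ /* Sizes above KMALLOC_MAX_CACHE_SIZE are served by the page allocator. */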
+ if (sz > KMALLOC_MAX_CACHE_SIZE)
+ return sz;
+
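+ /* Only KMALLOC_NORMAL caches can end up serving their own obj_exts. */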
+ if (!is_kmalloc_normal(s))
+ return sz;
+
+ obj_exts_cache = kmalloc_slab(sz, NULL, gfp, 0);
+ /*
+ * We can't simply compare s with obj_exts_cache: with random
+ * kmalloc caches there are multiple caches per size, selected by
+ * caller address, and the caller address may differ between this
+ * kmalloc_slab() lookup and the actual allocation. Instead, bump
+ * the size when the object sizes are equal.
+ */
+ if (s->object_size == obj_exts_cache->object_size)
+ return obj_exts_cache->object_size + 1;
+
+ return sz;
+}
+
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab)
{
unsigned long new_exts;
unsigned long old_exts;
struct slabobj_ext *vec;
+ size_t sz;
gfp &= ~OBJCGS_CLEAR_MASK;
/* Prevent recursive extension vector allocation */
gfp |= __GFP_NO_OBJ_EXT;
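+ /* May bump the size so the vector lands in a different cache than s. */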
+ sz = obj_exts_alloc_size(s, slab, gfp);
+
/*
* Note that allow_spin may be false during early boot, when gfp is
* restricted by GFP_BOOT_MASK. Since kmalloc_nolock() only supports
* architectures with cmpxchg16b, obj_exts will be missing for very
* early allocations on architectures without it.
*/
- if (unlikely(!allow_spin)) {
- size_t sz = objects * sizeof(struct slabobj_ext);
-
+ if (unlikely(!allow_spin))
vec = kmalloc_nolock(sz, __GFP_ZERO | __GFP_NO_OBJ_EXT,
slab_nid(slab));
- } else {
- vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
- slab_nid(slab));
- }
+ else
+ vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
+
if (!vec) {
/*
* Try to mark vectors which failed to allocate.
return -ENOMEM;
}
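+ /*
+ * obj_exts_alloc_size() is supposed to have steered the vector
+ * into a different cache; warn if it still landed in s.
+ */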
+ VM_WARN_ON_ONCE(virt_to_slab(vec) != NULL &&
+ virt_to_slab(vec)->slab_cache == s);
+
new_exts = (unsigned long)vec;
if (unlikely(!allow_spin))
new_exts |= OBJEXTS_NOSPIN_ALLOC;