#ifdef CONFIG_LOCKDEP
	___GFP_NOLOCKDEP_BIT,
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
	___GFP_NO_OBJ_EXT_BIT,
-#endif
	___GFP_LAST_BIT
};
#else
#define ___GFP_NOLOCKDEP 0
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
-#else
-#define ___GFP_NO_OBJ_EXT 0
-#endif
/*
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
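The hunk above drops the CONFIG_SLAB_OBJ_EXT guards, so ___GFP_NO_OBJ_EXT_BIT is always reserved and ___GFP_NO_OBJ_EXT never falls back to 0. The slub.c changes below depend on that: they set and test the flag in every configuration, and a zero define would silently turn both operations into no-ops. A minimal sketch of that dependency follows; gfp_no_obj_ext_marked() is a hypothetical helper, and the __GFP_NO_OBJ_EXT wrapper define is quoted only for context from elsewhere in gfp_types.h.

/* For context: the public flag wraps the internal bit further down the header. */
#define __GFP_NO_OBJ_EXT	((__force gfp_t)___GFP_NO_OBJ_EXT)

/*
 * Hypothetical helper: with the old CONFIG_SLAB_OBJ_EXT=n fallback of 0,
 * this test could never succeed, so a caller that marks its nested
 * allocations with the flag would have no way to detect re-entry.
 */
static inline bool gfp_no_obj_ext_marked(gfp_t gfp)
{
	return !!(gfp & __GFP_NO_OBJ_EXT);
}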
}
#endif /* CONFIG_SLUB_DEBUG */
+/*
+ * The allocated objcg pointers array is not accounted directly.
+ * Moreover, it should not come from DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
+				 __GFP_ACCOUNT | __GFP_NOFAIL)
+
#ifdef CONFIG_SLAB_OBJ_EXT
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
-/*
- * The allocated objcg pointers array is not accounted directly.
- * Moreover, it should not come from DMA buffer and is not readily
- * reclaimable. So those GFP bits should be masked off.
- */
-#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
-				 __GFP_ACCOUNT | __GFP_NOFAIL)
-
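Hoisting OBJCGS_CLEAR_MASK and its comment above the CONFIG_SLAB_OBJ_EXT block keeps a single definition that both the obj_exts code and the new sheaf path can use; alloc_empty_sheaf() below strips the same bits before its kzalloc(). A minimal sketch of the intended use, with a hypothetical helper standing in for the existing alloc_slab_obj_exts() caller:

/*
 * Hypothetical helper sketching how the mask is applied: the metadata
 * allocation must not inherit DMA or reclaimable placement, must not be
 * accounted itself, and must be allowed to fail.
 */
static void *alloc_metadata_array(size_t nr, size_t elem_size, gfp_t gfp, int node)
{
	gfp &= ~OBJCGS_CLEAR_MASK;

	return kcalloc_node(nr, elem_size, gfp, node);
}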
static inline void init_slab_obj_exts(struct slab *slab)
{
	slab->obj_exts = 0;
static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
{
-	struct slab_sheaf *sheaf = kzalloc(struct_size(sheaf, objects,
-					s->sheaf_capacity), gfp);
+	struct slab_sheaf *sheaf;
+	size_t sheaf_size;
+
+	if (gfp & __GFP_NO_OBJ_EXT)
+		return NULL;
+
+	gfp &= ~OBJCGS_CLEAR_MASK;
+
+	/*
+	 * Prevent recursion to the same cache, or a deep stack of kmallocs of
+	 * varying sizes (sheaf capacity might differ for each kmalloc size
+	 * bucket)
+	 */
+	if (s->flags & SLAB_KMALLOC)
+		gfp |= __GFP_NO_OBJ_EXT;
+
+	sheaf_size = struct_size(sheaf, objects, s->sheaf_capacity);
+	sheaf = kzalloc(sheaf_size, gfp);
	if (unlikely(!sheaf))
		return NULL;
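For reference, a sketch of the recursion the SLAB_KMALLOC branch guards against, assuming the kmalloc caches themselves use sheaves: the sheaf is kzalloc()ed, so refilling a kmalloc cache's sheaf could itself trigger another sheaf allocation, one level per kmalloc size bucket. Because __GFP_NO_OBJ_EXT is propagated to those nested kmallocs, any such attempt returns NULL immediately and the caller simply proceeds without a sheaf. sheaf_recursion_demo() below is hypothetical and only illustrates the early-return behaviour:

/*
 * Hypothetical demonstration (not part of the patch): a caller whose gfp
 * already carries __GFP_NO_OBJ_EXT, as the nested kzalloc() above would,
 * gets NULL back from alloc_empty_sheaf() before any allocation happens.
 */
static void sheaf_recursion_demo(struct kmem_cache *s)
{
	struct slab_sheaf *sheaf;

	sheaf = alloc_empty_sheaf(s, GFP_KERNEL | __GFP_NO_OBJ_EXT);
	WARN_ON(sheaf != NULL);	/* stopped by the __GFP_NO_OBJ_EXT check */
}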