*
* The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT;
* memcg charging is forced over limit if necessary, to avoid failure.
+ *
+ * It is possible that the allocation is served by kfence; in that case the
+ * sheaf size is not decreased.
*/
void *
kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
if (sheaf->size == 0)
goto out;
- ret = sheaf->objects[--sheaf->size];
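+	/*
+	 * Let kfence intercept the occasional sampled allocation; when it
+	 * does, the object comes from the kfence pool and the sheaf is
+	 * left untouched.
+	 */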
+ ret = kfence_alloc(s, s->object_size, gfp);
+
+ if (likely(!ret))
+ ret = sheaf->objects[--sheaf->size];
init = slab_want_init_on_alloc(gfp, s);
local_lock_irqsave(&s->cpu_slab->lock, irqflags);
for (i = 0; i < size; i++) {
- void *object = kfence_alloc(s, s->object_size, flags);
-
- if (unlikely(object)) {
- p[i] = object;
- continue;
- }
-		object = c->freelist;
+		void *object = c->freelist;
if (unlikely(!object)) {
/*
* We may have removed an object from c->freelist using
void **p)
{
unsigned int i = 0;
+ void *kfence_obj;
if (!size)
return 0;
if (unlikely(!s))
return 0;
+	/*
+	 * To keep things simple, assume at most one kfence-allocated object
+	 * per bulk allocation and choose its index randomly.
+	 */
+ kfence_obj = kfence_alloc(s, s->object_size, flags);
+
+ if (unlikely(kfence_obj)) {
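+		/* a single-object request is served entirely by kfence */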
+ if (unlikely(size == 1)) {
+ p[0] = kfence_obj;
+ goto out;
+ }
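+		/*
+		 * Bulk-allocate one object less; the kfence object is placed
+		 * at a random index once the other objects are allocated.
+		 */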
+ size--;
+ }
+
if (s->cpu_sheaves)
i = alloc_from_pcs_bulk(s, size, p);
if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
if (i > 0)
__kmem_cache_free_bulk(s, i, p);
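+			/* the kfence object was never exposed to the caller */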
+ if (kfence_obj)
+ __kfence_free(kfence_obj);
return 0;
}
}
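+	/*
+	 * Insert the kfence object at a random index among the allocated
+	 * objects, moving the object it displaces to the end.
+	 */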
+ if (unlikely(kfence_obj)) {
+ int idx = get_random_u32_below(size + 1);
+
+ if (idx != size)
+ p[size] = p[idx];
+ p[idx] = kfence_obj;
+
+ size++;
+ }
+
+out:
/*
* memcg and kmem_cache debug support and memory initialization.
* Done outside of the IRQ disabled fastpath loop.