flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
-static void __free_slab(struct kmem_cache *s, struct slab *slab)
+static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
{
struct page *page = slab_page(slab);
int order = compound_order(page);
int pages = 1 << order;
__ClearPageSlab(page);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
- free_frozen_pages(page, order);
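+ /* Callers with !allow_spin cannot spin on locks; use the _nolock variant */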
+ if (allow_spin)
+ free_frozen_pages(page, order);
+ else
+ free_frozen_pages_nolock(page, order);
+}
+
+static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
+{
+ /*
+ * Since it was just allocated, we can skip the actions in
+ * discard_slab() and free_slab().
+ */
+ __free_slab(s, slab, false);
}
static void rcu_free_slab(struct rcu_head *h)
{
struct slab *slab = container_of(h, struct slab, rcu_head);
- __free_slab(slab->slab_cache, slab);
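+ /* RCU callback context is allowed to spin on locks */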
+ __free_slab(slab->slab_cache, slab, true);
}
static void free_slab(struct kmem_cache *s, struct slab *slab)
{
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
call_rcu(&slab->rcu_head, rcu_free_slab);
else
- __free_slab(s, slab);
+ __free_slab(s, slab, true);
}
static void discard_slab(struct kmem_cache *s, struct slab *slab)
return object;
}
-static void defer_deactivate_slab(struct slab *slab, void *flush_freelist);
-
/*
* Called only for kmem_cache_debug() caches to allocate from a freshly
* allocated slab. Allocate a single object instead of whole freelist
void *object;
if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
- /* Unlucky, discard newly allocated slab */
- defer_deactivate_slab(slab, NULL);
+ /* Unlucky, discard newly allocated slab. */
+ free_new_slab_nolock(s, slab);
return NULL;
}
if (!spin_trylock_irqsave(&n->list_lock, flags)) {
/* Unlucky, discard newly allocated slab */
- defer_deactivate_slab(slab, NULL);
+ free_new_slab_nolock(s, slab);
return 0;
}
}
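+/* Per-CPU queues of objects whose freeing is deferred to irq_work context */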
struct defer_free {
struct llist_head objects;
- struct llist_head slabs;
struct irq_work work;
};
static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
.objects = LLIST_HEAD_INIT(objects),
- .slabs = LLIST_HEAD_INIT(slabs),
.work = IRQ_WORK_INIT(free_deferred_objects),
};
/*
* In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
- * to take sleeping spin_locks from __slab_free() and deactivate_slab().
+ * to take sleeping spin_locks from __slab_free().
* In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
*/
static void free_deferred_objects(struct irq_work *work)
{
struct defer_free *df = container_of(work, struct defer_free, work);
struct llist_head *objs = &df->objects;
- struct llist_head *slabs = &df->slabs;
struct llist_node *llnode, *pos, *t;
- if (llist_empty(objs) && llist_empty(slabs))
+ if (llist_empty(objs))
return;
llnode = llist_del_all(objs);
__slab_free(s, slab, x, x, 1, _THIS_IP_);
}
-
- llnode = llist_del_all(slabs);
- llist_for_each_safe(pos, t, llnode) {
- struct slab *slab = container_of(pos, struct slab, llnode);
-
- if (slab->frozen)
- deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
- else
- free_slab(slab->slab_cache, slab);
- }
}
static void defer_free(struct kmem_cache *s, void *head)
irq_work_queue(&df->work);
}
-static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
-{
- struct defer_free *df;
-
- slab->flush_freelist = flush_freelist;
-
- guard(preempt)();
-
- df = this_cpu_ptr(&defer_free_objects);
- if (llist_add(&slab->llnode, &df->slabs))
- irq_work_queue(&df->work);
-}
-
void defer_free_barrier(void)
{
int cpu;