static bool get_partial_node_bulk(struct kmem_cache *s,
                                  struct kmem_cache_node *n,
-                                 struct partial_bulk_context *pc)
+                                 struct partial_bulk_context *pc,
+                                 bool allow_spin)
{
        struct slab *slab, *slab2;
        unsigned int total_free = 0;
        INIT_LIST_HEAD(&pc->slabs);
-       spin_lock_irqsave(&n->list_lock, flags);
+       if (allow_spin)
+               spin_lock_irqsave(&n->list_lock, flags);
+       else if (!spin_trylock_irqsave(&n->list_lock, flags))
+               return false;
        list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
                struct freelist_counters flc;
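The new parameter only changes the locking policy: with allow_spin set, the function blocks on the node's list_lock exactly as before, while the opportunistic path uses spin_trylock_irqsave(), which takes the lock only if it is immediately available and otherwise bails out with false instead of waiting. A minimal userspace sketch of the same take-it-or-leave-it pattern, offered as an analogy rather than kernel code (the mutex and lock_partial_list() are invented names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for the per-node list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Mirrors the allow_spin branch above: block when spinning is allowed,
 * otherwise take the lock only if nobody else holds it right now.
 */
static bool lock_partial_list(bool allow_spin)
{
        if (allow_spin) {
                pthread_mutex_lock(&list_lock);    /* willing to wait */
                return true;
        }
        /* opportunistic path: report failure instead of blocking */
        return pthread_mutex_trylock(&list_lock) == 0;
}

int main(void)
{
        if (lock_partial_list(false)) {
                printf("lock taken without waiting\n");
                pthread_mutex_unlock(&list_lock);
        } else {
                printf("contended, caller gets nothing from this node\n");
        }
        return 0;
}

As in the hunk above, a false return simply means the caller walks away empty-handed from this node's partial list rather than stalling on a busy lock.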
static unsigned int
__refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
-                     unsigned int max, struct kmem_cache_node *n)
+                     unsigned int max, struct kmem_cache_node *n,
+                     bool allow_spin)
{
        struct partial_bulk_context pc;
        struct slab *slab, *slab2;
        pc.min_objects = min;
        pc.max_objects = max;
-       if (!get_partial_node_bulk(s, n, &pc))
+       if (!get_partial_node_bulk(s, n, &pc, allow_spin))
                return 0;
        list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
                    n->nr_partial <= s->min_partial)
                        continue;
-               r = __refill_objects_node(s, p, gfp, min, max, n);
+               r = __refill_objects_node(s, p, gfp, min, max, n,
+                                         /* allow_spin = */ false);
                refilled += r;
                if (r >= min) {
                        return 0;
        refilled = __refill_objects_node(s, p, gfp, min, max,
-                                        get_node(s, local_node));
+                                        get_node(s, local_node),
+                                        /* allow_spin = */ true);
        if (refilled >= min)
                return refilled;
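Taken together, the call sites above suggest the policy behind the new flag: the local node is refilled with allow_spin = true, where blocking on its list_lock is considered worthwhile, while other nodes' partial lists are only raided opportunistically with allow_spin = false, so a contended remote node is skipped instead of waited on. A self-contained userspace model of that policy follows; the ordering, the struct fields, and the refill_from_node() helper are invented for illustration and are not the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

struct fake_node {
        unsigned int nr_partial;   /* objects we could take from this node */
        bool contended;            /* its list_lock is currently held */
};

/*
 * Loosely models get_partial_node_bulk(): without allow_spin, a contended
 * node contributes nothing rather than making the caller wait.
 */
static unsigned int refill_from_node(struct fake_node *n, unsigned int want,
                                     bool allow_spin)
{
        if (n->contended && !allow_spin)
                return 0;
        return n->nr_partial < want ? n->nr_partial : want;
}

int main(void)
{
        struct fake_node nodes[NR_NODES] = {
                { .nr_partial = 2, .contended = false },
                { .nr_partial = 5, .contended = true  },
                { .nr_partial = 0, .contended = false },
                { .nr_partial = 7, .contended = false },
        };
        const int local_node = 0;
        const unsigned int min = 4, max = 8;
        unsigned int refilled;

        /* local node: worth blocking for, so allow_spin = true */
        refilled = refill_from_node(&nodes[local_node], max, true);

        /* remote nodes: opportunistic only, allow_spin = false */
        for (int nid = 0; nid < NR_NODES && refilled < min; nid++) {
                if (nid == local_node)
                        continue;
                refilled += refill_from_node(&nodes[nid], max - refilled, false);
        }

        printf("refilled %u object(s), wanted at least %u\n", refilled, min);
        return 0;
}

The appeal of the trylock path, presumably, is bounded latency: a refill that cannot make immediate progress on a remote node moves on to other sources rather than serializing behind a busy list_lock.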