	 * MEMCG_DATA_OBJEXTS.
	 */
	OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
-	/* slabobj_ext vector allocated with kmalloc_nolock() */
-	OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
+	__OBJEXTS_FLAG_UNUSED = __FIRST_OBJEXT_FLAG,
	/* the next bit after the last actual flag */
	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};
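With OBJEXTS_NOSPIN_ALLOC gone, the low bits of slab->obj_exts carry only OBJEXTS_ALLOC_FAIL plus the unused placeholder, and readers still strip them with OBJEXTS_FLAGS_MASK before using the vector pointer. A minimal sketch of that decoding, assuming the existing OBJEXTS_FLAGS_MASK definition and the usual mm/slab.h context; the _sketch suffix is only there to avoid confusion with the real helper:

/*
 * Illustrative sketch, not part of the patch: the slabobj_ext vector
 * pointer and the objext flag bits share the slab->obj_exts word, so a
 * reader masks the flags off before dereferencing the pointer.
 */
static inline struct slabobj_ext *slab_obj_exts_sketch(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

	/* Low bits such as OBJEXTS_ALLOC_FAIL are flags, not address bits. */
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}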
			virt_to_slab(vec)->slab_cache == s);
	new_exts = (unsigned long)vec;
-	if (unlikely(!allow_spin))
-		new_exts |= OBJEXTS_NOSPIN_ALLOC;
#ifdef CONFIG_MEMCG
	new_exts |= MEMCG_DATA_OBJEXTS;
#endif
	return 0;
}
-static inline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
{
	struct slabobj_ext *obj_exts;
	 * the extension for obj_exts is expected to be NULL.
	 */
	mark_objexts_empty(obj_exts);
-	if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC))
-		kfree_nolock(obj_exts);
-	else
+	if (allow_spin)
		kfree(obj_exts);
+	else
+		kfree_nolock(obj_exts);
	slab->obj_exts = 0;
}
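For readability, here is a sketch of the whole helper as it reads after this change, assuming the lines elided from the hunk (the slab_obj_exts() lookup and its NULL check) are unchanged. The allocation mode no longer has to be recorded in a flag bit, since the caller now says whether spinning, and therefore a plain kfree(), is allowed:

/* Sketch of the resulting helper; elided context is assumed unchanged. */
static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
{
	struct slabobj_ext *obj_exts;

	obj_exts = slab_obj_exts(slab);
	if (!obj_exts)
		return;

	/* See the comment in the hunk above: the extension for obj_exts
	 * itself is expected to be NULL. */
	mark_objexts_empty(obj_exts);

	if (allow_spin)
		kfree(obj_exts);
	else
		/* Caller is in a context that must not spin on locks. */
		kfree_nolock(obj_exts);

	slab->obj_exts = 0;
}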
	return 0;
}
-static inline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
{
}
}
static __always_inline void unaccount_slab(struct slab *slab, int order,
-					   struct kmem_cache *s)
+					   struct kmem_cache *s, bool allow_spin)
{
	/*
	 * The slab object extensions should now be freed regardless of
	 * whether mem_alloc_profiling_enabled() or not because profiling
	 * might have been disabled after slab->obj_exts got allocated.
	 */
-	free_slab_obj_exts(slab);
+	free_slab_obj_exts(slab, allow_spin);
	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
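Likewise, a consolidated sketch of unaccount_slab() after the change, again assuming the context not shown in the hunk is unchanged; it only forwards allow_spin:

/* Sketch of the resulting function; elided context is assumed unchanged. */
static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s, bool allow_spin)
{
	/*
	 * Free the extensions unconditionally: profiling might have been
	 * disabled after slab->obj_exts was allocated, so the current
	 * mem_alloc_profiling_enabled() state is not a reliable guide here.
	 */
	free_slab_obj_exts(slab, allow_spin);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}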
	page->mapping = NULL;
	__ClearPageSlab(page);
	mm_account_reclaimed_pages(pages);
-	unaccount_slab(slab, order, s);
+	unaccount_slab(slab, order, s, allow_spin);
	if (allow_spin)
		free_frozen_pages(page, order);
	else