static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif
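+/* Where a slab is placed on the node partial list */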
+enum add_mode {
+	ADD_TO_HEAD,	/* Add slab to the head of the partial list */
+	ADD_TO_TAIL,	/* Add slab to the tail of the partial list */
+};
+
enum stat_item {
ALLOC_PCS, /* Allocation from percpu sheaf */
ALLOC_FASTPATH, /* Allocation from cpu slab */
CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
DEACTIVATE_BYPASS, /* Implicit deactivation */
ORDER_FALLBACK, /* Number of times fallback was necessary */
* Management of partially allocated slabs.
*/
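+/*
+ * Like add_partial(), but without asserting n->list_lock; for callers
+ * where no concurrent access is possible yet (early node init).
+ */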
static inline void
-__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
{
n->nr_partial++;
- if (tail == DEACTIVATE_TO_TAIL)
+ if (mode == ADD_TO_TAIL)
list_add_tail(&slab->slab_list, &n->partial);
else
list_add(&slab->slab_list, &n->partial);
}
static inline void add_partial(struct kmem_cache_node *n,
- struct slab *slab, int tail)
+ struct slab *slab, enum add_mode mode)
{
lockdep_assert_held(&n->list_lock);
- __add_partial(n, slab, tail);
+ __add_partial(n, slab, mode);
}
static inline void remove_partial(struct kmem_cache_node *n,
if (slab->inuse == slab->objects)
add_full(s, n, slab);
else
- add_partial(n, slab, DEACTIVATE_TO_HEAD);
+ add_partial(n, slab, ADD_TO_HEAD);
inc_slabs_node(s, nid, slab->objects);
spin_unlock_irqrestore(&n->list_lock, flags);
n = get_node(s, slab_nid(slab));
spin_lock_irqsave(&n->list_lock, flags);
}
- add_partial(n, slab, DEACTIVATE_TO_HEAD);
+ add_partial(n, slab, ADD_TO_HEAD);
spin_unlock_irqrestore(&n->list_lock, flags);
}
/* was on full list */
remove_full(s, n, slab);
if (!slab_free) {
- add_partial(n, slab, DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, ADD_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
} else if (slab_free) {
* then add it.
*/
if (unlikely(was_full)) {
- add_partial(n, slab, DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, ADD_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
continue;
list_del(&slab->slab_list);
- add_partial(n, slab, DEACTIVATE_TO_HEAD);
+ add_partial(n, slab, ADD_TO_HEAD);
}
spin_unlock_irqrestore(&n->list_lock, flags);
* No locks need to be taken here as it has just been
* initialized and there is no concurrent access.
*/
- __add_partial(n, slab, DEACTIVATE_TO_HEAD);
+ __add_partial(n, slab, ADD_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
-STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
-STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
&cpuslab_flush_attr.attr,
&deactivate_full_attr.attr,
&deactivate_empty_attr.attr,
- &deactivate_to_head_attr.attr,
- &deactivate_to_tail_attr.attr,
&deactivate_remote_frees_attr.attr,
&deactivate_bypass_attr.attr,
&order_fallback_attr.attr,