struct rcu_head rcu_head;
struct list_head barn_list;
/* only used for prefilled sheafs */
- unsigned int capacity;
+ struct {
+ unsigned int capacity;
+ bool pfmemalloc;
+ };
};
struct kmem_cache *cache;
unsigned int size;
if (!sheaf)
return NULL;
- if (refill_sheaf(s, sheaf, gfp)) {
+ if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
free_empty_sheaf(s, sheaf);
return NULL;
}
sheaf->size = 0;
}
-static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
+static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
struct slab_sheaf *sheaf)
{
bool init = slab_want_init_on_free(s);
void **p = &sheaf->objects[0];
unsigned int i = 0;
+ bool pfmemalloc = false;
while (i < sheaf->size) {
struct slab *slab = virt_to_slab(p[i]);
continue;
}
+ if (slab_test_pfmemalloc(slab))
+ pfmemalloc = true;
+
i++;
}
+
+ return pfmemalloc;
}
static void rcu_free_sheaf_nobarn(struct rcu_head *head)
return NULL;
if (empty) {
- if (!refill_sheaf(s, empty, gfp)) {
+ if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
full = empty;
} else {
/*
}
EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
+/*
+ * Refill @sheaf while avoiding objects from pfmemalloc slabs, unless the
+ * allocation context is itself entitled to pfmemalloc reserves.
+ *
+ * Returns 0 on success, or the error from the last refill_sheaf() attempt.
+ *
+ * NOTE(review): on a failed second refill the sheaf is still marked
+ * pfmemalloc; this looks harmless because the caller flushes and frees the
+ * sheaf on failure — confirm all callers do so.
+ */
+static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
+ struct slab_sheaf *sheaf, gfp_t gfp)
+{
+ int ret = 0;
+
+ /* First attempt: forbid dipping into pfmemalloc reserves. */
+ ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+
+ if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
+ return ret;
+
+ /*
+ * If we are allowed to, refill the sheaf with pfmemalloc objects, but
+ * then remember that so the objects are flushed when it's returned.
+ */
+ ret = refill_sheaf(s, sheaf, gfp);
+ sheaf->pfmemalloc = true;
+
+ return ret;
+}
+
/*
* returns a sheaf that has at least the requested size
* when prefilling is needed, do so with given gfp flags
sheaf->cache = s;
sheaf->capacity = size;
+ /*
+ * we do not need to care about pfmemalloc here because oversize
+ * sheaves are always flushed and freed when returned
+ */
if (!__kmem_cache_alloc_bulk(s, gfp, size,
&sheaf->objects[0])) {
kfree(sheaf);
if (!sheaf)
sheaf = alloc_empty_sheaf(s, gfp);
- if (sheaf && sheaf->size < size) {
- if (refill_sheaf(s, sheaf, gfp)) {
+ if (sheaf) {
+ sheaf->capacity = s->sheaf_capacity;
+ sheaf->pfmemalloc = false;
+
+ if (sheaf->size < size &&
+ __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
sheaf_flush_unused(s, sheaf);
free_empty_sheaf(s, sheaf);
sheaf = NULL;
}
}
- if (sheaf)
- sheaf->capacity = s->sheaf_capacity;
-
return sheaf;
}
struct slub_percpu_sheaves *pcs;
struct node_barn *barn;
- if (unlikely(sheaf->capacity != s->sheaf_capacity)) {
+ if (unlikely((sheaf->capacity != s->sheaf_capacity)
+ || sheaf->pfmemalloc)) {
sheaf_flush_unused(s, sheaf);
kfree(sheaf);
return;
if (likely(sheaf->capacity >= size)) {
if (likely(sheaf->capacity == s->sheaf_capacity))
- return refill_sheaf(s, sheaf, gfp);
+ return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
&sheaf->objects[sheaf->size])) {
* handles it fine. The only downside is that sheaf will serve fewer
* allocations when reused. It only happens due to debugging, which is a
* performance hit anyway.
+ *
+ * If it returns true, there was at least one object from a pfmemalloc
+ * slab, so simply flush everything.
*/
- __rcu_free_sheaf_prepare(s, sheaf);
+ if (__rcu_free_sheaf_prepare(s, sheaf))
+ goto flush;
n = get_node(s, sheaf->node);
if (!n)
continue;
}
- if (unlikely(IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)) {
+ if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
+ || slab_test_pfmemalloc(slab))) {
remote_objects[remote_nr] = p[i];
p[i] = p[--size];
if (++remote_nr >= PCS_BATCH_MAX)
return;
if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) ||
- slab_nid(slab) == numa_mem_id())) {
+ slab_nid(slab) == numa_mem_id())
+ && likely(!slab_test_pfmemalloc(slab))) {
if (likely(free_to_pcs(s, object)))
return;
}