}
static unsigned int
-__refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
- unsigned int max);
+refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
+ unsigned int max);
static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
gfp_t gfp)
if (!to_fill)
return 0;
- filled = __refill_objects(s, &sheaf->objects[sheaf->size], gfp,
- to_fill, to_fill);
+ filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill,
+ to_fill);
sheaf->size += filled;
EXPORT_SYMBOL(kmem_cache_free_bulk);
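+/*
+ * Refill the array @p with at least @min and at most @max objects from the
+ * partial slabs of node @n. Returns the number of objects refilled, which
+ * may be zero or less than @min.
+ */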
static unsigned int
-__refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
- unsigned int max)
+__refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
+ unsigned int max, struct kmem_cache_node *n)
{
struct partial_bulk_context pc;
struct slab *slab, *slab2;
unsigned int refilled = 0;
unsigned long flags;
void *object;
- int node;
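+	/* bulk request parameters consumed by get_partial_node_bulk() */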
pc.flags = gfp;
pc.min_objects = min;
pc.max_objects = max;
- node = numa_mem_id();
-
- if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
+ if (!get_partial_node_bulk(s, n, &pc))
return 0;
- /* TODO: consider also other nodes? */
- if (!get_partial_node_bulk(s, get_node(s, node), &pc))
- goto new_slab;
-
list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
list_del(&slab->slab_list);
}
if (unlikely(!list_empty(&pc.slabs))) {
- struct kmem_cache_node *n = get_node(s, node);
-
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
}
}
-	if (likely(refilled >= min))
-		goto out;
+	return refilled;
+}
+
+#ifdef CONFIG_NUMA
+static unsigned int
+__refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
+ unsigned int max)
+{
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type highest_zoneidx = gfp_zone(gfp);
+ unsigned int cpuset_mems_cookie;
+ unsigned int refilled = 0;
+
+	/* see get_any_partial() for the defrag ratio description */
+ if (!s->remote_node_defrag_ratio ||
+ get_cycles() % 1024 > s->remote_node_defrag_ratio)
+ return 0;
+
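+	/*
+	 * Walk the zonelist for the current mempolicy and retry the whole
+	 * scan if the cpuset's mems_allowed changes underneath us.
+	 */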
+ do {
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist = node_zonelist(mempolicy_slab_node(), gfp);
+ for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
+ struct kmem_cache_node *n;
+ unsigned int r;
+
+ n = get_node(s, zone_to_nid(zone));
+
+ if (!n || !cpuset_zone_allowed(zone, gfp) ||
+ n->nr_partial <= s->min_partial)
+ continue;
+
+ r = __refill_objects_node(s, p, gfp, min, max, n);
+ refilled += r;
+
+ if (r >= min) {
+ /*
+ * Don't check read_mems_allowed_retry() here -
+ * if mems_allowed was updated in parallel, that
+ * was a harmless race between allocation and
+ * the cpuset update
+ */
+ return refilled;
+ }
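+			/* partial refill - advance p and reduce the targets */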
+ p += r;
+ min -= r;
+ max -= r;
+ }
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
+
+ return refilled;
+}
+#else
+static inline unsigned int
+__refill_objects_any(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
+ unsigned int max)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA */
+
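+/*
+ * Refill the array @p with at least @min and at most @max objects. Try the
+ * local node's partial slabs first, then other nodes' partial slabs, and
+ * finally allocate new slabs. Returns the number of objects refilled; this
+ * is only less than @min if a new slab cannot be allocated or the gfp flags
+ * do not allow spinning.
+ */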
+static unsigned int
+refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
+ unsigned int max)
+{
+ int local_node = numa_mem_id();
+ unsigned int refilled;
+ struct slab *slab;
+
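+	/*
+	 * Refilling from partial slabs takes the node's list_lock, so the
+	 * caller's gfp flags must allow spinning.
+	 */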
+ if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
+ return 0;
+
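+	/* try the local node's partial slabs first */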
+ refilled = __refill_objects_node(s, p, gfp, min, max,
+ get_node(s, local_node));
+ if (refilled >= min)
+ return refilled;
+
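+	/* spill over to other nodes' partial slabs */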
+ refilled += __refill_objects_any(s, p + refilled, gfp, min - refilled,
+ max - refilled);
+ if (refilled >= min)
+ return refilled;
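+	/* still short of min - allocate new slabs on the local node */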
new_slab:
- slab = new_slab(s, pc.flags, node);
+ slab = new_slab(s, gfp, local_node);
if (!slab)
goto out;
if (refilled < min)
goto new_slab;
out:
return refilled;
}
{
int i;
-	/*
-	 * TODO: this might be more efficient (if necessary) by reusing
-	 * __refill_objects()
-	 */
-	for (i = 0; i < size; i++) {
-		p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
-				     s->object_size);
-		if (unlikely(!p[i]))
-			goto error;
-		maybe_wipe_obj_freeptr(s, p[i]);
+	/*
+	 * SLUB_TINY and debug caches have no bulk refill fast path, so
+	 * allocate their objects one by one via the slowpath.
+	 */
+	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
+		for (i = 0; i < size; i++) {
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_,
+					     s->object_size);
+			if (unlikely(!p[i]))
+				goto error;
+			maybe_wipe_obj_freeptr(s, p[i]);
+		}
+	} else {
+		i = refill_objects(s, p, flags, size, size);
+		if (i < size)
+			goto error;
}
return i;
}
-/* Note that interrupts must be enabled when calling this function. */
+/*
+ * Note that interrupts must be enabled when calling this function and gfp
+ * flags must allow spinning.
+ */
int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{