*/
static inline struct node_barn *get_barn(struct kmem_cache *s)
{
- return get_barn_node(s, numa_mem_id());
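+ /*
+ * Barns exist for all online nodes (slab_barn_nodes), so the CPU's
+ * own node can be used directly even if it is memoryless.
+ */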
+ return get_barn_node(s, numa_node_id());
}
/*
*/
static nodemask_t slab_nodes;
+/*
+ * Similar to slab_nodes, but tracks the nodes on which node_barn structures
+ * have been allocated. Corresponds to N_ONLINE nodes.
+ */
+static nodemask_t slab_barn_nodes;
+
/*
* Workqueue used for flushing cpu and kfree_rcu sheaves.
*/
rcu_barrier();
}
+static int slub_cpu_setup(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ struct kmem_cache *s;
+ int ret = 0;
+
+ /*
+ * We never clear a nid from the mask, so a quick lockless check is
+ * safe here; recheck under the mutex to handle parallel cpu hotplug.
+ */
+ if (node_isset(nid, slab_barn_nodes))
+ return 0;
+
+ mutex_lock(&slab_mutex);
+
+ if (node_isset(nid, slab_barn_nodes))
+ goto out;
+
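+ /* Allocate a barn on this node for every cache that uses sheaves */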
+ list_for_each_entry(s, &slab_caches, list) {
+ struct node_barn *barn;
+
+ /*
+ * The barn might already exist if a previous invocation of this
+ * callback failed midway through the cache list.
+ */
+ if (!cache_has_sheaves(s) || get_barn_node(s, nid))
+ continue;
+
+ barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
+ if (!barn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ barn_init(barn);
+ s->per_node[nid].barn = barn;
+ }
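+ /* All sheaf-enabled caches now have a barn on this node */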
+ node_set(nid, slab_barn_nodes);
+
+out:
+ mutex_unlock(&slab_mutex);
+
+ return ret;
+}
+
/*
* Use the cpu notifier to insure that the cpu slabs are flushed when
* necessary.
rcu_sheaf = NULL;
} else {
pcs->rcu_free = NULL;
- rcu_sheaf->node = numa_mem_id();
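+ /* The CPU's own node has a barn even when it is memoryless */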
+ rcu_sheaf->node = numa_node_id();
}
/*
if (slab_state == DOWN || !cache_has_sheaves(s))
return 1;
- for_each_node_mask(node, slab_nodes) {
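+ /* Walk the barn mask so memoryless online nodes get a barn too */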
+ for_each_node_mask(node, slab_barn_nodes) {
struct node_barn *barn;
barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
* and barn initialized for the new node.
*/
node_set(nid, slab_nodes);
+ node_set(nid, slab_barn_nodes);
out:
mutex_unlock(&slab_mutex);
return ret;
if (!capacity)
return;
- for_each_node_mask(node, slab_nodes) {
+ for_each_node_mask(node, slab_barn_nodes) {
struct node_barn *barn;
barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
for_each_node_state(node, N_MEMORY)
node_set(node, slab_nodes);
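+ /* Barns are tracked for all online nodes, memoryless ones included */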
+ for_each_online_node(node)
+ node_set(node, slab_barn_nodes);
+
create_boot_cache(kmem_cache_node, "kmem_cache_node",
sizeof(struct kmem_cache_node),
SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
/* Setup random freelists for each cache */
init_freelist_randomization();
- cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
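+ /* Allocate barns when a CPU comes up on a node without them */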
+ cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", slub_cpu_setup,
slub_cpu_dead);
pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",