}
/* Allocate and initialize the internal cpumask dependent resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
+static struct parallel_data *padata_alloc_pd(struct padata_shell *ps,
+ int offlining_cpu)
{
struct padata_instance *pinst = ps->pinst;
struct parallel_data *pd;
cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
+ if (offlining_cpu >= 0) {
+ __cpumask_clear_cpu(offlining_cpu, pd->cpumask.pcpu);
+ __cpumask_clear_cpu(offlining_cpu, pd->cpumask.cbcpu);
+ }
padata_init_reorder_list(pd);
padata_init_squeues(pd);
}
/* Replace the internal control structure with a new one. */
-static int padata_replace_one(struct padata_shell *ps)
+static int padata_replace_one(struct padata_shell *ps, int offlining_cpu)
{
struct parallel_data *pd_new;
- pd_new = padata_alloc_pd(ps);
+ pd_new = padata_alloc_pd(ps, offlining_cpu);
if (!pd_new)
return -ENOMEM;
return 0;
}
-static int padata_replace(struct padata_instance *pinst)
+static int padata_replace(struct padata_instance *pinst, int offlining_cpu)
{
struct padata_shell *ps;
int err = 0;
pinst->flags |= PADATA_RESET;
list_for_each_entry(ps, &pinst->pslist, list) {
- err = padata_replace_one(ps);
+ err = padata_replace_one(ps, offlining_cpu);
if (err)
break;
}
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
- const struct cpumask *cpumask)
+ const struct cpumask *cpumask,
+ int offlining_cpu)
{
- if (!cpumask_intersects(cpumask, cpu_online_mask)) {
+ cpumask_copy(pinst->validate_cpumask, cpu_online_mask);
+
+ /*
+ * @offlining_cpu is still in cpu_online_mask, so remove it here for
+ * validation. Using a sub-CPUHP_TEARDOWN_CPU hotplug state where
+ * @offlining_cpu wouldn't be in the online mask doesn't work because
+ * padata_cpu_offline() can fail but such a state doesn't allow failure.
+ */
+ if (offlining_cpu >= 0)
+ __cpumask_clear_cpu(offlining_cpu, pinst->validate_cpumask);
+
+ if (!cpumask_intersects(cpumask, pinst->validate_cpumask)) {
pinst->flags |= PADATA_INVALID;
return false;
}
int valid;
int err;
- valid = padata_validate_cpumask(pinst, pcpumask);
+ valid = padata_validate_cpumask(pinst, pcpumask, -1);
if (!valid) {
__padata_stop(pinst);
goto out_replace;
}
- valid = padata_validate_cpumask(pinst, cbcpumask);
+ valid = padata_validate_cpumask(pinst, cbcpumask, -1);
if (!valid)
__padata_stop(pinst);
cpumask_copy(pinst->cpumask.pcpu, pcpumask);
cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
- err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
+ err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);
if (valid)
__padata_start(pinst);
#ifdef CONFIG_HOTPLUG_CPU
-static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
-{
- int err = padata_replace(pinst);
-
- if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
- padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
- __padata_start(pinst);
-
- return err;
-}
-
-static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
-{
- if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
- !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
- __padata_stop(pinst);
-
- return padata_replace(pinst);
-}
-
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
mutex_lock(&pinst->lock);
- ret = __padata_add_cpu(pinst, cpu);
+
+ ret = padata_replace(pinst, -1);
+
+ if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu, -1) &&
+ padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, -1))
+ __padata_start(pinst);
+
mutex_unlock(&pinst->lock);
return ret;
}
-static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
+static int padata_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
mutex_lock(&pinst->lock);
- ret = __padata_remove_cpu(pinst, cpu);
+
+ if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu, cpu) ||
+ !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, cpu))
+ __padata_stop(pinst);
+
+ ret = padata_replace(pinst, cpu);
+
mutex_unlock(&pinst->lock);
return ret;
}
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
- &pinst->cpu_dead_node);
- cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
+ cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpuhp_node);
#endif
WARN_ON(!list_empty(&pinst->pslist));
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
+ free_cpumask_var(pinst->validate_cpumask);
destroy_workqueue(pinst->serial_wq);
destroy_workqueue(pinst->parallel_wq);
kfree(pinst);
if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
goto err_free_serial_wq;
- if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
- free_cpumask_var(pinst->cpumask.pcpu);
- goto err_free_serial_wq;
- }
+ if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL))
+ goto err_free_p_mask;
+ if (!alloc_cpumask_var(&pinst->validate_cpumask, GFP_KERNEL))
+ goto err_free_cb_mask;
INIT_LIST_HEAD(&pinst->pslist);
cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
if (padata_setup_cpumasks(pinst))
- goto err_free_masks;
+ goto err_free_v_mask;
__padata_start(pinst);
#ifdef CONFIG_HOTPLUG_CPU
cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
- &pinst->cpu_online_node);
- cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
- &pinst->cpu_dead_node);
+ &pinst->cpuhp_node);
#endif
cpus_read_unlock();
return pinst;
-err_free_masks:
- free_cpumask_var(pinst->cpumask.pcpu);
+err_free_v_mask:
+ free_cpumask_var(pinst->validate_cpumask);
+err_free_cb_mask:
free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_p_mask:
+ free_cpumask_var(pinst->cpumask.pcpu);
err_free_serial_wq:
destroy_workqueue(pinst->serial_wq);
err_put_cpus:
ps->pinst = pinst;
cpus_read_lock();
- pd = padata_alloc_pd(ps);
+ pd = padata_alloc_pd(ps, -1);
cpus_read_unlock();
if (!pd)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
- padata_cpu_online, NULL);
+ padata_cpu_online, padata_cpu_offline);
if (ret < 0)
goto err;
hp_online = ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
- NULL, padata_cpu_dead);
- if (ret < 0)
- goto remove_online_state;
#endif
possible_cpus = num_possible_cpus();
padata_works = kmalloc_objs(struct padata_work, possible_cpus);
if (!padata_works)
- goto remove_dead_state;
+ goto remove_online_state;
for (i = 0; i < possible_cpus; ++i)
list_add(&padata_works[i].pw_list, &padata_free_works);
return;
-remove_dead_state:
-#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
+#ifdef CONFIG_HOTPLUG_CPU
cpuhp_remove_multi_state(hp_online);
err:
#endif