static struct list_head remote_children;
/*
- * A flag to force sched domain rebuild at the end of an operation while
- * inhibiting it in the intermediate stages when set. Currently it is only
- * set in hotplug code.
+ * A flag to force a sched domain rebuild at the end of an operation.
+ * It can be set in:
+ * - update_partition_sd_lb()
+ * - remote_partition_check()
+ * - update_cpumasks_hier()
+ * - cpuset_update_flag()
+ * - cpuset_hotplug_update_tasks()
+ * - cpuset_handle_hotplug()
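+ *
+ * When set, the flag is consumed at the end of the operation: the caller
+ * checks it and, if set, calls rebuild_sched_domains_locked(), which
+ * clears the flag before rebuilding the sched domains.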
+ *
+ * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
+ *
+ * Note that update_relax_domain_level() in cpuset-v1.c can still call
+ * rebuild_sched_domains_locked() directly without using this flag.
*/
static bool force_sd_rebuild;
lockdep_assert_cpus_held();
lockdep_assert_held(&cpuset_mutex);
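+ /* a rebuild is being done now; clear any deferred rebuild request */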
+ force_sd_rebuild = false;
/*
* If we have raced with CPU hotplug, return early to avoid
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
- if (rebuild_domains && !force_sd_rebuild)
- rebuild_sched_domains_locked();
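+ /* defer the sched domain rebuild to the end of the enclosing operation */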
+ if (rebuild_domains)
+ cpuset_force_rebuild();
}
/*
remote_partition_disable(child, tmp);
disable_cnt++;
}
- if (disable_cnt && !force_sd_rebuild)
- rebuild_sched_domains_locked();
+ if (disable_cnt)
+ cpuset_force_rebuild();
}
/*
}
rcu_read_unlock();
- if (need_rebuild_sched_domains && !force_sd_rebuild)
- rebuild_sched_domains_locked();
+ if (need_rebuild_sched_domains)
+ cpuset_force_rebuild();
}
/**
cs->flags = trialcs->flags;
spin_unlock_irq(&callback_lock);
- if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed &&
- !force_sd_rebuild)
- rebuild_sched_domains_locked();
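+ /* default hierarchy: defer the rebuild; legacy (v1) hierarchy: rebuild now */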
+ if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
+ if (!IS_ENABLED(CONFIG_CPUSETS_V1) ||
+ cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+ cpuset_force_rebuild();
+ else
+ rebuild_sched_domains_locked();
+ }
if (spread_flag_changed)
cpuset1_update_tasks_flags(cs);
update_partition_sd_lb(cs, old_prs);
notify_partition_change(cs, old_prs);
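+ /* flush any sched domain rebuild deferred during this update */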
+ if (force_sd_rebuild)
+ rebuild_sched_domains_locked();
free_cpumasks(NULL, &tmpmask);
return 0;
}
}
free_cpuset(trialcs);
+ if (force_sd_rebuild)
+ rebuild_sched_domains_locked();
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
rcu_read_unlock();
}
- /* rebuild sched domains if cpus_allowed has changed */
- if (force_sd_rebuild) {
- force_sd_rebuild = false;
+ /* rebuild sched domains if a deferred rebuild was requested */
+ if (force_sd_rebuild)
rebuild_sched_domains_cpuslocked();
- }
free_cpumasks(NULL, ptmp);
}