*/
int attach_in_progress;
- /* for custom sched domain */
- int relax_domain_level;
-
/* partition root state */
int partition_root_state;
#ifdef CONFIG_CPUSETS_V1
struct fmeter fmeter; /* memory_pressure filter */
+
+ /* for custom sched domain */
+ int relax_domain_level;
#endif
};
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
void cpuset1_init(struct cpuset *cs);
void cpuset1_online_css(struct cgroup_subsys_state *css);
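+/* Defined in cpuset-v1.c; folds a subtree's relax_domain_level into @dattr. */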
+void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ struct cpuset *root_cs);
#else
static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
struct task_struct *tsk) {}
static inline int cpuset1_validate_change(struct cpuset *cur,
			struct cpuset *trial) { return 0; }
static inline void cpuset1_init(struct cpuset *cs) {}
static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
+static inline void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ struct cpuset *root_cs) {}
+
#endif /* CONFIG_CPUSETS_V1 */
#endif /* __CPUSET_INTERNAL_H */
void cpuset1_init(struct cpuset *cs)
{
fmeter_init(&cs->fmeter);
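+	/* -1 = no relax_domain_level request; the system default is used */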
+ cs->relax_domain_level = -1;
}
void cpuset1_online_css(struct cgroup_subsys_state *css)
cpuset_callback_unlock_irq();
}
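+/* Raise @dattr->relax_domain_level to @c's level if @c requests a higher one. */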
+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+ if (dattr->relax_domain_level < c->relax_domain_level)
+ dattr->relax_domain_level = c->relax_domain_level;
+}
+
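+/*
+ * update_domain_attr_tree - propagate the largest relax_domain_level in
+ * @root_cs's subtree into @dattr.
+ *
+ * Walks the subtree in pre-order; a cpuset without any CPUs hides its
+ * whole subtree, and only load-balanced cpusets contribute their level.
+ * Called from generate_sched_domains() when sched domains are rebuilt.
+ */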
+void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ struct cpuset *root_cs)
+{
+ struct cpuset *cp;
+ struct cgroup_subsys_state *pos_css;
+
+ rcu_read_lock();
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ /* skip the whole subtree if @cp doesn't have any CPU */
+ if (cpumask_empty(cp->cpus_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
+
+ if (is_sched_load_balance(cp))
+ update_domain_attr(dattr, cp);
+ }
+ rcu_read_unlock();
+}
+
/*
* for the common functions, 'private' gives the type of file
*/
.flags = BIT(CS_CPU_EXCLUSIVE) |
BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
.partition_root_state = PRS_ROOT,
- .relax_domain_level = -1,
- .remote_partition = false,
};
/*
return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}
-static void
-update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
-{
- if (dattr->relax_domain_level < c->relax_domain_level)
- dattr->relax_domain_level = c->relax_domain_level;
- return;
-}
-
-static void update_domain_attr_tree(struct sched_domain_attr *dattr,
- struct cpuset *root_cs)
-{
- struct cpuset *cp;
- struct cgroup_subsys_state *pos_css;
-
- rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
- /* skip the whole subtree if @cp doesn't have any CPU */
- if (cpumask_empty(cp->cpus_allowed)) {
- pos_css = css_rightmost_descendant(pos_css);
- continue;
- }
-
- if (is_sched_load_balance(cp))
- update_domain_attr(dattr, cp);
- }
- rcu_read_unlock();
-}
-
/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpuset1_init(cs);
- cs->relax_domain_level = -1;
/* Set CS_MEMORY_MIGRATE for default hierarchy */
if (cpuset_v2())