return flush;
}
+static void schedule_drain_work(int cpu, struct work_struct *work)
+{
+ /*
+ * Protect the housekeeping cpumask read and the work enqueue together
+ * in the same RCU critical section so that a later cpuset isolated
+ * partition update only needs to wait for an RCU grace period and then
+ * flush the pending work on newly isolated CPUs.
+ */
+ guard(rcu)();
+ if (!cpu_is_isolated(cpu))
+ schedule_work_on(cpu, work);
+}
+
/*
* Drains all per-CPU charge caches for given root_memcg resp. subtree
* of the hierarchy under it.
!test_and_set_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags)) {
if (cpu == curcpu)
drain_local_memcg_stock(&memcg_st->work);
- else if (!cpu_is_isolated(cpu))
- schedule_work_on(cpu, &memcg_st->work);
+ else
+ schedule_drain_work(cpu, &memcg_st->work);
}
if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
!test_and_set_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags)) {
if (cpu == curcpu)
drain_local_obj_stock(&obj_st->work);
- else if (!cpu_is_isolated(cpu))
- schedule_work_on(cpu, &obj_st->work);
+ else
+ schedule_drain_work(cpu, &obj_st->work);
}
}
migrate_enable();
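
The comment in schedule_drain_work() above describes the contract this RCU section provides to cpuset: once a CPU has been moved into an isolated partition, one RCU grace period is enough to guarantee that every concurrent schedule_drain_work() has either observed the updated housekeeping mask (and skipped the CPU) or already queued its work, after which the pending work can be flushed. The sketch below illustrates that consumer side; it is not part of this patch, and the helper name, the stock_pcp stand-in struct, and the per-CPU variable declarations are assumptions for illustration only.

/*
 * Hypothetical consumer-side sketch, not part of this patch.  The
 * struct and per-CPU variable names are stand-ins for the real
 * memcg/obj stock definitions in mm/memcontrol.c.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct stock_pcp {
	struct work_struct work;
	/* ... cached charge state ... */
};

static DEFINE_PER_CPU(struct stock_pcp, memcg_stock);
static DEFINE_PER_CPU(struct stock_pcp, obj_stock);

/* Called after @newly_isolated CPUs were added to an isolated partition. */
static void flush_stock_work_on_isolated(const struct cpumask *newly_isolated)
{
	int cpu;

	/*
	 * The housekeeping/isolated cpumask has already been updated.
	 * After one RCU grace period, every concurrent
	 * schedule_drain_work() has either observed the new mask (and
	 * skipped these CPUs) or already finished queueing its work, so
	 * the flush below cannot race with a late enqueue.
	 */
	synchronize_rcu();

	/* Flush drain work that was queued before the mask update. */
	for_each_cpu(cpu, newly_isolated) {
		flush_work(&per_cpu(memcg_stock, cpu).work);
		flush_work(&per_cpu(obj_stock, cpu).work);
	}
}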