 	rq_unlock_irqrestore(rq, &rf);
 }
 
-void send_call_function_single_ipi(int cpu)
+/*
+ * Prepare the scene for sending an IPI for a remote smp_call
+ *
+ * Returns true if the caller can proceed with sending the IPI.
+ * Returns false otherwise.
+ */
+bool call_function_single_prep_ipi(int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
-
-	if (!set_nr_if_polling(rq->idle)) {
-		trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL);
-		arch_send_call_function_single_ipi(cpu);
-	} else {
+	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
 		trace_sched_wake_idle_without_ipi(cpu);
+		return false;
 	}
+
+	return true;
 }
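
Why the IPI can be skipped here: an idle CPU spinning with TIF_POLLING_NRFLAG set is watching its thread flags, so setting TIF_NEED_RESCHED is enough to get it moving and the interrupt can be elided. Below is a minimal userspace sketch of the same "flag the poller, else interrupt" pattern using C11 atomics; it is an illustrative analogy only, and POLLING/NEED_WORK are made-up stand-ins for the thread_info flags, not kernel names:

	/* Illustrative userspace analogy only; not kernel code. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define POLLING   0x1	/* stand-in for _TIF_POLLING_NRFLAG */
	#define NEED_WORK 0x2	/* stand-in for _TIF_NEED_RESCHED */

	/*
	 * Mirrors the set_nr_if_polling() idea: flag a polling target
	 * instead of interrupting it. Returns true if no interrupt is
	 * needed (flag newly set, or already set and bound to be seen).
	 */
	static bool set_flag_if_polling(atomic_int *flags)
	{
		int old = atomic_load(flags);

		for (;;) {
			if (!(old & POLLING))
				return false;	/* not polling: caller must interrupt */
			if (old & NEED_WORK)
				return true;	/* already flagged: poller will see it */
			if (atomic_compare_exchange_weak(flags, &old, old | NEED_WORK))
				return true;
		}
	}

	int main(void)
	{
		atomic_int flags = POLLING;

		if (set_flag_if_polling(&flags))
			printf("no IPI needed, target will notice the flag\n");
		else
			printf("send the IPI\n");
		return 0;
	}
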
 }
 
 static __always_inline void
-send_call_function_ipi_mask(struct cpumask *mask)
+send_call_function_single_ipi(int cpu, smp_call_func_t func)
 {
-	trace_ipi_send_cpumask(mask, _RET_IP_, NULL);
+	if (call_function_single_prep_ipi(cpu)) {
+		trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, func);
+		arch_send_call_function_single_ipi(cpu);
+	}
+}
+
+static __always_inline void
+send_call_function_ipi_mask(struct cpumask *mask, smp_call_func_t func)
+{
+	trace_ipi_send_cpumask(mask, _RET_IP_, func);
 	arch_send_call_function_ipi_mask(mask);
 }
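
With func plumbed down into both helpers, the ipi_send_cpumask tracepoint can attribute each IPI to the callback it is being sent for. Below is a sketch of a probe attached to that tracepoint; it assumes the TP_PROTO (cpumask, callsite, callback) from include/trace/events/ipi.h and that the tracepoint is visible/exported to the probe's context, so treat the module boilerplate as illustrative:

	/* Sketch of a tracepoint probe; assumes the ipi_send_cpumask
	 * tracepoint is reachable from this context. */
	#include <linux/module.h>
	#include <linux/tracepoint.h>
	#include <trace/events/ipi.h>

	/* Probes take a void *data cookie followed by the TP_PROTO args. */
	static void probe_ipi_send(void *data, const struct cpumask *mask,
				   unsigned long callsite, void *callback)
	{
		pr_info("IPI to %*pbl from %pS for callback %pS\n",
			cpumask_pr_args(mask), (void *)callsite, callback);
	}

	static int __init ipi_probe_init(void)
	{
		return register_trace_ipi_send_cpumask(probe_ipi_send, NULL);
	}

	static void __exit ipi_probe_exit(void)
	{
		unregister_trace_ipi_send_cpumask(probe_ipi_send, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(ipi_probe_init);
	module_exit(ipi_probe_exit);
	MODULE_LICENSE("GPL");
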
 	smp_store_release(&csd->node.u_flags, 0);
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
-
-void __smp_call_single_queue(int cpu, struct llist_node *node)
+static __always_inline void
+raw_smp_call_single_queue(int cpu, struct llist_node *node, smp_call_func_t func)
 {
 	/*
 	 * The list addition should be visible to the target CPU when it pops
 	 * the head of the list to pull the entry off it in the IPI handler
 	 * because of normal cache coherency rules implied by the underlying
 	 * llist ops.
 	 *
 	 * If IPIs can go out of order to the cache coherency protocol
 	 * in an architecture, sufficient synchronisation should be added
 	 * to arch code to make it appear to obey cache coherency WRT
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
 	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
-		send_call_function_single_ipi(cpu);
+		send_call_function_single_ipi(cpu, func);
+}
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
+
+void __smp_call_single_queue(int cpu, struct llist_node *node)
+{
+	/*
+	 * We have to check the type of the CSD before queueing it, because
+	 * once queued it can have its flags cleared by
+	 *   flush_smp_call_function_queue()
+	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
+	 * executes migration_cpu_stop() on the remote CPU).
+	 */
+	if (trace_ipi_send_cpumask_enabled()) {
+		call_single_data_t *csd;
+		smp_call_func_t func;
+
+		csd = container_of(node, call_single_data_t, node.llist);
+		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
+		       sched_ttwu_pending : csd->func;
+
+		raw_smp_call_single_queue(cpu, node, func);
+	} else {
+		raw_smp_call_single_queue(cpu, node, NULL);
+	}
 }
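
The comment above encodes an ownership rule: llist_add() publishes the CSD, and from that instant the remote CPU may run flush_smp_call_function_queue() and reuse it, which is why CSD_TYPE()/csd->func are sampled before queueing. Below is a self-contained userspace analogy of the same rule, a lock-free (Treiber) stack push that snapshots a field before publication; all names here are invented for illustration:

	/* Illustrative userspace analogy only; not kernel code. */
	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *next;
		void (*func)(void);	/* consumer may run and recycle this node */
	};

	static _Atomic(struct node *) head;

	/*
	 * Push onto a lock-free stack. The moment the exchange succeeds,
	 * a concurrent popper owns 'n' and may reuse it, so any field
	 * needed afterwards (here func, for logging) must be read before
	 * publication, just as the kernel reads CSD_TYPE()/csd->func
	 * before llist_add().
	 */
	static void push_and_log(struct node *n)
	{
		void (*func)(void) = n->func;	/* snapshot BEFORE publishing */
		struct node *old = atomic_load(&head);

		do {
			n->next = old;
		} while (!atomic_compare_exchange_weak(&head, &old, n));

		/* 'n' may be gone already; use only the snapshot. */
		printf("queued node with callback %p\n", (void *)func);
	}

	static void work(void) { }

	int main(void)
	{
		struct node n = { .next = NULL, .func = work };

		push_and_log(&n);
		return 0;
	}
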
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
 		 * number of CPUs might be zero due to concurrent changes to the
 		 * provided mask.
 		 */
 		if (nr_cpus == 1)
-			send_call_function_single_ipi(last_cpu);
+			send_call_function_single_ipi(last_cpu, func);
 		else if (likely(nr_cpus > 1))
-			send_call_function_ipi_mask(cfd->cpumask_ipi);
+			send_call_function_ipi_mask(cfd->cpumask_ipi, func);
 	}
 
 	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
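
For context, these are the internals behind the public cross-call API: a single-target call ends up in send_call_function_single_ipi(), while a multi-target call may batch into send_call_function_ipi_mask(). A hedged sketch of kernel code exercising both paths (CPU 1 is assumed online; error handling and hotplug protection omitted):

	/* Sketch: exercising both IPI paths from kernel code. */
	#include <linux/smp.h>
	#include <linux/printk.h>

	static void remote_fn(void *info)
	{
		pr_info("smp_call ran on CPU %d\n", smp_processor_id());
	}

	static void demo(void)
	{
		/* Single target: reaches send_call_function_single_ipi(). */
		smp_call_function_single(1, remote_fn, NULL, 1);

		/*
		 * Many targets: may batch into send_call_function_ipi_mask().
		 * smp_call_function_many() must run with preemption disabled.
		 */
		preempt_disable();
		smp_call_function_many(cpu_online_mask, remote_fn, NULL, true);
		preempt_enable();
	}
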