TP_PROTO(struct task_struct *tsk, int cpu, int tif),
TP_ARGS(tsk, cpu, tif));
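+/*
+ * Type of the dl entity handed to the sched_dl_* tracepoints below:
+ * a plain SCHED_DEADLINE task, the rq's fair server, the rq's
+ * sched_ext server, or anything else.
+ */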
+#define DL_OTHER 0
+#define DL_TASK 1
+#define DL_SERVER_FAIR 2
+#define DL_SERVER_EXT 3
+
+DECLARE_TRACE(sched_dl_throttle,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_replenish,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+/* A call to update_curr_dl_se() that resulted in neither a throttle nor a replenish */
+DECLARE_TRACE(sched_dl_update,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_start,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_stop,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
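+
+/*
+ * These are bare tracepoints: no trace event is created, so nothing
+ * shows up in tracefs.  In-kernel users attach through the
+ * register_trace_*_tp() helpers that DECLARE_TRACE() generates.
+ * A minimal sketch (the probe name is illustrative):
+ *
+ *	static void probe_dl_throttle(void *data, struct sched_dl_entity *dl_se,
+ *				      int cpu, u8 type)
+ *	{
+ *		... inspect dl_se->runtime, dl_se->deadline, type ...
+ *	}
+ *
+ *	register_trace_sched_dl_throttle_tp(probe_dl_throttle, NULL);
+ */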
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_throttle_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_replenish_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_update_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_start_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_stop_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
}
#endif /* !CONFIG_RT_MUTEXES */
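+/* Classify @dl_se for the sched_dl_* tracepoints; see the DL_* defines. */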
+static inline u8 dl_get_type(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+ if (!dl_server(dl_se))
+ return DL_TASK;
+ if (dl_se == &rq->fair_server)
+ return DL_SERVER_FAIR;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (dl_se == &rq->ext_server)
+ return DL_SERVER_EXT;
+#endif
+ return DL_OTHER;
+}
+
static inline struct dl_bw *dl_bw_of(int i)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
dl_se->dl_throttled = 1;
dl_se->dl_defer_armed = 1;
}
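+ /* Deadline and runtime now hold the values for the new period. */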
+ trace_sched_dl_replenish_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
}
/*
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
+ trace_sched_dl_replenish_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
+
/*
* If this is the replenishment of a deferred reservation,
* clear the flag and return.
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
return;
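+ /* A constrained task activated after its deadline but before the next period. */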
+ trace_sched_dl_throttle_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
dl_se->runtime = 0;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
+ trace_sched_dl_throttle_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
dl_se->dl_throttled = 1;
/* If requested, inform the user about runtime overruns. */
if (!is_leftmost(dl_se, &rq->dl))
resched_curr(rq);
+ } else {
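+ /* Runtime was accounted without a throttle or replenish. */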
+ trace_sched_dl_update_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
}
/*
if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
return;
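+ /* Report the server starting before it is marked active and enqueued. */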
+ trace_sched_dl_server_start_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
dl_se->dl_server_active = 1;
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
if (!dl_server(dl_se) || !dl_server_active(dl_se))
return;
+ trace_sched_dl_server_stop_tp(dl_se, cpu_of(dl_se->rq),
+ dl_get_type(dl_se, dl_se->rq));
dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
hrtimer_try_to_cancel(&dl_se->dl_timer);
dl_se->dl_defer_armed = 0;