*/
u64 SCX_EV_ENQ_SKIP_EXITING;
+ /*
+ * If %SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
+ * migration disabled task skips ops.enqueue() and is dispatched to its
+ * local DSQ.
+ */
+ u64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
+
/*
* The total number of tasks enqueued (or pick_task-ed) with a
* default time slice (SCX_SLICE_DFL).
/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) &&
- is_migration_disabled(p))
+ is_migration_disabled(p)) {
+ __scx_add_event(SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
goto local;
+ }
if (!SCX_HAS_OP(enqueue))
goto global;
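
For reference, __scx_add_event() accounts the event into a per-CPU copy of
struct scx_event_stats, which is why the read path further below has to
aggregate per-CPU counters into a system-wide total. A minimal sketch of that
pattern follows; the per-CPU variable name and the exact macro body are
assumptions for illustration, not copied from ext.c:

	/* Sketch only: per-CPU event accounting assumed by the hunk above. */
	static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);

	#define __scx_add_event(name, cnt) do {				\
		__this_cpu_add(event_stats_cpu.name, (cnt));		\
	} while (0)

do_enqueue_task() runs with the rq lock held, so the cheaper, non-preempt-safe
__this_cpu_add() form is sufficient in this sketch.
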
scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
+ scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
scx_dump_event(s, &events, SCX_EV_ENQ_SLICE_DFL);
scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SLICE_DFL);
scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DURATION);
scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DISPATCH);
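
The e_sys/e_cpu aggregation above is the kernel-side read path exposed to BPF
schedulers via the scx_bpf_events() kfunc from the same series. A hedged sketch
of how a scheduler could report the new counter, assuming scx_bpf_events() and
the scx_bpf_dump() helper from tools/sched_ext; the ops name and callback are
illustrative only:

	#include <scx/common.bpf.h>

	/* Illustrative ops.dump() callback: fetch the aggregated event
	 * counters and print the migration-disabled skip count into the
	 * dump buffer.
	 */
	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
	{
		struct scx_event_stats events;

		scx_bpf_events(&events, sizeof(events));
		scx_bpf_dump("SCX_EV_ENQ_SKIP_MIGRATION_DISABLED=%llu",
			     events.SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
	}
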