git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
sched: Adapt sched tracepoints for RV task model
author: Gabriele Monaco <gmonaco@redhat.com>
Mon, 28 Jul 2025 13:50:18 +0000 (15:50 +0200)
committer: Steven Rostedt (Google) <rostedt@goodmis.org>
Mon, 28 Jul 2025 20:47:34 +0000 (16:47 -0400)
Add the following tracepoint:
* sched_set_need_resched(tsk, cpu, tif)
    Called when the need resched [lazy] flag is set for a task

Remove the unused ip parameter from sched_entry and sched_exit and alter
sched_entry to have a value of preempt consistent with the one used in
sched_switch.

Also adapt all monitors using sched_{entry,exit} to avoid breaking build.

These tracepoints are useful to describe the Linux task model and are
adapted from the patches by Daniel Bristot de Oliveira
(https://bristot.me/linux-task-model/).

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Nam Cao <namcao@linutronix.de>
Cc: Tomas Glozar <tglozar@redhat.com>
Cc: Juri Lelli <jlelli@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
Link: https://lore.kernel.org/20250728135022.255578-7-gmonaco@redhat.com
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
include/linux/sched.h
include/trace/events/sched.h
kernel/sched/core.c
kernel/trace/rv/monitors/sco/sco.c
kernel/trace/rv/monitors/scpd/scpd.c
kernel/trace/rv/monitors/sncid/sncid.c
kernel/trace/rv/monitors/snep/snep.c
kernel/trace/rv/monitors/tss/tss.c

index fabd7fe1a07a506b0c5fb2d62914e829628e73de..91d1fdbc2f560b02b5bccd8b1d5d30cebcf92ba3 100644 (file)
@@ -339,9 +339,11 @@ extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
 extern void io_schedule(void);
 
-/* wrapper function to trace from this header file */
+/* wrapper functions to trace from this header file */
 DECLARE_TRACEPOINT(sched_set_state_tp);
 extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
 
 /**
  * struct prev_cputime - snapshot of system and user cputime
@@ -2063,6 +2065,9 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
+       if (tracepoint_enabled(sched_set_need_resched_tp) &&
+           !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+               __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
        set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
index 4e6b2910cec3f5ac0653d063ba98aa20f7d395da..c08893bde255dca78d4da55c7eee7bea57739ff3 100644 (file)
@@ -882,18 +882,22 @@ DECLARE_TRACE(sched_compute_energy,
        TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
 
 DECLARE_TRACE(sched_entry,
-       TP_PROTO(bool preempt, unsigned long ip),
-       TP_ARGS(preempt, ip));
+       TP_PROTO(bool preempt),
+       TP_ARGS(preempt));
 
 DECLARE_TRACE(sched_exit,
-       TP_PROTO(bool is_switch, unsigned long ip),
-       TP_ARGS(is_switch, ip));
+       TP_PROTO(bool is_switch),
+       TP_ARGS(is_switch));
 
 DECLARE_TRACE_CONDITION(sched_set_state,
        TP_PROTO(struct task_struct *tsk, int state),
        TP_ARGS(tsk, state),
        TP_CONDITION(!!(tsk->__state) != !!state));
 
+DECLARE_TRACE(sched_set_need_resched,
+       TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+       TP_ARGS(tsk, cpu, tif));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
index ec68fc686bd74292e0edba3b0229f5db590ab038..b485e0639616a332c72c67605a6cf7150225e6e5 100644 (file)
@@ -1110,6 +1110,7 @@ static void __resched_curr(struct rq *rq, int tif)
 
        cpu = cpu_of(rq);
 
+       trace_sched_set_need_resched_tp(curr, cpu, tif);
        if (cpu == smp_processor_id()) {
                set_ti_thread_flag(cti, tif);
                if (tif == TIF_NEED_RESCHED)
@@ -1125,6 +1126,11 @@ static void __resched_curr(struct rq *rq, int tif)
        }
 }
 
+void __trace_set_need_resched(struct task_struct *curr, int tif)
+{
+       trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
+}
+
 void resched_curr(struct rq *rq)
 {
        __resched_curr(rq, TIF_NEED_RESCHED);
@@ -5329,7 +5335,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
         * switched the context for the first time. It is returning from
         * schedule for the first time in this path.
         */
-       trace_sched_exit_tp(true, CALLER_ADDR0);
+       trace_sched_exit_tp(true);
        preempt_enable();
 
        if (current->set_child_tid)
@@ -6678,7 +6684,8 @@ static void __sched notrace __schedule(int sched_mode)
        struct rq *rq;
        int cpu;
 
-       trace_sched_entry_tp(preempt, CALLER_ADDR0);
+       /* Trace preemptions consistently with task switches */
+       trace_sched_entry_tp(sched_mode == SM_PREEMPT);
 
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
@@ -6793,7 +6800,7 @@ picked:
                __balance_callbacks(rq);
                raw_spin_rq_unlock_irq(rq);
        }
-       trace_sched_exit_tp(is_switch, CALLER_ADDR0);
+       trace_sched_exit_tp(is_switch);
 }
 
 void __noreturn do_task_dead(void)
index 66f4639d46ac496a8433a77fb46b7af1fef69efd..04c36405e2e3d8e9e35f8317dba68c464520aa8b 100644 (file)
@@ -24,12 +24,12 @@ static void handle_sched_set_state(void *data, struct task_struct *tsk, int stat
        da_handle_start_event_sco(sched_set_state_sco);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
        da_handle_event_sco(schedule_entry_sco);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
        da_handle_start_event_sco(schedule_exit_sco);
 }
index 299703cd72b06f4194e546f848d2ec0e7882979f..1e351ba52fee90ef66a1373ce25dd31bcb6c4a6a 100644 (file)
@@ -30,12 +30,12 @@ static void handle_preempt_enable(void *data, unsigned long ip, unsigned long pa
        da_handle_start_event_scpd(preempt_enable_scpd);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
        da_handle_event_scpd(schedule_entry_scpd);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
        da_handle_event_scpd(schedule_exit_scpd);
 }
index 3e1ee715a0fbf9e7e16ae67f4eb0ae5d37ed8e63..c8491f4263650ad88bdad2acba5da1b2fd22a303 100644 (file)
@@ -30,12 +30,12 @@ static void handle_irq_enable(void *data, unsigned long ip, unsigned long parent
        da_handle_start_event_sncid(irq_enable_sncid);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
        da_handle_start_event_sncid(schedule_entry_sncid);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
        da_handle_start_event_sncid(schedule_exit_sncid);
 }
index 2adc3108d60c9dec773ec83cf93d69082e8e862d..558950f524a52dda638c1e212d8a73b341418c63 100644 (file)
@@ -30,12 +30,12 @@ static void handle_preempt_enable(void *data, unsigned long ip, unsigned long pa
        da_handle_start_event_snep(preempt_enable_snep);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
        da_handle_event_snep(schedule_entry_snep);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
        da_handle_start_event_snep(schedule_exit_snep);
 }
index 0452fcd9edcfe26451a73274fe765988fc4f9881..95ebd15131f524c85d72d1874a92a604232ed1b1 100644 (file)
@@ -27,12 +27,12 @@ static void handle_sched_switch(void *data, bool preempt,
        da_handle_event_tss(sched_switch_tss);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
        da_handle_event_tss(schedule_entry_tss);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
        da_handle_start_event_tss(schedule_exit_tss);
 }