#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/log2.h>
#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>

#include <api/fs/fs.h>
#include <linux/time64.h>

#define PR_SET_NAME	15	/* Set process name */

#define MAX_PID		1024000
/* (fields of struct task_desc) */
	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

/* (fields of struct sched_atom) */
	enum sched_event_type type;
	struct task_desc *wakee;

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* (fields of struct work_atom) */
	struct list_head list;
	enum thread_state state;

/* (fields of struct work_atoms) */
	struct list_head work_list;
	struct thread *thread;
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};
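/*
 * Each 'perf sched' mode plugs its own callbacks into this table; the
 * generic process_sched_*_event() wrappers further down dispatch
 * tracepoint samples through sched->tp_handler, so the replay, latency
 * and map modes can share a single event-reading path.
 */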
#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int *comp_cpus;
	bool comp;
	struct thread_map *color_pids;
	const char *color_pids_str;
	struct cpu_map *color_cpus;
	const char *color_cpus_str;
	struct cpu_map *cpus;
	const char *cpus_str;
};
struct perf_sched {
	struct perf_tool tool;
	const char *sort_order;
	unsigned long nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t start_work_mutex;
	pthread_mutex_t work_done_wait_mutex;
	int profile_cpu;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	u32 curr_pid[MAX_CPUS];
	struct thread *curr_thread[MAX_CPUS];
	char next_shortname1;
	char next_shortname2;
	unsigned int replay_repeat;
	unsigned long nr_run_events;
	unsigned long nr_sleep_events;
	unsigned long nr_wakeup_events;
	unsigned long nr_sleep_corrections;
	unsigned long nr_run_events_optimized;
	unsigned long targetless_wakeups;
	unsigned long multitarget_wakeups;
	unsigned long nr_runs;
	unsigned long nr_timestamps;
	unsigned long nr_unordered_timestamps;
	unsigned long nr_context_switch_bugs;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 run_measurement_overhead;
	u64 sleep_measurement_overhead;
	u64 start_time;
	u64 cpu_usage;
	u64 runavg_cpu_usage;
	u64 parent_cpu_usage;
	u64 runavg_parent_cpu_usage;
	u64 sum_runtime;
	u64 sum_fluct;
	u64 run_avg;
	u64 all_runtime;
	u64 all_count;
	u64 cpu_last_switched[MAX_CPUS];
	struct rb_root atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	struct perf_sched_map map;

	/* options for timehist command */
	bool summary;
	bool summary_only;
	bool show_callchain;
	unsigned int max_stack;
	bool show_cpu_visual;
	bool show_wakeups;
	bool show_migrations;
	u64 skipped_samples;
	const char *time_str;
	struct perf_time_interval ptime;
};
/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_wait;        /* time between CPU access (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;

	u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec  = nsecs / 999999999;

	nanosleep(&ts, NULL);
}
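/*
 * The calibrate_*_overhead() helpers below estimate the fixed cost of
 * the measurement itself by taking the minimum over ten trials;
 * burn_nsecs() above compensates by run_measurement_overhead so that
 * replayed RUN atoms do not systematically overshoot their recorded
 * duration.
 */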
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}
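/*
 * A recorded wakeup becomes a semaphore handoff during replay: the
 * wakee's most recent SLEEP atom is given a semaphore that the waker's
 * WAKEUP atom posts, so perf_sched__process_event() can block the wakee
 * until the corresponding wakeup fires. Wakeups with no sleeping target
 * (targetless) or whose target already has a waiter (multitarget) are
 * only counted, not modeled.
 */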
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}
static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc *task;
	struct perf_sched *sched;
	int fd;
};
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
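/*
 * Parent/worker synchronization uses the two mutexes as gates:
 * create_tasks() takes start_work_mutex and work_done_wait_mutex before
 * spawning, each worker posts ready_for_work and then blocks acquiring
 * start_work_mutex; wait_for_tasks() releases it to start an iteration
 * and reclaims it once every work_done_sem has been posted.
 */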
static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
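/*
 * Note the running averages above: runavg = (runavg * (repeat - 1) +
 * sample) / repeat is an exponential moving average with smoothing
 * factor 1/replay_repeat, so earlier iterations are progressively
 * discounted; run_one_test() below applies the same formula to the
 * wall-clock run time.
 */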
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}
static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);
		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}
static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}
static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("... child:  %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}
struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
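/*
 * The rbtree is ordered by whatever keys sit on sort_list (cmp_pid
 * while collecting, the user's sort keys when re-sorting); entries that
 * compare equal fall to the right, so duplicates are simply kept in
 * insertion order rather than merged.
 */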
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}
static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
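/*
 * Scheduling latency is defined here as sched_in_time - wake_up_time:
 * it accumulates into total_lat for the per-thread average, while
 * max_lat/max_lat_at remember the worst single delay and when it
 * happened. Atoms whose wakeup appears to lie in the future are marked
 * THREAD_IGNORE instead of polluting the stats.
 */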
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}
static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return err;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return err;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup is not guaranteed to happen while the task is off the
	 * run queue: it may fire while the task is still on the run queue
	 * and merely flip ->state to TASK_RUNNING. In that case we must
	 * not set ->wake_up_time for a task that is already runnable.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_at[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_at);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}
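/*
 * The comparators above back the sort keys registered by
 * sort_dimension__add() below (pid, avg, max, switch, runtime); a sort
 * specification is a list of dimensions that thread_lat_cmp() tries in
 * order until one of them distinguishes the two entries.
 */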
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}
static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}
static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}
union map_priv {
	void *ptr;
	bool  color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread *
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}
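/*
 * map_priv reuses the thread's private pointer as a one-bit flag: the
 * union lets a bool travel through thread__set_priv()/thread__priv()
 * without a separate allocation, marking the PIDs that should be
 * highlighted in the map output.
 */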
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i, this_cpu = sample->cpu;
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			sched_in->shortname[0] = '.';
			sched_in->shortname[1] = ' ';
		} else {
			sched_in->shortname[0] = sched->next_shortname1;
			sched_in->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu])
			color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
		else
			color_fprintf(stdout, color, "   ");
	}

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
	if (new_shortname || (verbose && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}
static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}
static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}
static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}
static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}
/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}
/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}

/*
 * save last time event was seen per cpu
 */
static void perf_evsel__save_time(struct perf_evsel *evsel,
				  u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}
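/*
 * The per-cpu last_time array grows to __roundup_pow_of_two(cpu + 1)
 * entries at a time, so repeated saves for increasing CPU numbers cost
 * only O(log n) reallocations rather than one per newly seen cpu.
 */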
static int comm_width = 20;

static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}
static void timehist_header(struct perf_sched *sched)
{
	u32 ncpus = sched->max_cpu + 1;
	u32 i, j;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		printf(" ");
		for (i = 0, j = 0; i < ncpus; ++i) {
			printf("%x", j++);
			if (j > 15)
				j = 0;
		}
		printf(" ");
	}

	printf(" %-20s  %9s  %9s  %9s",
	       "task name", "wait time", "sch delay", "run time");

	printf("\n");

	/*
	 * units row
	 */
	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-20s  %9s  %9s  %9s\n", "[tid/pid]", "(msec)", "(msec)", "(msec)");

	/*
	 * separator
	 */
	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.20s  %.9s  %.9s  %.9s",
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line);

	printf("\n");
}
static void timehist_print_sample(struct perf_sched *sched,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t)
{
	struct thread_runtime *tr = thread__priv(thread);
	u32 max_cpus = sched->max_cpu + 1;
	char tstr[64];

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';
			else
				c = ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	print_sched_time(tr->dt_wait, 6);
	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_wakeups)
		printf("  %-*s", comm_width, "");

	if (thread->tid == 0)
		goto out;

	if (sched->show_callchain)
		printf("  ");

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, stdout);

out:
	printf("\n");
}
/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e, time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *     last         ready        tprev         t
 *     time         to run
 *
 *      |-------- dt_wait --------|
 *                   |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
 */

static void timehist_update_runtime_stats(struct thread_runtime *r,
					  u64 t, u64 tprev)
{
	r->dt_delay = 0;
	r->dt_wait  = 0;
	r->dt_run   = 0;
	if (tprev) {
		r->dt_run = t - tprev;
		if (r->ready_to_run) {
			if (r->ready_to_run > tprev)
				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			else
				r->dt_delay = tprev - r->ready_to_run;
		}

		if (r->last_time > tprev)
			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
		else if (r->last_time)
			r->dt_wait = tprev - r->last_time;
	}

	update_stats(&r->run_stats, r->dt_run);
	r->total_run_time += r->dt_run;
}
static bool is_idle_sample(struct perf_sched *sched,
			   struct perf_sample *sample,
			   struct perf_evsel *evsel,
			   struct machine *machine)
{
	struct thread *thread;
	struct callchain_cursor *cursor = &callchain_cursor;

	/* pid 0 == swapper == idle task */
	if (sample->pid == 0)
		return true;

	if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) {
		if (perf_evsel__intval(evsel, sample, "prev_pid") == 0)
			return true;
	}

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
		return false;
	}

	if (!symbol_conf.use_callchain || sample->callchain == NULL)
		return false;

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
		if (verbose)
			error("Failed to resolve callchain. Skipping\n");

		return false;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;
		struct symbol *sym;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->sym;
		if (sym && sym->name) {
			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}

	return false;
}
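/*
 * Marking schedule/__schedule/preempt_schedule as ignored means the
 * callchain later printed for the sample (with
 * EVSEL__PRINT_SKIP_IGNORED) starts where the task actually blocked,
 * rather than inside the scheduler itself.
 */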
/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu - 1;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		thread__set_comm(idle_threads[i], idle_comm, 0);
	}

	return 0;
}

static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i <= idle_max_cpu; ++i) {
		if ((idle_threads[i]))
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}
static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		i = idle_max_cpu ? idle_max_cpu + 1 : 0;
		while (i < j)
			idle_threads[i++] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			idle_threads[cpu]->tid = 0;
			thread__set_comm(idle_threads[cpu], idle_comm, 0);
		}
	}

	return idle_threads[cpu];
}
/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}
static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct perf_evsel *evsel)
{
	struct thread *thread;

	if (is_idle_sample(sched, sample, evsel, machine)) {
		thread = get_idle_thread(sample->cpu);
		if (thread == NULL)
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

	} else {
		thread = machine__findnew_thread(machine, sample->pid, sample->tid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
				 sample->tid);
		}
	}

	return thread;
}
static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread)
{
	bool rc = false;

	if (thread__is_filtered(thread)) {
		rc = true;
		sched->skipped_samples++;
	}

	return rc;
}
static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)
{
	struct thread *thread;
	char tstr[64];

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread) &&
	    timehist_skip_sample(sched, awakened)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* dt spacer */
	printf("  %9s  %9s  %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

	printf("\n");
}
static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, sample, machine, thread);

	return 0;
}
static void timehist_print_migration_event(struct perf_sched *sched,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine,
					   struct thread *migrated)
{
	struct thread *thread;
	char tstr[64];
	u32 max_cpus;
	u32 ocpu, dcpu;

	if (sched->summary_only)
		return;

	max_cpus = sched->max_cpu + 1;
	ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
	dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	if (timehist_skip_sample(sched, thread) &&
	    timehist_skip_sample(sched, migrated)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf("  ");
		for (i = 0; i < max_cpus; ++i) {
			c = (i == sample->cpu) ? 'm' : ' ';
			printf("%c", c);
		}
		printf("  ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* dt spacer */
	printf("  %9s  %9s  %9s ", "", "", "");

	printf("migrated: %s", timehist_get_commstr(migrated));
	printf(" cpu %d => %d", ocpu, dcpu);

	printf("\n");
}
static int timehist_migrate_task_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of migrated task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	tr->migrations++;

	/* show migrations if requested */
	timehist_print_migration_event(sched, evsel, sample, machine, thread);

	return 0;
}
static int timehist_sched_change_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct perf_time_interval *ptime = &sched->ptime;
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev, t = sample->time;
	int rc = 0;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);
		rc = -1;
		goto out;
	}

	thread = timehist_get_thread(sched, sample, machine, evsel);
	if (thread == NULL) {
		rc = -1;
		goto out;
	}

	if (timehist_skip_sample(sched, thread))
		goto out;

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		rc = -1;
		goto out;
	}

	tprev = perf_evsel__get_time(evsel, sample->cpu);

	/*
	 * If start time given:
	 * - sample time is under window user cares about - skip sample
	 * - tprev is under window user cares about - reset to start of window
	 */
	if (ptime->start && ptime->start > t)
		goto out;

	if (ptime->start > tprev)
		tprev = ptime->start;

	/*
	 * If end time given:
	 * - previous sched event is out of window - we are done
	 * - sample time is beyond window user cares about - reset it
	 *   to close out stats for time window interest
	 */
	if (ptime->end) {
		if (tprev > ptime->end)
			goto out;

		if (t > ptime->end)
			t = ptime->end;
	}

	timehist_update_runtime_stats(tr, t, tprev);

	if (!sched->summary_only)
		timehist_print_sample(sched, sample, &al, thread, t);

out:
	if (tr) {
		/* time of this sched_switch event becomes last time task seen */
		tr->last_time = sample->time;

		/* sched out event for task so reset ready to run time */
		tr->ready_to_run = 0;
	}

	perf_evsel__save_time(evsel, sample->time, sample->cpu);

	return rc;
}
static int timehist_sched_switch_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine __maybe_unused)
{
	return timehist_sched_change_event(tool, event, evsel, sample, machine);
}

static int process_lost(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine __maybe_unused)
{
	char tstr[64];

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s ", tstr);
	printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu);

	return 0;
}
static void print_thread_runtime(struct thread *t,
				 struct thread_runtime *r)
{
	double mean = avg_stats(&r->run_stats);
	float stddev;

	printf("%*s   %5d  %9" PRIu64 " ",
	       comm_width, timehist_get_commstr(t), t->ppid,
	       (u64) r->run_stats.n);

	print_sched_time(r->total_run_time, 8);
	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
	print_sched_time(r->run_stats.min, 6);
	printf(" ");
	print_sched_time((u64) mean, 6);
	printf(" ");
	print_sched_time(r->run_stats.max, 6);
	printf(" ");
	printf("%5.2f", stddev);
	printf("   %5" PRIu64, r->migrations);
	printf("\n");
}
struct total_run_stats {
	u64 sched_count;
	u64 task_count;
	u64 total_run_time;
};

static int __show_thread_runtime(struct thread *t, void *priv)
{
	struct total_run_stats *stats = priv;
	struct thread_runtime *r;

	if (thread__is_filtered(t))
		return 0;

	r = thread__priv(t);
	if (r && r->run_stats.n) {
		stats->task_count++;
		stats->sched_count += r->run_stats.n;
		stats->total_run_time += r->total_run_time;
		print_thread_runtime(t, r);
	}

	return 0;
}

static int show_thread_runtime(struct thread *t, void *priv)
{
	if (t->dead)
		return 0;

	return __show_thread_runtime(t, priv);
}

static int show_deadthread_runtime(struct thread *t, void *priv)
{
	if (!t->dead)
		return 0;

	return __show_thread_runtime(t, priv);
}
static void timehist_print_summary(struct perf_sched *sched,
				   struct perf_session *session)
{
	struct machine *m = &session->machines.host;
	struct total_run_stats totals;
	u64 task_count;
	struct thread *t;
	struct thread_runtime *r;
	int i;

	memset(&totals, 0, sizeof(totals));

	if (comm_width < 30)
		comm_width = 30;

	printf("\nRuntime summary\n");
	printf("%*s  parent   sched-in  ", comm_width, "comm");
	printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
	printf("%*s            (count)  ", comm_width, "");
	printf("     (msec)     (msec)      (msec)      (msec)       %%\n");
	printf("%.117s\n", graph_dotted_line);

	machine__for_each_thread(m, show_thread_runtime, &totals);
	task_count = totals.task_count;
	if (!task_count)
		printf("<no still running tasks>\n");

	printf("\nTerminated tasks:\n");
	machine__for_each_thread(m, show_deadthread_runtime, &totals);
	if (task_count == totals.task_count)
		printf("<no terminated tasks>\n");

	/* CPU idle stats not tracked when samples were skipped */
	if (sched->skipped_samples)
		return;

	printf("\nIdle stats:\n");
	for (i = 0; i <= idle_max_cpu; ++i) {
		t = idle_threads[i];
		if (!t)
			continue;

		r = thread__priv(t);
		if (r && r->run_stats.n) {
			totals.sched_count += r->run_stats.n;
			printf("    CPU %2d idle for ", i);
			print_sched_time(r->total_run_time, 6);
			printf(" msec\n");
		} else
			printf("    CPU %2d idle entire time window\n", i);
	}

	printf("\n"
	       "    Total number of unique tasks: %" PRIu64 "\n"
	       "Total number of context switches: %" PRIu64 "\n"
	       "           Total run time (msec): ",
	       totals.task_count, totals.sched_count);

	print_sched_time(totals.total_run_time, 2);
	printf("\n");
}
2521 typedef int (*sched_handler
)(struct perf_tool
*tool
,
2522 union perf_event
*event
,
2523 struct perf_evsel
*evsel
,
2524 struct perf_sample
*sample
,
2525 struct machine
*machine
);
static int perf_timehist__process_sample(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct perf_evsel *evsel,
					 struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int err = 0;
	int this_cpu = sample->cpu;

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (evsel->handler != NULL) {
		sched_handler f = evsel->handler;

		err = f(tool, event, evsel, sample, machine);
	}

	return err;
}

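/*
 * Sanity-check the recorded attributes: make sure per-evsel runtime
 * state can be allocated, and quietly disable callchain display when
 * the data was not recorded with PERF_SAMPLE_CALLCHAIN.
 */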
static int timehist_check_attr(struct perf_sched *sched,
			       struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct evsel_runtime *er;

	list_for_each_entry(evsel, &evlist->entries, node) {
		er = perf_evsel__get_runtime(evsel);
		if (er == NULL) {
			pr_err("Failed to allocate memory for evsel runtime data\n");
			return -1;
		}

		if (sched->show_callchain &&
		    !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
			pr_info("Samples do not have callchains.\n");
			sched->show_callchain = 0;
			symbol_conf.use_callchain = 0;
		}
	}

	return 0;
}

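/*
 * Top-level driver for 'perf sched timehist': wire up the tool
 * callbacks, open the session, install the tracepoint handlers and
 * per-CPU idle state, process the events, then print the summary.
 */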
static int perf_sched__timehist(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",       timehist_sched_switch_event, },
		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
	};
	const struct perf_evsel_str_handler migrate_handlers[] = {
		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
	};
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};

	struct perf_session *session;
	struct perf_evlist *evlist;
	int err = -1;

	/*
	 * event handlers for timehist option
	 */
	sched->tool.sample	 = perf_timehist__process_sample;
	sched->tool.mmap	 = perf_event__process_mmap;
	sched->tool.comm	 = perf_event__process_comm;
	sched->tool.exit	 = perf_event__process_exit;
	sched->tool.fork	 = perf_event__process_fork;
	sched->tool.lost	 = process_lost;
	sched->tool.attr	 = perf_event__process_attr;
	sched->tool.tracing_data = perf_event__process_tracing_data;
	sched->tool.build_id	 = perf_event__process_build_id;

	sched->tool.ordered_events = true;
	sched->tool.ordering_requires_timestamps = true;

	symbol_conf.use_callchain = sched->show_callchain;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL)
		return -ENOMEM;

	evlist = session->evlist;

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		return -EINVAL;
	}

	if (timehist_check_attr(sched, evlist) != 0)
		goto out;

	setup_pager();

	/* setup per-evsel handlers */
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out;

	/* sched_switch event at a minimum needs to exist */
	if (!perf_evlist__find_tracepoint_by_name(session->evlist,
						  "sched:sched_switch")) {
		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
		goto out;
	}

	if (sched->show_migrations &&
	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
		goto out;

	/* pre-allocate struct for per-CPU idle stats */
	sched->max_cpu = session->header.env.nr_cpus_online;
	if (sched->max_cpu == 0)
		sched->max_cpu = 4;
	if (init_idle_threads(sched->max_cpu))
		goto out;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (sched->summary_only)
		sched->summary = sched->summary_only;

	if (!sched->summary_only)
		timehist_header(sched);

	err = perf_session__process_events(session);
	if (err) {
		pr_err("Failed to process events, error %d", err);
		goto out;
	}

	sched->nr_events      = evlist->stats.nr_events[0];
	sched->nr_lost_events = evlist->stats.total_lost;
	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];

	if (sched->summary)
		timehist_print_summary(sched, session);

out:
	free_idle_threads();
	perf_session__delete(session);

	return err;
}

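/*
 * Report anomaly rates seen while processing: unordered timestamps,
 * lost events, and switch events that disagree with the tracked
 * current task on a CPU.
 */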
static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

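/*
 * Insert @data into the comm-keyed rbtree. If an entry with the same
 * comm already exists, fold the runtimes and latencies into it and
 * free @data instead of linking it.
 */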
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
		} else {
			this->num_merged++;
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_at = data->max_lat_at;
			}
			zfree(&data);
			return;
		}
	}

	data->num_merged++;
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

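/*
 * Collapse the per-thread atom tree into per-comm entries, unless
 * -p/--pids (skip_merge) asked for per-pid latency stats.
 */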
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first(&sched->atom_root))) {
		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}

static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
	printf(" -----------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}

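/*
 * Helpers for 'perf sched map': resolve the --cpus, --color-pids and
 * --color-cpus option strings into cpu/thread maps up front.
 */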
static int setup_map_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (!sched->map.cpus_str)
		return 0;

	map = cpu_map__new(sched->map.cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
		return -1;
	}

	sched->map.cpus = map;
	return 0;
}

static int setup_color_pids(struct perf_sched *sched)
{
	struct thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}

static int setup_color_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	if (setup_color_pids(sched))
		return -1;

	if (setup_color_cpus(sched))
		return -1;

	setup_pager();
	if (perf_sched__read_events(sched))
		return -1;
	print_bad_events(sched);
	return 0;
}

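/*
 * 'perf sched replay': calibrate measurement overhead, rebuild the
 * recorded task/wakeup graph, then re-execute it replay_repeat times
 * with real threads.
 */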
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}

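/*
 * Parse the comma/space separated --sort keys (e.g. the default
 * "avg, max, switch, runtime") into sched->sort_list; "pid" is always
 * added as the comparison key for the thread lookup tree.
 */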
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}

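/*
 * 'perf sched record' is a thin wrapper: it forwards to 'perf record'
 * with the scheduler tracepoints appended, so
 *
 *   perf sched record -- sleep 1
 *
 * records system-wide sched:* events while 'sleep 1' runs.
 */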
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

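/*
 * Command entry point: parse the subcommand, then hand off to the
 * record/latency/map/replay/timehist implementations above.
 */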
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events	 = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.skip_merge	      = 0,
		.show_callchain	      = 1,
		.max_stack	      = 5,
	};

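	/*
	 * Option tables: the subcommand tables chain back to sched_options
	 * via OPT_PARENT, so the common flags (-i, -v, -D, -f) are accepted
	 * by every subcommand.
	 */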
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only the summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all events and the summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_STRING(0, "time", &sched.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_PARENT(sched_options)
	};

	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops  = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops  = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops  = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;

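	/* no known current task on any CPU until a sched_switch is seen */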
	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}

		if (sched.show_wakeups && sched.summary_only) {
			pr_err(" Error: -s and -w are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			parse_options_usage(NULL, timehist_options, "w", true);
			return -EINVAL;
		}

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}