tools/perf/builtin-sched.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3 #include "perf.h"
4 #include "perf-sys.h"
5
6 #include "util/cpumap.h"
7 #include "util/evlist.h"
8 #include "util/evsel.h"
9 #include "util/evsel_fprintf.h"
10 #include "util/symbol.h"
11 #include "util/thread.h"
12 #include "util/header.h"
13 #include "util/session.h"
14 #include "util/tool.h"
15 #include "util/cloexec.h"
16 #include "util/thread_map.h"
17 #include "util/color.h"
18 #include "util/stat.h"
19 #include "util/string2.h"
20 #include "util/callchain.h"
21 #include "util/time-utils.h"
22
23 #include <subcmd/pager.h>
24 #include <subcmd/parse-options.h>
25 #include "util/trace-event.h"
26
27 #include "util/debug.h"
28 #include "util/event.h"
29
30 #include <linux/kernel.h>
31 #include <linux/log2.h>
32 #include <linux/zalloc.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <inttypes.h>
36
37 #include <errno.h>
38 #include <semaphore.h>
39 #include <pthread.h>
40 #include <math.h>
41 #include <api/fs/fs.h>
42 #include <perf/cpumap.h>
43 #include <linux/time64.h>
44 #include <linux/err.h>
45
46 #include <linux/ctype.h>
47
48 #define PR_SET_NAME 15 /* Set process name */
49 #define MAX_CPUS 4096
50 #define COMM_LEN 20
51 #define SYM_LEN 129
52 #define MAX_PID 1024000
53
54 static const char *cpu_list;
55 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
56
57 struct sched_atom;
58
59 struct task_desc {
60 unsigned long nr;
61 unsigned long pid;
62 char comm[COMM_LEN];
63
64 unsigned long nr_events;
65 unsigned long curr_event;
66 struct sched_atom **atoms;
67
68 pthread_t thread;
69 sem_t sleep_sem;
70
71 sem_t ready_for_work;
72 sem_t work_done_sem;
73
74 u64 cpu_usage;
75 };
76
77 enum sched_event_type {
78 SCHED_EVENT_RUN,
79 SCHED_EVENT_SLEEP,
80 SCHED_EVENT_WAKEUP,
81 SCHED_EVENT_MIGRATION,
82 };
83
84 struct sched_atom {
85 enum sched_event_type type;
86 int specific_wait;
87 u64 timestamp;
88 u64 duration;
89 unsigned long nr;
90 sem_t *wait_sem;
91 struct task_desc *wakee;
92 };
93
94 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
95
96 /* task state bitmask, copied from include/linux/sched.h */
97 #define TASK_RUNNING 0
98 #define TASK_INTERRUPTIBLE 1
99 #define TASK_UNINTERRUPTIBLE 2
100 #define __TASK_STOPPED 4
101 #define __TASK_TRACED 8
102 /* in tsk->exit_state */
103 #define EXIT_DEAD 16
104 #define EXIT_ZOMBIE 32
105 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
106 /* in tsk->state again */
107 #define TASK_DEAD 64
108 #define TASK_WAKEKILL 128
109 #define TASK_WAKING 256
110 #define TASK_PARKED 512
111
112 enum thread_state {
113 THREAD_SLEEPING = 0,
114 THREAD_WAIT_CPU,
115 THREAD_SCHED_IN,
116 THREAD_IGNORE
117 };
118
119 struct work_atom {
120 struct list_head list;
121 enum thread_state state;
122 u64 sched_out_time;
123 u64 wake_up_time;
124 u64 sched_in_time;
125 u64 runtime;
126 };
127
128 struct work_atoms {
129 struct list_head work_list;
130 struct thread *thread;
131 struct rb_node node;
132 u64 max_lat;
133 u64 max_lat_at;
134 u64 total_lat;
135 u64 nb_atoms;
136 u64 total_runtime;
137 int num_merged;
138 };
139
140 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
141
142 struct perf_sched;
143
144 struct trace_sched_handler {
145 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
146 struct perf_sample *sample, struct machine *machine);
147
148 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
149 struct perf_sample *sample, struct machine *machine);
150
151 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
152 struct perf_sample *sample, struct machine *machine);
153
154 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
155 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
156 struct machine *machine);
157
158 int (*migrate_task_event)(struct perf_sched *sched,
159 struct evsel *evsel,
160 struct perf_sample *sample,
161 struct machine *machine);
162 };
163
164 #define COLOR_PIDS PERF_COLOR_BLUE
165 #define COLOR_CPUS PERF_COLOR_BG_RED
166
167 struct perf_sched_map {
168 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
169 int *comp_cpus;
170 bool comp;
171 struct perf_thread_map *color_pids;
172 const char *color_pids_str;
173 struct perf_cpu_map *color_cpus;
174 const char *color_cpus_str;
175 struct perf_cpu_map *cpus;
176 const char *cpus_str;
177 };
178
179 struct perf_sched {
180 struct perf_tool tool;
181 const char *sort_order;
182 unsigned long nr_tasks;
183 struct task_desc **pid_to_task;
184 struct task_desc **tasks;
185 const struct trace_sched_handler *tp_handler;
186 pthread_mutex_t start_work_mutex;
187 pthread_mutex_t work_done_wait_mutex;
188 int profile_cpu;
189 /*
190 * Track the current task - that way we can know whether there are any
191 * weird events, such as a task being switched away that is not current.
192 */
193 int max_cpu;
194 u32 curr_pid[MAX_CPUS];
195 struct thread *curr_thread[MAX_CPUS];
196 char next_shortname1;
197 char next_shortname2;
198 unsigned int replay_repeat;
199 unsigned long nr_run_events;
200 unsigned long nr_sleep_events;
201 unsigned long nr_wakeup_events;
202 unsigned long nr_sleep_corrections;
203 unsigned long nr_run_events_optimized;
204 unsigned long targetless_wakeups;
205 unsigned long multitarget_wakeups;
206 unsigned long nr_runs;
207 unsigned long nr_timestamps;
208 unsigned long nr_unordered_timestamps;
209 unsigned long nr_context_switch_bugs;
210 unsigned long nr_events;
211 unsigned long nr_lost_chunks;
212 unsigned long nr_lost_events;
213 u64 run_measurement_overhead;
214 u64 sleep_measurement_overhead;
215 u64 start_time;
216 u64 cpu_usage;
217 u64 runavg_cpu_usage;
218 u64 parent_cpu_usage;
219 u64 runavg_parent_cpu_usage;
220 u64 sum_runtime;
221 u64 sum_fluct;
222 u64 run_avg;
223 u64 all_runtime;
224 u64 all_count;
225 u64 cpu_last_switched[MAX_CPUS];
226 struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
227 struct list_head sort_list, cmp_pid;
228 bool force;
229 bool skip_merge;
230 struct perf_sched_map map;
231
232 /* options for timehist command */
233 bool summary;
234 bool summary_only;
235 bool idle_hist;
236 bool show_callchain;
237 unsigned int max_stack;
238 bool show_cpu_visual;
239 bool show_wakeups;
240 bool show_next;
241 bool show_migrations;
242 bool show_state;
243 u64 skipped_samples;
244 const char *time_str;
245 struct perf_time_interval ptime;
246 struct perf_time_interval hist_time;
247 };
248
249 /* per thread run time data */
250 struct thread_runtime {
251 u64 last_time; /* time of previous sched in/out event */
252 u64 dt_run; /* run time */
253 u64 dt_sleep; /* time between CPU access by sleep (off cpu) */
254 u64 dt_iowait; /* time between CPU access by iowait (off cpu) */
255 u64 dt_preempt; /* time between CPU access by preempt (off cpu) */
256 u64 dt_delay; /* time between wakeup and sched-in */
257 u64 ready_to_run; /* time of wakeup */
258
259 struct stats run_stats;
260 u64 total_run_time;
261 u64 total_sleep_time;
262 u64 total_iowait_time;
263 u64 total_preempt_time;
264 u64 total_delay_time;
265
266 int last_state;
267
268 char shortname[3];
269 bool comm_changed;
270
271 u64 migrations;
272 };
273
274 /* per event run time data */
275 struct evsel_runtime {
276 u64 *last_time; /* time this event was last seen per cpu */
277 u32 ncpu; /* highest cpu slot allocated */
278 };
279
280 /* per cpu idle time data */
281 struct idle_thread_runtime {
282 struct thread_runtime tr;
283 struct thread *last_thread;
284 struct rb_root_cached sorted_root;
285 struct callchain_root callchain;
286 struct callchain_cursor cursor;
287 };
288
289 /* track idle times per cpu */
290 static struct thread **idle_threads;
291 static int idle_max_cpu;
292 static char idle_comm[] = "<idle>";
293
294 static u64 get_nsecs(void)
295 {
296 struct timespec ts;
297
298 clock_gettime(CLOCK_MONOTONIC, &ts);
299
300 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
301 }
302
303 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
304 {
305 u64 T0 = get_nsecs(), T1;
306
307 do {
308 T1 = get_nsecs();
309 } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
310 }
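/*
 * Worked example (hypothetical numbers): with a calibrated
 * run_measurement_overhead of 50 nsecs, burn_nsecs(sched, 1000)
 * spins until T1 - T0 >= 950 nsecs, so the cost of the two
 * get_nsecs() calls is not billed to the simulated runtime.
 */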
311
312 static void sleep_nsecs(u64 nsecs)
313 {
314 struct timespec ts;
315
316 ts.tv_nsec = nsecs % 999999999;
317 ts.tv_sec = nsecs / 999999999;
318
319 nanosleep(&ts, NULL);
320 }
321
322 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
323 {
324 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
325 int i;
326
327 for (i = 0; i < 10; i++) {
328 T0 = get_nsecs();
329 burn_nsecs(sched, 0);
330 T1 = get_nsecs();
331 delta = T1-T0;
332 min_delta = min(min_delta, delta);
333 }
334 sched->run_measurement_overhead = min_delta;
335
336 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
337 }
338
339 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
340 {
341 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
342 int i;
343
344 for (i = 0; i < 10; i++) {
345 T0 = get_nsecs();
346 sleep_nsecs(10000);
347 T1 = get_nsecs();
348 delta = T1-T0;
349 min_delta = min(min_delta, delta);
350 }
351 min_delta -= 10000;
352 sched->sleep_measurement_overhead = min_delta;
353
354 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
355 }
356
357 static struct sched_atom *
358 get_new_event(struct task_desc *task, u64 timestamp)
359 {
360 struct sched_atom *event = zalloc(sizeof(*event));
361 unsigned long idx = task->nr_events;
362 size_t size;
363
364 event->timestamp = timestamp;
365 event->nr = idx;
366
367 task->nr_events++;
368 size = sizeof(struct sched_atom *) * task->nr_events;
369 task->atoms = realloc(task->atoms, size);
370 BUG_ON(!task->atoms);
371
372 task->atoms[idx] = event;
373
374 return event;
375 }
376
377 static struct sched_atom *last_event(struct task_desc *task)
378 {
379 if (!task->nr_events)
380 return NULL;
381
382 return task->atoms[task->nr_events - 1];
383 }
384
385 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
386 u64 timestamp, u64 duration)
387 {
388 struct sched_atom *event, *curr_event = last_event(task);
389
390 /*
391 * optimize an existing RUN event by merging this one
392 * to it:
393 */
394 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
395 sched->nr_run_events_optimized++;
396 curr_event->duration += duration;
397 return;
398 }
399
400 event = get_new_event(task, timestamp);
401
402 event->type = SCHED_EVENT_RUN;
403 event->duration = duration;
404
405 sched->nr_run_events++;
406 }
407
408 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
409 u64 timestamp, struct task_desc *wakee)
410 {
411 struct sched_atom *event, *wakee_event;
412
413 event = get_new_event(task, timestamp);
414 event->type = SCHED_EVENT_WAKEUP;
415 event->wakee = wakee;
416
417 wakee_event = last_event(wakee);
418 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
419 sched->targetless_wakeups++;
420 return;
421 }
422 if (wakee_event->wait_sem) {
423 sched->multitarget_wakeups++;
424 return;
425 }
426
427 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
428 sem_init(wakee_event->wait_sem, 0, 0);
429 wakee_event->specific_wait = 1;
430 event->wait_sem = wakee_event->wait_sem;
431
432 sched->nr_wakeup_events++;
433 }
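/*
 * Replay pairing set up above (a sketch of the mechanism, assuming a
 * single waker/wakee pair): the wakee's pending SLEEP atom and the
 * waker's WAKEUP atom share one semaphore, so during replay the wakee
 * thread blocks in sem_wait() until the waker thread reaches the
 * recorded wakeup point and calls sem_post().
 */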
434
435 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
436 u64 timestamp, u64 task_state __maybe_unused)
437 {
438 struct sched_atom *event = get_new_event(task, timestamp);
439
440 event->type = SCHED_EVENT_SLEEP;
441
442 sched->nr_sleep_events++;
443 }
444
445 static struct task_desc *register_pid(struct perf_sched *sched,
446 unsigned long pid, const char *comm)
447 {
448 struct task_desc *task;
449 static int pid_max;
450
451 if (sched->pid_to_task == NULL) {
452 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
453 pid_max = MAX_PID;
454 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
455 }
456 if (pid >= (unsigned long)pid_max) {
457 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
458 sizeof(struct task_desc *))) == NULL);
459 while (pid >= (unsigned long)pid_max)
460 sched->pid_to_task[pid_max++] = NULL;
461 }
462
463 task = sched->pid_to_task[pid];
464
465 if (task)
466 return task;
467
468 task = zalloc(sizeof(*task));
469 task->pid = pid;
470 task->nr = sched->nr_tasks;
471 strcpy(task->comm, comm);
472 /*
473 * every task starts in sleeping state - this gets ignored
474 * if there's no wakeup pointing to this sleep state:
475 */
476 add_sched_event_sleep(sched, task, 0, 0);
477
478 sched->pid_to_task[pid] = task;
479 sched->nr_tasks++;
480 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
481 BUG_ON(!sched->tasks);
482 sched->tasks[task->nr] = task;
483
484 if (verbose > 0)
485 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
486
487 return task;
488 }
489
490
491 static void print_task_traces(struct perf_sched *sched)
492 {
493 struct task_desc *task;
494 unsigned long i;
495
496 for (i = 0; i < sched->nr_tasks; i++) {
497 task = sched->tasks[i];
498 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
499 task->nr, task->comm, task->pid, task->nr_events);
500 }
501 }
502
503 static void add_cross_task_wakeups(struct perf_sched *sched)
504 {
505 struct task_desc *task1, *task2;
506 unsigned long i, j;
507
508 for (i = 0; i < sched->nr_tasks; i++) {
509 task1 = sched->tasks[i];
510 j = i + 1;
511 if (j == sched->nr_tasks)
512 j = 0;
513 task2 = sched->tasks[j];
514 add_sched_event_wakeup(sched, task1, 0, task2);
515 }
516 }
517
518 static void perf_sched__process_event(struct perf_sched *sched,
519 struct sched_atom *atom)
520 {
521 int ret = 0;
522
523 switch (atom->type) {
524 case SCHED_EVENT_RUN:
525 burn_nsecs(sched, atom->duration);
526 break;
527 case SCHED_EVENT_SLEEP:
528 if (atom->wait_sem)
529 ret = sem_wait(atom->wait_sem);
530 BUG_ON(ret);
531 break;
532 case SCHED_EVENT_WAKEUP:
533 if (atom->wait_sem)
534 ret = sem_post(atom->wait_sem);
535 BUG_ON(ret);
536 break;
537 case SCHED_EVENT_MIGRATION:
538 break;
539 default:
540 BUG_ON(1);
541 }
542 }
543
544 static u64 get_cpu_usage_nsec_parent(void)
545 {
546 struct rusage ru;
547 u64 sum;
548 int err;
549
550 err = getrusage(RUSAGE_SELF, &ru);
551 BUG_ON(err);
552
553 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
554 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
555
556 return sum;
557 }
558
559 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
560 {
561 struct perf_event_attr attr;
562 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
563 int fd;
564 struct rlimit limit;
565 bool need_privilege = false;
566
567 memset(&attr, 0, sizeof(attr));
568
569 attr.type = PERF_TYPE_SOFTWARE;
570 attr.config = PERF_COUNT_SW_TASK_CLOCK;
571
572 force_again:
573 fd = sys_perf_event_open(&attr, 0, -1, -1,
574 perf_event_open_cloexec_flag());
575
576 if (fd < 0) {
577 if (errno == EMFILE) {
578 if (sched->force) {
579 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
580 limit.rlim_cur += sched->nr_tasks - cur_task;
581 if (limit.rlim_cur > limit.rlim_max) {
582 limit.rlim_max = limit.rlim_cur;
583 need_privilege = true;
584 }
585 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
586 if (need_privilege && errno == EPERM)
587 strcpy(info, "Need privilege\n");
588 } else
589 goto force_again;
590 } else
591 strcpy(info, "Have a try with -f option\n");
592 }
593 pr_err("Error: sys_perf_event_open() syscall returned "
594 "with %d (%s)\n%s", fd,
595 str_error_r(errno, sbuf, sizeof(sbuf)), info);
596 exit(EXIT_FAILURE);
597 }
598 return fd;
599 }
600
601 static u64 get_cpu_usage_nsec_self(int fd)
602 {
603 u64 runtime;
604 int ret;
605
606 ret = read(fd, &runtime, sizeof(runtime));
607 BUG_ON(ret != sizeof(runtime));
608
609 return runtime;
610 }
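/*
 * Minimal usage sketch (illustration only, not part of this file;
 * measure_self_nsecs is a hypothetical helper): pair the two
 * functions above to measure a thread's own on-CPU time.
 */
#if 0
static u64 measure_self_nsecs(struct perf_sched *sched)
{
	int fd = self_open_counters(sched, 0); /* task-clock for this thread */
	u64 t0 = get_cpu_usage_nsec_self(fd);

	/* ... run the work to be measured ... */

	u64 t1 = get_cpu_usage_nsec_self(fd);
	close(fd);
	return t1 - t0;
}
#endif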
611
612 struct sched_thread_parms {
613 struct task_desc *task;
614 struct perf_sched *sched;
615 int fd;
616 };
617
618 static void *thread_func(void *ctx)
619 {
620 struct sched_thread_parms *parms = ctx;
621 struct task_desc *this_task = parms->task;
622 struct perf_sched *sched = parms->sched;
623 u64 cpu_usage_0, cpu_usage_1;
624 unsigned long i, ret;
625 char comm2[22];
626 int fd = parms->fd;
627
628 zfree(&parms);
629
630 sprintf(comm2, ":%s", this_task->comm);
631 prctl(PR_SET_NAME, comm2);
632 if (fd < 0)
633 return NULL;
634 again:
635 ret = sem_post(&this_task->ready_for_work);
636 BUG_ON(ret);
637 ret = pthread_mutex_lock(&sched->start_work_mutex);
638 BUG_ON(ret);
639 ret = pthread_mutex_unlock(&sched->start_work_mutex);
640 BUG_ON(ret);
641
642 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
643
644 for (i = 0; i < this_task->nr_events; i++) {
645 this_task->curr_event = i;
646 perf_sched__process_event(sched, this_task->atoms[i]);
647 }
648
649 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
650 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
651 ret = sem_post(&this_task->work_done_sem);
652 BUG_ON(ret);
653
654 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
655 BUG_ON(ret);
656 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
657 BUG_ON(ret);
658
659 goto again;
660 }
661
662 static void create_tasks(struct perf_sched *sched)
663 {
664 struct task_desc *task;
665 pthread_attr_t attr;
666 unsigned long i;
667 int err;
668
669 err = pthread_attr_init(&attr);
670 BUG_ON(err);
671 err = pthread_attr_setstacksize(&attr,
672 (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
673 BUG_ON(err);
674 err = pthread_mutex_lock(&sched->start_work_mutex);
675 BUG_ON(err);
676 err = pthread_mutex_lock(&sched->work_done_wait_mutex);
677 BUG_ON(err);
678 for (i = 0; i < sched->nr_tasks; i++) {
679 struct sched_thread_parms *parms = malloc(sizeof(*parms));
680 BUG_ON(parms == NULL);
681 parms->task = task = sched->tasks[i];
682 parms->sched = sched;
683 parms->fd = self_open_counters(sched, i);
684 sem_init(&task->sleep_sem, 0, 0);
685 sem_init(&task->ready_for_work, 0, 0);
686 sem_init(&task->work_done_sem, 0, 0);
687 task->curr_event = 0;
688 err = pthread_create(&task->thread, &attr, thread_func, parms);
689 BUG_ON(err);
690 }
691 }
692
693 static void wait_for_tasks(struct perf_sched *sched)
694 {
695 u64 cpu_usage_0, cpu_usage_1;
696 struct task_desc *task;
697 unsigned long i, ret;
698
699 sched->start_time = get_nsecs();
700 sched->cpu_usage = 0;
701 pthread_mutex_unlock(&sched->work_done_wait_mutex);
702
703 for (i = 0; i < sched->nr_tasks; i++) {
704 task = sched->tasks[i];
705 ret = sem_wait(&task->ready_for_work);
706 BUG_ON(ret);
707 sem_init(&task->ready_for_work, 0, 0);
708 }
709 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
710 BUG_ON(ret);
711
712 cpu_usage_0 = get_cpu_usage_nsec_parent();
713
714 pthread_mutex_unlock(&sched->start_work_mutex);
715
716 for (i = 0; i < sched->nr_tasks; i++) {
717 task = sched->tasks[i];
718 ret = sem_wait(&task->work_done_sem);
719 BUG_ON(ret);
720 sem_init(&task->work_done_sem, 0, 0);
721 sched->cpu_usage += task->cpu_usage;
722 task->cpu_usage = 0;
723 }
724
725 cpu_usage_1 = get_cpu_usage_nsec_parent();
726 if (!sched->runavg_cpu_usage)
727 sched->runavg_cpu_usage = sched->cpu_usage;
728 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
729
730 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
731 if (!sched->runavg_parent_cpu_usage)
732 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
733 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
734 sched->parent_cpu_usage)/sched->replay_repeat;
735
736 ret = pthread_mutex_lock(&sched->start_work_mutex);
737 BUG_ON(ret);
738
739 for (i = 0; i < sched->nr_tasks; i++) {
740 task = sched->tasks[i];
741 sem_init(&task->sleep_sem, 0, 0);
742 task->curr_event = 0;
743 }
744 }
745
746 static void run_one_test(struct perf_sched *sched)
747 {
748 u64 T0, T1, delta, avg_delta, fluct;
749
750 T0 = get_nsecs();
751 wait_for_tasks(sched);
752 T1 = get_nsecs();
753
754 delta = T1 - T0;
755 sched->sum_runtime += delta;
756 sched->nr_runs++;
757
758 avg_delta = sched->sum_runtime / sched->nr_runs;
759 if (delta < avg_delta)
760 fluct = avg_delta - delta;
761 else
762 fluct = delta - avg_delta;
763 sched->sum_fluct += fluct;
764 if (!sched->run_avg)
765 sched->run_avg = delta;
766 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
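/*
 * Example (hypothetical): with replay_repeat = 10, run_avg = 100 ms
 * and delta = 120 ms, the new run_avg is (100 * 9 + 120) / 10 = 102 ms,
 * i.e. a decaying average that weights the newest run by 1/replay_repeat.
 */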
767
768 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
769
770 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
771
772 printf("cpu: %0.2f / %0.2f",
773 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
774
775 #if 0
776 /*
777 * rusage statistics done by the parent, these are less
778 * accurate than the sched->sum_exec_runtime based statistics:
779 */
780 printf(" [%0.2f / %0.2f]",
781 (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
782 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
783 #endif
784
785 printf("\n");
786
787 if (sched->nr_sleep_corrections)
788 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
789 sched->nr_sleep_corrections = 0;
790 }
791
792 static void test_calibrations(struct perf_sched *sched)
793 {
794 u64 T0, T1;
795
796 T0 = get_nsecs();
797 burn_nsecs(sched, NSEC_PER_MSEC);
798 T1 = get_nsecs();
799
800 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
801
802 T0 = get_nsecs();
803 sleep_nsecs(NSEC_PER_MSEC);
804 T1 = get_nsecs();
805
806 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
807 }
808
809 static int
810 replay_wakeup_event(struct perf_sched *sched,
811 struct evsel *evsel, struct perf_sample *sample,
812 struct machine *machine __maybe_unused)
813 {
814 const char *comm = perf_evsel__strval(evsel, sample, "comm");
815 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
816 struct task_desc *waker, *wakee;
817
818 if (verbose > 0) {
819 printf("sched_wakeup event %p\n", evsel);
820
821 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
822 }
823
824 waker = register_pid(sched, sample->tid, "<unknown>");
825 wakee = register_pid(sched, pid, comm);
826
827 add_sched_event_wakeup(sched, waker, sample->time, wakee);
828 return 0;
829 }
830
831 static int replay_switch_event(struct perf_sched *sched,
832 struct evsel *evsel,
833 struct perf_sample *sample,
834 struct machine *machine __maybe_unused)
835 {
836 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
837 *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
838 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
839 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
840 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
841 struct task_desc *prev, __maybe_unused *next;
842 u64 timestamp0, timestamp = sample->time;
843 int cpu = sample->cpu;
844 s64 delta;
845
846 if (verbose > 0)
847 printf("sched_switch event %p\n", evsel);
848
849 if (cpu >= MAX_CPUS || cpu < 0)
850 return 0;
851
852 timestamp0 = sched->cpu_last_switched[cpu];
853 if (timestamp0)
854 delta = timestamp - timestamp0;
855 else
856 delta = 0;
857
858 if (delta < 0) {
859 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
860 return -1;
861 }
862
863 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
864 prev_comm, prev_pid, next_comm, next_pid, delta);
865
866 prev = register_pid(sched, prev_pid, prev_comm);
867 next = register_pid(sched, next_pid, next_comm);
868
869 sched->cpu_last_switched[cpu] = timestamp;
870
871 add_sched_event_run(sched, prev, timestamp, delta);
872 add_sched_event_sleep(sched, prev, timestamp, prev_state);
873
874 return 0;
875 }
876
877 static int replay_fork_event(struct perf_sched *sched,
878 union perf_event *event,
879 struct machine *machine)
880 {
881 struct thread *child, *parent;
882
883 child = machine__findnew_thread(machine, event->fork.pid,
884 event->fork.tid);
885 parent = machine__findnew_thread(machine, event->fork.ppid,
886 event->fork.ptid);
887
888 if (child == NULL || parent == NULL) {
889 pr_debug("thread does not exist on fork event: child %p, parent %p\n",
890 child, parent);
891 goto out_put;
892 }
893
894 if (verbose > 0) {
895 printf("fork event\n");
896 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
897 printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
898 }
899
900 register_pid(sched, parent->tid, thread__comm_str(parent));
901 register_pid(sched, child->tid, thread__comm_str(child));
902 out_put:
903 thread__put(child);
904 thread__put(parent);
905 return 0;
906 }
907
908 struct sort_dimension {
909 const char *name;
910 sort_fn_t cmp;
911 struct list_head list;
912 };
913
914 /*
915 * handle runtime stats saved per thread
916 */
917 static struct thread_runtime *thread__init_runtime(struct thread *thread)
918 {
919 struct thread_runtime *r;
920
921 r = zalloc(sizeof(struct thread_runtime));
922 if (!r)
923 return NULL;
924
925 init_stats(&r->run_stats);
926 thread__set_priv(thread, r);
927
928 return r;
929 }
930
931 static struct thread_runtime *thread__get_runtime(struct thread *thread)
932 {
933 struct thread_runtime *tr;
934
935 tr = thread__priv(thread);
936 if (tr == NULL) {
937 tr = thread__init_runtime(thread);
938 if (tr == NULL)
939 pr_debug("Failed to malloc memory for runtime data.\n");
940 }
941
942 return tr;
943 }
944
945 static int
946 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
947 {
948 struct sort_dimension *sort;
949 int ret = 0;
950
951 BUG_ON(list_empty(list));
952
953 list_for_each_entry(sort, list, list) {
954 ret = sort->cmp(l, r);
955 if (ret)
956 return ret;
957 }
958
959 return ret;
960 }
961
962 static struct work_atoms *
963 thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
964 struct list_head *sort_list)
965 {
966 struct rb_node *node = root->rb_root.rb_node;
967 struct work_atoms key = { .thread = thread };
968
969 while (node) {
970 struct work_atoms *atoms;
971 int cmp;
972
973 atoms = container_of(node, struct work_atoms, node);
974
975 cmp = thread_lat_cmp(sort_list, &key, atoms);
976 if (cmp > 0)
977 node = node->rb_left;
978 else if (cmp < 0)
979 node = node->rb_right;
980 else {
981 BUG_ON(thread != atoms->thread);
982 return atoms;
983 }
984 }
985 return NULL;
986 }
987
988 static void
989 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
990 struct list_head *sort_list)
991 {
992 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
993 bool leftmost = true;
994
995 while (*new) {
996 struct work_atoms *this;
997 int cmp;
998
999 this = container_of(*new, struct work_atoms, node);
1000 parent = *new;
1001
1002 cmp = thread_lat_cmp(sort_list, data, this);
1003
1004 if (cmp > 0)
1005 new = &((*new)->rb_left);
1006 else {
1007 new = &((*new)->rb_right);
1008 leftmost = false;
1009 }
1010 }
1011
1012 rb_link_node(&data->node, parent, new);
1013 rb_insert_color_cached(&data->node, root, leftmost);
1014 }
1015
1016 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
1017 {
1018 struct work_atoms *atoms = zalloc(sizeof(*atoms));
1019 if (!atoms) {
1020 pr_err("No memory at %s\n", __func__);
1021 return -1;
1022 }
1023
1024 atoms->thread = thread__get(thread);
1025 INIT_LIST_HEAD(&atoms->work_list);
1026 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
1027 return 0;
1028 }
1029
1030 static char sched_out_state(u64 prev_state)
1031 {
1032 const char *str = TASK_STATE_TO_CHAR_STR;
1033
1034 return str[prev_state];
1035 }
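/*
 * Example: prev_state 0 (TASK_RUNNING) maps to 'R', 1
 * (TASK_INTERRUPTIBLE) to 'S', 2 (TASK_UNINTERRUPTIBLE) to 'D'.
 * Note the direct indexing only covers the low state values.
 */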
1036
1037 static int
1038 add_sched_out_event(struct work_atoms *atoms,
1039 char run_state,
1040 u64 timestamp)
1041 {
1042 struct work_atom *atom = zalloc(sizeof(*atom));
1043 if (!atom) {
1044 pr_err("Non memory at %s", __func__);
1045 return -1;
1046 }
1047
1048 atom->sched_out_time = timestamp;
1049
1050 if (run_state == 'R') {
1051 atom->state = THREAD_WAIT_CPU;
1052 atom->wake_up_time = atom->sched_out_time;
1053 }
1054
1055 list_add_tail(&atom->list, &atoms->work_list);
1056 return 0;
1057 }
1058
1059 static void
1060 add_runtime_event(struct work_atoms *atoms, u64 delta,
1061 u64 timestamp __maybe_unused)
1062 {
1063 struct work_atom *atom;
1064
1065 BUG_ON(list_empty(&atoms->work_list));
1066
1067 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1068
1069 atom->runtime += delta;
1070 atoms->total_runtime += delta;
1071 }
1072
1073 static void
1074 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1075 {
1076 struct work_atom *atom;
1077 u64 delta;
1078
1079 if (list_empty(&atoms->work_list))
1080 return;
1081
1082 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1083
1084 if (atom->state != THREAD_WAIT_CPU)
1085 return;
1086
1087 if (timestamp < atom->wake_up_time) {
1088 atom->state = THREAD_IGNORE;
1089 return;
1090 }
1091
1092 atom->state = THREAD_SCHED_IN;
1093 atom->sched_in_time = timestamp;
1094
1095 delta = atom->sched_in_time - atom->wake_up_time;
1096 atoms->total_lat += delta;
1097 if (delta > atoms->max_lat) {
1098 atoms->max_lat = delta;
1099 atoms->max_lat_at = timestamp;
1100 }
1101 atoms->nb_atoms++;
1102 }
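/*
 * Worked example (hypothetical timestamps): a task woken at t = 1000
 * and scheduled in at t = 1400 contributes delta = 400 nsecs of wakeup
 * latency to total_lat, and becomes the new max_lat if 400 exceeds the
 * largest latency seen so far for this thread.
 */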
1103
1104 static int latency_switch_event(struct perf_sched *sched,
1105 struct evsel *evsel,
1106 struct perf_sample *sample,
1107 struct machine *machine)
1108 {
1109 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1110 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1111 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
1112 struct work_atoms *out_events, *in_events;
1113 struct thread *sched_out, *sched_in;
1114 u64 timestamp0, timestamp = sample->time;
1115 int cpu = sample->cpu, err = -1;
1116 s64 delta;
1117
1118 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1119
1120 timestamp0 = sched->cpu_last_switched[cpu];
1121 sched->cpu_last_switched[cpu] = timestamp;
1122 if (timestamp0)
1123 delta = timestamp - timestamp0;
1124 else
1125 delta = 0;
1126
1127 if (delta < 0) {
1128 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1129 return -1;
1130 }
1131
1132 sched_out = machine__findnew_thread(machine, -1, prev_pid);
1133 sched_in = machine__findnew_thread(machine, -1, next_pid);
1134 if (sched_out == NULL || sched_in == NULL)
1135 goto out_put;
1136
1137 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1138 if (!out_events) {
1139 if (thread_atoms_insert(sched, sched_out))
1140 goto out_put;
1141 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1142 if (!out_events) {
1143 pr_err("out-event: Internal tree error");
1144 goto out_put;
1145 }
1146 }
1147 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
1148 return -1;
1149
1150 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1151 if (!in_events) {
1152 if (thread_atoms_insert(sched, sched_in))
1153 goto out_put;
1154 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1155 if (!in_events) {
1156 pr_err("in-event: Internal tree error");
1157 goto out_put;
1158 }
1159 /*
1160 * Task came in that we have not heard about yet,
1161 * add an initial atom in runnable state:
1162 */
1163 if (add_sched_out_event(in_events, 'R', timestamp))
1164 goto out_put;
1165 }
1166 add_sched_in_event(in_events, timestamp);
1167 err = 0;
1168 out_put:
1169 thread__put(sched_out);
1170 thread__put(sched_in);
1171 return err;
1172 }
1173
1174 static int latency_runtime_event(struct perf_sched *sched,
1175 struct evsel *evsel,
1176 struct perf_sample *sample,
1177 struct machine *machine)
1178 {
1179 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1180 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1181 struct thread *thread = machine__findnew_thread(machine, -1, pid);
1182 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1183 u64 timestamp = sample->time;
1184 int cpu = sample->cpu, err = -1;
1185
1186 if (thread == NULL)
1187 return -1;
1188
1189 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1190 if (!atoms) {
1191 if (thread_atoms_insert(sched, thread))
1192 goto out_put;
1193 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1194 if (!atoms) {
1195 pr_err("in-event: Internal tree error");
1196 goto out_put;
1197 }
1198 if (add_sched_out_event(atoms, 'R', timestamp))
1199 goto out_put;
1200 }
1201
1202 add_runtime_event(atoms, runtime, timestamp);
1203 err = 0;
1204 out_put:
1205 thread__put(thread);
1206 return err;
1207 }
1208
1209 static int latency_wakeup_event(struct perf_sched *sched,
1210 struct evsel *evsel,
1211 struct perf_sample *sample,
1212 struct machine *machine)
1213 {
1214 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1215 struct work_atoms *atoms;
1216 struct work_atom *atom;
1217 struct thread *wakee;
1218 u64 timestamp = sample->time;
1219 int err = -1;
1220
1221 wakee = machine__findnew_thread(machine, -1, pid);
1222 if (wakee == NULL)
1223 return -1;
1224 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1225 if (!atoms) {
1226 if (thread_atoms_insert(sched, wakee))
1227 goto out_put;
1228 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1229 if (!atoms) {
1230 pr_err("wakeup-event: Internal tree error");
1231 goto out_put;
1232 }
1233 if (add_sched_out_event(atoms, 'S', timestamp))
1234 goto out_put;
1235 }
1236
1237 BUG_ON(list_empty(&atoms->work_list));
1238
1239 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1240
1241 /*
1242 * A wakeup event is not guaranteed to mean the task left the
1243 * run queue: it can also fire while the task is still on the
1244 * run queue and merely set ->state back to TASK_RUNNING. In
1245 * that case we must not set ->wake_up_time, since the task
1246 * never actually slept.
1247 *
1248 * You WILL be missing events if you've recorded only
1249 * one CPU, or are looking at only one, so don't skip
1250 * in this case.
1251 */
1252 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1253 goto out_ok;
1254
1255 sched->nr_timestamps++;
1256 if (atom->sched_out_time > timestamp) {
1257 sched->nr_unordered_timestamps++;
1258 goto out_ok;
1259 }
1260
1261 atom->state = THREAD_WAIT_CPU;
1262 atom->wake_up_time = timestamp;
1263 out_ok:
1264 err = 0;
1265 out_put:
1266 thread__put(wakee);
1267 return err;
1268 }
1269
1270 static int latency_migrate_task_event(struct perf_sched *sched,
1271 struct evsel *evsel,
1272 struct perf_sample *sample,
1273 struct machine *machine)
1274 {
1275 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1276 u64 timestamp = sample->time;
1277 struct work_atoms *atoms;
1278 struct work_atom *atom;
1279 struct thread *migrant;
1280 int err = -1;
1281
1282 /*
1283 * Only need to worry about migration when profiling one CPU.
1284 */
1285 if (sched->profile_cpu == -1)
1286 return 0;
1287
1288 migrant = machine__findnew_thread(machine, -1, pid);
1289 if (migrant == NULL)
1290 return -1;
1291 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1292 if (!atoms) {
1293 if (thread_atoms_insert(sched, migrant))
1294 goto out_put;
1295 register_pid(sched, migrant->tid, thread__comm_str(migrant));
1296 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1297 if (!atoms) {
1298 pr_err("migration-event: Internal tree error");
1299 goto out_put;
1300 }
1301 if (add_sched_out_event(atoms, 'R', timestamp))
1302 goto out_put;
1303 }
1304
1305 BUG_ON(list_empty(&atoms->work_list));
1306
1307 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1308 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1309
1310 sched->nr_timestamps++;
1311
1312 if (atom->sched_out_time > timestamp)
1313 sched->nr_unordered_timestamps++;
1314 err = 0;
1315 out_put:
1316 thread__put(migrant);
1317 return err;
1318 }
1319
1320 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1321 {
1322 int i;
1323 int ret;
1324 u64 avg;
1325 char max_lat_at[32];
1326
1327 if (!work_list->nb_atoms)
1328 return;
1329 /*
1330 * Ignore idle threads:
1331 */
1332 if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1333 return;
1334
1335 sched->all_runtime += work_list->total_runtime;
1336 sched->all_count += work_list->nb_atoms;
1337
1338 if (work_list->num_merged > 1)
1339 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1340 else
1341 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1342
1343 for (i = 0; i < 24 - ret; i++)
1344 printf(" ");
1345
1346 avg = work_list->total_lat / work_list->nb_atoms;
1347 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
1348
1349 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
1350 (double)work_list->total_runtime / NSEC_PER_MSEC,
1351 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1352 (double)work_list->max_lat / NSEC_PER_MSEC,
1353 max_lat_at);
1354 }
1355
1356 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1357 {
1358 if (l->thread == r->thread)
1359 return 0;
1360 if (l->thread->tid < r->thread->tid)
1361 return -1;
1362 if (l->thread->tid > r->thread->tid)
1363 return 1;
1364 return (int)(l->thread - r->thread);
1365 }
1366
1367 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1368 {
1369 u64 avgl, avgr;
1370
1371 if (!l->nb_atoms)
1372 return -1;
1373
1374 if (!r->nb_atoms)
1375 return 1;
1376
1377 avgl = l->total_lat / l->nb_atoms;
1378 avgr = r->total_lat / r->nb_atoms;
1379
1380 if (avgl < avgr)
1381 return -1;
1382 if (avgl > avgr)
1383 return 1;
1384
1385 return 0;
1386 }
1387
1388 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1389 {
1390 if (l->max_lat < r->max_lat)
1391 return -1;
1392 if (l->max_lat > r->max_lat)
1393 return 1;
1394
1395 return 0;
1396 }
1397
1398 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1399 {
1400 if (l->nb_atoms < r->nb_atoms)
1401 return -1;
1402 if (l->nb_atoms > r->nb_atoms)
1403 return 1;
1404
1405 return 0;
1406 }
1407
1408 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1409 {
1410 if (l->total_runtime < r->total_runtime)
1411 return -1;
1412 if (l->total_runtime > r->total_runtime)
1413 return 1;
1414
1415 return 0;
1416 }
1417
1418 static int sort_dimension__add(const char *tok, struct list_head *list)
1419 {
1420 size_t i;
1421 static struct sort_dimension avg_sort_dimension = {
1422 .name = "avg",
1423 .cmp = avg_cmp,
1424 };
1425 static struct sort_dimension max_sort_dimension = {
1426 .name = "max",
1427 .cmp = max_cmp,
1428 };
1429 static struct sort_dimension pid_sort_dimension = {
1430 .name = "pid",
1431 .cmp = pid_cmp,
1432 };
1433 static struct sort_dimension runtime_sort_dimension = {
1434 .name = "runtime",
1435 .cmp = runtime_cmp,
1436 };
1437 static struct sort_dimension switch_sort_dimension = {
1438 .name = "switch",
1439 .cmp = switch_cmp,
1440 };
1441 struct sort_dimension *available_sorts[] = {
1442 &pid_sort_dimension,
1443 &avg_sort_dimension,
1444 &max_sort_dimension,
1445 &switch_sort_dimension,
1446 &runtime_sort_dimension,
1447 };
1448
1449 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1450 if (!strcmp(available_sorts[i]->name, tok)) {
1451 list_add_tail(&available_sorts[i]->list, list);
1452
1453 return 0;
1454 }
1455 }
1456
1457 return -1;
1458 }
1459
1460 static void perf_sched__sort_lat(struct perf_sched *sched)
1461 {
1462 struct rb_node *node;
1463 struct rb_root_cached *root = &sched->atom_root;
1464 again:
1465 for (;;) {
1466 struct work_atoms *data;
1467 node = rb_first_cached(root);
1468 if (!node)
1469 break;
1470
1471 rb_erase_cached(node, root);
1472 data = rb_entry(node, struct work_atoms, node);
1473 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1474 }
1475 if (root == &sched->atom_root) {
1476 root = &sched->merged_atom_root;
1477 goto again;
1478 }
1479 }
1480
1481 static int process_sched_wakeup_event(struct perf_tool *tool,
1482 struct evsel *evsel,
1483 struct perf_sample *sample,
1484 struct machine *machine)
1485 {
1486 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1487
1488 if (sched->tp_handler->wakeup_event)
1489 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1490
1491 return 0;
1492 }
1493
1494 union map_priv {
1495 void *ptr;
1496 bool color;
1497 };
1498
1499 static bool thread__has_color(struct thread *thread)
1500 {
1501 union map_priv priv = {
1502 .ptr = thread__priv(thread),
1503 };
1504
1505 return priv.color;
1506 }
1507
1508 static struct thread*
1509 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1510 {
1511 struct thread *thread = machine__findnew_thread(machine, pid, tid);
1512 union map_priv priv = {
1513 .color = false,
1514 };
1515
1516 if (!sched->map.color_pids || !thread || thread__priv(thread))
1517 return thread;
1518
1519 if (thread_map__has(sched->map.color_pids, tid))
1520 priv.color = true;
1521
1522 thread__set_priv(thread, priv.ptr);
1523 return thread;
1524 }
1525
1526 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1527 struct perf_sample *sample, struct machine *machine)
1528 {
1529 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1530 struct thread *sched_in;
1531 struct thread_runtime *tr;
1532 int new_shortname;
1533 u64 timestamp0, timestamp = sample->time;
1534 s64 delta;
1535 int i, this_cpu = sample->cpu;
1536 int cpus_nr;
1537 bool new_cpu = false;
1538 const char *color = PERF_COLOR_NORMAL;
1539 char stimestamp[32];
1540
1541 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1542
1543 if (this_cpu > sched->max_cpu)
1544 sched->max_cpu = this_cpu;
1545
1546 if (sched->map.comp) {
1547 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1548 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
1549 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1550 new_cpu = true;
1551 }
1552 } else
1553 cpus_nr = sched->max_cpu;
1554
1555 timestamp0 = sched->cpu_last_switched[this_cpu];
1556 sched->cpu_last_switched[this_cpu] = timestamp;
1557 if (timestamp0)
1558 delta = timestamp - timestamp0;
1559 else
1560 delta = 0;
1561
1562 if (delta < 0) {
1563 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1564 return -1;
1565 }
1566
1567 sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1568 if (sched_in == NULL)
1569 return -1;
1570
1571 tr = thread__get_runtime(sched_in);
1572 if (tr == NULL) {
1573 thread__put(sched_in);
1574 return -1;
1575 }
1576
1577 sched->curr_thread[this_cpu] = thread__get(sched_in);
1578
1579 printf(" ");
1580
1581 new_shortname = 0;
1582 if (!tr->shortname[0]) {
1583 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1584 /*
1585 * Don't allocate a letter-number for swapper:0
1586 * as a shortname. Instead, we use '.' for it.
1587 */
1588 tr->shortname[0] = '.';
1589 tr->shortname[1] = ' ';
1590 } else {
1591 tr->shortname[0] = sched->next_shortname1;
1592 tr->shortname[1] = sched->next_shortname2;
1593
1594 if (sched->next_shortname1 < 'Z') {
1595 sched->next_shortname1++;
1596 } else {
1597 sched->next_shortname1 = 'A';
1598 if (sched->next_shortname2 < '9')
1599 sched->next_shortname2++;
1600 else
1601 sched->next_shortname2 = '0';
1602 }
1603 }
1604 new_shortname = 1;
1605 }
1606
1607 for (i = 0; i < cpus_nr; i++) {
1608 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
1609 struct thread *curr_thread = sched->curr_thread[cpu];
1610 struct thread_runtime *curr_tr;
1611 const char *pid_color = color;
1612 const char *cpu_color = color;
1613
1614 if (curr_thread && thread__has_color(curr_thread))
1615 pid_color = COLOR_PIDS;
1616
1617 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
1618 continue;
1619
1620 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
1621 cpu_color = COLOR_CPUS;
1622
1623 if (cpu != this_cpu)
1624 color_fprintf(stdout, color, " ");
1625 else
1626 color_fprintf(stdout, cpu_color, "*");
1627
1628 if (sched->curr_thread[cpu]) {
1629 curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
1630 if (curr_tr == NULL) {
1631 thread__put(sched_in);
1632 return -1;
1633 }
1634 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1635 } else
1636 color_fprintf(stdout, color, " ");
1637 }
1638
1639 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
1640 goto out;
1641
1642 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1643 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1644 if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
1645 const char *pid_color = color;
1646
1647 if (thread__has_color(sched_in))
1648 pid_color = COLOR_PIDS;
1649
1650 color_fprintf(stdout, pid_color, "%s => %s:%d",
1651 tr->shortname, thread__comm_str(sched_in), sched_in->tid);
1652 tr->comm_changed = false;
1653 }
1654
1655 if (sched->map.comp && new_cpu)
1656 color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1657
1658 out:
1659 color_fprintf(stdout, color, "\n");
1660
1661 thread__put(sched_in);
1662
1663 return 0;
1664 }
1665
1666 static int process_sched_switch_event(struct perf_tool *tool,
1667 struct evsel *evsel,
1668 struct perf_sample *sample,
1669 struct machine *machine)
1670 {
1671 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1672 int this_cpu = sample->cpu, err = 0;
1673 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1674 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1675
1676 if (sched->curr_pid[this_cpu] != (u32)-1) {
1677 /*
1678 * Are we trying to switch away a PID that is
1679 * not current?
1680 */
1681 if (sched->curr_pid[this_cpu] != prev_pid)
1682 sched->nr_context_switch_bugs++;
1683 }
1684
1685 if (sched->tp_handler->switch_event)
1686 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1687
1688 sched->curr_pid[this_cpu] = next_pid;
1689 return err;
1690 }
1691
1692 static int process_sched_runtime_event(struct perf_tool *tool,
1693 struct evsel *evsel,
1694 struct perf_sample *sample,
1695 struct machine *machine)
1696 {
1697 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1698
1699 if (sched->tp_handler->runtime_event)
1700 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1701
1702 return 0;
1703 }
1704
1705 static int perf_sched__process_fork_event(struct perf_tool *tool,
1706 union perf_event *event,
1707 struct perf_sample *sample,
1708 struct machine *machine)
1709 {
1710 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1711
1712 /* run the fork event through the perf machinery */
1713 perf_event__process_fork(tool, event, sample, machine);
1714
1715 /* and then run additional processing needed for this command */
1716 if (sched->tp_handler->fork_event)
1717 return sched->tp_handler->fork_event(sched, event, machine);
1718
1719 return 0;
1720 }
1721
1722 static int process_sched_migrate_task_event(struct perf_tool *tool,
1723 struct evsel *evsel,
1724 struct perf_sample *sample,
1725 struct machine *machine)
1726 {
1727 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1728
1729 if (sched->tp_handler->migrate_task_event)
1730 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1731
1732 return 0;
1733 }
1734
1735 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1736 struct evsel *evsel,
1737 struct perf_sample *sample,
1738 struct machine *machine);
1739
1740 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1741 union perf_event *event __maybe_unused,
1742 struct perf_sample *sample,
1743 struct evsel *evsel,
1744 struct machine *machine)
1745 {
1746 int err = 0;
1747
1748 if (evsel->handler != NULL) {
1749 tracepoint_handler f = evsel->handler;
1750 err = f(tool, evsel, sample, machine);
1751 }
1752
1753 return err;
1754 }
1755
1756 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
1757 union perf_event *event,
1758 struct perf_sample *sample,
1759 struct machine *machine)
1760 {
1761 struct thread *thread;
1762 struct thread_runtime *tr;
1763 int err;
1764
1765 err = perf_event__process_comm(tool, event, sample, machine);
1766 if (err)
1767 return err;
1768
1769 thread = machine__find_thread(machine, sample->pid, sample->tid);
1770 if (!thread) {
1771 pr_err("Internal error: can't find thread\n");
1772 return -1;
1773 }
1774
1775 tr = thread__get_runtime(thread);
1776 if (tr == NULL) {
1777 thread__put(thread);
1778 return -1;
1779 }
1780
1781 tr->comm_changed = true;
1782 thread__put(thread);
1783
1784 return 0;
1785 }
1786
1787 static int perf_sched__read_events(struct perf_sched *sched)
1788 {
1789 const struct evsel_str_handler handlers[] = {
1790 { "sched:sched_switch", process_sched_switch_event, },
1791 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1792 { "sched:sched_wakeup", process_sched_wakeup_event, },
1793 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1794 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1795 };
1796 struct perf_session *session;
1797 struct perf_data data = {
1798 .path = input_name,
1799 .mode = PERF_DATA_MODE_READ,
1800 .force = sched->force,
1801 };
1802 int rc = -1;
1803
1804 session = perf_session__new(&data, false, &sched->tool);
1805 if (IS_ERR(session)) {
1806 pr_debug("Error creating perf session");
1807 return PTR_ERR(session);
1808 }
1809
1810 symbol__init(&session->header.env);
1811
1812 if (perf_session__set_tracepoints_handlers(session, handlers))
1813 goto out_delete;
1814
1815 if (perf_session__has_traces(session, "record -R")) {
1816 int err = perf_session__process_events(session);
1817 if (err) {
1818 pr_err("Failed to process events, error %d", err);
1819 goto out_delete;
1820 }
1821
1822 sched->nr_events = session->evlist->stats.nr_events[0];
1823 sched->nr_lost_events = session->evlist->stats.total_lost;
1824 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1825 }
1826
1827 rc = 0;
1828 out_delete:
1829 perf_session__delete(session);
1830 return rc;
1831 }
1832
1833 /*
1834 * scheduling times are printed as msec.usec
1835 */
1836 static inline void print_sched_time(unsigned long long nsecs, int width)
1837 {
1838 unsigned long msecs;
1839 unsigned long usecs;
1840
1841 msecs = nsecs / NSEC_PER_MSEC;
1842 nsecs -= msecs * NSEC_PER_MSEC;
1843 usecs = nsecs / NSEC_PER_USEC;
1844 printf("%*lu.%03lu ", width, msecs, usecs);
1845 }
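/*
 * Example: print_sched_time(12345678, 6) prints "    12.345 ",
 * i.e. 12 msec 345 usec with the msec part right-aligned in a
 * 6-character field.
 */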
1846
1847 /*
1848 * returns runtime data for event, allocating memory for it the
1849 * first time it is used.
1850 */
1851 static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
1852 {
1853 struct evsel_runtime *r = evsel->priv;
1854
1855 if (r == NULL) {
1856 r = zalloc(sizeof(struct evsel_runtime));
1857 evsel->priv = r;
1858 }
1859
1860 return r;
1861 }
1862
1863 /*
1864 * save last time event was seen per cpu
1865 */
1866 static void perf_evsel__save_time(struct evsel *evsel,
1867 u64 timestamp, u32 cpu)
1868 {
1869 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1870
1871 if (r == NULL)
1872 return;
1873
1874 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1875 int i, n = __roundup_pow_of_two(cpu+1);
1876 void *p = r->last_time;
1877
1878 p = realloc(r->last_time, n * sizeof(u64));
1879 if (!p)
1880 return;
1881
1882 r->last_time = p;
1883 for (i = r->ncpu; i < n; ++i)
1884 r->last_time[i] = (u64) 0;
1885
1886 r->ncpu = n;
1887 }
1888
1889 r->last_time[cpu] = timestamp;
1890 }
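/*
 * Example of the growth policy above: the first event seen on cpu 5
 * allocates __roundup_pow_of_two(6) = 8 slots, so cpus 0-7 all get a
 * last_time slot and later events on them need no realloc.
 */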
1891
1892 /* returns last time this event was seen on the given cpu */
1893 static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
1894 {
1895 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1896
1897 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1898 return 0;
1899
1900 return r->last_time[cpu];
1901 }
1902
1903 static int comm_width = 30;
1904
1905 static char *timehist_get_commstr(struct thread *thread)
1906 {
1907 static char str[32];
1908 const char *comm = thread__comm_str(thread);
1909 pid_t tid = thread->tid;
1910 pid_t pid = thread->pid_;
1911 int n;
1912
1913 if (pid == 0)
1914 n = scnprintf(str, sizeof(str), "%s", comm);
1915
1916 else if (tid != pid)
1917 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1918
1919 else
1920 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
1921
1922 if (n > comm_width)
1923 comm_width = n;
1924
1925 return str;
1926 }
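/*
 * Example outputs: "<idle>" for pid 0 (no tid suffix),
 * "firefox[1234]" for a single-threaded process, and
 * "firefox[1235/1234]" for thread 1235 of process 1234
 * (comm and pids hypothetical).
 */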
1927
1928 static void timehist_header(struct perf_sched *sched)
1929 {
1930 u32 ncpus = sched->max_cpu + 1;
1931 u32 i, j;
1932
1933 printf("%15s %6s ", "time", "cpu");
1934
1935 if (sched->show_cpu_visual) {
1936 printf(" ");
1937 for (i = 0, j = 0; i < ncpus; ++i) {
1938 printf("%x", j++);
1939 if (j > 15)
1940 j = 0;
1941 }
1942 printf(" ");
1943 }
1944
1945 printf(" %-*s %9s %9s %9s", comm_width,
1946 "task name", "wait time", "sch delay", "run time");
1947
1948 if (sched->show_state)
1949 printf(" %s", "state");
1950
1951 printf("\n");
1952
1953 /*
1954 * units row
1955 */
1956 printf("%15s %-6s ", "", "");
1957
1958 if (sched->show_cpu_visual)
1959 printf(" %*s ", ncpus, "");
1960
1961 printf(" %-*s %9s %9s %9s", comm_width,
1962 "[tid/pid]", "(msec)", "(msec)", "(msec)");
1963
1964 if (sched->show_state)
1965 printf(" %5s", "");
1966
1967 printf("\n");
1968
1969 /*
1970 * separator
1971 */
1972 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1973
1974 if (sched->show_cpu_visual)
1975 printf(" %.*s ", ncpus, graph_dotted_line);
1976
1977 printf(" %.*s %.9s %.9s %.9s", comm_width,
1978 graph_dotted_line, graph_dotted_line, graph_dotted_line,
1979 graph_dotted_line);
1980
1981 if (sched->show_state)
1982 printf(" %.5s", graph_dotted_line);
1983
1984 printf("\n");
1985 }
1986
1987 static char task_state_char(struct thread *thread, int state)
1988 {
1989 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1990 unsigned bit = state ? ffs(state) : 0;
1991
1992 /* 'I' for idle */
1993 if (thread->tid == 0)
1994 return 'I';
1995
1996 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1997 }
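/*
 * Example: state 2 (TASK_UNINTERRUPTIBLE) gives ffs(2) == 2 and
 * TASK_STATE_TO_CHAR_STR[2] == 'D'; state 0 on a non-idle thread
 * gives bit 0, i.e. 'R'.
 */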
1998
1999 static void timehist_print_sample(struct perf_sched *sched,
2000 struct evsel *evsel,
2001 struct perf_sample *sample,
2002 struct addr_location *al,
2003 struct thread *thread,
2004 u64 t, int state)
2005 {
2006 struct thread_runtime *tr = thread__priv(thread);
2007 const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
2008 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
2009 u32 max_cpus = sched->max_cpu + 1;
2010 char tstr[64];
2011 char nstr[30];
2012 u64 wait_time;
2013
2014 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2015 return;
2016
2017 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2018 printf("%15s [%04d] ", tstr, sample->cpu);
2019
2020 if (sched->show_cpu_visual) {
2021 u32 i;
2022 char c;
2023
2024 printf(" ");
2025 for (i = 0; i < max_cpus; ++i) {
2026 /* flag idle times with 'i'; others are sched events */
2027 if (i == sample->cpu)
2028 c = (thread->tid == 0) ? 'i' : 's';
2029 else
2030 c = ' ';
2031 printf("%c", c);
2032 }
2033 printf(" ");
2034 }
2035
2036 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2037
2038 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2039 print_sched_time(wait_time, 6);
2040
2041 print_sched_time(tr->dt_delay, 6);
2042 print_sched_time(tr->dt_run, 6);
2043
2044 if (sched->show_state)
2045 printf(" %5c ", task_state_char(thread, state));
2046
2047 if (sched->show_next) {
2048 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2049 printf(" %-*s", comm_width, nstr);
2050 }
2051
2052 if (sched->show_wakeups && !sched->show_next)
2053 printf(" %-*s", comm_width, "");
2054
2055 if (thread->tid == 0)
2056 goto out;
2057
2058 if (sched->show_callchain)
2059 printf(" ");
2060
2061 sample__fprintf_sym(sample, al, 0,
2062 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2063 EVSEL__PRINT_CALLCHAIN_ARROW |
2064 EVSEL__PRINT_SKIP_IGNORED,
2065 &callchain_cursor, symbol_conf.bt_stop_list, stdout);
2066
2067 out:
2068 printf("\n");
2069 }
2070
2071 /*
2072 * Explanation of delta-time stats:
2073 *
2074 * t = time of current schedule out event
2075 * tprev = time of previous sched out event
2076 * also time of schedule-in event for current task
2077 * last_time = time of last sched change event for current task
2078 * (i.e, time process was last scheduled out)
2079 * ready_to_run = time of wakeup for current task
2080 *
2081 * -----|------------|------------|------------|------
2082 * last ready tprev t
2083 * time to run
2084 *
2085 * |-------- dt_wait --------|
2086 * |- dt_delay -|-- dt_run --|
2087 *
2088 * dt_run = run time of current task
2089 * dt_wait = time between the last sched-out event for the task and tprev,
2090 * i.e. the time the task spent off the cpu
2091 * dt_delay = time between wakeup and schedule-in of task
2092 */
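/*
 * A concrete, hypothetical trace in the spirit of the diagram above:
 * last_time = 100, ready_to_run = 200, tprev = 300, t = 400. Then
 * timehist_update_runtime_stats() below computes
 *
 *   dt_run   = t - tprev            = 100
 *   dt_delay = tprev - ready_to_run = 100
 *   dt_wait  = tprev - last_time    = 200
 *
 * and dt_wait is accounted as dt_preempt, dt_iowait or dt_sleep
 * depending on whether last_state was TASK_RUNNING,
 * TASK_UNINTERRUPTIBLE, or anything else.
 */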
2093
2094 static void timehist_update_runtime_stats(struct thread_runtime *r,
2095 u64 t, u64 tprev)
2096 {
2097 r->dt_delay = 0;
2098 r->dt_sleep = 0;
2099 r->dt_iowait = 0;
2100 r->dt_preempt = 0;
2101 r->dt_run = 0;
2102
2103 if (tprev) {
2104 r->dt_run = t - tprev;
2105 if (r->ready_to_run) {
2106 if (r->ready_to_run > tprev)
2107 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2108 else
2109 r->dt_delay = tprev - r->ready_to_run;
2110 }
2111
2112 if (r->last_time > tprev)
2113 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2114 else if (r->last_time) {
2115 u64 dt_wait = tprev - r->last_time;
2116
2117 if (r->last_state == TASK_RUNNING)
2118 r->dt_preempt = dt_wait;
2119 else if (r->last_state == TASK_UNINTERRUPTIBLE)
2120 r->dt_iowait = dt_wait;
2121 else
2122 r->dt_sleep = dt_wait;
2123 }
2124 }
2125
2126 update_stats(&r->run_stats, r->dt_run);
2127
2128 r->total_run_time += r->dt_run;
2129 r->total_delay_time += r->dt_delay;
2130 r->total_sleep_time += r->dt_sleep;
2131 r->total_iowait_time += r->dt_iowait;
2132 r->total_preempt_time += r->dt_preempt;
2133 }
2134
2135 static bool is_idle_sample(struct perf_sample *sample,
2136 struct evsel *evsel)
2137 {
2138 /* pid 0 == swapper == idle task */
2139 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
2140 return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
2141
2142 return sample->pid == 0;
2143 }
2144
2145 static void save_task_callchain(struct perf_sched *sched,
2146 struct perf_sample *sample,
2147 struct evsel *evsel,
2148 struct machine *machine)
2149 {
2150 struct callchain_cursor *cursor = &callchain_cursor;
2151 struct thread *thread;
2152
2153 /* want main thread for process - has maps */
2154 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2155 if (thread == NULL) {
2156 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2157 return;
2158 }
2159
2160 if (!sched->show_callchain || sample->callchain == NULL)
2161 return;
2162
2163 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2164 NULL, NULL, sched->max_stack + 2) != 0) {
2165 if (verbose > 0)
2166 pr_err("Failed to resolve callchain. Skipping\n");
2167
2168 return;
2169 }
2170
2171 callchain_cursor_commit(cursor);
2172
2173 while (true) {
2174 struct callchain_cursor_node *node;
2175 struct symbol *sym;
2176
2177 node = callchain_cursor_current(cursor);
2178 if (node == NULL)
2179 break;
2180
2181 sym = node->ms.sym;
2182 if (sym) {
2183 if (!strcmp(sym->name, "schedule") ||
2184 !strcmp(sym->name, "__schedule") ||
2185 !strcmp(sym->name, "preempt_schedule"))
2186 sym->ignore = 1;
2187 }
2188
2189 callchain_cursor_advance(cursor);
2190 }
2191 }
2192
2193 static int init_idle_thread(struct thread *thread)
2194 {
2195 struct idle_thread_runtime *itr;
2196
2197 thread__set_comm(thread, idle_comm, 0);
2198
2199 itr = zalloc(sizeof(*itr));
2200 if (itr == NULL)
2201 return -ENOMEM;
2202
2203 init_stats(&itr->tr.run_stats);
2204 callchain_init(&itr->callchain);
2205 callchain_cursor_reset(&itr->cursor);
2206 thread__set_priv(thread, itr);
2207
2208 return 0;
2209 }
2210
2211 /*
2212 * Track idle stats per cpu by maintaining a local thread
2213 * struct for the idle task on each cpu.
2214 */
2215 static int init_idle_threads(int ncpu)
2216 {
2217 int i, ret;
2218
2219 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2220 if (!idle_threads)
2221 return -ENOMEM;
2222
2223 idle_max_cpu = ncpu;
2224
2225 /* allocate and initialize a thread struct for each cpu */
2226 for (i = 0; i < ncpu; ++i) {
2227 idle_threads[i] = thread__new(0, 0);
2228 if (idle_threads[i] == NULL)
2229 return -ENOMEM;
2230
2231 ret = init_idle_thread(idle_threads[i]);
2232 if (ret < 0)
2233 return ret;
2234 }
2235
2236 return 0;
2237 }
2238
2239 static void free_idle_threads(void)
2240 {
2241 int i;
2242
2243 if (idle_threads == NULL)
2244 return;
2245
2246 for (i = 0; i < idle_max_cpu; ++i) {
2247 if ((idle_threads[i]))
2248 thread__delete(idle_threads[i]);
2249 }
2250
2251 free(idle_threads);
2252 }
2253
2254 static struct thread *get_idle_thread(int cpu)
2255 {
2256 /*
2257 * expand/allocate array of pointers to local thread
2258 * structs if needed
2259 */
2260 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2261 int i, j = __roundup_pow_of_two(cpu+1);
2262 void *p;
2263
2264 p = realloc(idle_threads, j * sizeof(struct thread *));
2265 if (!p)
2266 return NULL;
2267
2268 idle_threads = (struct thread **) p;
2269 for (i = idle_max_cpu; i < j; ++i)
2270 idle_threads[i] = NULL;
2271
2272 idle_max_cpu = j;
2273 }
2274
2275 /* allocate a new thread struct if needed */
2276 if (idle_threads[cpu] == NULL) {
2277 idle_threads[cpu] = thread__new(0, 0);
2278
2279 if (idle_threads[cpu]) {
2280 if (init_idle_thread(idle_threads[cpu]) < 0)
2281 return NULL;
2282 }
2283 }
2284
2285 return idle_threads[cpu];
2286 }
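/*
 * Growth example (editor's illustration): starting from
 * idle_threads == NULL and idle_max_cpu == 0, a sample on cpu 5 rounds
 * 5 + 1 up to the next power of two, so the array is reallocated to 8
 * slots, slots 0..7 are NULLed, idle_max_cpu becomes 8, and only slot
 * 5 then gets a thread struct allocated on demand.
 */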
2287
2288 static void save_idle_callchain(struct perf_sched *sched,
2289 struct idle_thread_runtime *itr,
2290 struct perf_sample *sample)
2291 {
2292 if (!sched->show_callchain || sample->callchain == NULL)
2293 return;
2294
2295 callchain_cursor__copy(&itr->cursor, &callchain_cursor);
2296 }
2297
2298 static struct thread *timehist_get_thread(struct perf_sched *sched,
2299 struct perf_sample *sample,
2300 struct machine *machine,
2301 struct evsel *evsel)
2302 {
2303 struct thread *thread;
2304
2305 if (is_idle_sample(sample, evsel)) {
2306 thread = get_idle_thread(sample->cpu);
2307 if (thread == NULL)
2308 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2309
2310 } else {
2311 /* there were samples with tid 0 but non-zero pid */
2312 thread = machine__findnew_thread(machine, sample->pid,
2313 sample->tid ?: sample->pid);
2314 if (thread == NULL) {
2315 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2316 sample->tid);
2317 }
2318
2319 save_task_callchain(sched, sample, evsel, machine);
2320 if (sched->idle_hist) {
2321 struct thread *idle;
2322 struct idle_thread_runtime *itr;
2323
2324 idle = get_idle_thread(sample->cpu);
2325 if (idle == NULL) {
2326 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2327 return NULL;
2328 }
2329
2330 itr = thread__priv(idle);
2331 if (itr == NULL)
2332 return NULL;
2333
2334 itr->last_thread = thread;
2335
2336 /* copy the task callchain when switching to idle */
2337 if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
2338 save_idle_callchain(sched, itr, sample);
2339 }
2340 }
2341
2342 return thread;
2343 }
2344
2345 static bool timehist_skip_sample(struct perf_sched *sched,
2346 struct thread *thread,
2347 struct evsel *evsel,
2348 struct perf_sample *sample)
2349 {
2350 bool rc = false;
2351
2352 if (thread__is_filtered(thread)) {
2353 rc = true;
2354 sched->skipped_samples++;
2355 }
2356
2357 if (sched->idle_hist) {
2358 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
2359 rc = true;
2360 else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
2361 perf_evsel__intval(evsel, sample, "next_pid") != 0)
2362 rc = true;
2363 }
2364
2365 return rc;
2366 }
2367
2368 static void timehist_print_wakeup_event(struct perf_sched *sched,
2369 struct evsel *evsel,
2370 struct perf_sample *sample,
2371 struct machine *machine,
2372 struct thread *awakened)
2373 {
2374 struct thread *thread;
2375 char tstr[64];
2376
2377 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2378 if (thread == NULL)
2379 return;
2380
2381 /* show the wakeup unless both wakee and waker are filtered */
2382 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2383 timehist_skip_sample(sched, awakened, evsel, sample)) {
2384 return;
2385 }
2386
2387 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2388 printf("%15s [%04d] ", tstr, sample->cpu);
2389 if (sched->show_cpu_visual)
2390 printf(" %*s ", sched->max_cpu + 1, "");
2391
2392 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2393
2394 /* dt spacer */
2395 printf(" %9s %9s %9s ", "", "", "");
2396
2397 printf("awakened: %s", timehist_get_commstr(awakened));
2398
2399 printf("\n");
2400 }
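/*
 * Example line (editor's sketch with made-up values; the waker is in
 * the task column, the wakee after "awakened:"):
 *
 *   2263.123456 [0001]  perf[27469]                          awakened: migration/1[11]
 *
 * The three empty %9s fields above keep the wait/delay/run columns
 * aligned with regular sched_switch lines.
 */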
2401
2402 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2403 union perf_event *event __maybe_unused,
2404 struct evsel *evsel,
2405 struct perf_sample *sample,
2406 struct machine *machine)
2407 {
2408 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2409 struct thread *thread;
2410 struct thread_runtime *tr = NULL;
2411 /* want the pid of the awakened task, not the pid in the sample */
2412 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2413
2414 thread = machine__findnew_thread(machine, 0, pid);
2415 if (thread == NULL)
2416 return -1;
2417
2418 tr = thread__get_runtime(thread);
2419 if (tr == NULL)
2420 return -1;
2421
2422 if (tr->ready_to_run == 0)
2423 tr->ready_to_run = sample->time;
2424
2425 /* show wakeups if requested */
2426 if (sched->show_wakeups &&
2427 !perf_time__skip_sample(&sched->ptime, sample->time))
2428 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2429
2430 return 0;
2431 }
2432
2433 static void timehist_print_migration_event(struct perf_sched *sched,
2434 struct evsel *evsel,
2435 struct perf_sample *sample,
2436 struct machine *machine,
2437 struct thread *migrated)
2438 {
2439 struct thread *thread;
2440 char tstr[64];
2441 u32 max_cpus;
2442 u32 ocpu, dcpu;
2443
2444 if (sched->summary_only)
2445 return;
2446
2447 max_cpus = sched->max_cpu + 1;
2448 ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
2449 dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
2450
2451 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2452 if (thread == NULL)
2453 return;
2454
2455 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2456 timehist_skip_sample(sched, migrated, evsel, sample)) {
2457 return;
2458 }
2459
2460 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2461 printf("%15s [%04d] ", tstr, sample->cpu);
2462
2463 if (sched->show_cpu_visual) {
2464 u32 i;
2465 char c;
2466
2467 printf(" ");
2468 for (i = 0; i < max_cpus; ++i) {
2469 c = (i == sample->cpu) ? 'm' : ' ';
2470 printf("%c", c);
2471 }
2472 printf(" ");
2473 }
2474
2475 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2476
2477 /* dt spacer */
2478 printf(" %9s %9s %9s ", "", "", "");
2479
2480 printf("migrated: %s", timehist_get_commstr(migrated));
2481 printf(" cpu %d => %d", ocpu, dcpu);
2482
2483 printf("\n");
2484 }
2485
2486 static int timehist_migrate_task_event(struct perf_tool *tool,
2487 union perf_event *event __maybe_unused,
2488 struct evsel *evsel,
2489 struct perf_sample *sample,
2490 struct machine *machine)
2491 {
2492 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2493 struct thread *thread;
2494 struct thread_runtime *tr = NULL;
2495 /* want the pid of the migrated task, not the pid in the sample */
2496 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2497
2498 thread = machine__findnew_thread(machine, 0, pid);
2499 if (thread == NULL)
2500 return -1;
2501
2502 tr = thread__get_runtime(thread);
2503 if (tr == NULL)
2504 return -1;
2505
2506 tr->migrations++;
2507
2508 /* show migrations if requested */
2509 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2510
2511 return 0;
2512 }
2513
2514 static int timehist_sched_change_event(struct perf_tool *tool,
2515 union perf_event *event,
2516 struct evsel *evsel,
2517 struct perf_sample *sample,
2518 struct machine *machine)
2519 {
2520 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2521 struct perf_time_interval *ptime = &sched->ptime;
2522 struct addr_location al;
2523 struct thread *thread;
2524 struct thread_runtime *tr = NULL;
2525 u64 tprev, t = sample->time;
2526 int rc = 0;
2527 int state = perf_evsel__intval(evsel, sample, "prev_state");
2528
2529
2530 if (machine__resolve(machine, &al, sample) < 0) {
2531 pr_err("problem processing %d event. skipping it\n",
2532 event->header.type);
2533 rc = -1;
2534 goto out;
2535 }
2536
2537 thread = timehist_get_thread(sched, sample, machine, evsel);
2538 if (thread == NULL) {
2539 rc = -1;
2540 goto out;
2541 }
2542
2543 if (timehist_skip_sample(sched, thread, evsel, sample))
2544 goto out;
2545
2546 tr = thread__get_runtime(thread);
2547 if (tr == NULL) {
2548 rc = -1;
2549 goto out;
2550 }
2551
2552 tprev = perf_evsel__get_time(evsel, sample->cpu);
2553
2554 /*
2555 * If a start time is given:
2556 * - if the sample time is before the window of interest, skip the sample
2557 * - if tprev is before the window, reset it to the start of the window
2558 */
2559 if (ptime->start && ptime->start > t)
2560 goto out;
2561
2562 if (tprev && ptime->start > tprev)
2563 tprev = ptime->start;
2564
2565 /*
2566 * If an end time is given:
2567 * - if the previous sched event is past the window, we are done
2568 * - if the sample time is past the window, clamp it to the end of
2569 * the window to close out the stats for the window of interest
2570 */
2571 if (ptime->end) {
2572 if (tprev > ptime->end)
2573 goto out;
2574
2575 if (t > ptime->end)
2576 t = ptime->end;
2577 }
2578
2579 if (!sched->idle_hist || thread->tid == 0) {
2580 timehist_update_runtime_stats(tr, t, tprev);
2581
2582 if (sched->idle_hist) {
2583 struct idle_thread_runtime *itr = (void *)tr;
2584 struct thread_runtime *last_tr;
2585
2586 BUG_ON(thread->tid != 0);
2587
2588 if (itr->last_thread == NULL)
2589 goto out;
2590
2591 /* add current idle time as last thread's runtime */
2592 last_tr = thread__get_runtime(itr->last_thread);
2593 if (last_tr == NULL)
2594 goto out;
2595
2596 timehist_update_runtime_stats(last_tr, t, tprev);
2597 /*
2598 * Reset the delta times of the last thread: they are not
2599 * updated on this path and would otherwise show stale values
2600 * next time. Only the total run time and run stats matter here.
2601 */
2602 last_tr->dt_run = 0;
2603 last_tr->dt_delay = 0;
2604 last_tr->dt_sleep = 0;
2605 last_tr->dt_iowait = 0;
2606 last_tr->dt_preempt = 0;
2607
2608 if (itr->cursor.nr)
2609 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2610
2611 itr->last_thread = NULL;
2612 }
2613 }
2614
2615 if (!sched->summary_only)
2616 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2617
2618 out:
2619 if (sched->hist_time.start == 0 && t >= ptime->start)
2620 sched->hist_time.start = t;
2621 if (ptime->end == 0 || t <= ptime->end)
2622 sched->hist_time.end = t;
2623
2624 if (tr) {
2625 /* time of this sched_switch event becomes last time task seen */
2626 tr->last_time = sample->time;
2627
2628 /* last state is used to determine where to account wait time */
2629 tr->last_state = state;
2630
2631 /* sched-out event for the task, so reset its ready-to-run time */
2632 tr->ready_to_run = 0;
2633 }
2634
2635 perf_evsel__save_time(evsel, sample->time, sample->cpu);
2636
2637 return rc;
2638 }
2639
2640 static int timehist_sched_switch_event(struct perf_tool *tool,
2641 union perf_event *event,
2642 struct evsel *evsel,
2643 struct perf_sample *sample,
2644 struct machine *machine __maybe_unused)
2645 {
2646 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2647 }
2648
2649 static int process_lost(struct perf_tool *tool __maybe_unused,
2650 union perf_event *event,
2651 struct perf_sample *sample,
2652 struct machine *machine __maybe_unused)
2653 {
2654 char tstr[64];
2655
2656 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2657 printf("%15s ", tstr);
2658 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2659
2660 return 0;
2661 }
2662
2663
2664 static void print_thread_runtime(struct thread *t,
2665 struct thread_runtime *r)
2666 {
2667 double mean = avg_stats(&r->run_stats);
2668 float stddev;
2669
2670 printf("%*s %5d %9" PRIu64 " ",
2671 comm_width, timehist_get_commstr(t), t->ppid,
2672 (u64) r->run_stats.n);
2673
2674 print_sched_time(r->total_run_time, 8);
2675 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2676 print_sched_time(r->run_stats.min, 6);
2677 printf(" ");
2678 print_sched_time((u64) mean, 6);
2679 printf(" ");
2680 print_sched_time(r->run_stats.max, 6);
2681 printf(" ");
2682 printf("%5.2f", stddev);
2683 printf(" %5" PRIu64, r->migrations);
2684 printf("\n");
2685 }
2686
2687 static void print_thread_waittime(struct thread *t,
2688 struct thread_runtime *r)
2689 {
2690 printf("%*s %5d %9" PRIu64 " ",
2691 comm_width, timehist_get_commstr(t), t->ppid,
2692 (u64) r->run_stats.n);
2693
2694 print_sched_time(r->total_run_time, 8);
2695 print_sched_time(r->total_sleep_time, 6);
2696 printf(" ");
2697 print_sched_time(r->total_iowait_time, 6);
2698 printf(" ");
2699 print_sched_time(r->total_preempt_time, 6);
2700 printf(" ");
2701 print_sched_time(r->total_delay_time, 6);
2702 printf("\n");
2703 }
2704
2705 struct total_run_stats {
2706 struct perf_sched *sched;
2707 u64 sched_count;
2708 u64 task_count;
2709 u64 total_run_time;
2710 };
2711
2712 static int __show_thread_runtime(struct thread *t, void *priv)
2713 {
2714 struct total_run_stats *stats = priv;
2715 struct thread_runtime *r;
2716
2717 if (thread__is_filtered(t))
2718 return 0;
2719
2720 r = thread__priv(t);
2721 if (r && r->run_stats.n) {
2722 stats->task_count++;
2723 stats->sched_count += r->run_stats.n;
2724 stats->total_run_time += r->total_run_time;
2725
2726 if (stats->sched->show_state)
2727 print_thread_waittime(t, r);
2728 else
2729 print_thread_runtime(t, r);
2730 }
2731
2732 return 0;
2733 }
2734
2735 static int show_thread_runtime(struct thread *t, void *priv)
2736 {
2737 if (t->dead)
2738 return 0;
2739
2740 return __show_thread_runtime(t, priv);
2741 }
2742
2743 static int show_deadthread_runtime(struct thread *t, void *priv)
2744 {
2745 if (!t->dead)
2746 return 0;
2747
2748 return __show_thread_runtime(t, priv);
2749 }
2750
2751 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2752 {
2753 const char *sep = " <- ";
2754 struct callchain_list *chain;
2755 size_t ret = 0;
2756 char bf[1024];
2757 bool first;
2758
2759 if (node == NULL)
2760 return 0;
2761
2762 ret = callchain__fprintf_folded(fp, node->parent);
2763 first = (ret == 0);
2764
2765 list_for_each_entry(chain, &node->val, list) {
2766 if (chain->ip >= PERF_CONTEXT_MAX)
2767 continue;
2768 if (chain->ms.sym && chain->ms.sym->ignore)
2769 continue;
2770 ret += fprintf(fp, "%s%s", first ? "" : sep,
2771 callchain_list__sym_name(chain, bf, sizeof(bf),
2772 false));
2773 first = false;
2774 }
2775
2776 return ret;
2777 }
2778
2779 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2780 {
2781 size_t ret = 0;
2782 FILE *fp = stdout;
2783 struct callchain_node *chain;
2784 struct rb_node *rb_node = rb_first_cached(root);
2785
2786 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2787 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2788 graph_dotted_line);
2789
2790 while (rb_node) {
2791 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2792 rb_node = rb_next(rb_node);
2793
2794 ret += fprintf(fp, " ");
2795 print_sched_time(chain->hit, 12);
2796 ret += 16; /* print_sched_time() prints 2nd arg + 4 chars */
2797 ret += fprintf(fp, " %8d ", chain->count);
2798 ret += callchain__fprintf_folded(fp, chain);
2799 ret += fprintf(fp, "\n");
2800 }
2801
2802 return ret;
2803 }
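/*
 * Illustrative output (editor's sketch, made-up numbers):
 *
 *   Idle time (msec)    Count Callchains
 *   ----------------  -------- ------------------------------------------
 *        157.312            34 do_sys_poll <- sys_poll <- entry_SYSCALL_64
 *
 * Each folded line joins one callchain with " <- ", skipping symbols
 * flagged as ignored in save_task_callchain() (schedule/__schedule etc.).
 */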
2804
2805 static void timehist_print_summary(struct perf_sched *sched,
2806 struct perf_session *session)
2807 {
2808 struct machine *m = &session->machines.host;
2809 struct total_run_stats totals;
2810 u64 task_count;
2811 struct thread *t;
2812 struct thread_runtime *r;
2813 int i;
2814 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2815
2816 memset(&totals, 0, sizeof(totals));
2817 totals.sched = sched;
2818
2819 if (sched->idle_hist) {
2820 printf("\nIdle-time summary\n");
2821 printf("%*s parent sched-out ", comm_width, "comm");
2822 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
2823 } else if (sched->show_state) {
2824 printf("\nWait-time summary\n");
2825 printf("%*s parent sched-in ", comm_width, "comm");
2826 printf(" run-time sleep iowait preempt delay\n");
2827 } else {
2828 printf("\nRuntime summary\n");
2829 printf("%*s parent sched-in ", comm_width, "comm");
2830 printf(" run-time min-run avg-run max-run stddev migrations\n");
2831 }
2832 printf("%*s (count) ", comm_width, "");
2833 printf(" (msec) (msec) (msec) (msec) %s\n",
2834 sched->show_state ? "(msec)" : "%");
2835 printf("%.117s\n", graph_dotted_line);
2836
2837 machine__for_each_thread(m, show_thread_runtime, &totals);
2838 task_count = totals.task_count;
2839 if (!task_count)
2840 printf("<no still running tasks>\n");
2841
2842 printf("\nTerminated tasks:\n");
2843 machine__for_each_thread(m, show_deadthread_runtime, &totals);
2844 if (task_count == totals.task_count)
2845 printf("<no terminated tasks>\n");
2846
2847 /* CPU idle stats not tracked when samples were skipped */
2848 if (sched->skipped_samples && !sched->idle_hist)
2849 return;
2850
2851 printf("\nIdle stats:\n");
2852 for (i = 0; i < idle_max_cpu; ++i) {
2853 t = idle_threads[i];
2854 if (!t)
2855 continue;
2856
2857 r = thread__priv(t);
2858 if (r && r->run_stats.n) {
2859 totals.sched_count += r->run_stats.n;
2860 printf(" CPU %2d idle for ", i);
2861 print_sched_time(r->total_run_time, 6);
2862 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2863 } else
2864 printf(" CPU %2d idle entire time window\n", i);
2865 }
2866
2867 if (sched->idle_hist && sched->show_callchain) {
2868 callchain_param.mode = CHAIN_FOLDED;
2869 callchain_param.value = CCVAL_PERIOD;
2870
2871 callchain_register_param(&callchain_param);
2872
2873 printf("\nIdle stats by callchain:\n");
2874 for (i = 0; i < idle_max_cpu; ++i) {
2875 struct idle_thread_runtime *itr;
2876
2877 t = idle_threads[i];
2878 if (!t)
2879 continue;
2880
2881 itr = thread__priv(t);
2882 if (itr == NULL)
2883 continue;
2884
2885 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2886 0, &callchain_param);
2887
2888 printf(" CPU %2d:", i);
2889 print_sched_time(itr->tr.total_run_time, 6);
2890 printf(" msec\n");
2891 timehist_print_idlehist_callchain(&itr->sorted_root);
2892 printf("\n");
2893 }
2894 }
2895
2896 printf("\n"
2897 " Total number of unique tasks: %" PRIu64 "\n"
2898 "Total number of context switches: %" PRIu64 "\n",
2899 totals.task_count, totals.sched_count);
2900
2901 printf(" Total run time (msec): ");
2902 print_sched_time(totals.total_run_time, 2);
2903 printf("\n");
2904
2905 printf(" Total scheduling time (msec): ");
2906 print_sched_time(hist_time, 2);
2907 printf(" (x %d)\n", sched->max_cpu);
2908 }
2909
2910 typedef int (*sched_handler)(struct perf_tool *tool,
2911 union perf_event *event,
2912 struct evsel *evsel,
2913 struct perf_sample *sample,
2914 struct machine *machine);
2915
2916 static int perf_timehist__process_sample(struct perf_tool *tool,
2917 union perf_event *event,
2918 struct perf_sample *sample,
2919 struct evsel *evsel,
2920 struct machine *machine)
2921 {
2922 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2923 int err = 0;
2924 int this_cpu = sample->cpu;
2925
2926 if (this_cpu > sched->max_cpu)
2927 sched->max_cpu = this_cpu;
2928
2929 if (evsel->handler != NULL) {
2930 sched_handler f = evsel->handler;
2931
2932 err = f(tool, event, evsel, sample, machine);
2933 }
2934
2935 return err;
2936 }
2937
2938 static int timehist_check_attr(struct perf_sched *sched,
2939 struct evlist *evlist)
2940 {
2941 struct evsel *evsel;
2942 struct evsel_runtime *er;
2943
2944 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
2945 er = perf_evsel__get_runtime(evsel);
2946 if (er == NULL) {
2947 pr_err("Failed to allocate memory for evsel runtime data\n");
2948 return -1;
2949 }
2950
2951 if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2952 pr_info("Samples do not have callchains.\n");
2953 sched->show_callchain = 0;
2954 symbol_conf.use_callchain = 0;
2955 }
2956 }
2957
2958 return 0;
2959 }
2960
2961 static int perf_sched__timehist(struct perf_sched *sched)
2962 {
2963 const struct evsel_str_handler handlers[] = {
2964 { "sched:sched_switch", timehist_sched_switch_event, },
2965 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
2966 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
2967 };
2968 const struct evsel_str_handler migrate_handlers[] = {
2969 { "sched:sched_migrate_task", timehist_migrate_task_event, },
2970 };
2971 struct perf_data data = {
2972 .path = input_name,
2973 .mode = PERF_DATA_MODE_READ,
2974 .force = sched->force,
2975 };
2976
2977 struct perf_session *session;
2978 struct evlist *evlist;
2979 int err = -1;
2980
2981 /*
2982 * event handlers for timehist option
2983 */
2984 sched->tool.sample = perf_timehist__process_sample;
2985 sched->tool.mmap = perf_event__process_mmap;
2986 sched->tool.comm = perf_event__process_comm;
2987 sched->tool.exit = perf_event__process_exit;
2988 sched->tool.fork = perf_event__process_fork;
2989 sched->tool.lost = process_lost;
2990 sched->tool.attr = perf_event__process_attr;
2991 sched->tool.tracing_data = perf_event__process_tracing_data;
2992 sched->tool.build_id = perf_event__process_build_id;
2993
2994 sched->tool.ordered_events = true;
2995 sched->tool.ordering_requires_timestamps = true;
2996
2997 symbol_conf.use_callchain = sched->show_callchain;
2998
2999 session = perf_session__new(&data, false, &sched->tool);
3000 if (IS_ERR(session))
3001 return PTR_ERR(session);
3002
3003 if (cpu_list) {
3004 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3005 if (err < 0)
3006 goto out;
3007 }
3008
3009 evlist = session->evlist;
3010
3011 symbol__init(&session->header.env);
3012
3013 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3014 pr_err("Invalid time string\n");
3015 err = -EINVAL;
3016 goto out;
3017 }
3017
3018 if (timehist_check_attr(sched, evlist) != 0)
3019 goto out;
3020
3021 setup_pager();
3022
3023 /* setup per-evsel handlers */
3024 if (perf_session__set_tracepoints_handlers(session, handlers))
3025 goto out;
3026
3027 /* sched_switch event at a minimum needs to exist */
3028 if (!perf_evlist__find_tracepoint_by_name(session->evlist,
3029 "sched:sched_switch")) {
3030 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3031 goto out;
3032 }
3033
3034 if (sched->show_migrations &&
3035 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3036 goto out;
3037
3038 /* pre-allocate struct for per-CPU idle stats */
3039 sched->max_cpu = session->header.env.nr_cpus_online;
3040 if (sched->max_cpu == 0)
3041 sched->max_cpu = 4;
3042 if (init_idle_threads(sched->max_cpu))
3043 goto out;
3044
3045 /* summary_only implies summary option, but don't overwrite summary if set */
3046 if (sched->summary_only)
3047 sched->summary = sched->summary_only;
3048
3049 if (!sched->summary_only)
3050 timehist_header(sched);
3051
3052 err = perf_session__process_events(session);
3053 if (err) {
3054 pr_err("Failed to process events, error %d\n", err);
3055 goto out;
3056 }
3057
3058 sched->nr_events = evlist->stats.nr_events[0];
3059 sched->nr_lost_events = evlist->stats.total_lost;
3060 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3061
3062 if (sched->summary)
3063 timehist_print_summary(sched, session);
3064
3065 out:
3066 free_idle_threads();
3067 perf_session__delete(session);
3068
3069 return err;
3070 }
3071
3072
3073 static void print_bad_events(struct perf_sched *sched)
3074 {
3075 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3076 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3077 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3078 sched->nr_unordered_timestamps, sched->nr_timestamps);
3079 }
3080 if (sched->nr_lost_events && sched->nr_events) {
3081 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3082 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3083 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3084 }
3085 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3086 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3087 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3088 sched->nr_context_switch_bugs, sched->nr_timestamps);
3089 if (sched->nr_lost_events)
3090 printf(" (due to lost events?)");
3091 printf("\n");
3092 }
3093 }
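/*
 * Sample diagnostics (editor's sketch, made-up numbers):
 *
 *   INFO: 0.012% lost events (18 out of 150000, in 3 chunks)
 *   INFO: 0.004% context switch bugs (6 out of 150000) (due to lost events?)
 */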
3094
3095 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3096 {
3097 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3098 struct work_atoms *this;
3099 const char *comm = thread__comm_str(data->thread), *this_comm;
3100 bool leftmost = true;
3101
3102 while (*new) {
3103 int cmp;
3104
3105 this = container_of(*new, struct work_atoms, node);
3106 parent = *new;
3107
3108 this_comm = thread__comm_str(this->thread);
3109 cmp = strcmp(comm, this_comm);
3110 if (cmp > 0) {
3111 new = &((*new)->rb_left);
3112 } else if (cmp < 0) {
3113 new = &((*new)->rb_right);
3114 leftmost = false;
3115 } else {
3116 this->num_merged++;
3117 this->total_runtime += data->total_runtime;
3118 this->nb_atoms += data->nb_atoms;
3119 this->total_lat += data->total_lat;
3120 list_splice(&data->work_list, &this->work_list);
3121 if (this->max_lat < data->max_lat) {
3122 this->max_lat = data->max_lat;
3123 this->max_lat_at = data->max_lat_at;
3124 }
3125 zfree(&data);
3126 return;
3127 }
3128 }
3129
3130 data->num_merged++;
3131 rb_link_node(&data->node, parent, new);
3132 rb_insert_color_cached(&data->node, root, leftmost);
3133 }
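/*
 * Merge example (editor's illustration): two work_atoms entries whose
 * threads are both named "gcc" collapse into one node keyed by comm;
 * runtimes, atom counts and total latency are summed, the larger
 * max_lat (and its timestamp) wins, and num_merged lets the latency
 * output render the comm as "gcc:(2)".
 */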
3134
3135 static void perf_sched__merge_lat(struct perf_sched *sched)
3136 {
3137 struct work_atoms *data;
3138 struct rb_node *node;
3139
3140 if (sched->skip_merge)
3141 return;
3142
3143 while ((node = rb_first_cached(&sched->atom_root))) {
3144 rb_erase_cached(node, &sched->atom_root);
3145 data = rb_entry(node, struct work_atoms, node);
3146 __merge_work_atoms(&sched->merged_atom_root, data);
3147 }
3148 }
3149
3150 static int perf_sched__lat(struct perf_sched *sched)
3151 {
3152 struct rb_node *next;
3153
3154 setup_pager();
3155
3156 if (perf_sched__read_events(sched))
3157 return -1;
3158
3159 perf_sched__merge_lat(sched);
3160 perf_sched__sort_lat(sched);
3161
3162 printf("\n -----------------------------------------------------------------------------------------------------------------\n");
3163 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
3164 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3165
3166 next = rb_first_cached(&sched->sorted_atom_root);
3167
3168 while (next) {
3169 struct work_atoms *work_list;
3170
3171 work_list = rb_entry(next, struct work_atoms, node);
3172 output_lat_thread(sched, work_list);
3173 next = rb_next(next);
3174 thread__zput(work_list->thread);
3175 }
3176
3177 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3178 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3179 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3180
3181 printf(" ---------------------------------------------------\n");
3182
3183 print_bad_events(sched);
3184 printf("\n");
3185
3186 return 0;
3187 }
3188
3189 static int setup_map_cpus(struct perf_sched *sched)
3190 {
3191 struct perf_cpu_map *map;
3192
3193 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
3194
3195 if (sched->map.comp) {
3196 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
3197 if (!sched->map.comp_cpus)
3198 return -1;
3199 }
3200
3201 if (!sched->map.cpus_str)
3202 return 0;
3203
3204 map = perf_cpu_map__new(sched->map.cpus_str);
3205 if (!map) {
3206 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3207 return -1;
3208 }
3209
3210 sched->map.cpus = map;
3211 return 0;
3212 }
3213
3214 static int setup_color_pids(struct perf_sched *sched)
3215 {
3216 struct perf_thread_map *map;
3217
3218 if (!sched->map.color_pids_str)
3219 return 0;
3220
3221 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3222 if (!map) {
3223 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3224 return -1;
3225 }
3226
3227 sched->map.color_pids = map;
3228 return 0;
3229 }
3230
3231 static int setup_color_cpus(struct perf_sched *sched)
3232 {
3233 struct perf_cpu_map *map;
3234
3235 if (!sched->map.color_cpus_str)
3236 return 0;
3237
3238 map = perf_cpu_map__new(sched->map.color_cpus_str);
3239 if (!map) {
3240 pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
3241 return -1;
3242 }
3243
3244 sched->map.color_cpus = map;
3245 return 0;
3246 }
3247
3248 static int perf_sched__map(struct perf_sched *sched)
3249 {
3250 if (setup_map_cpus(sched))
3251 return -1;
3252
3253 if (setup_color_pids(sched))
3254 return -1;
3255
3256 if (setup_color_cpus(sched))
3257 return -1;
3258
3259 setup_pager();
3260 if (perf_sched__read_events(sched))
3261 return -1;
3262 print_bad_events(sched);
3263 return 0;
3264 }
3265
3266 static int perf_sched__replay(struct perf_sched *sched)
3267 {
3268 unsigned long i;
3269
3270 calibrate_run_measurement_overhead(sched);
3271 calibrate_sleep_measurement_overhead(sched);
3272
3273 test_calibrations(sched);
3274
3275 if (perf_sched__read_events(sched))
3276 return -1;
3277
3278 printf("nr_run_events: %ld\n", sched->nr_run_events);
3279 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3280 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3281
3282 if (sched->targetless_wakeups)
3283 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3284 if (sched->multitarget_wakeups)
3285 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3286 if (sched->nr_run_events_optimized)
3287 printf("run atoms optimized: %ld\n",
3288 sched->nr_run_events_optimized);
3289
3290 print_task_traces(sched);
3291 add_cross_task_wakeups(sched);
3292
3293 create_tasks(sched);
3294 printf("------------------------------------------------------------\n");
3295 for (i = 0; i < sched->replay_repeat; i++)
3296 run_one_test(sched);
3297
3298 return 0;
3299 }
3300
3301 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3302 const char * const usage_msg[])
3303 {
3304 char *tmp, *tok, *str = strdup(sched->sort_order);
3305
3306 for (tok = strtok_r(str, ", ", &tmp);
3307 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3308 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3309 usage_with_options_msg(usage_msg, options,
3310 "Unknown --sort key: `%s'", tok);
3311 }
3312 }
3313
3314 free(str);
3315
3316 sort_dimension__add("pid", &sched->cmp_pid);
3317 }
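/*
 * Example (hypothetical invocation): "perf sched latency -s max,runtime"
 * splits the string on ", " into "max" and "runtime" and registers each
 * via sort_dimension__add(); an unknown key such as "-s foo" aborts
 * with the usage message. "pid" is always added to cmp_pid for the
 * internal thread lookup.
 */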
3318
3319 static int __cmd_record(int argc, const char **argv)
3320 {
3321 unsigned int rec_argc, i, j;
3322 const char **rec_argv;
3323 const char * const record_args[] = {
3324 "record",
3325 "-a",
3326 "-R",
3327 "-m", "1024",
3328 "-c", "1",
3329 "-e", "sched:sched_switch",
3330 "-e", "sched:sched_stat_wait",
3331 "-e", "sched:sched_stat_sleep",
3332 "-e", "sched:sched_stat_iowait",
3333 "-e", "sched:sched_stat_runtime",
3334 "-e", "sched:sched_process_fork",
3335 "-e", "sched:sched_wakeup",
3336 "-e", "sched:sched_wakeup_new",
3337 "-e", "sched:sched_migrate_task",
3338 };
3339
3340 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
3341 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3342
3343 if (rec_argv == NULL)
3344 return -ENOMEM;
3345
3346 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3347 rec_argv[i] = strdup(record_args[i]);
3348
3349 for (j = 1; j < (unsigned int)argc; j++, i++)
3350 rec_argv[i] = argv[j];
3351
3352 BUG_ON(i != rec_argc);
3353
3354 return cmd_record(i, rec_argv);
3355 }
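/*
 * Expansion example (hypothetical command line):
 *
 *   perf sched record -- sleep 1
 *
 * runs the equivalent of
 *
 *   perf record -a -R -m 1024 -c 1 -e sched:sched_switch ... \
 *               -e sched:sched_migrate_task -- sleep 1
 *
 * i.e. record_args above plus any user-supplied arguments after the
 * subcommand.
 */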
3356
3357 int cmd_sched(int argc, const char **argv)
3358 {
3359 static const char default_sort_order[] = "avg, max, switch, runtime";
3360 struct perf_sched sched = {
3361 .tool = {
3362 .sample = perf_sched__process_tracepoint_sample,
3363 .comm = perf_sched__process_comm,
3364 .namespaces = perf_event__process_namespaces,
3365 .lost = perf_event__process_lost,
3366 .fork = perf_sched__process_fork_event,
3367 .ordered_events = true,
3368 },
3369 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3370 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3371 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
3372 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
3373 .sort_order = default_sort_order,
3374 .replay_repeat = 10,
3375 .profile_cpu = -1,
3376 .next_shortname1 = 'A',
3377 .next_shortname2 = '0',
3378 .skip_merge = 0,
3379 .show_callchain = 1,
3380 .max_stack = 5,
3381 };
3382 const struct option sched_options[] = {
3383 OPT_STRING('i', "input", &input_name, "file",
3384 "input file name"),
3385 OPT_INCR('v', "verbose", &verbose,
3386 "be more verbose (show symbol address, etc)"),
3387 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3388 "dump raw trace in ASCII"),
3389 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3390 OPT_END()
3391 };
3392 const struct option latency_options[] = {
3393 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3394 "sort by key(s): runtime, switch, avg, max"),
3395 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3396 "CPU to profile on"),
3397 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3398 "latency stats per pid instead of per comm"),
3399 OPT_PARENT(sched_options)
3400 };
3401 const struct option replay_options[] = {
3402 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3403 "repeat the workload replay N times (-1: infinite)"),
3404 OPT_PARENT(sched_options)
3405 };
3406 const struct option map_options[] = {
3407 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3408 "map output in compact mode"),
3409 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3410 "highlight given pids in map"),
3411 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3412 "highlight given CPUs in map"),
3413 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3414 "display given CPUs in map"),
3415 OPT_PARENT(sched_options)
3416 };
3417 const struct option timehist_options[] = {
3418 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3419 "file", "vmlinux pathname"),
3420 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3421 "file", "kallsyms pathname"),
3422 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3423 "Display call chains if present (default on)"),
3424 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3425 "Maximum number of functions to display backtrace."),
3426 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3427 "Look for files with symbols relative to this directory"),
3428 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3429 "Show only syscall summary with statistics"),
3430 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3431 "Show all syscalls and summary with statistics"),
3432 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3433 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3434 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3435 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3436 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3437 OPT_STRING(0, "time", &sched.time_str, "str",
3438 "Time span for analysis (start,stop)"),
3439 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3440 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3441 "analyze events only for given process id(s)"),
3442 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3443 "analyze events only for given thread id(s)"),
3444 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3445 OPT_PARENT(sched_options)
3446 };
3447
3448 const char * const latency_usage[] = {
3449 "perf sched latency [<options>]",
3450 NULL
3451 };
3452 const char * const replay_usage[] = {
3453 "perf sched replay [<options>]",
3454 NULL
3455 };
3456 const char * const map_usage[] = {
3457 "perf sched map [<options>]",
3458 NULL
3459 };
3460 const char * const timehist_usage[] = {
3461 "perf sched timehist [<options>]",
3462 NULL
3463 };
3464 const char *const sched_subcommands[] = { "record", "latency", "map",
3465 "replay", "script",
3466 "timehist", NULL };
3467 const char *sched_usage[] = {
3468 NULL,
3469 NULL
3470 };
3471 struct trace_sched_handler lat_ops = {
3472 .wakeup_event = latency_wakeup_event,
3473 .switch_event = latency_switch_event,
3474 .runtime_event = latency_runtime_event,
3475 .migrate_task_event = latency_migrate_task_event,
3476 };
3477 struct trace_sched_handler map_ops = {
3478 .switch_event = map_switch_event,
3479 };
3480 struct trace_sched_handler replay_ops = {
3481 .wakeup_event = replay_wakeup_event,
3482 .switch_event = replay_switch_event,
3483 .fork_event = replay_fork_event,
3484 };
3485 unsigned int i;
3486
3487 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
3488 sched.curr_pid[i] = -1;
3489
3490 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3491 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3492 if (!argc)
3493 usage_with_options(sched_usage, sched_options);
3494
3495 /*
3496 * Aliased to 'perf script' for now:
3497 */
3498 if (!strcmp(argv[0], "script"))
3499 return cmd_script(argc, argv);
3500
3501 if (!strncmp(argv[0], "rec", 3)) {
3502 return __cmd_record(argc, argv);
3503 } else if (!strncmp(argv[0], "lat", 3)) {
3504 sched.tp_handler = &lat_ops;
3505 if (argc > 1) {
3506 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3507 if (argc)
3508 usage_with_options(latency_usage, latency_options);
3509 }
3510 setup_sorting(&sched, latency_options, latency_usage);
3511 return perf_sched__lat(&sched);
3512 } else if (!strcmp(argv[0], "map")) {
3513 if (argc) {
3514 argc = parse_options(argc, argv, map_options, map_usage, 0);
3515 if (argc)
3516 usage_with_options(map_usage, map_options);
3517 }
3518 sched.tp_handler = &map_ops;
3519 setup_sorting(&sched, latency_options, latency_usage);
3520 return perf_sched__map(&sched);
3521 } else if (!strncmp(argv[0], "rep", 3)) {
3522 sched.tp_handler = &replay_ops;
3523 if (argc) {
3524 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3525 if (argc)
3526 usage_with_options(replay_usage, replay_options);
3527 }
3528 return perf_sched__replay(&sched);
3529 } else if (!strcmp(argv[0], "timehist")) {
3530 if (argc) {
3531 argc = parse_options(argc, argv, timehist_options,
3532 timehist_usage, 0);
3533 if (argc)
3534 usage_with_options(timehist_usage, timehist_options);
3535 }
3536 if ((sched.show_wakeups || sched.show_next) &&
3537 sched.summary_only) {
3538 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3539 parse_options_usage(timehist_usage, timehist_options, "s", true);
3540 if (sched.show_wakeups)
3541 parse_options_usage(NULL, timehist_options, "w", true);
3542 if (sched.show_next)
3543 parse_options_usage(NULL, timehist_options, "n", true);
3544 return -EINVAL;
3545 }
3546
3547 return perf_sched__timehist(&sched);
3548 } else {
3549 usage_with_options(sched_usage, sched_options);
3550 }
3551
3552 return 0;
3553 }