/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
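/*
 * trace_type records which flavor is active: the irqsoff tracer
 * sets TRACER_IRQS_OFF, the preemptoff tracer sets
 * TRACER_PREEMPT_OFF, and the preemptirqsoff tracer sets both bits
 * (see the *_tracer_init() functions at the bottom of this file).
 */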
static int trace_type __read_mostly;

static int save_flags;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif
#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
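/*
 * When a tracer is not configured, its predicate compiles to a
 * constant 0, so the if (preempt_trace() || irq_trace()) tests in
 * the hooks below can be optimized away entirely.
 */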
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
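/*
 * Whether or not a new maximum was recorded, the out: path above
 * rearms the measurement: critical_sequence and preempt_timestamp
 * are reset so the next critical section starts a fresh window.
 */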
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
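/*
 * start_critical_timing() and stop_critical_timing() bracket every
 * measured section: start records the timestamp and entry point on
 * this cpu, stop writes the closing event and lets
 * check_critical_timing() decide whether a new maximum was hit.
 */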
/* start and stop critical timings used to stop the trace (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
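/*
 * Usage note: the idle loop calls stop_critical_timings() before
 * entering an idle state and start_critical_timings() on exit, so
 * time spent in a known-quiescent idle state is not reported as an
 * irqs-off latency.
 */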
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */
/*
 * Stubs:
 */
void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
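/*
 * Note the complementary guards above: the hardirq hooks fire only
 * when preemption is not being timed, and the preempt hooks only
 * when irqs are not being timed.  For the preemptirqsoff tracer
 * (both trace_type bits set) timing therefore stops only once both
 * irqs and preemption have been re-enabled.
 */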
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}
static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
static bool irqsoff_busy;
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}
static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
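/*
 * Usage sketch (from user space, assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	echo irqsoff > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/tracing_max_latency
 *	cat /sys/kernel/tracing/trace
 *
 * The trace file then shows the longest irqs-off section observed;
 * the preemptoff and preemptirqsoff tracers registered above are
 * selected the same way by name.
 */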