1 From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
2 Subject: ftrace port to tracepoints
4 Port the trace_mark() calls used by ftrace to tracepoints. (cleanup)
7 - Change error messages: marker -> tracepoint
9 Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
10 CC: Masami Hiramatsu <mhiramat@redhat.com>
11 CC: 'Peter Zijlstra' <peterz@infradead.org>
12 CC: "Frank Ch. Eigler" <fche@redhat.com>
13 CC: 'Ingo Molnar' <mingo@elte.hu>
14 CC: 'Hideo AOKI' <haoki@redhat.com>
15 CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
16 CC: 'Steven Rostedt' <rostedt@goodmis.org>
17 CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
18 Acked-by: Jan Blunck <jblunck@suse.de>
20 kernel/trace/trace_sched_switch.c | 120 ++++++---------------------------
21 kernel/trace/trace_sched_wakeup.c | 135 +++++++++-----------------------------
22 2 files changed, 58 insertions(+), 197 deletions(-)
24 Index: linux-2.6-lttng/kernel/trace/trace_sched_switch.c
25 ===================================================================
26 --- linux-2.6-lttng.orig/kernel/trace/trace_sched_switch.c 2008-08-06 00:41:47.000000000 -0400
27 +++ linux-2.6-lttng/kernel/trace/trace_sched_switch.c 2008-08-06 13:12:10.000000000 -0400
29 #include <linux/debugfs.h>
30 #include <linux/kallsyms.h>
31 #include <linux/uaccess.h>
32 -#include <linux/marker.h>
33 #include <linux/ftrace.h>
34 +#include <trace/sched.h>
38 @@ -19,16 +19,17 @@ static int __read_mostly tracer_enabled;
39 static atomic_t sched_ref;
42 -sched_switch_func(void *private, void *__rq, struct task_struct *prev,
43 +probe_sched_switch(struct rq *__rq, struct task_struct *prev,
44 struct task_struct *next)
46 - struct trace_array **ptr = private;
47 - struct trace_array *tr = *ptr;
48 struct trace_array_cpu *data;
53 + if (!atomic_read(&sched_ref))
56 tracing_record_cmdline(prev);
57 tracing_record_cmdline(next);
59 @@ -37,95 +38,42 @@ sched_switch_func(void *private, void *_
61 local_irq_save(flags);
62 cpu = raw_smp_processor_id();
63 - data = tr->data[cpu];
64 + data = ctx_trace->data[cpu];
65 disabled = atomic_inc_return(&data->disabled);
67 if (likely(disabled == 1))
68 - tracing_sched_switch_trace(tr, data, prev, next, flags);
69 + tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
71 atomic_dec(&data->disabled);
72 local_irq_restore(flags);
76 -sched_switch_callback(void *probe_data, void *call_data,
77 - const char *format, va_list *args)
79 - struct task_struct *prev;
80 - struct task_struct *next;
83 - if (!atomic_read(&sched_ref))
86 - /* skip prev_pid %d next_pid %d prev_state %ld */
87 - (void)va_arg(*args, int);
88 - (void)va_arg(*args, int);
89 - (void)va_arg(*args, long);
90 - __rq = va_arg(*args, typeof(__rq));
91 - prev = va_arg(*args, typeof(prev));
92 - next = va_arg(*args, typeof(next));
95 - * If tracer_switch_func only points to the local
96 - * switch func, it still needs the ptr passed to it.
98 - sched_switch_func(probe_data, __rq, prev, next);
102 -wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
104 +probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
106 - struct trace_array **ptr = private;
107 - struct trace_array *tr = *ptr;
108 struct trace_array_cpu *data;
113 - if (!tracer_enabled)
114 + if (!likely(tracer_enabled))
117 - tracing_record_cmdline(curr);
118 + tracing_record_cmdline(current);
120 local_irq_save(flags);
121 cpu = raw_smp_processor_id();
122 - data = tr->data[cpu];
123 + data = ctx_trace->data[cpu];
124 disabled = atomic_inc_return(&data->disabled);
126 if (likely(disabled == 1))
127 - tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
128 + tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
131 atomic_dec(&data->disabled);
132 local_irq_restore(flags);
136 -wake_up_callback(void *probe_data, void *call_data,
137 - const char *format, va_list *args)
139 - struct task_struct *curr;
140 - struct task_struct *task;
143 - if (likely(!tracer_enabled))
146 - /* Skip pid %d state %ld */
147 - (void)va_arg(*args, int);
148 - (void)va_arg(*args, long);
149 - /* now get the meat: "rq %p task %p rq->curr %p" */
150 - __rq = va_arg(*args, typeof(__rq));
151 - task = va_arg(*args, typeof(task));
152 - curr = va_arg(*args, typeof(curr));
154 - tracing_record_cmdline(task);
155 - tracing_record_cmdline(curr);
157 - wakeup_func(probe_data, __rq, task, curr);
160 static void sched_switch_reset(struct trace_array *tr)
163 @@ -140,60 +88,40 @@ static int tracing_sched_register(void)
167 - ret = marker_probe_register("kernel_sched_wakeup",
168 - "pid %d state %ld ## rq %p task %p rq->curr %p",
171 + ret = register_trace_sched_wakeup(probe_sched_wakeup);
173 - pr_info("wakeup trace: Couldn't add marker"
174 + pr_info("wakeup trace: Couldn't activate tracepoint"
175 " probe to kernel_sched_wakeup\n");
179 - ret = marker_probe_register("kernel_sched_wakeup_new",
180 - "pid %d state %ld ## rq %p task %p rq->curr %p",
183 + ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
185 - pr_info("wakeup trace: Couldn't add marker"
186 + pr_info("wakeup trace: Couldn't activate tracepoint"
187 " probe to kernel_sched_wakeup_new\n");
191 - ret = marker_probe_register("kernel_sched_schedule",
192 - "prev_pid %d next_pid %d prev_state %ld "
193 - "## rq %p prev %p next %p",
194 - sched_switch_callback,
196 + ret = register_trace_sched_switch(probe_sched_switch);
198 - pr_info("sched trace: Couldn't add marker"
199 + pr_info("sched trace: Couldn't activate tracepoint"
200 " probe to kernel_sched_schedule\n");
201 goto fail_deprobe_wake_new;
205 fail_deprobe_wake_new:
206 - marker_probe_unregister("kernel_sched_wakeup_new",
209 + unregister_trace_sched_wakeup_new(probe_sched_wakeup);
211 - marker_probe_unregister("kernel_sched_wakeup",
214 + unregister_trace_sched_wakeup(probe_sched_wakeup);
218 static void tracing_sched_unregister(void)
220 - marker_probe_unregister("kernel_sched_schedule",
221 - sched_switch_callback,
223 - marker_probe_unregister("kernel_sched_wakeup_new",
226 - marker_probe_unregister("kernel_sched_wakeup",
229 + unregister_trace_sched_switch(probe_sched_switch);
230 + unregister_trace_sched_wakeup_new(probe_sched_wakeup);
231 + unregister_trace_sched_wakeup(probe_sched_wakeup);
234 static void tracing_start_sched_switch(void)
235 Index: linux-2.6-lttng/kernel/trace/trace_sched_wakeup.c
236 ===================================================================
237 --- linux-2.6-lttng.orig/kernel/trace/trace_sched_wakeup.c 2008-08-06 00:41:47.000000000 -0400
238 +++ linux-2.6-lttng/kernel/trace/trace_sched_wakeup.c 2008-08-06 13:18:26.000000000 -0400
240 #include <linux/kallsyms.h>
241 #include <linux/uaccess.h>
242 #include <linux/ftrace.h>
243 -#include <linux/marker.h>
244 +#include <trace/sched.h>
248 @@ -112,18 +112,18 @@ static int report_latency(cycle_t delta)
252 -wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
253 +probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
254 struct task_struct *next)
256 unsigned long latency = 0, t0 = 0, t1 = 0;
257 - struct trace_array **ptr = private;
258 - struct trace_array *tr = *ptr;
259 struct trace_array_cpu *data;
260 cycle_t T0, T1, delta;
265 + tracing_record_cmdline(prev);
267 if (unlikely(!tracer_enabled))
270 @@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void
273 /* The task we are waiting for is waking up */
274 - data = tr->data[wakeup_cpu];
275 + data = wakeup_trace->data[wakeup_cpu];
277 /* disable local data, not wakeup_cpu data */
278 cpu = raw_smp_processor_id();
279 - disabled = atomic_inc_return(&tr->data[cpu]->disabled);
280 + disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
281 if (likely(disabled != 1))
284 @@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void
285 if (unlikely(!tracer_enabled || next != wakeup_task))
288 - trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
289 + trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
292 * usecs conversion is slow so we try to delay the conversion
293 @@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void
294 t0 = nsecs_to_usecs(T0);
295 t1 = nsecs_to_usecs(T1);
297 - update_max_tr(tr, wakeup_task, wakeup_cpu);
298 + update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
301 - __wakeup_reset(tr);
302 + __wakeup_reset(wakeup_trace);
303 __raw_spin_unlock(&wakeup_lock);
304 local_irq_restore(flags);
306 - atomic_dec(&tr->data[cpu]->disabled);
310 -sched_switch_callback(void *probe_data, void *call_data,
311 - const char *format, va_list *args)
313 - struct task_struct *prev;
314 - struct task_struct *next;
317 - /* skip prev_pid %d next_pid %d prev_state %ld */
318 - (void)va_arg(*args, int);
319 - (void)va_arg(*args, int);
320 - (void)va_arg(*args, long);
321 - __rq = va_arg(*args, typeof(__rq));
322 - prev = va_arg(*args, typeof(prev));
323 - next = va_arg(*args, typeof(next));
325 - tracing_record_cmdline(prev);
328 - * If tracer_switch_func only points to the local
329 - * switch func, it still needs the ptr passed to it.
331 - wakeup_sched_switch(probe_data, __rq, prev, next);
332 + atomic_dec(&wakeup_trace->data[cpu]->disabled);
335 static void __wakeup_reset(struct trace_array *tr)
336 @@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_ar
340 -wakeup_check_start(struct trace_array *tr, struct task_struct *p,
341 - struct task_struct *curr)
342 +probe_wakeup(struct rq *rq, struct task_struct *p)
344 int cpu = smp_processor_id();
348 + if (likely(!tracer_enabled))
351 + tracing_record_cmdline(p);
352 + tracing_record_cmdline(current);
354 if (likely(!rt_task(p)) ||
355 p->prio >= wakeup_prio ||
356 - p->prio >= curr->prio)
357 + p->prio >= current->prio)
360 - disabled = atomic_inc_return(&tr->data[cpu]->disabled);
361 + disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
362 if (unlikely(disabled != 1))
365 @@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *t
368 /* reset the trace */
369 - __wakeup_reset(tr);
370 + __wakeup_reset(wakeup_trace);
372 wakeup_cpu = task_cpu(p);
373 wakeup_prio = p->prio;
374 @@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *t
376 local_save_flags(flags);
378 - tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
379 - trace_function(tr, tr->data[wakeup_cpu],
380 + wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
381 + trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
382 CALLER_ADDR1, CALLER_ADDR2, flags);
385 __raw_spin_unlock(&wakeup_lock);
387 - atomic_dec(&tr->data[cpu]->disabled);
391 -wake_up_callback(void *probe_data, void *call_data,
392 - const char *format, va_list *args)
394 - struct trace_array **ptr = probe_data;
395 - struct trace_array *tr = *ptr;
396 - struct task_struct *curr;
397 - struct task_struct *task;
400 - if (likely(!tracer_enabled))
403 - /* Skip pid %d state %ld */
404 - (void)va_arg(*args, int);
405 - (void)va_arg(*args, long);
406 - /* now get the meat: "rq %p task %p rq->curr %p" */
407 - __rq = va_arg(*args, typeof(__rq));
408 - task = va_arg(*args, typeof(task));
409 - curr = va_arg(*args, typeof(curr));
411 - tracing_record_cmdline(task);
412 - tracing_record_cmdline(curr);
414 - wakeup_check_start(tr, task, curr);
415 + atomic_dec(&wakeup_trace->data[cpu]->disabled);
418 static void start_wakeup_tracer(struct trace_array *tr)
422 - ret = marker_probe_register("kernel_sched_wakeup",
423 - "pid %d state %ld ## rq %p task %p rq->curr %p",
426 + ret = register_trace_sched_wakeup(probe_wakeup);
428 - pr_info("wakeup trace: Couldn't add marker"
429 + pr_info("wakeup trace: Couldn't activate tracepoint"
430 " probe to kernel_sched_wakeup\n");
434 - ret = marker_probe_register("kernel_sched_wakeup_new",
435 - "pid %d state %ld ## rq %p task %p rq->curr %p",
438 + ret = register_trace_sched_wakeup_new(probe_wakeup);
440 - pr_info("wakeup trace: Couldn't add marker"
441 + pr_info("wakeup trace: Couldn't activate tracepoint"
442 " probe to kernel_sched_wakeup_new\n");
446 - ret = marker_probe_register("kernel_sched_schedule",
447 - "prev_pid %d next_pid %d prev_state %ld "
448 - "## rq %p prev %p next %p",
449 - sched_switch_callback,
451 + ret = register_trace_sched_switch(probe_wakeup_sched_switch);
453 - pr_info("sched trace: Couldn't add marker"
454 + pr_info("sched trace: Couldn't activate tracepoint"
455 " probe to kernel_sched_schedule\n");
456 goto fail_deprobe_wake_new;
458 @@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct t
461 fail_deprobe_wake_new:
462 - marker_probe_unregister("kernel_sched_wakeup_new",
465 + unregister_trace_sched_wakeup_new(probe_wakeup);
467 - marker_probe_unregister("kernel_sched_wakeup",
470 + unregister_trace_sched_wakeup(probe_wakeup);
473 static void stop_wakeup_tracer(struct trace_array *tr)
476 unregister_ftrace_function(&trace_ops);
477 - marker_probe_unregister("kernel_sched_schedule",
478 - sched_switch_callback,
480 - marker_probe_unregister("kernel_sched_wakeup_new",
483 - marker_probe_unregister("kernel_sched_wakeup",
486 + unregister_trace_sched_switch(probe_wakeup_sched_switch);
487 + unregister_trace_sched_wakeup_new(probe_wakeup);
488 + unregister_trace_sched_wakeup(probe_wakeup);
491 static void wakeup_tracer_init(struct trace_array *tr)