From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Subject: ftrace port to tracepoints

Porting the trace_mark() probes used by ftrace to tracepoints. (cleanup)

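For reference, the tracepoints used below come from <trace/sched.h>. A
minimal sketch of how such a tracepoint is declared, assuming the
TPPROTO()/TPARGS() flavour of the tracepoint API this series introduces
(the exact header contents may differ):

	DEFINE_TRACE(sched_switch,
		TPPROTO(struct rq *rq, struct task_struct *prev,
			struct task_struct *next),
		TPARGS(rq, prev, next));

DEFINE_TRACE() generates the trace_sched_switch() call-site hook together
with the typed register_trace_sched_switch()/unregister_trace_sched_switch()
pair used throughout this patch.
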
Changelog:
- Change error messages: marker -> tracepoint

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Masami Hiramatsu <mhiramat@redhat.com>
CC: 'Peter Zijlstra' <peterz@infradead.org>
CC: "Frank Ch. Eigler" <fche@redhat.com>
CC: 'Ingo Molnar' <mingo@elte.hu>
CC: 'Hideo AOKI' <haoki@redhat.com>
CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
CC: 'Steven Rostedt' <rostedt@goodmis.org>
CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Acked-by: Jan Blunck <jblunck@suse.de>
---
 kernel/trace/trace_sched_switch.c |  120 ++++++---------------------------
 kernel/trace/trace_sched_wakeup.c |  135 +++++++++-----------------------------
 2 files changed, 58 insertions(+), 197 deletions(-)

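The shape of the conversion, taken from the hunks that follow: the untyped
marker registration, whose format string is parsed at runtime and whose
callback has to unmarshal a va_list, becomes a single typed tracepoint
registration whose probe receives the scheduler's arguments directly:

	/* Before: marker probe, arguments described by a format string. */
	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			sched_switch_callback,
			&ctx_trace);

	/* After: tracepoint probe, arguments type-checked at compile time. */
	ret = register_trace_sched_switch(probe_sched_switch);

This is why the sched_switch_callback()/wake_up_callback() va_arg()
unmarshalling below can be deleted outright.
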
Index: linux-2.6-lttng/kernel/trace/trace_sched_switch.c
===================================================================
--- linux-2.6-lttng.orig/kernel/trace/trace_sched_switch.c	2008-08-06 00:41:47.000000000 -0400
+++ linux-2.6-lttng/kernel/trace/trace_sched_switch.c	2008-08-06 13:12:10.000000000 -0400
@@ -9,8 +9,8 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -19,16 +19,17 @@ static int __read_mostly tracer_enabled;
 static atomic_t sched_ref;
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
+	if (!atomic_read(&sched_ref))
+		return;
+
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
 
@@ -37,95 +38,42 @@ sched_switch_func(void *private, void *_
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_switch_trace(tr, data, prev, next, flags);
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	if (!atomic_read(&sched_ref))
-		return;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-			task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
-	if (!tracer_enabled)
+	if (!likely(tracer_enabled))
 		return;
 
-	tracing_record_cmdline(curr);
+	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+			flags);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_func(probe_data, __rq, task, curr);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -140,60 +88,40 @@ static int tracing_sched_register(void)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-			"prev_pid %d next_pid %d prev_state %ld "
-			"## rq %p prev %p next %p",
-			sched_switch_callback,
-			&ctx_trace);
+	ret = register_trace_sched_switch(probe_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}
 
 	return ret;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-			wake_up_callback,
-			&ctx_trace);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-			wake_up_callback,
-			&ctx_trace);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 	return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-	marker_probe_unregister("kernel_sched_schedule",
-			sched_switch_callback,
-			&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-			wake_up_callback,
-			&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-			wake_up_callback,
-			&ctx_trace);
+	unregister_trace_sched_switch(probe_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
Index: linux-2.6-lttng/kernel/trace/trace_sched_wakeup.c
===================================================================
--- linux-2.6-lttng.orig/kernel/trace/trace_sched_wakeup.c	2008-08-06 00:41:47.000000000 -0400
+++ linux-2.6-lttng/kernel/trace/trace_sched_wakeup.c	2008-08-06 13:18:26.000000000 -0400
@@ -15,7 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/marker.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -112,18 +112,18 @@ static int report_latency(cycle_t delta)
 }
 
 static void notrace
-wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
 {
 	unsigned long latency = 0, t0 = 0, t1 = 0;
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	cycle_t T0, T1, delta;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
+	tracing_record_cmdline(prev);
+
 	if (unlikely(!tracer_enabled))
 		return;
 
@@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void
 		return;
 
 	/* The task we are waiting for is waking up */
-	data = tr->data[wakeup_cpu];
+	data = wakeup_trace->data[wakeup_cpu];
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
 	/*
	 * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void
 	t0 = nsecs_to_usecs(T0);
 	t1 = nsecs_to_usecs(T1);
 
-	update_max_tr(tr, wakeup_task, wakeup_cpu);
+	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock:
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 	__raw_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	tracing_record_cmdline(prev);
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	wakeup_sched_switch(probe_data, __rq, prev, next);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_ar
 }
 
 static void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
-		   struct task_struct *curr)
+probe_wakeup(struct rq *rq, struct task_struct *p)
 {
 	int cpu = smp_processor_id();
 	unsigned long flags;
 	long disabled;
 
+	if (likely(!tracer_enabled))
+		return;
+
+	tracing_record_cmdline(p);
+	tracing_record_cmdline(current);
+
 	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
-			p->prio >= curr->prio)
+			p->prio >= current->prio)
 		return;
 
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *t
 		goto out_locked;
 
 	/* reset the trace */
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 
 	wakeup_cpu = task_cpu(p);
 	wakeup_prio = p->prio;
@@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *t
 
 	local_save_flags(flags);
 
-	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-	trace_function(tr, tr->data[wakeup_cpu],
+	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
+	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct trace_array **ptr = probe_data;
-	struct trace_array *tr = *ptr;
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_check_start(tr, task, curr);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup_new(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-			"prev_pid %d next_pid %d prev_state %ld "
-			"## rq %p prev %p next %p",
-			sched_switch_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}
@@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct t
 
 	return;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-			wake_up_callback,
-			&wakeup_trace);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-			wake_up_callback,
-			&wakeup_trace);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
-	marker_probe_unregister("kernel_sched_schedule",
-			sched_switch_callback,
-			&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-			wake_up_callback,
-			&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-			wake_up_callback,
-			&wakeup_trace);
+	unregister_trace_sched_switch(probe_wakeup_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void wakeup_tracer_init(struct trace_array *tr)
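
For readers new to the tracepoint side of this conversion: any kernel code
can attach to these hooks with the same register/unregister pattern the
tracers above use. A hypothetical minimal consumer (the *_example names are
illustrative and not in the tree), assuming the tracepoint API backported
by this patch series:

	#include <linux/module.h>
	#include <asm/atomic.h>
	#include <trace/sched.h>

	static atomic_t wakeup_count = ATOMIC_INIT(0);

	/* Called at every trace_sched_wakeup() site with typed arguments;
	 * runs in scheduler context, so do only lock-free work here. */
	static void probe_wakeup_example(struct rq *rq, struct task_struct *p)
	{
		atomic_inc(&wakeup_count);
	}

	static int __init example_init(void)
	{
		return register_trace_sched_wakeup(probe_wakeup_example);
	}

	static void __exit example_exit(void)
	{
		unregister_trace_sched_wakeup(probe_wakeup_example);
		/* wait for in-flight probe calls before the module text goes away */
		tracepoint_synchronize_unregister();
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");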