diff --git a/src/patches/suse-2.6.27.25/patches.trace/lttng-instrumentation-page_alloc.patch b/src/patches/suse-2.6.27.25/patches.trace/lttng-instrumentation-page_alloc.patch
new file mode 100644 (file)
index 0000000..b90d678
--- /dev/null
@@ -0,0 +1,92 @@
+From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+Subject: LTTng instrumentation - page_alloc
+
+Original patch header:
+  LTTng instrumentation - page_alloc
+  
+  Paging activity instrumentation. Instruments page allocation and freeing to
+  keep track of paging activity. This does not cover hugetlb activity, which is
+  covered by a separate patch.
+  
+  Those tracepoints are used by LTTng.
+  
+  About the performance impact of tracepoints (which is comparable to markers),
+  even without immediate values optimizations, tests done by Hideo Aoki on ia64
+  show no regression. His test case was using hackbench on a kernel where
+  scheduler instrumentation (about 5 events in scheduler code) was added.
+  See the "Tracepoints" patch header for performance result details.
+  
+  Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+  CC: Martin Bligh <mbligh@google.com>
+  CC: Masami Hiramatsu <mhiramat@redhat.com>
+  CC: 'Peter Zijlstra' <peterz@infradead.org>
+  CC: "Frank Ch. Eigler" <fche@redhat.com>
+  CC: 'Ingo Molnar' <mingo@elte.hu>
+  CC: 'Hideo AOKI' <haoki@redhat.com>
+  CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
+  CC: 'Steven Rostedt' <rostedt@goodmis.org>
+  CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
+
+Acked-by: Jan Blunck <jblunck@suse.de>
+--- 
+---
+ include/trace/page_alloc.h |   16 ++++++++++++++++
+ mm/page_alloc.c            |    7 +++++++
+ 2 files changed, 23 insertions(+)
+
+--- /dev/null
++++ b/include/trace/page_alloc.h
+@@ -0,0 +1,16 @@
++#ifndef _TRACE_PAGE_ALLOC_H
++#define _TRACE_PAGE_ALLOC_H
++
++#include <linux/tracepoint.h>
++
++/*
++ * mm_page_alloc : page can be NULL.
++ */
++DEFINE_TRACE(page_alloc,
++      TPPROTO(struct page *page, unsigned int order),
++      TPARGS(page, order));
++DEFINE_TRACE(page_free,
++      TPPROTO(struct page *page, unsigned int order),
++      TPARGS(page, order));
++
++#endif
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -51,6 +51,8 @@
+ #include <asm/div64.h>
+ #include "internal.h"
++#include <trace/page_alloc.h>
++
+ /*
+  * Array of node states.
+  */
+@@ -528,6 +530,8 @@ static void __free_pages_ok(struct page 
+       int i;
+       int reserved = 0;
++      trace_page_free(page, order);
++
+       for (i = 0 ; i < (1 << order) ; ++i)
+               reserved += free_pages_check(page + i);
+       if (reserved)
+@@ -988,6 +992,8 @@ static void free_hot_cold_page(struct pa
+       struct per_cpu_pages *pcp;
+       unsigned long flags;
++      trace_page_free(page, 0);
++
+       if (PageAnon(page))
+               page->mapping = NULL;
+       if (free_pages_check(page))
+@@ -1658,6 +1664,7 @@ nopage:
+               show_mem();
+       }
+ got_pg:
++      trace_page_alloc(page, order);
+       return page;
+ }
+ EXPORT_SYMBOL(__alloc_pages_internal);
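
For reference, DEFINE_TRACE(page_alloc, ...) in this kernel series not only declares the trace_page_alloc()/trace_page_free() calls hooked in above, it also generates register_trace_page_alloc()/unregister_trace_page_alloc() (and the page_free equivalents), which is how LTTng or any other in-kernel consumer attaches a probe. The following is a minimal, hypothetical probe module sketched against that API; the probe and module names are illustrative and not part of this patch.

/*
 * Hypothetical probe module (not part of the patch above): attaches to the
 * page_alloc/page_free tracepoints via the register_trace_* helpers that
 * DEFINE_TRACE() generates in this kernel series.  printk() in a probe is
 * for illustration only; a real consumer (LTTng) records into its buffers.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <trace/page_alloc.h>

/* Probe signatures must match TPPROTO(struct page *page, unsigned int order). */
static void probe_page_alloc(struct page *page, unsigned int order)
{
	/* page may be NULL when the allocation failed (see the header comment). */
	printk(KERN_DEBUG "page_alloc: pfn=%lu order=%u\n",
	       page ? page_to_pfn(page) : 0UL, order);
}

static void probe_page_free(struct page *page, unsigned int order)
{
	printk(KERN_DEBUG "page_free: pfn=%lu order=%u\n",
	       page_to_pfn(page), order);
}

static int __init page_trace_init(void)
{
	int ret;

	ret = register_trace_page_alloc(probe_page_alloc);
	if (ret)
		return ret;
	ret = register_trace_page_free(probe_page_free);
	if (ret)
		unregister_trace_page_alloc(probe_page_alloc);
	return ret;
}

static void __exit page_trace_exit(void)
{
	unregister_trace_page_free(probe_page_free);
	unregister_trace_page_alloc(probe_page_alloc);
	/* Wait for in-flight probe calls to finish before the module unloads. */
	synchronize_sched();
}

module_init(page_trace_init);
module_exit(page_trace_exit);
MODULE_LICENSE("GPL");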