1 From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
2 Subject: LTTng instrumentation - memory page faults
5 LTTng instrumentation - memory page faults
7 Instrument the page fault entry and exit. Useful to detect delays caused by page
8 faults and bad memory usage patterns.
10 Those tracepoints are used by LTTng.
12 About the performance impact of tracepoints (which is comparable to markers),
13 even without immediate values optimizations, tests done by Hideo Aoki on ia64
14 show no regression. His test case was using hackbench on a kernel where
15 scheduler instrumentation (about 5 events in the scheduler code) was added.
16 See the "Tracepoints" patch header for performance result detail.
18 Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
19 CC: Andi Kleen <andi-suse@firstfloor.org>
20 CC: linux-mm@kvack.org
21 CC: Dave Hansen <haveblue@us.ibm.com>
22 CC: Masami Hiramatsu <mhiramat@redhat.com>
23 CC: 'Peter Zijlstra' <peterz@infradead.org>
24 CC: "Frank Ch. Eigler" <fche@redhat.com>
25 CC: 'Ingo Molnar' <mingo@elte.hu>
26 CC: 'Hideo AOKI' <haoki@redhat.com>
27 CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
28 CC: 'Steven Rostedt' <rostedt@goodmis.org>
29 CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
31 Acked-by: Jan Blunck <jblunck@suse.de>
34 include/trace/memory.h | 14 ++++++++++++++
35 mm/memory.c | 33 ++++++++++++++++++++++++---------
36 2 files changed, 38 insertions(+), 9 deletions(-)
39 +++ b/include/trace/memory.h
41 +#ifndef _TRACE_MEMORY_H
42 +#define _TRACE_MEMORY_H
44 +#include <linux/tracepoint.h>
46 +DEFINE_TRACE(memory_handle_fault_entry,
47 + TPPROTO(struct mm_struct *mm, struct vm_area_struct *vma,
48 + unsigned long address, int write_access),
49 + TPARGS(mm, vma, address, write_access));
50 +DEFINE_TRACE(memory_handle_fault_exit,
59 #include <linux/swapops.h>
60 #include <linux/elf.h>
61 +#include <trace/memory.h>
65 @@ -2832,30 +2833,44 @@ unlock:
66 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67 unsigned long address, int write_access)
75 + trace_memory_handle_fault_entry(mm, vma, address, write_access);
77 __set_current_state(TASK_RUNNING);
79 count_vm_event(PGFAULT);
81 - if (unlikely(is_vm_hugetlb_page(vma)))
82 - return hugetlb_fault(mm, vma, address, write_access);
83 + if (unlikely(is_vm_hugetlb_page(vma))) {
84 + res = hugetlb_fault(mm, vma, address, write_access);
88 pgd = pgd_offset(mm, address);
89 pud = pud_alloc(mm, pgd, address);
91 - return VM_FAULT_OOM;
96 pmd = pmd_alloc(mm, pud, address);
98 - return VM_FAULT_OOM;
100 + res = VM_FAULT_OOM;
103 pte = pte_alloc_map(mm, pmd, address);
105 - return VM_FAULT_OOM;
107 + res = VM_FAULT_OOM;
111 - return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
112 + res = handle_pte_fault(mm, vma, address, pte, pmd, write_access);
114 + trace_memory_handle_fault_exit(res);
117 EXPORT_SYMBOL_GPL(handle_mm_fault); /* For MoL */