From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Subject: LTTng instrumentation - hugetlb

Original patch header:
LTTng instrumentation - hugetlb

Instrumentation of hugetlb activity (alloc/free/reserve/grab/release).

These tracepoints are used by LTTng; a minimal probe sketch follows the
diffstat below.

Regarding the performance impact of tracepoints (which is comparable to
that of markers): even without the immediate-values optimization, tests
done by Hideo Aoki on ia64 show no regression. His test case used
hackbench on a kernel with scheduler instrumentation (about 5 events in
the scheduler code) added. See the "Tracepoints" patch header for
detailed performance results.

Changelog:
- instrument page grab, buddy allocator alloc, page release.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: William Lee Irwin III <wli@holomorphy.com>
CC: Masami Hiramatsu <mhiramat@redhat.com>
CC: 'Peter Zijlstra' <peterz@infradead.org>
CC: "Frank Ch. Eigler" <fche@redhat.com>
CC: 'Ingo Molnar' <mingo@elte.hu>
CC: 'Hideo AOKI' <haoki@redhat.com>
CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
CC: 'Steven Rostedt' <rostedt@goodmis.org>
CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>

Acked-by: Jan Blunck <jblunck@suse.de>
---
---
 include/trace/hugetlb.h |   28 ++++++++++++++++++++++++
 mm/hugetlb.c            |   56 +++++++++++++++++++++++++++++++++---------------
 2 files changed, 67 insertions(+), 17 deletions(-)

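Not part of the patch: a minimal probe sketch showing how a tracer such
as LTTng could attach to one of these tracepoints. It assumes the
register_trace_<name>()/unregister_trace_<name>() pairs generated by
DEFINE_TRACE() in the "Tracepoints" patch series this depends on; the
probe and module names below are illustrative only.

  #include <linux/module.h>
  #include <linux/mm.h>
  #include <trace/hugetlb.h>

  /* Called synchronously wherever trace_hugetlb_page_alloc() fires. */
  static void probe_hugetlb_page_alloc(struct page *page)
  {
          printk(KERN_DEBUG "hugetlb page alloc: %p\n", page);
  }

  static int __init hugetlb_probe_init(void)
  {
          /* Connect the probe; it runs until unregistered. */
          return register_trace_hugetlb_page_alloc(probe_hugetlb_page_alloc);
  }

  static void __exit hugetlb_probe_exit(void)
  {
          unregister_trace_hugetlb_page_alloc(probe_hugetlb_page_alloc);
  }

  module_init(hugetlb_probe_init);
  module_exit(hugetlb_probe_exit);
  MODULE_LICENSE("GPL");
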
--- /dev/null
+++ b/include/trace/hugetlb.h
@@ -0,0 +1,28 @@
+#ifndef _TRACE_HUGETLB_H
+#define _TRACE_HUGETLB_H
+
+#include <linux/tracepoint.h>
+
+DEFINE_TRACE(hugetlb_page_release,
+	TPPROTO(struct page *page),
+	TPARGS(page));
+DEFINE_TRACE(hugetlb_page_grab,
+	TPPROTO(struct page *page),
+	TPARGS(page));
+DEFINE_TRACE(hugetlb_buddy_pgalloc,
+	TPPROTO(struct page *page),
+	TPARGS(page));
+DEFINE_TRACE(hugetlb_page_alloc,
+	TPPROTO(struct page *page),
+	TPARGS(page));
+DEFINE_TRACE(hugetlb_page_free,
+	TPPROTO(struct page *page),
+	TPARGS(page));
+DEFINE_TRACE(hugetlb_pages_reserve,
+	TPPROTO(struct inode *inode, long from, long to, int ret),
+	TPARGS(inode, from, to, ret));
+DEFINE_TRACE(hugetlb_pages_unreserve,
+	TPPROTO(struct inode *inode, long offset, long freed),
+	TPARGS(inode, offset, freed));
+
+#endif
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -17,6 +17,7 @@
 #include <linux/mutex.h>
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
+#include <trace/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -492,6 +493,7 @@ static void update_and_free_page(struct
 
 	VM_BUG_ON(h->order >= MAX_ORDER);
 
+	trace_hugetlb_page_release(page);
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
 	for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -526,6 +528,7 @@ static void free_huge_page(struct page *
 	int nid = page_to_nid(page);
 	struct address_space *mapping;
 
+	trace_hugetlb_page_free(page);
 	mapping = (struct address_space *) page_private(page);
 	set_page_private(page, 0);
 	BUG_ON(page_count(page));
@@ -593,8 +596,10 @@ static struct page *alloc_fresh_huge_pag
 {
 	struct page *page;
 
-	if (h->order >= MAX_ORDER)
-		return NULL;
+	if (h->order >= MAX_ORDER) {
+		page = NULL;
+		goto end;
+	}
 
 	page = alloc_pages_node(nid,
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
@@ -603,11 +608,13 @@ static struct page *alloc_fresh_huge_pag
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
 			__free_pages(page, huge_page_order(h));
-			return NULL;
+			page = NULL;
+			goto end;
 		}
 		prep_new_huge_page(h, page, nid);
 	}
-
+end:
+	trace_hugetlb_page_grab(page);
 	return page;
 }
 
@@ -691,7 +698,8 @@ static struct page *alloc_buddy_huge_pag
 	spin_lock(&hugetlb_lock);
 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 		spin_unlock(&hugetlb_lock);
-		return NULL;
+		page = NULL;
+		goto end;
 	} else {
 		h->nr_huge_pages++;
 		h->surplus_huge_pages++;
@@ -729,7 +737,8 @@ static struct page *alloc_buddy_huge_pag
 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 	}
 	spin_unlock(&hugetlb_lock);
-
+end:
+	trace_hugetlb_buddy_pgalloc(page);
 	return page;
 }
 
@@ -968,6 +977,7 @@ static struct page *alloc_huge_page(stru
 
 	vma_commit_reservation(h, vma, addr);
 
+	trace_hugetlb_page_alloc(page);
 	return page;
 }
 
@@ -2233,11 +2243,12 @@ int hugetlb_reserve_pages(struct inode *
 					long from, long to,
 					struct vm_area_struct *vma)
 {
-	long ret, chg;
+	int ret = 0;
+	long chg;
 	struct hstate *h = hstate_inode(inode);
 
 	if (vma && vma->vm_flags & VM_NORESERVE)
-		return 0;
+		goto end;
 
 	/*
 	 * Shared mappings base their reservation on the number of pages that
@@ -2249,8 +2260,10 @@ int hugetlb_reserve_pages(struct inode *
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
 	else {
 		struct resv_map *resv_map = resv_map_alloc();
-		if (!resv_map)
-			return -ENOMEM;
+		if (!resv_map) {
+			ret = -ENOMEM;
+			goto end;
+		}
 
 		chg = to - from;
 
@@ -2258,25 +2271,34 @@ int hugetlb_reserve_pages(struct inode *
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
-		return chg;
+	if (chg < 0) {
+		ret = chg;
+		goto end;
+	}
 
-	if (hugetlb_get_quota(inode->i_mapping, chg))
-		return -ENOSPC;
+	if (hugetlb_get_quota(inode->i_mapping, chg)) {
+		ret = -ENOSPC;
+		goto end;
+	}
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugetlb_put_quota(inode->i_mapping, chg);
-		return ret;
+		goto end;
 	}
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
-	return 0;
+end:
+	trace_hugetlb_pages_reserve(inode, from, to, ret);
+	return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	struct hstate *h = hstate_inode(inode);
-	long chg = region_truncate(&inode->i_mapping->private_list, offset);
+	long chg;
+
+	trace_hugetlb_pages_unreserve(inode, offset, freed);
+	chg = region_truncate(&inode->i_mapping->private_list, offset);
 
 	spin_lock(&inode->i_lock);
 	inode->i_blocks -= blocks_per_huge_page(h);
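
The mm/hugetlb.c changes above repeatedly replace early returns with a
single "end:" label so that exactly one tracepoint call observes every
exit path, including the failure paths where the result is NULL or an
error code. A compilable userspace analogue of that single-exit pattern
(illustrative only; all names below are made up):

  #include <stdio.h>
  #include <stdlib.h>

  /* Stand-in for a tracepoint such as trace_hugetlb_page_grab(). */
  static void trace_alloc(void *p)
  {
          fprintf(stderr, "alloc result: %p\n", p);
  }

  static void *alloc_buf(size_t size, size_t limit)
  {
          void *buf = NULL;

          if (size > limit)       /* early failure funnels to end: */
                  goto end;
          buf = malloc(size);
  end:
          trace_alloc(buf);       /* fires on success and on failure */
          return buf;
  }

  int main(void)
  {
          free(alloc_buf(64, 4096));      /* traced success */
          free(alloc_buf(8192, 4096));    /* traced failure: NULL */
          return 0;
  }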