From cfa52c0cfa4d727aa3e457bf29aeff296c528a08 Mon Sep 17 00:00:00 2001
From: Karol Herbst <nouveau@karolherbst.de>
Date: Thu, 3 Mar 2016 02:03:11 +0100
Subject: x86/mm/kmmio: Fix mmiotrace for hugepages

From: Karol Herbst <nouveau@karolherbst.de>

commit cfa52c0cfa4d727aa3e457bf29aeff296c528a08 upstream.

Because Linux might use pages bigger than the 4K pages to handle MMIO
ioremaps, the kmmio code shouldn't rely on the page id as it currently does.

Using the memory address instead of the page id lets us look up how big the
page is and what its base address is, so that we won't get a page fault
within the same page twice anymore.
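
To make that lookup concrete, here is a minimal sketch (illustration only,
not code from this patch; the helper name kmmio_page_base() is invented) of
how an address is reduced to the base and size of the page that actually
maps it, using the existing x86 helpers lookup_address(), page_level_mask()
and page_level_size():

	/*
	 * Illustration only: kmmio_page_base() is a hypothetical helper,
	 * not something this patch adds.  It returns the base address of
	 * the (possibly huge) page backing 'addr', or 0 if unmapped, and
	 * optionally reports the page size for stepping through a range.
	 */
	static unsigned long kmmio_page_base(unsigned long addr,
					     unsigned long *size)
	{
		unsigned int l;
		pte_t *pte = lookup_address(addr, &l);	/* 4K, 2M or 1G level */

		if (!pte)
			return 0;			/* address not mapped */

		if (size)
			*size = page_level_size(l);	/* step for arming loops */

		return addr & page_level_mask(l);	/* page base, not just PAGE_MASK */
	}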

Tested-by: Pierre Moreau <pierre.morrow@free.fr>
Signed-off-by: Karol Herbst <nouveau@karolherbst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: linux-mm@kvack.org
Cc: linux-x86_64@vger.kernel.org
Cc: nouveau@lists.freedesktop.org
Cc: pq@iki.fi
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/1456966991-6861-1-git-send-email-nouveau@karolherbst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/mm/kmmio.c | 88 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 59 insertions(+), 29 deletions(-)

--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
 struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
-	unsigned long page; /* location of the fault page */
+	unsigned long addr; /* the requested address */
 	pteval_t old_presence; /* page presence prior to arming */
 	bool armed;
 
@@ -70,9 +70,16 @@ unsigned int kmmio_count;
 static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
 static LIST_HEAD(kmmio_probes);
 
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
 {
-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+
+	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
 }
 
 /* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_pro
 }
 
 /* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
 {
 	struct list_head *head;
 	struct kmmio_fault_page *f;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
 
-	page &= PAGE_MASK;
-	head = kmmio_page_list(page);
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+	head = kmmio_page_list(addr);
 	list_for_each_entry_rcu(f, head, list) {
-		if (f->page == page)
+		if (f->addr == addr)
 			return f;
 	}
 	return NULL;
@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pt
 static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 {
 	unsigned int level;
-	pte_t *pte = lookup_address(f->page, &level);
+	pte_t *pte = lookup_address(f->addr, &level);
 
 	if (!pte) {
-		pr_err("no pte for page 0x%08lx\n", f->page);
+		pr_err("no pte for addr 0x%08lx\n", f->addr);
 		return -1;
 	}
 
@@ -156,7 +167,7 @@ static int clear_page_presence(struct km
 		return -1;
 	}
 
-	__flush_tlb_one(f->page);
+	__flush_tlb_one(f->addr);
 	return 0;
 }
 
@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct k
 	int ret;
 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
 	if (f->armed) {
-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
-			   f->page, f->count, !!f->old_presence);
+		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+			   f->addr, f->count, !!f->old_presence);
 	}
 	ret = clear_page_presence(f, true);
-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
-		  f->page);
+	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+		  f->addr);
 	f->armed = true;
 	return ret;
 }
@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(stru
 {
 	int ret = clear_page_presence(f, false);
 	WARN_ONCE(ret < 0,
-		  KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
 	f->armed = false;
 }
 
@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs,
 	struct kmmio_context *ctx;
 	struct kmmio_fault_page *faultpage;
 	int ret = 0; /* default to fault not handled */
+	unsigned long page_base = addr;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+	if (!pte)
+		return -EINVAL;
+	page_base &= page_level_mask(l);
 
 	/*
 	 * Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs,
 	preempt_disable();
 	rcu_read_lock();
 
-	faultpage = get_kmmio_fault_page(addr);
+	faultpage = get_kmmio_fault_page(page_base);
 	if (!faultpage) {
 		/*
 		 * Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs,
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		if (addr == ctx->addr) {
+		if (page_base == ctx->addr) {
 			/*
 			 * A second fault on the same page means some other
 			 * condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs,
 	ctx->active++;
 
 	ctx->fpage = faultpage;
-	ctx->probe = get_kmmio_probe(addr);
+	ctx->probe = get_kmmio_probe(page_base);
 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	ctx->addr = addr;
+	ctx->addr = page_base;
 
 	if (ctx->probe && ctx->probe->pre_handler)
 		ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@ out:
 }
 
 /* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
 {
 	struct kmmio_fault_page *f;
 
-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (f) {
 		if (!f->count)
 			arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned
 		return -1;
 
 	f->count = 1;
-	f->page = page;
+	f->addr = addr;
 
 	if (arm_kmmio_fault_page(f)) {
 		kfree(f);
 		return -1;
 	}
 
-	list_add_rcu(&f->list, kmmio_page_list(f->page));
+	list_add_rcu(&f->list, kmmio_page_list(f->addr));
 
 	return 0;
 }
 
 /* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
 				struct kmmio_fault_page **release_list)
 {
 	struct kmmio_fault_page *f;
 
-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (!f)
 		return;
 
@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_pr
 	int ret = 0;
 	unsigned long size = 0;
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+	unsigned int l;
+	pte_t *pte;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < size_lim) {
 		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 out:
 	spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
+	unsigned int l;
+	pte_t *pte;
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte)
+		return;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < size_lim) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 	list_del_rcu(&p->list);
 	kmmio_count--;
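
For context, a rough usage sketch follows (illustration only, not part of the
patch; the example_* names and the BAR-tracing scenario are made up). It
registers a probe over an ioremapped window through the interface in
include/linux/mmiotrace.h; with the change above, register_kmmio_probe()
walks that window in page_level_size() steps, so a window backed by a single
2M huge page is covered by one kmmio fault page instead of 512 4K entries:

#include <linux/io.h>
#include <linux/mmiotrace.h>

static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
			unsigned long addr)
{
	/* runs before the faulting MMIO access is single-stepped */
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
			 struct pt_regs *regs)
{
	/* runs after the access has completed */
}

static struct kmmio_probe example_probe = {
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int example_trace_bar(resource_size_t bar_phys, unsigned long bar_len)
{
	void __iomem *va = ioremap(bar_phys, bar_len);

	if (!va)
		return -ENOMEM;

	example_probe.addr = (unsigned long)va;	/* ioremapped virtual address */
	example_probe.len  = bar_len;		/* length of the traced window */

	/* teardown (unregister_kmmio_probe() + iounmap()) omitted */
	return register_kmmio_probe(&example_probe);
}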