/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call
 * to a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
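
/*
 * Illustrative sketch (not part of this file's API): a driver whose
 * ->release callback must not block can hand the actual freeing of its
 * per-mm state to SRCU via mmu_notifier_call_srcu. The "my_*" names
 * below are hypothetical.
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_notifier, rcu));
 *	}
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct my_notifier *my = container_of(mn, struct my_notifier, mn);
 *
 *		mmu_notifier_call_srcu(&my->rcu, my_free);
 *	}
 */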

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
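
/*
 * Sketch (hypothetical driver, not part of this file): a secondary MMU
 * with no hardware accessed bit can implement ->clear_flush_young as
 * described above, by zapping the range and reporting whether any
 * mapping existed. my_zap_range() is a made-up helper that returns 1
 * if it tore down at least one secondary pte.
 *
 *	static int my_clear_flush_young(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		return my_zap_range(mn, start, end);
 *	}
 */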

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is an invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this will be no additional
		 * overhead (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
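
/*
 * Sketch of a caller (assumes the 5.2-era mmu_notifier_range_init()
 * signature; "vma", "start" and "end" stand for whatever the caller is
 * invalidating): core mm code brackets a page table invalidation like
 * this, so the _start/_end callbacks above always come in pairs.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
 *				vma->vm_mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear and flush the primary ptes for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */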

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
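
/*
 * Sketch (hypothetical driver): a minimal ops table wiring up the
 * callbacks invoked by the functions above. my_release() and
 * my_invalidate_range() are made-up names; a driver only implements
 * the hooks it cares about, the rest stay NULL.
 *
 *	static void my_invalidate_range(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		... shoot down secondary TLB entries for [start, end) ...
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release	  = my_release,
 *		.invalidate_range = my_invalidate_range,
 *	};
 */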

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
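
/*
 * Sketch of typical usage (hypothetical driver, continuing the "my_*"
 * examples above): register on current->mm from an open/ioctl path,
 * pointing the notifier at the driver's ops table first.
 *
 *	my->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my->mn, current->mm);
 *	if (ret)
 *		goto err_free;
 */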

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
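
/*
 * Sketch of the matching teardown for the registration example above:
 * once this returns, no notifier method of "my" can still be running
 * and the mm_count pin taken at registration has been dropped, so the
 * structure embedding the notifier may be freed.
 *
 *	mmu_notifier_unregister(&my->mn, mm);
 *	kfree(my);
 */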

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);