// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *              Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
        .name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
        /* all mmu notifiers registered in this mm are queued in this list */
        struct hlist_head list;
        bool has_itree;
        /* to serialize the list modifications and hlist_unhashed */
        spinlock_t lock;
        unsigned long invalidate_seq;
        unsigned long active_invalidate_ranges;
        struct rb_root_cached itree;
        wait_queue_head_t wq;
        struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
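/*
 * A worked trace of the scheme above (illustrative only, not part of the
 * upstream file): from the idle state invalidate_seq == 2 (even, set at
 * registration time), the first overlapping invalidate_range_start() does
 * seq |= 1 -> 3 (odd: "fully excluded", the itree is frozen); a nested or
 * parallel start leaves the seq at 3 and only bumps
 * active_invalidate_ranges; the final inv_end does seq++ -> 4 (even) and
 * only then processes the deferred itree add/remove list. A reader whose
 * sampled seq equals the current odd seq is colliding and must wait/retry.
 */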
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
        lockdep_assert_held(&subscriptions->lock);
        return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
                         const struct mmu_notifier_range *range,
                         unsigned long *seq)
{
        struct interval_tree_node *node;
        struct mmu_interval_notifier *res = NULL;

        spin_lock(&subscriptions->lock);
        subscriptions->active_invalidate_ranges++;
        node = interval_tree_iter_first(&subscriptions->itree, range->start,
                                        range->end - 1);
        if (node) {
                subscriptions->invalidate_seq |= 1;
                res = container_of(node, struct mmu_interval_notifier,
                                   interval_tree);
        }

        *seq = subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);
        return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
                  const struct mmu_notifier_range *range)
{
        struct interval_tree_node *node;

        node = interval_tree_iter_next(&interval_sub->interval_tree,
                                       range->start, range->end - 1);
        if (!node)
                return NULL;
        return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
        struct mmu_interval_notifier *interval_sub;
        struct hlist_node *next;

        spin_lock(&subscriptions->lock);
        if (--subscriptions->active_invalidate_ranges ||
            !mn_itree_is_invalidating(subscriptions)) {
                spin_unlock(&subscriptions->lock);
                return;
        }

        /* Make invalidate_seq even */
        subscriptions->invalidate_seq++;

        /*
         * The inv_end incorporates a deferred mechanism like rtnl_unlock().
         * Adds and removes are queued until the final inv_end happens then
         * they are progressed. This arrangement for tree updates is used to
         * avoid using a blocking lock during invalidate_range_start.
         */
        hlist_for_each_entry_safe(interval_sub, next,
                                  &subscriptions->deferred_list,
                                  deferred_item) {
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                else
                        interval_tree_remove(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                hlist_del(&interval_sub->deferred_item);
        }
        spin_unlock(&subscriptions->lock);

        wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
        struct mmu_notifier_subscriptions *subscriptions =
                interval_sub->mm->notifier_subscriptions;
        unsigned long seq;
        bool is_invalidating;

        /*
         * If the subscription has a different seq value under the user_lock
         * than we started with then it has collided.
         *
         * If the subscription currently has the same seq value as the
         * subscriptions seq, then it is currently between
         * invalidate_start/end and is colliding.
         *
         * The locking looks broadly like this:
         *   mn_itree_inv_start():                 mmu_interval_read_begin():
         *                                          spin_lock
         *                                           seq = READ_ONCE(interval_sub->invalidate_seq);
         *                                           seq == subs->invalidate_seq
         *                                          spin_unlock
         *    spin_lock
         *     seq = ++subscriptions->invalidate_seq
         *    spin_unlock
         *     op->invalidate():
         *       user_lock
         *        mmu_interval_set_seq()
         *         interval_sub->invalidate_seq = seq
         *       user_unlock
         *
         *                         [Required: mmu_interval_read_retry() == true]
         *
         *   mn_itree_inv_end():
         *    spin_lock
         *     seq = ++subscriptions->invalidate_seq
         *    spin_unlock
         *
         *                                        user_lock
         *                                         mmu_interval_read_retry():
         *                                          interval_sub->invalidate_seq != seq
         *                                        user_unlock
         *
         * Barriers are not needed here as any races here are closed by an
         * eventual mmu_interval_read_retry(), which provides a barrier via the
         * user_lock.
         */
        spin_lock(&subscriptions->lock);
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
        seq = READ_ONCE(interval_sub->invalidate_seq);
        is_invalidating = seq == subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);

        /*
         * interval_sub->invalidate_seq must always be set to an odd value via
         * mmu_interval_set_seq() using the provided cur_seq from
         * mn_itree_inv_start_range(). This ensures that if seq does wrap we
         * will always clear the below sleep in some reasonable time as
         * subscriptions->invalidate_seq is even in the idle state.
         */
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        if (is_invalidating)
                wait_event(subscriptions->wq,
                           READ_ONCE(subscriptions->invalidate_seq) != seq);

        /*
         * Notice that mmu_interval_read_retry() can already be true at this
         * point, avoiding loops here allows the caller to provide a global
         * time bound.
         */

        return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);

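/*
 * The read side described in the kernel-doc above typically pairs with
 * mmu_interval_read_retry() roughly as sketched below. This is illustrative
 * only: driver_lock, make_sptes() and the retry policy are assumptions, not
 * names defined by this file or by any in-tree driver.
 *
 *	again:
 *		seq = mmu_interval_read_begin(&interval_sub);
 *
 *		make_sptes(...);	// may sleep, done outside any lock
 *
 *		mutex_lock(&driver_lock);	// the "user_lock"
 *		if (mmu_interval_read_retry(&interval_sub, seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		// commit the SPTEs; any invalidation that raced with the
 *		// build above would have made read_retry return true
 *		mutex_unlock(&driver_lock);
 */
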
static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier_range range = {
                .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
                .event = MMU_NOTIFY_RELEASE,
                .mm = mm,
                .start = 0,
                .end = ULONG_MAX,
        };
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;
        bool ret;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, &range)) {
                ret = interval_sub->ops->invalidate(interval_sub, &range,
                                                    cur_seq);
                WARN_ON(!ret);
        }

        mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier *subscription;
        int id;

        /*
         * SRCU here will block mmu_notifier_unregister until
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu))
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
                 * existing sptes and stop the driver from establishing any more
                 * sptes before all the pages in the mm are freed.
                 */
                if (subscription->ops->release)
                        subscription->ops->release(subscription, mm);

        spin_lock(&subscriptions->lock);
        while (unlikely(!hlist_empty(&subscriptions->list))) {
                subscription = hlist_entry(subscriptions->list.first,
                                           struct mmu_notifier, hlist);
                /*
                 * We arrived before mmu_notifier_unregister so
                 * mmu_notifier_unregister will do nothing other than to wait
                 * for ->release to finish and for mmu_notifier_unregister to
                 * return.
                 */
                hlist_del_init_rcu(&subscription->hlist);
        }
        spin_unlock(&subscriptions->lock);
        srcu_read_unlock(&srcu, id);

        /*
         * synchronize_srcu here prevents mmu_notifier_release from returning to
         * exit_mmap (which would proceed with freeing all pages in the mm)
         * until the ->release method returns, if it was invoked by
         * mmu_notifier_unregister.
         *
         * The notifier_subscriptions can't go away from under us because
         * one mm_count is held by exit_mmap.
         */
        synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;

        if (subscriptions->has_itree)
                mn_itree_release(subscriptions, mm);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_flush_young)
                        young |= subscription->ops->clear_flush_young(
                                subscription, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
                               unsigned long start,
                               unsigned long end)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_young)
                        young |= subscription->ops->clear_young(subscription,
                                                                mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->test_young) {
                        young = subscription->ops->test_young(subscription, mm,
                                                              address);
                        if (young)
                                break;
                }
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
                               const struct mmu_notifier_range *range)
{
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, range)) {
                bool ret;

                ret = interval_sub->ops->invalidate(interval_sub, range,
                                                    cur_seq);
                if (!ret) {
                        if (WARN_ON(mmu_notifier_range_blockable(range)))
                                continue;
                        goto out_would_block;
                }
        }
        return 0;

out_would_block:
        /*
         * On -EAGAIN the non-blocking caller is not allowed to call
         * invalidate_range_end()
         */
        mn_itree_inv_end(subscriptions);
        return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
        struct mmu_notifier_subscriptions *subscriptions,
        struct mmu_notifier_range *range)
{
        struct mmu_notifier *subscription;
        int ret = 0;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                const struct mmu_notifier_ops *ops = subscription->ops;

                if (ops->invalidate_range_start) {
                        int _ret;

                        if (!mmu_notifier_range_blockable(range))
                                non_block_start();
                        _ret = ops->invalidate_range_start(subscription, range);
                        if (!mmu_notifier_range_blockable(range))
                                non_block_end();
                        if (_ret) {
                                pr_info("%pS callback failed with %d in %sblockable context.\n",
                                        ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ?
                                                "non-" :
                                                "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
                                        _ret != -EAGAIN);
                                /*
                                 * We call all the notifiers on any EAGAIN,
                                 * there is no way for a notifier to know if
                                 * its start method failed, thus a start that
                                 * does EAGAIN can't also do end.
                                 */
                                WARN_ON(ops->invalidate_range_end);
                                ret = _ret;
                        }
                }
        }

        if (ret) {
                /*
                 * Must be non-blocking to get here. If there are multiple
                 * notifiers and one or more failed start, any that succeeded
                 * start are expecting their end to be called. Do so now.
                 */
                hlist_for_each_entry_rcu(subscription, &subscriptions->list,
                                         hlist, srcu_read_lock_held(&srcu)) {
                        if (!subscription->ops->invalidate_range_end)
                                continue;

                        subscription->ops->invalidate_range_end(subscription,
                                                                range);
                }
        }
        srcu_read_unlock(&srcu, id);

        return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
        int ret;

        if (subscriptions->has_itree) {
                ret = mn_itree_invalidate(subscriptions, range);
                if (ret)
                        return ret;
        }
        if (!hlist_empty(&subscriptions->list))
                return mn_hlist_invalidate_range_start(subscriptions, range);
        return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
                        struct mmu_notifier_range *range)
{
        struct mmu_notifier *subscription;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->invalidate_range_end) {
                        if (!mmu_notifier_range_blockable(range))
                                non_block_start();
                        subscription->ops->invalidate_range_end(subscription,
                                                                range);
                        if (!mmu_notifier_range_blockable(range))
                                non_block_end();
                }
        }
        srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (subscriptions->has_itree)
                mn_itree_inv_end(subscriptions);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_invalidate_end(subscriptions, range);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        struct mmu_notifier *subscription;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->arch_invalidate_secondary_tlbs)
                        subscription->ops->arch_invalidate_secondary_tlbs(
                                subscription, mm,
                                start, end);
        }
        srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
                            struct mm_struct *mm)
{
        struct mmu_notifier_subscriptions *subscriptions = NULL;
        int ret;

        mmap_assert_write_locked(mm);
        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        /*
         * Subsystems should only register for invalidate_secondary_tlbs() or
         * invalidate_range_start()/end() callbacks, not both.
         */
        if (WARN_ON_ONCE(subscription &&
                         (subscription->ops->arch_invalidate_secondary_tlbs &&
                         (subscription->ops->invalidate_range_start ||
                          subscription->ops->invalidate_range_end))))
                return -EINVAL;

        if (!mm->notifier_subscriptions) {
                /*
                 * kmalloc cannot be called under mm_take_all_locks(), but we
                 * know that mm->notifier_subscriptions can't change while we
                 * hold the write side of the mmap_lock.
                 */
                subscriptions = kzalloc(
                        sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
                if (!subscriptions)
                        return -ENOMEM;

                INIT_HLIST_HEAD(&subscriptions->list);
                spin_lock_init(&subscriptions->lock);
                subscriptions->invalidate_seq = 2;
                subscriptions->itree = RB_ROOT_CACHED;
                init_waitqueue_head(&subscriptions->wq);
                INIT_HLIST_HEAD(&subscriptions->deferred_list);
        }

        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_clean;

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either
         * thanks to mm_take_all_locks().
         *
         * release semantics on the initialization of the
         * mmu_notifier_subscriptions's contents are provided for unlocked
         * readers. acquire can only be used while holding the mmgrab or
         * mmget, and is safe because once created the
         * mmu_notifier_subscriptions is not freed until the mm is destroyed.
         * As above, users holding the mmap_lock or one of the
         * mm_take_all_locks() do not need to use acquire semantics.
         */
        if (subscriptions)
                smp_store_release(&mm->notifier_subscriptions, subscriptions);

        if (subscription) {
                /* Pairs with the mmdrop in mmu_notifier_unregister_* */
                mmgrab(mm);
                subscription->mm = mm;
                subscription->users = 1;

                spin_lock(&mm->notifier_subscriptions->lock);
                hlist_add_head_rcu(&subscription->hlist,
                                   &mm->notifier_subscriptions->list);
                spin_unlock(&mm->notifier_subscriptions->lock);
        } else
                mm->notifier_subscriptions->has_itree = true;

        mm_drop_all_locks(mm);
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return 0;

out_clean:
        kfree(subscriptions);
        return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
                          struct mm_struct *mm)
{
        int ret;

        mmap_write_lock(mm);
        ret = __mmu_notifier_register(subscription, mm);
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

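/*
 * A minimal registration sketch, assuming a hypothetical driver: my_mirror,
 * my_ops and the callback names below are illustrative and not defined by
 * this file. A driver usually embeds the struct mmu_notifier in its own
 * per-mm state and registers it against current->mm:
 *
 *	struct my_mirror {
 *		struct mmu_notifier mn;
 *		...
 *	};
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	mirror->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&mirror->mn, current->mm);
 *
 * As the kernel-doc above requires, the caller must keep mm_users above zero
 * for the duration of the call.
 */
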
static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
        struct mmu_notifier *subscription;

        spin_lock(&mm->notifier_subscriptions->lock);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
                if (subscription->ops != ops)
                        continue;

                if (likely(subscription->users != UINT_MAX))
                        subscription->users++;
                else
                        subscription = ERR_PTR(-EOVERFLOW);
                spin_unlock(&mm->notifier_subscriptions->lock);
                return subscription;
        }
        spin_unlock(&mm->notifier_subscriptions->lock);
        return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
                                             struct mm_struct *mm)
{
        struct mmu_notifier *subscription;
        int ret;

        mmap_assert_write_locked(mm);

        if (mm->notifier_subscriptions) {
                subscription = find_get_mmu_notifier(mm, ops);
                if (subscription)
                        return subscription;
        }

        subscription = ops->alloc_notifier(mm);
        if (IS_ERR(subscription))
                return subscription;
        subscription->ops = ops;
        ret = __mmu_notifier_register(subscription, mm);
        if (ret)
                goto out_free;
        return subscription;
out_free:
        subscription->ops->free_notifier(subscription);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

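/*
 * Sketch of the get/put flow, for illustration only: my_ops, my_alloc and
 * my_free are hypothetical. Users of mmu_notifier_get_locked() must supply
 * alloc_notifier()/free_notifier() so a single notifier can be shared per
 * mm and ops pair:
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.alloc_notifier	= my_alloc,
 *		.free_notifier	= my_free,
 *		...
 *	};
 *
 *	mmap_write_lock(mm);
 *	mn = mmu_notifier_get_locked(&my_ops, mm);
 *	mmap_write_unlock(mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	// ... use mn, later drop the reference with mmu_notifier_put(mn)
 */
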
/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
        kfree(mm->notifier_subscriptions);
        mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
                             struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        if (!hlist_unhashed(&subscription->hlist)) {
                /*
                 * SRCU here will force exit_mmap to wait for ->release to
                 * finish before freeing the pages.
                 */
                int id;

                id = srcu_read_lock(&srcu);
                /*
                 * exit_mmap will block in mmu_notifier_release to guarantee
                 * that ->release is called before freeing the pages.
                 */
                if (subscription->ops->release)
                        subscription->ops->release(subscription, mm);
                srcu_read_unlock(&srcu, id);

                spin_lock(&mm->notifier_subscriptions->lock);
                /*
                 * Can not use list_del_rcu() since __mmu_notifier_release
                 * can delete it before we hold the lock.
                 */
                hlist_del_init_rcu(&subscription->hlist);
                spin_unlock(&mm->notifier_subscriptions->lock);
        }

        /*
         * Wait for any running method to finish, of course including
         * ->release if it was run by mmu_notifier_release instead of us.
         */
        synchronize_srcu(&srcu);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
        struct mmu_notifier *subscription =
                container_of(rcu, struct mmu_notifier, rcu);
        struct mm_struct *mm = subscription->mm;

        subscription->ops->free_notifier(subscription);
        /* Pairs with the get in __mmu_notifier_register() */
        mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
        struct mm_struct *mm = subscription->mm;

        spin_lock(&mm->notifier_subscriptions->lock);
        if (WARN_ON(!subscription->users) || --subscription->users)
                goto out_unlock;
        hlist_del_init_rcu(&subscription->hlist);
        spin_unlock(&mm->notifier_subscriptions->lock);

        call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
        return;

out_unlock:
        spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

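/*
 * Because the final free runs asynchronously via SRCU, a module using the
 * get/put flow typically ends its teardown roughly like the sketch below
 * (my_exit and mirror are hypothetical names, not defined by this file):
 *
 *	static void __exit my_exit(void)
 *	{
 *		mmu_notifier_put(&mirror->mn);	// drop the last reference
 *		...
 *		mmu_notifier_synchronize();	// wait for free_notifier()
 *	}
 */
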
static int __mmu_interval_notifier_insert(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
        unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
        interval_sub->mm = mm;
        interval_sub->ops = ops;
        RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
        interval_sub->interval_tree.start = start;
        /*
         * Note that the representation of the intervals in the interval tree
         * considers the ending point as contained in the interval.
         */
        if (length == 0 ||
            check_add_overflow(start, length - 1,
                               &interval_sub->interval_tree.last))
                return -EOVERFLOW;

        /* Must call with a mmget() held */
        if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
                return -EINVAL;

        /* pairs with mmdrop in mmu_interval_notifier_remove() */
        mmgrab(mm);

        /*
         * If some invalidate_range_start/end region is going on in parallel
         * we don't know what VA ranges are affected, so we must assume this
         * new range is included.
         *
         * If the itree is invalidating then we are not allowed to change
         * it. Retrying until invalidation is done is tricky due to the
         * possibility for live lock, instead defer the add to
         * mn_itree_inv_end() so this algorithm is deterministic.
         *
         * In all cases the value for the interval_sub->invalidate_seq should be
         * odd, see mmu_interval_read_begin()
         */
        spin_lock(&subscriptions->lock);
        if (subscriptions->active_invalidate_ranges) {
                if (mn_itree_is_invalidating(subscriptions))
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                else {
                        subscriptions->invalidate_seq |= 1;
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                }
                interval_sub->invalidate_seq = subscriptions->invalidate_seq;
        } else {
                WARN_ON(mn_itree_is_invalidating(subscriptions));
                /*
                 * The starting seq for a subscription not under invalidation
                 * should be odd, not equal to the current invalidate_seq and
                 * invalidate_seq should not 'wrap' to the new seq any time
                 * soon.
                 */
                interval_sub->invalidate_seq =
                        subscriptions->invalidate_seq - 1;
                interval_tree_insert(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);
        return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops)
{
        struct mmu_notifier_subscriptions *subscriptions;
        int ret;

        might_lock(&mm->mmap_lock);

        subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
        if (!subscriptions || !subscriptions->has_itree) {
                ret = mmu_notifier_register(NULL, mm);
                if (ret)
                        return ret;
                subscriptions = mm->notifier_subscriptions;
        }
        return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                              start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

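/*
 * Interval-notifier usage sketch, for illustration only: my_invalidate,
 * driver_lock and my_interval_ops are hypothetical names. The invalidate
 * callback must take the same "user_lock" as the read side (see the sketch
 * after mmu_interval_read_begin() above) and publish the new sequence with
 * mmu_interval_set_seq() before tearing down its SPTEs:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;	// would need to sleep on driver_lock
 *		mutex_lock(&driver_lock);
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		// tear down SPTEs covering range->start .. range->end
 *		mutex_unlock(&driver_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_interval_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	ret = mmu_interval_notifier_insert(&interval_sub, mm, start, length,
 *					   &my_interval_ops);
 */
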
int mmu_interval_notifier_insert_locked(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops)
{
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;
        int ret;

        mmap_assert_write_locked(mm);

        if (!subscriptions || !subscriptions->has_itree) {
                ret = __mmu_notifier_register(NULL, mm);
                if (ret)
                        return ret;
                subscriptions = mm->notifier_subscriptions;
        }
        return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                              start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
                          unsigned long seq)
{
        bool ret;

        spin_lock(&subscriptions->lock);
        ret = subscriptions->invalidate_seq != seq;
        spin_unlock(&subscriptions->lock);
        return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
        struct mm_struct *mm = interval_sub->mm;
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;
        unsigned long seq = 0;

        might_sleep();

        spin_lock(&subscriptions->lock);
        if (mn_itree_is_invalidating(subscriptions)) {
                /*
                 * remove is being called after insert put this on the
                 * deferred list, but before the deferred list was processed.
                 */
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
                        hlist_del(&interval_sub->deferred_item);
                } else {
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                        seq = subscriptions->invalidate_seq;
                }
        } else {
                WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
                interval_tree_remove(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);

        /*
         * The possible sleep on progress in the invalidation requires the
         * caller not hold any locks held by invalidation callbacks.
         */
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        if (seq)
                wait_event(subscriptions->wq,
                           mmu_interval_seq_released(subscriptions, seq));

        /* pairs with mmgrab in mmu_interval_notifier_insert() */
        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
        synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);