/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

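/* Exposed to userspace as the vm.unprivileged_userfaultfd sysctl knob */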
int sysctl_unprivileged_userfaultfd __read_mostly = 1;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
        UFFD_STATE_WAIT_API,
        UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 */
struct userfaultfd_ctx {
        /* waitqueue head for the pending (i.e. not read) userfaults */
        wait_queue_head_t fault_pending_wqh;
        /* waitqueue head for the userfaults */
        wait_queue_head_t fault_wqh;
        /* waitqueue head for the pseudo fd to wakeup poll/read */
        wait_queue_head_t fd_wqh;
        /* waitqueue head for events */
        wait_queue_head_t event_wqh;
        /* a refile sequence protected by fault_pending_wqh lock */
        struct seqcount refile_seq;
        /* pseudo fd refcounting */
        refcount_t refcount;
        /* userfaultfd syscall flags */
        unsigned int flags;
        /* features requested from the userspace */
        unsigned int features;
        /* state machine */
        enum userfaultfd_state state;
        /* released */
        bool released;
        /* memory mappings are changing because of non-cooperative event */
        bool mmap_changing;
        /* mm with one or more vmas attached to this userfaultfd_ctx */
        struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
        struct userfaultfd_ctx *orig;
        struct userfaultfd_ctx *new;
        struct list_head list;
};

struct userfaultfd_unmap_ctx {
        struct userfaultfd_ctx *ctx;
        unsigned long start;
        unsigned long end;
        struct list_head list;
};

struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_entry_t wq;
        struct userfaultfd_ctx *ctx;
        bool waken;
};

struct userfaultfd_wake_range {
        unsigned long start;
        unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                     int wake_flags, void *key)
{
        struct userfaultfd_wake_range *range = key;
        int ret;
        struct userfaultfd_wait_queue *uwq;
        unsigned long start, len;

        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
        ret = 0;
        /* len == 0 means wake all */
        start = range->start;
        len = range->len;
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
        WRITE_ONCE(uwq->waken, true);
        /*
         * The Program-Order guarantees provided by the scheduler
         * ensure uwq->waken is visible before the task is woken.
         */
        ret = wake_up_state(wq->private, mode);
        if (ret) {
                /*
                 * Wake only once, autoremove behavior.
                 *
                 * After the effect of list_del_init is visible to the other
                 * CPUs, the waitqueue may disappear from under us, see the
                 * !list_empty_careful() in handle_userfault().
                 *
                 * try_to_wake_up() has an implicit smp_mb(), and the
                 * wq->private is read before calling the extern function
                 * "wake_up_state" (which in turn calls try_to_wake_up).
                 */
                list_del_init(&wq->entry);
        }
out:
        return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
        refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
        if (refcount_dec_and_test(&ctx->refcount)) {
                VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
                mmdrop(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
}

static inline void msg_init(struct uffd_msg *msg)
{
        BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
        /*
         * Must use memset to zero out the padding or kernel data is
         * leaked to userland.
         */
        memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
                                            unsigned int flags,
                                            unsigned long reason,
                                            unsigned int features)
{
        struct uffd_msg msg;
        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;
        msg.arg.pagefault.address = address;
        if (flags & FAULT_FLAG_WRITE)
                /*
                 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
                 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
                 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
                 * was a read fault, otherwise if set it means it's
                 * a write fault.
                 */
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
        if (reason & VM_UFFD_WP)
                /*
                 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
                 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
                 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
                 * a missing fault, otherwise if set it means it's a
                 * write protect fault.
                 */
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
        if (features & UFFD_FEATURE_THREAD_ID)
                msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
        return msg;
}

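/*
 * Illustrative only (not part of this file): a minimal userspace sketch
 * of how the uffd_msg built above is typically consumed and resolved by
 * a monitor thread; uffd, page_size and page_buf are assumed to be set
 * up by the caller:
 *
 *	struct uffd_msg msg;
 *	struct uffdio_copy copy;
 *
 *	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *		return;
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
 *		copy.src = (unsigned long)page_buf;
 *		copy.len = page_size;
 *		copy.mode = 0;	// implicit wakeup of the faulting thread
 *		ioctl(uffd, UFFDIO_COPY, &copy);
 *	}
 */
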
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                              struct vm_area_struct *vma,
                                              unsigned long address,
                                              unsigned long flags,
                                              unsigned long reason)
{
        struct mm_struct *mm = ctx->mm;
        pte_t *ptep, pte;
        bool ret = true;

        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

        ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

        if (!ptep)
                goto out;

        ret = false;
        pte = huge_ptep_get(ptep);

        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
        if (huge_pte_none(pte))
                ret = true;
        if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
out:
        return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                              struct vm_area_struct *vma,
                                              unsigned long address,
                                              unsigned long flags,
                                              unsigned long reason)
{
        return false;   /* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long address,
                                         unsigned long flags,
                                         unsigned long reason)
{
        struct mm_struct *mm = ctx->mm;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        bool ret = true;

        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;
        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                goto out;
        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;
        pmd = pmd_offset(pud, address);
        /*
         * READ_ONCE must function as a barrier with narrower scope
         * and it must be equivalent to:
         *      _pmd = *pmd; barrier();
         *
         * This is to deal with the instability (as in
         * pmd_trans_unstable) of the pmd.
         */
        _pmd = READ_ONCE(*pmd);
        if (pmd_none(_pmd))
                goto out;

        ret = false;
        if (!pmd_present(_pmd))
                goto out;

        if (pmd_trans_huge(_pmd))
                goto out;

        /*
         * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
         * and use the standard pte_offset_map() instead of parsing _pmd.
         */
        pte = pte_offset_map(pmd, address);
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
        if (pte_none(*pte))
                ret = true;
        pte_unmap(pte);

out:
        return ret;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
        struct mm_struct *mm = vmf->vma->vm_mm;
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue uwq;
        vm_fault_t ret = VM_FAULT_SIGBUS;
        bool must_wait, return_to_userland;
        long blocking_state;

        /*
         * We don't do userfault handling for the final child pid update.
         *
         * We also don't do userfault handling during
         * coredumping. hugetlbfs has the special
         * follow_hugetlb_page() to skip missing pages in the
         * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
         * the no_page_table() helper in follow_page_mask(), but the
         * shmem_vm_ops->fault method is invoked even during
         * coredumping without mmap_sem and it ends up here.
         */
        if (current->flags & (PF_EXITING|PF_DUMPCORE))
                goto out;

        /*
         * Coredumping runs without mmap_sem so we can only check that
         * the mmap_sem is held, if PF_DUMPCORE was not set.
         */
        WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));

        ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
        if (!ctx)
                goto out;

        BUG_ON(ctx->mm != mm);

        VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
        VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

        if (ctx->features & UFFD_FEATURE_SIGBUS)
                goto out;

        /*
         * If it's already released don't get it. This avoids looping
         * in __get_user_pages if userfaultfd_release waits on the
         * caller of handle_userfault to release the mmap_sem.
         */
        if (unlikely(READ_ONCE(ctx->released))) {
                /*
                 * Don't return VM_FAULT_SIGBUS in this case, so a non
                 * cooperative manager can close the uffd after the
                 * last UFFDIO_COPY, without risking triggering an
                 * involuntary SIGBUS if the process was starting the
                 * userfaultfd while the userfaultfd was still armed
                 * (but after the last UFFDIO_COPY). If the uffd
                 * wasn't already closed when the userfault reached
                 * this point, that would normally be solved by
                 * userfaultfd_must_wait returning 'false'.
                 *
                 * If we were to return VM_FAULT_SIGBUS here, the non
                 * cooperative manager would be instead forced to
                 * always call UFFDIO_UNREGISTER before it can safely
                 * close the uffd.
                 */
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        /*
         * Check that we can return VM_FAULT_RETRY.
         *
         * NOTE: it should become possible to return VM_FAULT_RETRY
         * even if FAULT_FLAG_TRIED is set without leading to gup()
         * -EBUSY failures, if the userfaultfd is to be extended for
         * VM_UFFD_WP tracking and we intend to arm the userfault
         * without first stopping userland access to the memory. For
         * VM_UFFD_MISSING userfaults this is enough for now.
         */
        if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
                /*
                 * Validate the invariant that nowait must allow retry
                 * to be sure not to return SIGBUS erroneously on
                 * nowait invocations.
                 */
                BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
                if (printk_ratelimit()) {
                        printk(KERN_WARNING
                               "FAULT_FLAG_ALLOW_RETRY missing %x\n",
                               vmf->flags);
                        dump_stack();
                }
#endif
                goto out;
        }

        /*
         * Handle nowait, not much to do other than tell it to retry
         * and wait.
         */
        ret = VM_FAULT_RETRY;
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                goto out;

        /* take the reference before dropping the mmap_sem */
        userfaultfd_ctx_get(ctx);

        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
                        ctx->features);
        uwq.ctx = ctx;
        uwq.waken = false;

        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
        blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
                         TASK_KILLABLE;

        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
         * After the __add_wait_queue the uwq is visible to userland
         * through poll/read().
         */
        __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
        /*
         * The smp_mb() after __set_current_state prevents the reads
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
        set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);

        if (!is_vm_hugetlb_page(vmf->vma))
                must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
                                                  reason);
        else
                must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
                                                       vmf->address,
                                                       vmf->flags, reason);
        up_read(&mm->mmap_sem);

        if (likely(must_wait && !READ_ONCE(ctx->released) &&
                   (return_to_userland ? !signal_pending(current) :
                    !fatal_signal_pending(current)))) {
                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;

                /*
                 * False wakeups can originate even from rwsem before
                 * up_read(), however userfaults will wait either for a
                 * targeted wakeup on the specific uwq waitqueue from
                 * wake_userfault() or for signals or for uffd
                 * release.
                 */
                while (!READ_ONCE(uwq.waken)) {
                        /*
                         * This needs the full smp_store_mb()
                         * guarantee as the state write must be
                         * visible to other CPUs before reading
                         * uwq.waken from other CPUs.
                         */
                        set_current_state(blocking_state);
                        if (READ_ONCE(uwq.waken) ||
                            READ_ONCE(ctx->released) ||
                            (return_to_userland ? signal_pending(current) :
                             fatal_signal_pending(current)))
                                break;
                        schedule();
                }
        }

        __set_current_state(TASK_RUNNING);

        if (return_to_userland) {
                if (signal_pending(current) &&
                    !fatal_signal_pending(current)) {
                        /*
                         * If we got a SIGSTOP or SIGCONT and this is
                         * a normal userland page fault, just let
                         * userland return so the signal will be
                         * handled and gdb debugging works. The page
                         * fault code immediately after we return from
                         * this function is going to release the
                         * mmap_sem and it's not depending on it
                         * (unlike gup would if we were not to return
                         * VM_FAULT_RETRY).
                         *
                         * If a fatal signal is pending we still take
                         * the streamlined VM_FAULT_RETRY failure path
                         * and there's no need to retake the mmap_sem
                         * in such case.
                         */
                        down_read(&mm->mmap_sem);
                        ret = VM_FAULT_NOPAGE;
                }
        }

        /*
         * Here we race with the list_del; list_add in
         * userfaultfd_ctx_read(), however because we don't ever run
         * list_del_init() to refile across the two lists, the prev
         * and next pointers will never point to self. list_add also
         * would never let any of the two pointers to point to
         * self. So list_empty_careful won't risk to see both pointers
         * pointing to self at any time during the list refile. The
         * only case where list_del_init() is called is the full
         * removal in the wake function and there we don't re-list_add
         * and it's fine not to block on the spinlock. The uwq on this
         * kernel stack can be released after the list_del_init.
         */
        if (!list_empty_careful(&uwq.wq.entry)) {
                spin_lock(&ctx->fault_pending_wqh.lock);
                /*
                 * No need of list_del_init(), the uwq on the stack
                 * will be freed shortly anyway.
                 */
                list_del(&uwq.wq.entry);
                spin_unlock(&ctx->fault_pending_wqh.lock);
        }

        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
        userfaultfd_ctx_put(ctx);

out:
        return ret;
}

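/*
 * Illustrative only (not part of this file): from the faulting thread's
 * point of view, handle_userfault() above is invisible. Touching a
 * missing page in a registered range is just a memory access that
 * completes once the monitor resolves it:
 *
 *	char *p = area;		// range registered with UFFDIO_REGISTER
 *	char c = p[0];		// blocks in handle_userfault() until the
 *				// monitor does UFFDIO_COPY/UFFDIO_ZEROPAGE
 *				// or UFFDIO_WAKE covering this page
 */
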
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                                              struct userfaultfd_wait_queue *ewq)
{
        struct userfaultfd_ctx *release_new_ctx;

        if (WARN_ON_ONCE(current->flags & PF_EXITING))
                goto out;

        ewq->ctx = ctx;
        init_waitqueue_entry(&ewq->wq, current);
        release_new_ctx = NULL;

        spin_lock(&ctx->event_wqh.lock);
        /*
         * After the __add_wait_queue the uwq is visible to userland
         * through poll/read().
         */
        __add_wait_queue(&ctx->event_wqh, &ewq->wq);
        for (;;) {
                set_current_state(TASK_KILLABLE);
                if (ewq->msg.event == 0)
                        break;
                if (READ_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
                        /*
                         * &ewq->wq may be queued in fork_event, but
                         * __remove_wait_queue ignores the head
                         * parameter. It would be a problem if it
                         * didn't.
                         */
                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
                        if (ewq->msg.event == UFFD_EVENT_FORK) {
                                struct userfaultfd_ctx *new;

                                new = (struct userfaultfd_ctx *)
                                        (unsigned long)
                                        ewq->msg.arg.reserved.reserved1;
                                release_new_ctx = new;
                        }
                        break;
                }

                spin_unlock(&ctx->event_wqh.lock);

                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();

                spin_lock(&ctx->event_wqh.lock);
        }
        __set_current_state(TASK_RUNNING);
        spin_unlock(&ctx->event_wqh.lock);

        if (release_new_ctx) {
                struct vm_area_struct *vma;
                struct mm_struct *mm = release_new_ctx->mm;

                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
                /* no task can run (and in turn coredump) yet */
                VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                                vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
                        }
                up_write(&mm->mmap_sem);

                userfaultfd_ctx_put(release_new_ctx);
        }

        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
out:
        WRITE_ONCE(ctx->mmap_changing, false);
        userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
                                       struct userfaultfd_wait_queue *ewq)
{
        ewq->msg.event = 0;
        wake_up_locked(&ctx->event_wqh);
        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
        struct userfaultfd_ctx *ctx = NULL, *octx;
        struct userfaultfd_fork_ctx *fctx;

        octx = vma->vm_userfaultfd_ctx.ctx;
        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
                return 0;
        }

        list_for_each_entry(fctx, fcs, list)
                if (fctx->orig == octx) {
                        ctx = fctx->new;
                        break;
                }

        if (!ctx) {
                fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
                if (!fctx)
                        return -ENOMEM;

                ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
                if (!ctx) {
                        kfree(fctx);
                        return -ENOMEM;
                }

                refcount_set(&ctx->refcount, 1);
                ctx->flags = octx->flags;
                ctx->state = UFFD_STATE_RUNNING;
                ctx->features = octx->features;
                ctx->released = false;
                ctx->mmap_changing = false;
                ctx->mm = vma->vm_mm;
                mmgrab(ctx->mm);

                userfaultfd_ctx_get(octx);
                WRITE_ONCE(octx->mmap_changing, true);
                fctx->orig = octx;
                fctx->new = ctx;
                list_add_tail(&fctx->list, fcs);
        }

        vma->vm_userfaultfd_ctx.ctx = ctx;
        return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
        struct userfaultfd_ctx *ctx = fctx->orig;
        struct userfaultfd_wait_queue ewq;

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_FORK;
        ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

        userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
        struct userfaultfd_fork_ctx *fctx, *n;

        list_for_each_entry_safe(fctx, n, fcs, list) {
                dup_fctx(fctx);
                list_del(&fctx->list);
                kfree(fctx);
        }
}

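/*
 * Illustrative only (not part of this file): when UFFD_FEATURE_EVENT_FORK
 * is enabled, the fork path above hands the monitor a new userfaultfd for
 * the child through the fork message, roughly consumed like this in
 * userspace:
 *
 *	struct uffd_msg msg;
 *
 *	read(uffd, &msg, sizeof(msg));
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = (int)msg.arg.fork.ufd;
 *		// track child_uffd alongside the parent's uffd
 *	}
 */
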
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
                             struct vm_userfaultfd_ctx *vm_ctx)
{
        struct userfaultfd_ctx *ctx;

        ctx = vma->vm_userfaultfd_ctx.ctx;

        if (!ctx)
                return;

        if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
                vm_ctx->ctx = ctx;
                userfaultfd_ctx_get(ctx);
                WRITE_ONCE(ctx->mmap_changing, true);
        } else {
                /* Drop uffd context if remap feature not enabled */
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
        }
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
                                 unsigned long from, unsigned long to,
                                 unsigned long len)
{
        struct userfaultfd_ctx *ctx = vm_ctx->ctx;
        struct userfaultfd_wait_queue ewq;

        if (!ctx)
                return;

        if (to & ~PAGE_MASK) {
                userfaultfd_ctx_put(ctx);
                return;
        }

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_REMAP;
        ewq.msg.arg.remap.from = from;
        ewq.msg.arg.remap.to = to;
        ewq.msg.arg.remap.len = len;

        userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue ewq;

        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
                return true;

        userfaultfd_ctx_get(ctx);
        WRITE_ONCE(ctx->mmap_changing, true);
        up_read(&mm->mmap_sem);

        msg_init(&ewq.msg);

        ewq.msg.event = UFFD_EVENT_REMOVE;
        ewq.msg.arg.remove.start = start;
        ewq.msg.arg.remove.end = end;

        userfaultfd_event_wait_completion(ctx, &ewq);

        return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
                          unsigned long start, unsigned long end)
{
        struct userfaultfd_unmap_ctx *unmap_ctx;

        list_for_each_entry(unmap_ctx, unmaps, list)
                if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
                    unmap_ctx->end == end)
                        return true;

        return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end,
                           struct list_head *unmaps)
{
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
                struct userfaultfd_unmap_ctx *unmap_ctx;
                struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

                if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
                    has_unmap_ctx(ctx, unmaps, start, end))
                        continue;

                unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
                if (!unmap_ctx)
                        return -ENOMEM;

                userfaultfd_ctx_get(ctx);
                WRITE_ONCE(ctx->mmap_changing, true);
                unmap_ctx->ctx = ctx;
                unmap_ctx->start = start;
                unmap_ctx->end = end;
                list_add_tail(&unmap_ctx->list, unmaps);
        }

        return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
        struct userfaultfd_unmap_ctx *ctx, *n;
        struct userfaultfd_wait_queue ewq;

        list_for_each_entry_safe(ctx, n, uf, list) {
                msg_init(&ewq.msg);

                ewq.msg.event = UFFD_EVENT_UNMAP;
                ewq.msg.arg.remove.start = ctx->start;
                ewq.msg.arg.remove.end = ctx->end;

                userfaultfd_event_wait_completion(ctx->ctx, &ewq);

                list_del(&ctx->list);
                kfree(ctx);
        }
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        struct mm_struct *mm = ctx->mm;
        struct vm_area_struct *vma, *prev;
        /* len == 0 means wake all */
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;

        WRITE_ONCE(ctx->released, true);

        if (!mmget_not_zero(mm))
                goto wakeup;

        /*
         * Flush page faults out of all CPUs. NOTE: all page faults
         * must be retried without returning VM_FAULT_SIGBUS if
         * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
         * changes while handle_userfault released the mmap_sem. So
         * it's critical that released is set to true (above), before
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
        if (!mmget_still_valid(mm))
                goto skip_mm;
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
                BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
                       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
                if (vma->vm_userfaultfd_ctx.ctx != ctx) {
                        prev = vma;
                        continue;
                }
                new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
                prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 NULL_VM_UFFD_CTX);
                if (prev)
                        vma = prev;
                else
                        prev = vma;
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
wakeup:
        /*
         * After no new page faults can wait on this fault_*wqh, flush
         * the last page faults that may have been already waiting on
         * the fault_*wqh.
         */
        spin_lock(&ctx->fault_pending_wqh.lock);
        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
        __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
        spin_unlock(&ctx->fault_pending_wqh.lock);

        /* Flush pending events that may still wait on event_wqh */
        wake_up_all(&ctx->event_wqh);

        wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
        userfaultfd_ctx_put(ctx);
        return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
                wait_queue_head_t *wqh)
{
        wait_queue_entry_t *wq;
        struct userfaultfd_wait_queue *uwq;

        lockdep_assert_held(&wqh->lock);

        uwq = NULL;
        if (!waitqueue_active(wqh))
                goto out;
        /* walk in reverse to provide FIFO behavior to read userfaults */
        wq = list_last_entry(&wqh->head, typeof(*wq), entry);
        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
        return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
                struct userfaultfd_ctx *ctx)
{
        return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
                struct userfaultfd_ctx *ctx)
{
        return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        __poll_t ret;

        poll_wait(file, &ctx->fd_wqh, wait);

        switch (ctx->state) {
        case UFFD_STATE_WAIT_API:
                return EPOLLERR;
        case UFFD_STATE_RUNNING:
                /*
                 * poll() never guarantees that read won't block.
                 * userfaults can be woken before they're read().
                 */
                if (unlikely(!(file->f_flags & O_NONBLOCK)))
                        return EPOLLERR;
                /*
                 * lockless access to see if there are pending faults
                 * __pollwait last action is the add_wait_queue but
                 * the spin_unlock would allow the waitqueue_active to
                 * pass above the actual list_add inside
                 * add_wait_queue critical section. So use a full
                 * memory barrier to serialize the list_add write of
                 * add_wait_queue() with the waitqueue_active read
                 * below.
                 */
                ret = 0;
                smp_mb();
                if (waitqueue_active(&ctx->fault_pending_wqh))
                        ret = EPOLLIN;
                else if (waitqueue_active(&ctx->event_wqh))
                        ret = EPOLLIN;

                return ret;
        default:
                WARN_ON_ONCE(1);
                return EPOLLERR;
        }
}

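/*
 * Illustrative only (not part of this file): because of the O_NONBLOCK
 * requirement enforced above, a typical monitor drives the fd with
 * poll(2) plus a non-blocking read loop, e.g.:
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	struct uffd_msg msg;
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		while (read(uffd, &msg, sizeof(msg)) == sizeof(msg))
 *			handle_msg(&msg);	// hypothetical helper
 *	}
 */
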
static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
                                  struct userfaultfd_ctx *new,
                                  struct uffd_msg *msg)
{
        int fd;

        fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
                              O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
        if (fd < 0)
                return fd;

        msg->arg.reserved.reserved1 = 0;
        msg->arg.fork.ufd = fd;
        return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                                    struct uffd_msg *msg)
{
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);
        struct userfaultfd_wait_queue *uwq;
        /*
         * Handling fork event requires sleeping operations, so
         * we drop the event_wqh lock, then do these ops, then
         * lock it back and wake up the waiter. While the lock is
         * dropped the ewq may go away so we keep track of it
         * carefully.
         */
        LIST_HEAD(fork_event);
        struct userfaultfd_ctx *fork_nctx = NULL;

        /* always take the fd_wqh lock before the fault_pending_wqh lock */
        spin_lock_irq(&ctx->fd_wqh.lock);
        __add_wait_queue(&ctx->fd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock(&ctx->fault_pending_wqh.lock);
                uwq = find_userfault(ctx);
                if (uwq) {
                        /*
                         * Use a seqcount to repeat the lockless check
                         * in wake_userfault() to avoid missing
                         * wakeups because during the refile both
                         * waitqueue could become empty if this is the
                         * only userfault.
                         */
                        write_seqcount_begin(&ctx->refile_seq);

                        /*
                         * The fault_pending_wqh.lock prevents the uwq
                         * to disappear from under us.
                         *
                         * Refile this userfault from
                         * fault_pending_wqh to fault_wqh, it's not
                         * pending anymore after we read it.
                         *
                         * Use list_del() by hand (as
                         * userfaultfd_wake_function also uses
                         * list_del_init() by hand) to be sure nobody
                         * changes __remove_wait_queue() to use
                         * list_del_init() in turn breaking the
                         * !list_empty_careful() check in
                         * handle_userfault(). The uwq->wq.head list
                         * must never be empty at any time during the
                         * refile, or the waitqueue could disappear
                         * from under us. The "wait_queue_head_t"
                         * parameter of __remove_wait_queue() is unused
                         * anyway.
                         */
                        list_del(&uwq->wq.entry);
                        add_wait_queue(&ctx->fault_wqh, &uwq->wq);

                        write_seqcount_end(&ctx->refile_seq);

                        /* careful to always initialize msg if ret == 0 */
                        *msg = uwq->msg;
                        spin_unlock(&ctx->fault_pending_wqh.lock);
                        ret = 0;
                        break;
                }
                spin_unlock(&ctx->fault_pending_wqh.lock);

                spin_lock(&ctx->event_wqh.lock);
                uwq = find_userfault_evt(ctx);
                if (uwq) {
                        *msg = uwq->msg;

                        if (uwq->msg.event == UFFD_EVENT_FORK) {
                                fork_nctx = (struct userfaultfd_ctx *)
                                        (unsigned long)
                                        uwq->msg.arg.reserved.reserved1;
                                list_move(&uwq->wq.entry, &fork_event);
                                /*
                                 * fork_nctx can be freed as soon as
                                 * we drop the lock, unless we take a
                                 * reference on it.
                                 */
                                userfaultfd_ctx_get(fork_nctx);
                                spin_unlock(&ctx->event_wqh.lock);
                                ret = 0;
                                break;
                        }

                        userfaultfd_event_complete(ctx, uwq);
                        spin_unlock(&ctx->event_wqh.lock);
                        ret = 0;
                        break;
                }
                spin_unlock(&ctx->event_wqh.lock);

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (no_wait) {
                        ret = -EAGAIN;
                        break;
                }
                spin_unlock_irq(&ctx->fd_wqh.lock);
                schedule();
                spin_lock_irq(&ctx->fd_wqh.lock);
        }
        __remove_wait_queue(&ctx->fd_wqh, &wait);
        __set_current_state(TASK_RUNNING);
        spin_unlock_irq(&ctx->fd_wqh.lock);

        if (!ret && msg->event == UFFD_EVENT_FORK) {
                ret = resolve_userfault_fork(ctx, fork_nctx, msg);
                spin_lock(&ctx->event_wqh.lock);
                if (!list_empty(&fork_event)) {
                        /*
                         * The fork thread didn't abort, so we can
                         * drop the temporary refcount.
                         */
                        userfaultfd_ctx_put(fork_nctx);

                        uwq = list_first_entry(&fork_event,
                                               typeof(*uwq),
                                               wq.entry);
                        /*
                         * If fork_event list wasn't empty and in turn
                         * the event wasn't already released by fork
                         * (the event is allocated on fork kernel
                         * stack), put the event back to its place in
                         * the event_wq. fork_event head will be freed
                         * as soon as we return so the event cannot
                         * stay queued there no matter the current
                         * "ret" value.
                         */
                        list_del(&uwq->wq.entry);
                        __add_wait_queue(&ctx->event_wqh, &uwq->wq);

                        /*
                         * Leave the event in the waitqueue and report
                         * error to userland if we failed to resolve
                         * the userfault fork.
                         */
                        if (likely(!ret))
                                userfaultfd_event_complete(ctx, uwq);
                } else {
                        /*
                         * Here the fork thread aborted and the
                         * refcount from the fork thread on fork_nctx
                         * has already been released. We still hold
                         * the reference we took before releasing the
                         * lock above. If resolve_userfault_fork
                         * failed we have to drop it because the
                         * fork_nctx has to be freed in such case. If
                         * it succeeded we'll hold it because the new
                         * uffd references it.
                         */
                        if (ret)
                                userfaultfd_ctx_put(fork_nctx);
                }
                spin_unlock(&ctx->event_wqh.lock);
        }

        return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        ssize_t _ret, ret = 0;
        struct uffd_msg msg;
        int no_wait = file->f_flags & O_NONBLOCK;

        if (ctx->state == UFFD_STATE_WAIT_API)
                return -EINVAL;

        for (;;) {
                if (count < sizeof(msg))
                        return ret ? ret : -EINVAL;
                _ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
                if (_ret < 0)
                        return ret ? ret : _ret;
                if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
                        return ret ? ret : -EFAULT;
                ret += sizeof(msg);
                buf += sizeof(msg);
                count -= sizeof(msg);
                /*
                 * Allow reading more than one fault at a time but only
                 * block if waiting for the very first one.
                 */
                no_wait = O_NONBLOCK;
        }
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
                             struct userfaultfd_wake_range *range)
{
        spin_lock(&ctx->fault_pending_wqh.lock);
        /* wake all in the range and autoremove */
        if (waitqueue_active(&ctx->fault_pending_wqh))
                __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
                                     range);
        if (waitqueue_active(&ctx->fault_wqh))
                __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
        spin_unlock(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
                                           struct userfaultfd_wake_range *range)
{
        unsigned seq;
        bool need_wakeup;

        /*
         * To be sure waitqueue_active() is not reordered by the CPU
         * before the pagetable update, use an explicit SMP memory
         * barrier here. PT lock release or up_read(mmap_sem) still
         * have release semantics that can allow the
         * waitqueue_active() to be reordered before the pte update.
         */
        smp_mb();

        /*
         * Use waitqueue_active because it's very frequent to
         * change the address space atomically even if there are no
         * userfaults yet. So we take the spinlock only when we're
         * sure we have userfaults to wake.
         */
        do {
                seq = read_seqcount_begin(&ctx->refile_seq);
                need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
                        waitqueue_active(&ctx->fault_wqh);
                cond_resched();
        } while (read_seqcount_retry(&ctx->refile_seq, seq));
        if (need_wakeup)
                __wake_userfault(ctx, range);
}

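/*
 * Illustrative only (not part of this file): wake_userfault() is reached,
 * among other paths, when the monitor resolves a fault without an
 * implicit wakeup and issues an explicit UFFDIO_WAKE, e.g.:
 *
 *	struct uffdio_range range = {
 *		.start = fault_addr & ~(page_size - 1),
 *		.len = page_size,
 *	};
 *
 *	ioctl(uffd, UFFDIO_WAKE, &range);
 */
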
static __always_inline int validate_range(struct mm_struct *mm,
                                          __u64 start, __u64 len)
{
        __u64 task_size = mm->task_size;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (len & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return -EINVAL;
        if (start < mmap_min_addr)
                return -EINVAL;
        if (start >= task_size)
                return -EINVAL;
        if (len > task_size - start)
                return -EINVAL;
        return 0;
}

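/*
 * Note on validate_range() above: because both "start < task_size" and
 * "len <= task_size - start" are enforced, "start + len" cannot wrap
 * around even though both are __u64, so callers can safely compute
 * "end = start + len".
 */
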
static inline bool vma_can_userfault(struct vm_area_struct *vma)
{
        return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
                vma_is_shmem(vma);
}

86039bd3 AA |
1291 | static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
1292 | unsigned long arg) | |
1293 | { | |
1294 | struct mm_struct *mm = ctx->mm; | |
1295 | struct vm_area_struct *vma, *prev, *cur; | |
1296 | int ret; | |
1297 | struct uffdio_register uffdio_register; | |
1298 | struct uffdio_register __user *user_uffdio_register; | |
1299 | unsigned long vm_flags, new_flags; | |
1300 | bool found; | |
ce53e8e6 | 1301 | bool basic_ioctls; |
86039bd3 AA |
1302 | unsigned long start, end, vma_end; |
1303 | ||
1304 | user_uffdio_register = (struct uffdio_register __user *) arg; | |
1305 | ||
1306 | ret = -EFAULT; | |
1307 | if (copy_from_user(&uffdio_register, user_uffdio_register, | |
1308 | sizeof(uffdio_register)-sizeof(__u64))) | |
1309 | goto out; | |
1310 | ||
1311 | ret = -EINVAL; | |
1312 | if (!uffdio_register.mode) | |
1313 | goto out; | |
1314 | if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING| | |
1315 | UFFDIO_REGISTER_MODE_WP)) | |
1316 | goto out; | |
1317 | vm_flags = 0; | |
1318 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) | |
1319 | vm_flags |= VM_UFFD_MISSING; | |
1320 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { | |
1321 | vm_flags |= VM_UFFD_WP; | |
1322 | /* | |
1323 | * FIXME: remove the below error constraint by | |
1324 | * implementing the wprotect tracking mode. | |
1325 | */ | |
1326 | ret = -EINVAL; | |
1327 | goto out; | |
1328 | } | |
1329 | ||
1330 | ret = validate_range(mm, uffdio_register.range.start, | |
1331 | uffdio_register.range.len); | |
1332 | if (ret) | |
1333 | goto out; | |
1334 | ||
1335 | start = uffdio_register.range.start; | |
1336 | end = start + uffdio_register.range.len; | |
1337 | ||
d2005e3f ON |
1338 | ret = -ENOMEM; |
1339 | if (!mmget_not_zero(mm)) | |
1340 | goto out; | |
1341 | ||
86039bd3 | 1342 | down_write(&mm->mmap_sem); |
04f5866e AA |
1343 | if (!mmget_still_valid(mm)) |
1344 | goto out_unlock; | |
86039bd3 | 1345 | vma = find_vma_prev(mm, start, &prev); |
86039bd3 AA |
1346 | if (!vma) |
1347 | goto out_unlock; | |
1348 | ||
1349 | /* check that there's at least one vma in the range */ | |
1350 | ret = -EINVAL; | |
1351 | if (vma->vm_start >= end) | |
1352 | goto out_unlock; | |
1353 | ||
cab350af MK |
1354 | /* |
1355 | * If the first vma contains huge pages, make sure start address | |
1356 | * is aligned to huge page size. | |
1357 | */ | |
1358 | if (is_vm_hugetlb_page(vma)) { | |
1359 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); | |
1360 | ||
1361 | if (start & (vma_hpagesize - 1)) | |
1362 | goto out_unlock; | |
1363 | } | |
1364 | ||
86039bd3 AA |
1365 | /* |
1366 | * Search for not compatible vmas. | |
86039bd3 AA |
1367 | */ |
1368 | found = false; | |
ce53e8e6 | 1369 | basic_ioctls = false; |
86039bd3 AA |
1370 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
1371 | cond_resched(); | |
1372 | ||
1373 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ | |
1374 | !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); | |
1375 | ||
1376 | /* check not compatible vmas */ | |
1377 | ret = -EINVAL; | |
ba6907db | 1378 | if (!vma_can_userfault(cur)) |
86039bd3 | 1379 | goto out_unlock; |
29ec9066 AA |
1380 | |
1381 | /* | |
1382 | * UFFDIO_COPY will fill file holes even without | |
1383 | * PROT_WRITE. This check enforces that if this is a | |
1384 | * MAP_SHARED, the process has write permission to the backing | |
1385 | * file. If VM_MAYWRITE is set it also enforces that on a | |
1386 | * MAP_SHARED vma: there is no F_WRITE_SEAL and no further | |
1387 | * F_WRITE_SEAL can be taken until the vma is destroyed. | |
1388 | */ | |
1389 | ret = -EPERM; | |
1390 | if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) | |
1391 | goto out_unlock; | |
1392 | ||
cab350af MK |
1393 | /* |
1394 | * If this vma contains ending address, and huge pages | |
1395 | * check alignment. | |
1396 | */ | |
1397 | if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && | |
1398 | end > cur->vm_start) { | |
1399 | unsigned long vma_hpagesize = vma_kernel_pagesize(cur); | |
1400 | ||
1401 | ret = -EINVAL; | |
1402 | ||
1403 | if (end & (vma_hpagesize - 1)) | |
1404 | goto out_unlock; | |
1405 | } | |
86039bd3 AA |
1406 | |
1407 | /* | |
1408 | * Check that this vma isn't already owned by a | |
1409 | * different userfaultfd. We can't allow more than one | |
1410 | * userfaultfd to own a single vma simultaneously or we | |
1411 | * wouldn't know which one to deliver the userfaults to. | |
1412 | */ | |
1413 | ret = -EBUSY; | |
1414 | if (cur->vm_userfaultfd_ctx.ctx && | |
1415 | cur->vm_userfaultfd_ctx.ctx != ctx) | |
1416 | goto out_unlock; | |
1417 | ||
cab350af MK |
1418 | /* |
1419 | * Note vmas containing huge pages | |
1420 | */ | |
ce53e8e6 MR |
1421 | if (is_vm_hugetlb_page(cur)) |
1422 | basic_ioctls = true; | |
cab350af | 1423 | |
86039bd3 AA |
1424 | found = true; |
1425 | } | |
1426 | BUG_ON(!found); | |
1427 | ||
1428 | if (vma->vm_start < start) | |
1429 | prev = vma; | |
1430 | ||
1431 | ret = 0; | |
1432 | do { | |
1433 | cond_resched(); | |
1434 | ||
ba6907db | 1435 | BUG_ON(!vma_can_userfault(vma)); |
86039bd3 AA |
1436 | BUG_ON(vma->vm_userfaultfd_ctx.ctx && |
1437 | vma->vm_userfaultfd_ctx.ctx != ctx); | |
29ec9066 | 1438 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
86039bd3 AA |
1439 | |
1440 | /* | |
1441 | * Nothing to do: this vma is already registered into this | |
1442 | * userfaultfd and with the right tracking mode too. | |
1443 | */ | |
1444 | if (vma->vm_userfaultfd_ctx.ctx == ctx && | |
1445 | (vma->vm_flags & vm_flags) == vm_flags) | |
1446 | goto skip; | |
1447 | ||
1448 | if (vma->vm_start > start) | |
1449 | start = vma->vm_start; | |
1450 | vma_end = min(end, vma->vm_end); | |
1451 | ||
1452 | new_flags = (vma->vm_flags & ~vm_flags) | vm_flags; | |
1453 | prev = vma_merge(mm, prev, start, vma_end, new_flags, | |
1454 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, | |
1455 | vma_policy(vma), | |
1456 | ((struct vm_userfaultfd_ctx){ ctx })); | |
1457 | if (prev) { | |
1458 | vma = prev; | |
1459 | goto next; | |
1460 | } | |
1461 | if (vma->vm_start < start) { | |
1462 | ret = split_vma(mm, vma, start, 1); | |
1463 | if (ret) | |
1464 | break; | |
1465 | } | |
1466 | if (vma->vm_end > end) { | |
1467 | ret = split_vma(mm, vma, end, 0); | |
1468 | if (ret) | |
1469 | break; | |
1470 | } | |
1471 | next: | |
1472 | /* | |
1473 | * In the vma_merge() successful mprotect-like case 8: | |
1474 | * the next vma was merged into the current one and | |
1475 | * the current one has not been updated yet. | |
1476 | */ | |
1477 | vma->vm_flags = new_flags; | |
1478 | vma->vm_userfaultfd_ctx.ctx = ctx; | |
1479 | ||
1480 | skip: | |
1481 | prev = vma; | |
1482 | start = vma->vm_end; | |
1483 | vma = vma->vm_next; | |
1484 | } while (vma && vma->vm_start < end); | |
1485 | out_unlock: | |
1486 | up_write(&mm->mmap_sem); | |
d2005e3f | 1487 | mmput(mm); |
86039bd3 AA |
1488 | if (!ret) { |
1489 | /* | |
1490 | * Now that we scanned all vmas we can already tell | |
1491 | * userland which ioctls methods are guaranteed to | |
1492 | * succeed on this range. | |
1493 | */ | |
ce53e8e6 | 1494 | if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : |
cab350af | 1495 | UFFD_API_RANGE_IOCTLS, |
86039bd3 AA |
1496 | &user_uffdio_register->ioctls)) |
1497 | ret = -EFAULT; | |
1498 | } | |
1499 | out: | |
1500 | return ret; | |
1501 | } | |
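/*
 * A minimal userspace sketch of driving the ioctl above, assuming the
 * usual <linux/userfaultfd.h> UAPI; the helper name and its "uffd",
 * "addr" and "len" parameters are illustrative. "uffd" must already
 * have completed the UFFDIO_API handshake, and addr/len must describe
 * a page-aligned range.
 */
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int register_range(int uffd, void *addr, unsigned long len)
{
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};

	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
		perror("UFFDIO_REGISTER");
		return -1;
	}
	/* on success the kernel filled in the guaranteed range ioctls */
	printf("supported range ioctls: 0x%llx\n",
	       (unsigned long long)reg.ioctls);
	return 0;
}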
1502 | ||
1503 | static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, | |
1504 | unsigned long arg) | |
1505 | { | |
1506 | struct mm_struct *mm = ctx->mm; | |
1507 | struct vm_area_struct *vma, *prev, *cur; | |
1508 | int ret; | |
1509 | struct uffdio_range uffdio_unregister; | |
1510 | unsigned long new_flags; | |
1511 | bool found; | |
1512 | unsigned long start, end, vma_end; | |
1513 | const void __user *buf = (void __user *)arg; | |
1514 | ||
1515 | ret = -EFAULT; | |
1516 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) | |
1517 | goto out; | |
1518 | ||
1519 | ret = validate_range(mm, uffdio_unregister.start, | |
1520 | uffdio_unregister.len); | |
1521 | if (ret) | |
1522 | goto out; | |
1523 | ||
1524 | start = uffdio_unregister.start; | |
1525 | end = start + uffdio_unregister.len; | |
1526 | ||
d2005e3f ON |
1527 | ret = -ENOMEM; |
1528 | if (!mmget_not_zero(mm)) | |
1529 | goto out; | |
1530 | ||
86039bd3 | 1531 | down_write(&mm->mmap_sem); |
04f5866e AA |
1532 | if (!mmget_still_valid(mm)) |
1533 | goto out_unlock; | |
86039bd3 | 1534 | vma = find_vma_prev(mm, start, &prev); |
86039bd3 AA |
1535 | if (!vma) |
1536 | goto out_unlock; | |
1537 | ||
1538 | /* check that there's at least one vma in the range */ | |
1539 | ret = -EINVAL; | |
1540 | if (vma->vm_start >= end) | |
1541 | goto out_unlock; | |
1542 | ||
cab350af MK |
1543 | /* |
1544 | * If the first vma contains huge pages, make sure start address | |
1545 | * is aligned to huge page size. | |
1546 | */ | |
1547 | if (is_vm_hugetlb_page(vma)) { | |
1548 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); | |
1549 | ||
1550 | if (start & (vma_hpagesize - 1)) | |
1551 | goto out_unlock; | |
1552 | } | |
1553 | ||
86039bd3 AA |
1554 | /* |
1555 | * Search for incompatible vmas. | |
86039bd3 AA |
1556 | */ |
1557 | found = false; | |
1558 | ret = -EINVAL; | |
1559 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { | |
1560 | cond_resched(); | |
1561 | ||
1562 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ | |
1563 | !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); | |
1564 | ||
1565 | /* | |
1566 | * Check for incompatible vmas. This is not strictly | |
1567 | * required here, as incompatible vmas cannot have a | |
1568 | * userfaultfd_ctx registered on them, but it gives | |
1569 | * stricter behavior that catches unregistration | |
1570 | * errors. | |
1571 | */ | |
ba6907db | 1572 | if (!vma_can_userfault(cur)) |
86039bd3 AA |
1573 | goto out_unlock; |
1574 | ||
1575 | found = true; | |
1576 | } | |
1577 | BUG_ON(!found); | |
1578 | ||
1579 | if (vma->vm_start < start) | |
1580 | prev = vma; | |
1581 | ||
1582 | ret = 0; | |
1583 | do { | |
1584 | cond_resched(); | |
1585 | ||
ba6907db | 1586 | BUG_ON(!vma_can_userfault(vma)); |
86039bd3 AA |
1587 | |
1588 | /* | |
1589 | * Nothing to do: this vma is not registered with any | |
1590 | * userfaultfd, so there is nothing to unregister. | |
1591 | */ | |
1592 | if (!vma->vm_userfaultfd_ctx.ctx) | |
1593 | goto skip; | |
1594 | ||
01e881f5 AA |
1595 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
1596 | ||
86039bd3 AA |
1597 | if (vma->vm_start > start) |
1598 | start = vma->vm_start; | |
1599 | vma_end = min(end, vma->vm_end); | |
1600 | ||
09fa5296 AA |
1601 | if (userfaultfd_missing(vma)) { |
1602 | /* | |
1603 | * Wake any concurrent pending userfaults while | |
1604 | * we unregister, so they will not hang | |
1605 | * permanently and userland does not have to call | |
1606 | * UFFDIO_WAKE explicitly. | |
1607 | */ | |
1608 | struct userfaultfd_wake_range range; | |
1609 | range.start = start; | |
1610 | range.len = vma_end - start; | |
1611 | wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); | |
1612 | } | |
1613 | ||
86039bd3 AA |
1614 | new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); |
1615 | prev = vma_merge(mm, prev, start, vma_end, new_flags, | |
1616 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, | |
1617 | vma_policy(vma), | |
1618 | NULL_VM_UFFD_CTX); | |
1619 | if (prev) { | |
1620 | vma = prev; | |
1621 | goto next; | |
1622 | } | |
1623 | if (vma->vm_start < start) { | |
1624 | ret = split_vma(mm, vma, start, 1); | |
1625 | if (ret) | |
1626 | break; | |
1627 | } | |
1628 | if (vma->vm_end > end) { | |
1629 | ret = split_vma(mm, vma, end, 0); | |
1630 | if (ret) | |
1631 | break; | |
1632 | } | |
1633 | next: | |
1634 | /* | |
1635 | * In the vma_merge() successful mprotect-like case 8: | |
1636 | * the next vma was merged into the current one and | |
1637 | * the current one has not been updated yet. | |
1638 | */ | |
1639 | vma->vm_flags = new_flags; | |
1640 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; | |
1641 | ||
1642 | skip: | |
1643 | prev = vma; | |
1644 | start = vma->vm_end; | |
1645 | vma = vma->vm_next; | |
1646 | } while (vma && vma->vm_start < end); | |
1647 | out_unlock: | |
1648 | up_write(&mm->mmap_sem); | |
d2005e3f | 1649 | mmput(mm); |
86039bd3 AA |
1650 | out: |
1651 | return ret; | |
1652 | } | |
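/*
 * The matching unregister side, as a sketch under the same assumptions
 * as the register sketch above; note the kernel wakes pending faults
 * itself (see the userfaultfd_missing() branch above), so no
 * UFFDIO_WAKE is needed afterwards.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int unregister_range(int uffd, void *addr, unsigned long len)
{
	struct uffdio_range range = {
		.start = (unsigned long)addr,
		.len   = len,
	};

	return ioctl(uffd, UFFDIO_UNREGISTER, &range);
}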
1653 | ||
1654 | /* | |
ba85c702 AA |
1655 | * userfaultfd_wake may be used in combination with the | |
1656 | * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches. | |
86039bd3 AA |
1657 | */ |
1658 | static int userfaultfd_wake(struct userfaultfd_ctx *ctx, | |
1659 | unsigned long arg) | |
1660 | { | |
1661 | int ret; | |
1662 | struct uffdio_range uffdio_wake; | |
1663 | struct userfaultfd_wake_range range; | |
1664 | const void __user *buf = (void __user *)arg; | |
1665 | ||
1666 | ret = -EFAULT; | |
1667 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) | |
1668 | goto out; | |
1669 | ||
1670 | ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); | |
1671 | if (ret) | |
1672 | goto out; | |
1673 | ||
1674 | range.start = uffdio_wake.start; | |
1675 | range.len = uffdio_wake.len; | |
1676 | ||
1677 | /* | |
1678 | * len == 0 would mean wake all, which we don't want here, | |
1679 | * so check it again to be sure. | |
1680 | */ | |
1681 | VM_BUG_ON(!range.len); | |
1682 | ||
1683 | wake_userfault(ctx, &range); | |
1684 | ret = 0; | |
1685 | ||
1686 | out: | |
1687 | return ret; | |
1688 | } | |
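/*
 * A sketch of the batching pattern the comment above describes:
 * resolve a run of faults with UFFDIO_COPY_MODE_DONTWAKE, then wake
 * the whole span at once. "start" and "len" are assumed page-aligned
 * and inside a registered range; the helper name is illustrative.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int wake_range(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_range range = { .start = start, .len = len };

	return ioctl(uffd, UFFDIO_WAKE, &range);
}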
1689 | ||
ad465cae AA |
1690 | static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
1691 | unsigned long arg) | |
1692 | { | |
1693 | __s64 ret; | |
1694 | struct uffdio_copy uffdio_copy; | |
1695 | struct uffdio_copy __user *user_uffdio_copy; | |
1696 | struct userfaultfd_wake_range range; | |
1697 | ||
1698 | user_uffdio_copy = (struct uffdio_copy __user *) arg; | |
1699 | ||
df2cc96e MR |
1700 | ret = -EAGAIN; |
1701 | if (READ_ONCE(ctx->mmap_changing)) | |
1702 | goto out; | |
1703 | ||
ad465cae AA |
1704 | ret = -EFAULT; |
1705 | if (copy_from_user(&uffdio_copy, user_uffdio_copy, | |
1706 | /* don't copy "copy" last field */ | |
1707 | sizeof(uffdio_copy)-sizeof(__s64))) | |
1708 | goto out; | |
1709 | ||
1710 | ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); | |
1711 | if (ret) | |
1712 | goto out; | |
1713 | /* | |
1714 | * double check for wraparound just in case. copy_from_user() | |
1715 | * will later check that uffdio_copy.src + uffdio_copy.len | |
1716 | * fits in the userland range. | |
1717 | */ | |
1718 | ret = -EINVAL; | |
1719 | if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) | |
1720 | goto out; | |
1721 | if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE) | |
1722 | goto out; | |
d2005e3f ON |
1723 | if (mmget_not_zero(ctx->mm)) { |
1724 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, | |
df2cc96e | 1725 | uffdio_copy.len, &ctx->mmap_changing); |
d2005e3f | 1726 | mmput(ctx->mm); |
96333187 | 1727 | } else { |
e86b298b | 1728 | return -ESRCH; |
d2005e3f | 1729 | } |
ad465cae AA |
1730 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
1731 | return -EFAULT; | |
1732 | if (ret < 0) | |
1733 | goto out; | |
1734 | BUG_ON(!ret); | |
1735 | /* len == 0 would wake all */ | |
1736 | range.len = ret; | |
1737 | if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { | |
1738 | range.start = uffdio_copy.dst; | |
1739 | wake_userfault(ctx, &range); | |
1740 | } | |
1741 | ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; | |
1742 | out: | |
1743 | return ret; | |
1744 | } | |
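/*
 * A minimal userspace sketch of UFFDIO_COPY, assuming "src" holds one
 * page of data to install at the page-aligned fault address "dst"
 * inside a registered range. As the code above shows, on failure the
 * "copy" field reports either a negative error or the number of bytes
 * installed before a partial copy stopped.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int resolve_fault(int uffd, unsigned long dst, void *src,
			 unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst  = dst,
		.src  = (unsigned long)src,
		.len  = page_size,
		.mode = 0,	/* 0 == also wake the faulting threads */
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -1;	/* inspect copy.copy for -errno/partial len */
	return 0;
}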
1745 | ||
1746 | static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, | |
1747 | unsigned long arg) | |
1748 | { | |
1749 | __s64 ret; | |
1750 | struct uffdio_zeropage uffdio_zeropage; | |
1751 | struct uffdio_zeropage __user *user_uffdio_zeropage; | |
1752 | struct userfaultfd_wake_range range; | |
1753 | ||
1754 | user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; | |
1755 | ||
df2cc96e MR |
1756 | ret = -EAGAIN; |
1757 | if (READ_ONCE(ctx->mmap_changing)) | |
1758 | goto out; | |
1759 | ||
ad465cae AA |
1760 | ret = -EFAULT; |
1761 | if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, | |
1762 | /* don't copy "zeropage" last field */ | |
1763 | sizeof(uffdio_zeropage)-sizeof(__s64))) | |
1764 | goto out; | |
1765 | ||
1766 | ret = validate_range(ctx->mm, uffdio_zeropage.range.start, | |
1767 | uffdio_zeropage.range.len); | |
1768 | if (ret) | |
1769 | goto out; | |
1770 | ret = -EINVAL; | |
1771 | if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) | |
1772 | goto out; | |
1773 | ||
d2005e3f ON |
1774 | if (mmget_not_zero(ctx->mm)) { |
1775 | ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, | |
df2cc96e MR |
1776 | uffdio_zeropage.range.len, |
1777 | &ctx->mmap_changing); | |
d2005e3f | 1778 | mmput(ctx->mm); |
9d95aa4b | 1779 | } else { |
e86b298b | 1780 | return -ESRCH; |
d2005e3f | 1781 | } |
ad465cae AA |
1782 | if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) |
1783 | return -EFAULT; | |
1784 | if (ret < 0) | |
1785 | goto out; | |
1786 | /* len == 0 would wake all */ | |
1787 | BUG_ON(!ret); | |
1788 | range.len = ret; | |
1789 | if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { | |
1790 | range.start = uffdio_zeropage.range.start; | |
1791 | wake_userfault(ctx, &range); | |
1792 | } | |
1793 | ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; | |
1794 | out: | |
1795 | return ret; | |
1796 | } | |
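/*
 * The zeropage counterpart, sketched under the same assumptions as the
 * UFFDIO_COPY sketch above: map the zero page at "dst" instead of
 * copying data in.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int resolve_with_zeropage(int uffd, unsigned long dst,
				 unsigned long page_size)
{
	struct uffdio_zeropage zp = {
		.range = { .start = dst, .len = page_size },
		.mode  = 0,
	};

	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
		return -1;	/* zp.zeropage holds -errno or a partial len */
	return 0;
}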
1797 | ||
9cd75c3c PE |
1798 | static inline unsigned int uffd_ctx_features(__u64 user_features) |
1799 | { | |
1800 | /* | |
1801 | * For the current set of features the bits just coincide | |
1802 | */ | |
1803 | return (unsigned int)user_features; | |
1804 | } | |
1805 | ||
86039bd3 AA |
1806 | /* |
1807 | * userland asks for a certain API version and we return which bits | |
1808 | * and ioctl commands are implemented in this kernel for that API | |
1809 | * version, or -EINVAL if it is unknown. | |
1810 | */ | |
1811 | static int userfaultfd_api(struct userfaultfd_ctx *ctx, | |
1812 | unsigned long arg) | |
1813 | { | |
1814 | struct uffdio_api uffdio_api; | |
1815 | void __user *buf = (void __user *)arg; | |
1816 | int ret; | |
65603144 | 1817 | __u64 features; |
86039bd3 AA |
1818 | |
1819 | ret = -EINVAL; | |
1820 | if (ctx->state != UFFD_STATE_WAIT_API) | |
1821 | goto out; | |
1822 | ret = -EFAULT; | |
a9b85f94 | 1823 | if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) |
86039bd3 | 1824 | goto out; |
65603144 AA |
1825 | features = uffdio_api.features; |
1826 | if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) { | |
86039bd3 AA |
1827 | memset(&uffdio_api, 0, sizeof(uffdio_api)); |
1828 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) | |
1829 | goto out; | |
1830 | ret = -EINVAL; | |
1831 | goto out; | |
1832 | } | |
65603144 AA |
1833 | /* report all available features and ioctls to userland */ |
1834 | uffdio_api.features = UFFD_API_FEATURES; | |
86039bd3 AA |
1835 | uffdio_api.ioctls = UFFD_API_IOCTLS; |
1836 | ret = -EFAULT; | |
1837 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) | |
1838 | goto out; | |
1839 | ctx->state = UFFD_STATE_RUNNING; | |
65603144 AA |
1840 | /* only enable the requested features for this uffd context */ |
1841 | ctx->features = uffd_ctx_features(features); | |
86039bd3 AA |
1842 | ret = 0; |
1843 | out: | |
1844 | return ret; | |
1845 | } | |
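/*
 * A sketch of the handshake this function services; it must be the
 * first ioctl issued on a fresh descriptor (see the UFFD_STATE_WAIT_API
 * check in userfaultfd_ioctl() below). Requesting features == 0 keeps
 * all optional features disabled.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_handshake(int uffd)
{
	struct uffdio_api api = {
		.api      = UFFD_API,
		.features = 0,
	};

	if (ioctl(uffd, UFFDIO_API, &api) == -1)
		return -1;
	/* api.features and api.ioctls now report what the kernel supports */
	return 0;
}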
1846 | ||
1847 | static long userfaultfd_ioctl(struct file *file, unsigned cmd, | |
1848 | unsigned long arg) | |
1849 | { | |
1850 | int ret = -EINVAL; | |
1851 | struct userfaultfd_ctx *ctx = file->private_data; | |
1852 | ||
e6485a47 AA |
1853 | if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) |
1854 | return -EINVAL; | |
1855 | ||
86039bd3 AA |
1856 | switch(cmd) { |
1857 | case UFFDIO_API: | |
1858 | ret = userfaultfd_api(ctx, arg); | |
1859 | break; | |
1860 | case UFFDIO_REGISTER: | |
1861 | ret = userfaultfd_register(ctx, arg); | |
1862 | break; | |
1863 | case UFFDIO_UNREGISTER: | |
1864 | ret = userfaultfd_unregister(ctx, arg); | |
1865 | break; | |
1866 | case UFFDIO_WAKE: | |
1867 | ret = userfaultfd_wake(ctx, arg); | |
1868 | break; | |
ad465cae AA |
1869 | case UFFDIO_COPY: |
1870 | ret = userfaultfd_copy(ctx, arg); | |
1871 | break; | |
1872 | case UFFDIO_ZEROPAGE: | |
1873 | ret = userfaultfd_zeropage(ctx, arg); | |
1874 | break; | |
86039bd3 AA |
1875 | } |
1876 | return ret; | |
1877 | } | |
1878 | ||
1879 | #ifdef CONFIG_PROC_FS | |
1880 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) | |
1881 | { | |
1882 | struct userfaultfd_ctx *ctx = f->private_data; | |
ac6424b9 | 1883 | wait_queue_entry_t *wq; |
86039bd3 AA |
1884 | unsigned long pending = 0, total = 0; |
1885 | ||
15b726ef | 1886 | spin_lock(&ctx->fault_pending_wqh.lock); |
2055da97 | 1887 | list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { |
15b726ef AA |
1888 | pending++; |
1889 | total++; | |
1890 | } | |
2055da97 | 1891 | list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { |
86039bd3 AA |
1892 | total++; |
1893 | } | |
15b726ef | 1894 | spin_unlock(&ctx->fault_pending_wqh.lock); |
86039bd3 AA |
1895 | |
1896 | /* | |
1897 | * If more protocols are added, they will all be shown | |
1898 | * separated by a space, like this: | |
1899 | * protocols: aa:... bb:... | |
1900 | */ | |
1901 | seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", | |
045098e9 | 1902 | pending, total, UFFD_API, ctx->features, |
86039bd3 AA |
1903 | UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); |
1904 | } | |
1905 | #endif | |
1906 | ||
1907 | static const struct file_operations userfaultfd_fops = { | |
1908 | #ifdef CONFIG_PROC_FS | |
1909 | .show_fdinfo = userfaultfd_show_fdinfo, | |
1910 | #endif | |
1911 | .release = userfaultfd_release, | |
1912 | .poll = userfaultfd_poll, | |
1913 | .read = userfaultfd_read, | |
1914 | .unlocked_ioctl = userfaultfd_ioctl, | |
1915 | .compat_ioctl = userfaultfd_ioctl, | |
1916 | .llseek = noop_llseek, | |
1917 | }; | |
1918 | ||
3004ec9c AA |
1919 | static void init_once_userfaultfd_ctx(void *mem) |
1920 | { | |
1921 | struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; | |
1922 | ||
1923 | init_waitqueue_head(&ctx->fault_pending_wqh); | |
1924 | init_waitqueue_head(&ctx->fault_wqh); | |
9cd75c3c | 1925 | init_waitqueue_head(&ctx->event_wqh); |
3004ec9c | 1926 | init_waitqueue_head(&ctx->fd_wqh); |
2c5b7e1b | 1927 | seqcount_init(&ctx->refile_seq); |
3004ec9c AA |
1928 | } |
1929 | ||
284cd241 | 1930 | SYSCALL_DEFINE1(userfaultfd, int, flags) |
86039bd3 | 1931 | { |
86039bd3 | 1932 | struct userfaultfd_ctx *ctx; |
284cd241 | 1933 | int fd; |
86039bd3 | 1934 | |
cefdca0a PX |
1935 | if (!sysctl_unprivileged_userfaultfd && !capable(CAP_SYS_PTRACE)) |
1936 | return -EPERM; | |
1937 | ||
86039bd3 AA |
1938 | BUG_ON(!current->mm); |
1939 | ||
1940 | /* Check the UFFD_* constants for consistency. */ | |
1941 | BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); | |
1942 | BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); | |
1943 | ||
86039bd3 | 1944 | if (flags & ~UFFD_SHARED_FCNTL_FLAGS) |
284cd241 | 1945 | return -EINVAL; |
86039bd3 | 1946 | |
3004ec9c | 1947 | ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); |
86039bd3 | 1948 | if (!ctx) |
284cd241 | 1949 | return -ENOMEM; |
86039bd3 | 1950 | |
ca880420 | 1951 | refcount_set(&ctx->refcount, 1); |
86039bd3 | 1952 | ctx->flags = flags; |
9cd75c3c | 1953 | ctx->features = 0; |
86039bd3 AA |
1954 | ctx->state = UFFD_STATE_WAIT_API; |
1955 | ctx->released = false; | |
df2cc96e | 1956 | ctx->mmap_changing = false; |
86039bd3 AA |
1957 | ctx->mm = current->mm; |
1958 | /* prevent the mm struct from being freed */ | |
f1f10076 | 1959 | mmgrab(ctx->mm); |
86039bd3 | 1960 | |
284cd241 EB |
1961 | fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx, |
1962 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); | |
1963 | if (fd < 0) { | |
d2005e3f | 1964 | mmdrop(ctx->mm); |
3004ec9c | 1965 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
c03e946f | 1966 | } |
86039bd3 | 1967 | return fd; |
86039bd3 | 1968 | } |
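/*
 * This syscall is typically invoked through syscall(2), as libc
 * provides no dedicated wrapper; a minimal sketch of creating the
 * descriptor. The flag values mirror the UFFD_SHARED_FCNTL_FLAGS
 * check above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_userfaultfd(void)
{
	return syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
}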
3004ec9c AA |
1969 | |
1970 | static int __init userfaultfd_init(void) | |
1971 | { | |
1972 | userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", | |
1973 | sizeof(struct userfaultfd_ctx), | |
1974 | 0, | |
1975 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, | |
1976 | init_once_userfaultfd_ctx); | |
1977 | return 0; | |
1978 | } | |
1979 | __initcall(userfaultfd_init); |
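/*
 * Finally, an end-to-end sketch tying the pieces together: wait for a
 * fault notification on the descriptor and resolve it with
 * UFFDIO_COPY. Assumes the usual uffd_msg UAPI layout, that "uffd" is
 * registered for the faulting range, that "page_size" is the page
 * size of that range, and that "page_buf" is a page-sized staging
 * buffer; error handling is elided.
 */
#include <linux/userfaultfd.h>
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void handle_one_fault(int uffd, void *page_buf,
			     unsigned long page_size)
{
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;
	struct uffdio_copy copy = {
		.src  = (unsigned long)page_buf,
		.len  = page_size,
		.mode = 0,
	};

	poll(&pfd, 1, -1);			/* wait for a pending fault */
	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
		return;
	if (msg.event != UFFD_EVENT_PAGEFAULT)
		return;

	memset(page_buf, 0x5a, page_size);	/* payload to install */
	copy.dst = msg.arg.pagefault.address & ~((__u64)page_size - 1);
	ioctl(uffd, UFFDIO_COPY, &copy);	/* installs and wakes */
}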