/* mm/gup.c */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			goto out;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else {
				page = NULL;
				goto out;
			}
		}
		goto out;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		goto no_page_table;
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			goto split_fallthrough;
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				goto out;
			}
		} else
			spin_unlock(ptl);
		/* fall through */
	}
split_fallthrough:
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto split_fallthrough;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	 /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}
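
/*
 * Illustrative sketch (not part of gup.c itself): a minimal example of how an
 * in-kernel caller might use follow_page_mask() to look up the page backing a
 * user address.  It assumes mmap_sem is already held for read; the helper
 * name below is hypothetical.
 */
static struct page *example_lookup_user_page(struct mm_struct *mm,
					     unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned int page_mask;
	struct page *page;

	if (!vma || addr < vma->vm_start)
		return NULL;

	/* FOLL_GET takes a reference; the caller must drop it with put_page() */
	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
	if (IS_ERR_OR_NULL(page))
		return NULL;	/* no mapping, or a mapping with no struct page */
	return page;
}
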
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr+PAGE_SIZE);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i;
	unsigned long vm_flags;
	unsigned int page_mask;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(mm, start)) {
			int ret;
			ret = get_gate_page(mm, start & PAGE_MASK, gup_flags,
					&vma, pages ? &pages[i] : NULL);
			if (ret)
				goto efault;
			page_mask = 0;
			goto next_page;
		}

		if (!vma)
			goto efault;
		vm_flags = vma->vm_flags;
		if (vm_flags & (VM_IO | VM_PFNMAP))
			goto efault;

		if (gup_flags & FOLL_WRITE) {
			if (!(vm_flags & VM_WRITE)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * We used to let the write,force case do COW
				 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
				 * ptrace could set a breakpoint in a read-only
				 * mapping of an executable, without corrupting
				 * the file (yet only when that file had been
				 * opened for writing!). Anon pages in shared
				 * mappings are surprising: now just reject it.
				 */
				if (!is_cow_mapping(vm_flags)) {
					WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
					goto efault;
				}
			}
		} else {
			if (!(vm_flags & VM_READ)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * Is there actually any vma we can reach here
				 * which does not have VM_MAYREAD set?
				 */
				if (!(vm_flags & VM_MAYREAD))
					goto efault;
			}
		}

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;
			unsigned int page_increm;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page_mask(vma, start,
						foll_flags, &page_mask))) {
				int ret;
				unsigned int fault_flags = 0;

				/* For mlock, just skip the stack guard page. */
				if (foll_flags & FOLL_MLOCK) {
					if (stack_guard_page(vma, start))
						goto next_page;
				}
				if (foll_flags & FOLL_WRITE)
					fault_flags |= FAULT_FLAG_WRITE;
				if (nonblocking)
					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
				if (foll_flags & FOLL_NOWAIT)
					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);

				ret = handle_mm_fault(mm, vma, start,
							fault_flags);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret & (VM_FAULT_HWPOISON |
						   VM_FAULT_HWPOISON_LARGE)) {
						if (i)
							return i;
						else if (gup_flags & FOLL_HWPOISON)
							return -EHWPOISON;
						else
							return -EFAULT;
					}
					if (ret & VM_FAULT_SIGBUS)
						goto efault;
					BUG();
				}

				if (tsk) {
					if (ret & VM_FAULT_MAJOR)
						tsk->maj_flt++;
					else
						tsk->min_flt++;
				}

				if (ret & VM_FAULT_RETRY) {
					if (nonblocking)
						*nonblocking = 0;
					return i;
				}

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
				page_mask = 0;
			}
next_page:
			if (vmas) {
				vmas[i] = vma;
				page_mask = 0;
			}
			page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
			if (page_increm > nr_pages)
				page_increm = nr_pages;
			i += page_increm;
			start += page_increm * PAGE_SIZE;
			nr_pages -= page_increm;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
efault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(__get_user_pages);
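
/*
 * Illustrative sketch (not part of gup.c itself): calling __get_user_pages()
 * directly, which is only worthwhile when a caller needs special gup_flags.
 * The wrapper name is hypothetical; note that passing a non-NULL @pages
 * requires FOLL_GET, as the VM_BUG_ON above enforces.
 */
static long example_pin_writable(struct task_struct *tsk, struct mm_struct *mm,
				 unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	long pinned;

	down_read(&mm->mmap_sem);
	pinned = __get_user_pages(tsk, mm, start, nr_pages,
				  FOLL_GET | FOLL_TOUCH | FOLL_WRITE,
				  pages, NULL, NULL);
	up_read(&mm->mmap_sem);

	/* each page in pages[0..pinned-1] must eventually be put_page()d */
	return pinned;
}
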
/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), this returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mmap_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
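
/*
 * Illustrative sketch (not part of gup.c itself): the futex-style pattern the
 * comment above describes - try the access with page faults disabled, and on
 * failure resolve the fault with fixup_user_fault() before retrying.  The
 * helper name and the retry policy are hypothetical.
 */
static int example_read_user_u32(u32 __user *uaddr, u32 *val)
{
	struct mm_struct *mm = current->mm;
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();
	if (!ret)
		return 0;

	/* fault the page in, then let the caller retry the atomic access */
	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr, 0);
	up_read(&mm->mmap_sem);
	return ret ? ret : -EAGAIN;
}
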
/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
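
/*
 * Illustrative sketch (not part of gup.c itself): the "pin, use, dirty,
 * release" pattern described in the comment above, pinning pages of the
 * current task for writing.  The wrapper name is hypothetical.
 */
static long example_pin_current_buffer(unsigned long start,
				       unsigned long nr_pages,
				       struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long pinned;

	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, start, nr_pages,
				1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	/*
	 * After the pages have been used (e.g. as a DMA target), each one
	 * must be released with set_page_dirty_lock(page) followed by
	 * put_page(page), since they were pinned for writing.
	 */
	return pinned;
}
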
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
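
/*
 * Illustrative sketch (not part of gup.c itself): a simplified approximation
 * of how a core-dump writer such as fs/binfmt_elf.c consumes get_dump_page(),
 * writing the page when one is returned and leaving a hole otherwise.  The
 * helper name and the range handling are hypothetical.
 */
static int example_dump_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);
			int ok = dump_emit(cprm, kaddr, PAGE_SIZE);

			kunmap(page);
			page_cache_release(page);
			if (!ok)
				return 0;
		} else if (!dump_skip(cprm, PAGE_SIZE)) {
			return 0;
		}
	}
	return 1;
}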