// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft dirty bit so we can notice in userspace
	 * that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
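
/*
 * Example (userspace, illustrative only -- assumes CONFIG_MEM_SOFT_DIRTY):
 * soft-dirty tracking lets a monitor notice the ptes touched by a move:
 *
 *	echo 4 > /proc/$pid/clear_refs	  (clear the soft-dirty bits)
 *	... the task calls mremap() ...
 *	read /proc/$pid/pagemap		  (bit 55 set for the moved ptes)
 *
 * See Documentation/admin-guide/mm/soft-dirty.rst.
 */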

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
	    || old_end - old_addr < PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#endif
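
/*
 * Illustrative example (not from this file; sizes assume x86-64 with 4K
 * pages, where PMD_SIZE is 2M): moving [0x600000, 0x800000) to 0xa00000
 * passes the checks above -- both addresses are PMD-aligned and the range
 * covers a full PMD -- so the whole page-table page is handed over with a
 * single set_pmd_at() instead of 512 individual pte moves in move_ptes().
 */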

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_end, old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			bool moved;

			if (need_rmap_locks)
				take_rmap_locks(vma);
			moved = move_normal_pmd(vma, old_addr, new_addr,
						old_end, old_pmd, new_pmd);
			if (need_rmap_locks)
				drop_rmap_locks(vma);
			if (moved)
				continue;
#endif
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

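/*
 * Worked example (illustrative): for len = 3M, if the loop above stops
 * 1M short (old_addr == old_end - 1M), the return value is
 * 3M + old_addr - old_end = 2M -- the number of bytes actually moved,
 * which move_vma() compares against old_len to detect partial failure.
 */
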
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfnmap tracking code that the mapping moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma()
		 * calling vma_merge(). This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

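	/*
	 * Worked example (illustrative, using the default
	 * sysctl_max_map_count of 65530): move_vma() bails once
	 * map_count >= 65527, so the check above already refuses at
	 * map_count >= 65525, leaving headroom for the two extra vmas
	 * the unmaps below may create.
	 */
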
	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
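
/*
 * Example (userspace, illustrative only): growing an anonymous mapping,
 * letting the kernel move it if it cannot be expanded in place:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (p == MAP_FAILED)
 *		... handle error ...
 */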

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_sem to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_sem is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		up_read(&current->mm->mmap_sem);
	else
		up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}