/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

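/*
 * Fallback for architectures that do not define pgprot_modify(): simply
 * take the new protection bits. Architectures such as x86 override this
 * to carry special bits (e.g. the PAT bits) over from the old protection.
 */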
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

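/*
 * Change the protection of every pte mapped by one pmd. For !prot_numa
 * the new protection is applied directly; for prot_numa, present ptes
 * that map normal, non-KSM pages are marked pte_numa instead. Writable
 * migration entries are downgraded to read-only, since a precise
 * permission check for them is not worth the trouble. Returns the
 * number of entries that were actually updated.
 */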
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

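/*
 * Apply the protection change to every pmd in the range. A transparent
 * huge pmd covering a whole aligned HPAGE_PMD_SIZE chunk is changed in
 * one go by change_huge_pmd(); otherwise it is split and handled pte by
 * pte. The mmu notifier range is only opened once the first populated
 * pmd is found.
 */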
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

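/*
 * Iterate over the puds in the range and push the protection change
 * down to the pmd level, accumulating the count of updated entries.
 */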
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

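/*
 * Walk the page tables from the pgd down and rewrite the protections
 * for the whole range. The TLB is flushed only when some entries were
 * actually modified; inc/dec_tlb_flush_pending() mark the window in
 * which a flush for this mm may still be outstanding.
 */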
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

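/*
 * Change the protection of a single vma's range: hugetlb vmas take
 * their own path, everything else goes through the generic page table
 * walk above. Returns the number of updated page table entries.
 */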
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

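/*
 * Helpers for the PROT_NONE check in mprotect_fixup(): walk every pte
 * (including hugetlb entries) in the range and fail with -EACCES if the
 * architecture does not allow the pfn to be mapped with the requested
 * protection.
 */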
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, unsigned long newflags)
{
	pgprot_t new_pgprot = vm_get_page_prot(newflags);
	struct mm_walk prot_none_walk = {
		.pte_entry = prot_none_pte_entry,
		.hugetlb_entry = prot_none_hugetlb_entry,
		.test_walk = prot_none_test,
		.mm = current->mm,
		.private = &new_pgprot,
	};

	return walk_page_range(start, end, &prot_none_walk);
}

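/*
 * Apply newflags to the [start, end) slice of one vma: run the
 * PROT_NONE pfn checks, charge memory if a private mapping becomes
 * writable, then merge with neighbouring vmas or split this one as
 * needed before rewriting the page protections.
 */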
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
		error = prot_none_walk(vma, start, end, newflags);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

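/*
 * The mprotect(2) system call: validate the arguments, then walk the
 * vmas covering [start, start + len) and apply the new protection to
 * each piece via mprotect_fixup(). PROT_GROWSDOWN and PROT_GROWSUP
 * extend the affected range to the growable end of the first vma.
 *
 * Illustrative userspace usage (not part of this file):
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	...
 *	if (mprotect(buf, len, PROT_READ) < 0)
 *		perror("mprotect");
 */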
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}