/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
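
/*
 * Generic fallback below, used only when the architecture does not provide
 * its own pgprot_modify(): the old protection is discarded and the new
 * protection bits are adopted as-is.
 */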
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;
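
	/*
	 * Lazy MMU mode lets paravirtualized architectures batch the
	 * individual pte writes made in the loop below instead of trapping
	 * on every update.
	 */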
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
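
/*
 * The helpers below each walk one level of the page-table hierarchy
 * (pmd, pud, pgd), skip empty entries, and add up how many entries were
 * actually changed so the caller can decide whether a TLB flush is needed.
 */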
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}
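
/*
 * The walkers below back prot_none_walk(): when a PFN-based mapping is
 * being made PROT_NONE, every pte (including hugetlb ptes) in the range
 * must pass the architecture's pfn_modify_allowed() check for the new
 * protection, otherwise the mprotect() request fails with -EACCES.
 */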
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, unsigned long newflags)
{
	pgprot_t new_pgprot = vm_get_page_prot(newflags);
	struct mm_walk prot_none_walk = {
		.pte_entry = prot_none_pte_entry,
		.hugetlb_entry = prot_none_hugetlb_entry,
		.test_walk = prot_none_test,
		.mm = current->mm,
		.private = &new_pgprot,
	};

	return walk_page_range(start, end, &prot_none_walk);
}
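
/*
 * Apply newflags to the [start, end) slice of @vma: merge with neighbouring
 * VMAs or split this one as required, update vm_flags/vm_page_prot and
 * rewrite the affected page-table entries.  *pprev is updated for the
 * caller's iteration.  Returns 0 on success or a negative errno.
 */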
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
		error = prot_none_walk(vma, start, end, newflags);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
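
		/*
		 * VM_MAYREAD, VM_MAYWRITE and VM_MAYEXEC sit exactly four bits
		 * above VM_READ, VM_WRITE and VM_EXEC, so shifting newflags
		 * right by four lines the VM_MAY* bits up with the permissions
		 * being requested in the check below.
		 */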
		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
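
/*
 * Illustrative usage from userspace (not part of this file): a caller
 * typically maps an anonymous region and later tightens its protection
 * with mprotect(2), which enters the kernel through the syscall above.
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (p != MAP_FAILED && mprotect(p, 4096, PROT_READ) != 0)
 *		perror("mprotect");
 */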