// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"
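/*
 * Return true if we can upgrade this R/O PTE to writable right away,
 * without going through the write-fault handler: the VMA must be writable,
 * the PTE dirty and not involved in NUMA-hinting, soft-dirty or uffd-wp
 * tracking; for private mappings the page must also be anonymous and
 * exclusive to us.
 */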
static inline bool can_change_pte_writable(struct vm_area_struct *vma,
					   unsigned long addr, pte_t pte)
{
	struct page *page;

	VM_BUG_ON(!(vma->vm_flags & VM_WRITE) || pte_write(pte));

	if (pte_protnone(pte) || !pte_dirty(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * We can only special-case on exclusive anonymous pages,
		 * because we know that our write-fault handler similarly would
		 * map them writable without any additional checks while holding
		 * the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		if (!page || !PageAnon(page) || !PageAnonExclusive(page))
			return false;
	}

	return true;
}
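/*
 * Walk the PTEs mapped by @pmd in [addr, end) and apply @newprot, plus any
 * uffd-wp or NUMA-hinting adjustments requested via @cp_flags. Returns the
 * number of entries that were updated.
 */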
static unsigned long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);

	/*
	 * Can be called with only the mmap_lock held for reading by
	 * prot_numa, so we must check that the pmd isn't constantly
	 * changing from under us, from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte, so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || is_zone_device_page(page) || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;

				/*
				 * Skip scanning top tier node if normal numa
				 * balancing is disabled.
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    node_is_toptier(nid))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				ptent = pte_clear_uffd_wp(ptent);
			}

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				struct page *page = pfn_swap_entry_to_page(entry);

				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (pte_marker_entry_uffd_wp(entry)) {
				/*
				 * If this is uffd-wp pte marker and we'd like
				 * to unprotect it, drop it; the next page
				 * fault will trigger without uffd trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte; what else could it be? */
			WARN_ON_ONCE(!pte_none(oldpte));
			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
				/*
				 * For file-backed mem, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist.  Do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}
/* Return true if we're uffd wr-protecting file-backed memory, or false */
static bool uffd_wp_protect_file(struct vm_area_struct *vma,
				 unsigned long cp_flags)
{
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}
/*
 * If wr-protecting the range for file-backed, populate pgtable for the case
 * when pgtable is empty but page cache exists.  If {pte|pmd|...}_alloc()
 * fails it means we are out of memory, and we have no better option but to
 * stop.
 */
#define  change_pmd_prepare(vma, pmd, cp_flags)				\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			if (WARN_ON_ONCE(pte_alloc(vma->vm_mm, pmd)))	\
				break;					\
		}							\
	} while (0)
/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need a
 * separate change_pmd_prepare() because pte_alloc() returns 0 on success,
 * while {pmd|pud|p4d}_alloc() return a valid pointer on success.
 */
#define  change_prepare(vma, high, low, addr, cp_flags)			\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (WARN_ON_ONCE(p == NULL))			\
				break;					\
		}							\
	} while (0)
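/*
 * For example (illustrative expansion only), the token pasting in
 * change_prepare(vma, pud, pmd, addr, cp_flags) yields roughly:
 *
 *	do {
 *		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {
 *			pmd_t *p = pmd_alloc(vma->vm_mm, pud, addr);
 *			if (WARN_ON_ONCE(p == NULL))
 *				break;
 *		}
 *	} while (0);
 */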
static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		change_pmd_prepare(vma, pmd, cp_flags);
		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    uffd_wp_protect_file(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed, the pmd could have been
				 * cleared; make sure pmd populated if
				 * necessary, then fall-through to pte level.
				 */
				change_pmd_prepare(vma, pmd, cp_flags);
			} else {
				/*
				 * change_huge_pmd() does not defer TLB flushes,
				 * so no need to propagate the tlb argument.
				 */
				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
						addr, newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(tlb, vma, pmd, addr, next,
					      newprot, cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}
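/*
 * The pud/p4d/pgd walkers below mirror one another: pre-allocate the lower
 * level where uffd-wp on file-backed memory requires it, skip empty
 * entries, and otherwise recurse one level down.
 */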
static inline unsigned long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		change_prepare(vma, pud, pmd, addr, cp_flags);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}
static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		change_prepare(vma, p4d, pud, addr, cp_flags);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}
static unsigned long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}
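/*
 * Change protection over [start, end) of @vma, dispatching to the hugetlb
 * helper or to the generic page-table walk above. Returns the number of
 * base pages whose protection was changed.
 */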
unsigned long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}
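/*
 * pagewalk callbacks used by mprotect_fixup() to check, on architectures
 * with arch_has_pfn_modify_check(), that every PFN in a PFN/mixed mapping
 * may actually be mapped with the new (PROT_NONE) protection.
 */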
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};
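/*
 * Apply @newflags to [start, end), which must lie within @vma, splitting
 * or merging the VMA as needed and then updating the page tables. *pprev
 * is set to the VMA preceding the resulting range.
 */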
int
mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
	       struct vm_area_struct **pprev, unsigned long start,
	       unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	bool try_change_writable;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		try_change_writable = vma_wants_writenotify(vma, vma->vm_page_prot);
	else
		try_change_writable = !!(vma->vm_flags & VM_WRITE);
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, vma->vm_page_prot,
			  try_change_writable ? MM_CP_TRY_CHANGE_WRITABLE : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = vma->vm_prev;

	tlb_gather_mmu(&tlb, current->mm);
	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);
out:
	mmap_write_unlock(current->mm);
	return error;
}
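/*
 * Userspace usage example (illustrative only, not part of the kernel code
 * here):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;				// writable so far
 *	mprotect(p, 4096, PROT_READ);		// further writes fault
 */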
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}
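/*
 * Userspace usage example (illustrative only): allocate a key that denies
 * access, attach it with pkey_mprotect(), and later flip the per-thread
 * rights register instead of calling mprotect() again:
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
 *	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
 *	pkey_set(pkey, 0);	// glibc wrapper: re-enable access
 */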
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}
SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */