// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable. */
	if (pte_protnone(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * Writable MAP_PRIVATE mapping: We can only special-case on
		 * exclusive anonymous pages, because we know that our
		 * write-fault handler similarly would map them writable without
		 * any additional checks while holding the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/*
	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
	 * needs a real write-fault for writenotify
	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
	 * FS was already notified and we can simply mark the PTE writable
	 * just like the write-fault handler would do.
	 */
	return pte_dirty(pte);
}
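
/*
 * change_pte_range() below walks every pte in [addr, end) under the page
 * table lock and applies @newprot, honouring the MM_CP_* flags: NUMA
 * hinting updates (MM_CP_PROT_NUMA), userfaultfd write-protect set/resolve
 * (MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE), and opportunistic write upgrades
 * via can_change_pte_writable() (MM_CP_TRY_CHANGE_WRITABLE).
 */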

static long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = ptep_get(pte);
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct folio *folio;
				int nid;
				bool toptier;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				folio = vm_normal_folio(vma, addr, oldpte);
				if (!folio || folio_is_zone_device(folio) ||
				    folio_test_ksm(folio))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    folio_ref_count(folio) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (folio_is_file_lru(folio) &&
				    folio_test_dirty(folio))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = folio_nid(folio);
				if (target_node == nid)
					continue;
				toptier = node_is_toptier(nid);

				/*
				 * Skip scanning top tier node if normal numa
				 * balancing is disabled
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    toptier)
					continue;
				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
				    !toptier)
					folio_xchg_access_time(folio,
						jiffies_to_msecs(jiffies));
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);

			if (uffd_wp)
				ptent = pte_mkuffd_wp(ptent);
			else if (uffd_wp_resolve)
				ptent = pte_clear_uffd_wp(ptent);

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent, vma);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				struct folio *folio = pfn_swap_entry_folio(entry);

				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				if (folio_test_anon(folio))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_nonpresent_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_pte_marker_entry(entry)) {
				/*
				 * Ignore error swap entries unconditionally,
				 * because any access should sigbus anyway.
				 */
				if (is_poisoned_swp_entry(entry))
					continue;
				/*
				 * If this is uffd-wp pte marker and we'd like
				 * to unprotect it, drop it; the next page
				 * fault will trigger without uffd trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte, or what else?.. */
			WARN_ON_ONCE(!pte_none(oldpte));

			/*
			 * Nobody plays with any none ptes besides
			 * userfaultfd when applying the protections.
			 */
			if (likely(!uffd_wp))
				continue;

			if (userfaultfd_wp_use_markers(vma)) {
				/*
				 * For file-backed mem, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist. Do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Return true if we want to split THPs into PTE mappings in change
 * protection procedure, false otherwise.
 */
static inline bool
pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/*
	 * pte markers only reside in pte level, if we need pte markers,
	 * we need to split. We cannot wr-protect shmem thp because file
	 * thp is handled differently when split by erasing the pmd so far.
	 */
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

/*
 * Return true if we want to populate pgtables in change protection
 * procedure, false otherwise.
 */
static inline bool
pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
	if (!(cp_flags & MM_CP_UFFD_WP))
		return false;

	/* Populate if the userfaultfd mode requires pte markers */
	return userfaultfd_wp_use_markers(vma);
}

/*
 * Populate the pgtable underneath for whatever reason if requested.
 * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable
 * allocation failures during page faults by kicking OOM and returning
 * error.
 */
#define change_pmd_prepare(vma, pmd, cp_flags)				\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			if (pte_alloc(vma->vm_mm, pmd))			\
				err = -ENOMEM;				\
		}							\
		err;							\
	})

/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
 * have separate change_pmd_prepare() because pte_alloc() returns 0 on success,
 * while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
 */
#define change_prepare(vma, high, low, addr, cp_flags)			\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (p == NULL)					\
				err = -ENOMEM;				\
		}							\
		err;							\
	})
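
/*
 * To make the token pasting above concrete: at the p4d level,
 * change_prepare(vma, p4d, pud, addr, cp_flags) expands roughly (as an
 * illustrative sketch rather than the exact preprocessor output) to
 *
 *	pud_t *p = pud_alloc(vma->vm_mm, p4d, addr);
 *	if (p == NULL)
 *		err = -ENOMEM;
 *
 * so "low" names the level being allocated and "high" is the entry one
 * level above it.
 */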

static inline long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		long ret;
		pmd_t _pmd;
again:
		next = pmd_addr_end(addr, end);

		ret = change_pmd_prepare(vma, pmd, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}

		if (pmd_none(*pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		_pmd = pmdp_get_lockless(pmd);
		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    pgtable_split_needed(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed, the pmd could have been
				 * cleared; make sure pmd populated if
				 * necessary, then fall-through to pte level.
				 */
				ret = change_pmd_prepare(vma, pmd, cp_flags);
				if (ret) {
					pages = ret;
					break;
				}
			} else {
				ret = change_huge_pmd(tlb, vma, pmd,
						      addr, newprot, cp_flags);
				if (ret) {
					if (ret == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}

		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
				       cp_flags);
		if (ret < 0)
			goto again;
		pages += ret;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	long pages = 0, ret;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
		if (ret)
			return ret;
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	long pages = 0, ret;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
		if (ret)
			return ret;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

static long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	long pages = 0, ret;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}

long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, unsigned long cp_flags)
{
	pgprot_t newprot = vma->vm_page_prot;
	long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

#ifdef CONFIG_NUMA_BALANCING
	/*
	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
	 * are expected to reflect their requirements via VMA flags such that
	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
	 */
	if (cp_flags & MM_CP_PROT_NUMA)
		newprot = PAGE_NONE;
#else
	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
#endif

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}

static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
				  *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
				  *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
	.walk_lock		= PGWALK_WRLOCK,
};

static int
mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
	       unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned int mm_cp_flags = 0;
	unsigned long charged = 0;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again except in the anonymous case where no
	 * anon_vma has yet been assigned.
	 *
	 * hugetlb mappings were accounted for even if read-only so there is
	 * no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
		   !vma->anon_vma) {
		newflags &= ~VM_ACCOUNT;
	}

	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
	if (IS_ERR(vma)) {
		error = PTR_ERR(vma);
		goto fail;
	}

	*pprev = vma;

	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma_start_write(vma);
	vm_flags_reset(vma, newflags);
	if (vma_wants_manual_pte_write_upgrade(vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, mm_cp_flags);

	if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
		vm_unacct_memory(nrpages);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;
	struct vma_iterator vmi;

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma_iter_init(&vmi, current->mm, start);
	vma = vma_find(&vmi, end);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	tlb_gather_mmu(&tlb, current->mm);
	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		if (vma->vm_start != tmp) {
			error = -ENOMEM;
			break;
		}

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		if (map_deny_write_exec(vma, newflags)) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		tmp = vma_iter_end(&vmi);
		nstart = tmp;
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);

	if (!error && tmp < end)
		error = -ENOMEM;

out:
	mmap_write_unlock(current->mm);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
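
/*
 * Illustrative userspace view of the syscall above (a sketch, not part of
 * the kernel sources; assumes <sys/mman.h> and a 4 KiB page size): a caller
 * typically maps memory writable, then drops the write bit, after which
 * stores fault and the task receives SIGSEGV.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;			// writable
 *	mprotect(p, 4096, PROT_READ);	// now read-only
 *	p[0] = 2;			// faults; the task gets SIGSEGV
 */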

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}
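
/*
 * Illustrative userspace flow for the pkey syscalls in this section (a
 * sketch, not part of the kernel sources; assumes the glibc wrappers
 * pkey_alloc()/pkey_mprotect()/pkey_free() and <sys/mman.h>):
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
 *	// loads still work; stores fault until the thread-local access
 *	// rights for "pkey" are relaxed again
 *	pkey_free(pkey);
 */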

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}

	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */