* Return true if the original pte was a uffd-wp pte marker (so the pte was
* wr-protected).
*/
-static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
+static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
+ if (!userfaultfd_wp(vmf->vma))
+ return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}
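A note on why the new early return is cheap enough to evaluate unconditionally: userfaultfd_wp() is a single VM_UFFD_WP flag test on the vma, with a constant-false stub when CONFIG_USERFAULTFD is disabled. Sketched from include/linux/userfaultfd_k.h (verify against the tree this applies to):

#ifdef CONFIG_USERFAULTFD
static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}
#else /* !CONFIG_USERFAULTFD */
static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}
#endif

Combined with __always_inline, this lets the compiler fold vmf_orig_pte_uffd_wp() to constant false on !CONFIG_USERFAULTFD builds, and to a single flag test in the common non-wp case, which is what makes re-evaluating it at each call site below as cheap as the cached local it replaces.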
@@ ... @@
 */
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
- bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
struct folio *folio;
@@ ... @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
	folio_add_new_anon_rmap(folio, vma, addr);
folio_add_lru_vma(folio, vma);
setpte:
- if (uffd_wp)
+ if (vmf_orig_pte_uffd_wp(vmf))
entry = pte_mkuffd_wp(entry);
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
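For the armed case, the cost of the repeated call is bounded by pte_marker_uffd_wp(), which decodes vmf->orig_pte as a swap-style marker entry. Roughly, per include/linux/userfaultfd_k.h (a sketch; the exact helper split varies slightly across releases):

static inline bool pte_marker_uffd_wp(pte_t pte)
{
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
}

This only runs when the vma has VM_UFFD_WP set and orig_pte is valid, i.e. on the slow path that previously paid for it anyway.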
@@ ... @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
		struct page *page, unsigned int nr, unsigned long addr)
{
struct vm_area_struct *vma = vmf->vma;
- bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
pte_t entry;
@@ ... @@
	if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (unlikely(uffd_wp))
+ if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
entry = pte_mkuffd_wp(entry);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
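To exercise these paths from userspace, a sequence along the following lines (a hypothetical, abridged test, error handling omitted; needs a kernel with UFFD_FEATURE_WP_UNPOPULATED) installs uffd-wp pte markers on an unpopulated anonymous range; a subsequent read fault is then resolved through do_anonymous_page() above, which must carry the wp bit over to the freshly installed pte:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_WP_UNPOPULATED,
	};
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)buf, .len = len },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Install uffd-wp pte markers on the still-unpopulated range. */
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)buf, .len = len },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/*
	 * Read fault on a wr-protected hole: resolved via
	 * do_anonymous_page(), which must preserve the uffd-wp bit
	 * (the vmf_orig_pte_uffd_wp() check in the hunks above).
	 */
	char c = *(volatile char *)buf;
	(void)c;
	return 0;
}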