return true;
}
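+/*
+ * Allocate and memcg-charge an order-0 anonymous folio for an mfill
+ * operation; returns NULL if either the allocation or the charge fails.
+ */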
+static struct folio *anon_alloc_folio(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ addr);
+
+ if (!folio)
+ return NULL;
+
+ if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
+ folio_put(folio);
+ return NULL;
+ }
+
+ return folio;
+}
+
static const struct vm_uffd_ops anon_uffd_ops = {
.can_userfault = anon_can_userfault,
+ .alloc_folio = anon_alloc_folio,
};
static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
return 0;
}
-static int mfill_atomic_pte_copy(struct mfill_state *state)
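+/*
+ * Common helper for the UFFDIO copy and zeropage paths: allocate a folio
+ * through the VMA's uffd_ops, fill it according to the mfill mode, and
+ * install it at the destination address.
+ */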
+static int __mfill_atomic_pte(struct mfill_state *state,
+ const struct vm_uffd_ops *ops)
{
unsigned long dst_addr = state->dst_addr;
unsigned long src_addr = state->src_addr;
struct folio *folio;
int ret;
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, state->vma, dst_addr);
+ folio = ops->alloc_folio(state->vma, dst_addr);
if (!folio)
return -ENOMEM;
- ret = -ENOMEM;
- if (mem_cgroup_charge(folio, state->vma->vm_mm, GFP_KERNEL))
- goto out_release;
-
- ret = mfill_copy_folio_locked(folio, src_addr);
- if (unlikely(ret)) {
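+ /* Populate the new folio: copy from userspace or zero it. */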
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+ ret = mfill_copy_folio_locked(folio, src_addr);
/*
 * Fall back to copy_from_user() outside mmap_lock.
 * If the retry is successful, mfill_copy_folio_retry() returns 0.
 * If there was an error, we must mfill_put_vma() anyway, and it
 * will take care of unlocking if needed.
 */
- ret = mfill_copy_folio_retry(state, folio);
- if (ret)
- goto out_release;
+ if (unlikely(ret)) {
+ ret = mfill_copy_folio_retry(state, folio);
+ if (ret)
+ goto err_folio_put;
+ }
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
+ clear_user_highpage(&folio->page, dst_addr);
+ } else {
+ VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
+ ret = -EINVAL;
+ goto err_folio_put;
}
/*
 * The memory barrier inside __folio_mark_uptodate makes sure that
 * the folio contents become visible before mapping the page using
 * set_pte_at(). See do_anonymous_page().
 */
__folio_mark_uptodate(folio);

ret = mfill_atomic_install_pte(state->pmd, state->vma, dst_addr,
&folio->page, true, flags);
if (ret)
- goto out_release;
-out:
- return ret;
-out_release:
+ goto err_folio_put;
+
+ return 0;
+
+err_folio_put:
+ folio_put(folio);
/* Don't return -ENOENT so that our caller won't retry */
if (ret == -ENOENT)
ret = -EFAULT;
- folio_put(folio);
- goto out;
+ return ret;
}
-static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr)
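+/* UFFDIO_COPY entry point: resolves the VMA's uffd_ops and delegates. */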
+static int mfill_atomic_pte_copy(struct mfill_state *state)
{
- struct folio *folio;
- int ret = -ENOMEM;
-
- folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
- if (!folio)
- return ret;
-
- if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
- goto out_put;
+ const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
+
- /*
- * The memory barrier inside __folio_mark_uptodate makes sure that
- * zeroing out the folio become visible before mapping the page
- * using set_pte_at(). See do_anonymous_page().
- */
- __folio_mark_uptodate(folio);
+ return __mfill_atomic_pte(state, ops);
+}
- ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
- &folio->page, true, 0);
- if (ret)
- goto out_put;
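+/* UFFDIO_ZEROPAGE fallback for mms that forbid the shared zeropage. */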
+static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
+{
+ const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
+
- return 0;
-out_put:
- folio_put(folio);
- return ret;
+ return __mfill_atomic_pte(state, ops);
}
static int mfill_atomic_pte_zeropage(struct mfill_state *state)
int ret;
if (mm_forbids_zeropage(dst_vma->vm_mm))
- return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
+ return mfill_atomic_pte_zeroed_folio(state);
_dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
dst_vma->vm_page_prot));