page = vm_normal_page_pmd(vma, addr, *pmd);
present = true;
} else if (unlikely(thp_migration_supported())) {
- swp_entry_t entry = pmd_to_swp_entry(*pmd);
+ const softleaf_t entry = softleaf_from_pmd(*pmd);
- if (is_pfn_swap_entry(entry))
- page = pfn_swap_entry_to_page(entry);
+ if (softleaf_has_pfn(entry))
+ page = softleaf_to_page(entry);
}
if (IS_ERR_OR_NULL(page))
return;
pmd = pmd_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
- } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+ } else if (pmd_is_migration_entry(pmd)) {
pmd = pmd_swp_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
if (pm->show_pfn)
frame = pmd_pfn(pmd) + idx;
} else if (thp_migration_supported()) {
- swp_entry_t entry = pmd_to_swp_entry(pmd);
+ const softleaf_t entry = softleaf_from_pmd(pmd);
unsigned long offset;
if (pm->show_pfn) {
- if (is_pfn_swap_entry(entry))
- offset = swp_offset_pfn(entry) + idx;
+ if (softleaf_has_pfn(entry))
+ offset = softleaf_to_pfn(entry) + idx;
else
offset = swp_offset(entry) + idx;
frame = swp_type(entry) |
flags |= PM_SOFT_DIRTY;
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
- VM_WARN_ON_ONCE(!is_pmd_migration_entry(pmd));
+ VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
page = pfn_swap_entry_to_page(entry);
}
if (pmd_soft_dirty(pmd))
categories |= PAGE_IS_SOFT_DIRTY;
} else {
- swp_entry_t swp;
-
categories |= PAGE_IS_SWAPPED;
if (!pmd_swp_uffd_wp(pmd))
categories |= PAGE_IS_WRITTEN;
categories |= PAGE_IS_SOFT_DIRTY;
if (p->masks_of_interest & PAGE_IS_FILE) {
- swp = pmd_to_swp_entry(pmd);
- if (is_pfn_swap_entry(swp) &&
- !folio_test_anon(pfn_swap_entry_folio(swp)))
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ if (softleaf_has_pfn(entry) &&
+ !folio_test_anon(softleaf_to_folio(entry)))
categories |= PAGE_IS_FILE;
}
}
old = pmdp_invalidate_ad(vma, addr, pmdp);
pmd = pmd_mkuffd_wp(old);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
- } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+ } else if (pmd_is_migration_entry(pmd)) {
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
return pte_to_swp_entry(pte);
}
+/**
+ * softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
+ * @entry: Leaf entry.
+ *
+ * This generates an architecture-specific PTE entry that encodes the metadata
+ * contained in the leaf entry.
+ *
+ * Returns: Architecture-specific PTE entry encoding the leaf entry.
+ */
+static inline pte_t softleaf_to_pte(softleaf_t entry)
+{
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry_to_pte(entry);
+}
+
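For illustration only (not part of this series): a minimal sketch of re-encoding a leaf entry obtained from a non-present PTE. The helper name example_reencode_pte() is hypothetical; note that software bits such as soft-dirty and uffd-wp are not carried by the leaf entry itself and would have to be re-applied by the caller.

/* Hypothetical sketch, not part of this series. */
static inline pte_t example_reencode_pte(pte_t pte)
{
        /* Decode the arch-specific non-present PTE into a leaf entry... */
        const softleaf_t entry = softleaf_from_pte(pte);

        /* ...and encode it back into an arch-specific PTE. */
        return softleaf_to_pte(entry);
}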
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/**
+ * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
+ * @pmd: PMD entry.
+ *
+ * If @pmd is present or empty (and therefore not a leaf entry), the function
+ * returns an empty leaf entry. Otherwise, it returns the leaf entry encoded in
+ * @pmd.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ softleaf_t arch_entry;
+
+ if (pmd_present(pmd) || pmd_none(pmd))
+ return softleaf_mk_none();
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+#else
+
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ return softleaf_mk_none();
+}
+
+#endif
+
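A minimal usage sketch (illustrative only; example_pmd_leaf_page() is a hypothetical helper): decode a PMD once and rely on the leaf-entry predicates rather than open-coded pmd_present()/swap-entry checks.

/* Hypothetical sketch, not part of this series. */
static inline struct page *example_pmd_leaf_page(pmd_t pmd)
{
        const softleaf_t entry = softleaf_from_pmd(pmd);

        /* Present or empty PMDs yield an empty leaf entry. */
        if (softleaf_is_none(entry))
                return NULL;

        /* Migration and device-private entries reference a page. */
        if (softleaf_has_pfn(entry))
                return softleaf_to_page(entry);

        return NULL;
}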
/**
* softleaf_is_none() - Is the leaf entry empty?
* @entry: Leaf entry.
return softleaf_type(entry) == SOFTLEAF_SWAP;
}
+/**
+ * softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a writable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
+}
+
+/**
+ * softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a readable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_read(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
+}
+
+/**
+ * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
+ * readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is an exclusive readable migration entry,
+ * otherwise false.
+ */
+static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+}
+
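A sketch of how the migration sub-type predicates compose (illustrative only; example_downgrade_migration() is a hypothetical helper): fork-time copying downgrades writable and anon-exclusive readable migration entries to plain readable ones, mirroring the copy_huge_pmd() hunk later in this series.

/* Hypothetical sketch, not part of this series. */
static inline softleaf_t example_downgrade_migration(softleaf_t entry)
{
        if (softleaf_is_migration_write(entry) ||
            softleaf_is_migration_read_exclusive(entry))
                return make_readable_migration_entry(swp_offset(entry));

        return entry;
}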
/**
* softleaf_is_migration() - Is this leaf entry a migration entry?
* @entry: Leaf entry.
}
}
+/**
+ * softleaf_is_device_private_write() - Is this leaf entry a device private
+ * writable entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private writable entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_device_private_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
+}
+
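Illustrative sketch (example_device_private_writable() is a hypothetical helper): device-private entries have no read-exclusive variant, so writability is the only distinction the predicates need to make, as the copy_huge_pmd() and hmm hunks below rely on.

/* Hypothetical sketch, not part of this series. */
static inline bool example_device_private_writable(softleaf_t entry)
{
        /* Device-private entries are either readable or writable. */
        return softleaf_is_device_private(entry) &&
               softleaf_is_device_private_write(entry);
}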
/**
* softleaf_is_device_private() - Is this leaf entry a device private entry?
* @entry: Leaf entry.
}
/**
- * softleaf_is_device_exclusive() - Is this leaf entry a device exclusive entry?
+ * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
* @entry: Leaf entry.
*
- * Returns: true if the leaf entry is a device exclusive entry, otherwise false.
+ * Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
*/
static inline bool softleaf_is_device_exclusive(softleaf_t entry)
{
return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
}
+#ifdef CONFIG_MIGRATION
+
+/**
+ * softleaf_is_migration_young() - Does this migration entry contain an accessed
+ * bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the accessed (or 'young') bit was set on the migrated page
+ * table entry.
+ *
+ * Returns: true if the entry contains an accessed bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+/**
+ * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the dirty bit was set on the migrated page table entry.
+ *
+ * Returns: true if the entry contains a dirty bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
+
+#else /* CONFIG_MIGRATION */
+
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ return false;
+}
+
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
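Illustrative sketch (example_restore_ad() is a hypothetical, simplified helper): when a migration entry is turned back into a present entry, the stored A/D information is applied to the new mapping, mirroring the remove_migration_pmd() and PTE migration-restore hunks later in this series.

/* Hypothetical sketch, not part of this series. */
static inline pte_t example_restore_ad(pte_t pte, softleaf_t entry)
{
        if (!softleaf_is_migration_young(entry))
                pte = pte_mkold(pte);
        if (softleaf_is_migration_dirty(entry))
                pte = pte_mkdirty(pte);

        return pte;
}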
/**
* pte_is_marker() - Does the PTE entry encode a marker leaf entry?
* @pte: PTE entry.
return false;
}
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+/**
+ * pmd_is_device_private_entry() - Check if PMD contains a device private swap
+ * entry.
+ * @pmd: The PMD to check.
+ *
+ * Returns true if the PMD encodes a leaf entry that represents a device private
+ * page mapping. This is used for zone device private pages that have been
+ * migrated to device memory but still need special handling during various
+ * memory management operations.
+ *
+ * Return: true if the PMD contains a device private entry, otherwise false.
+ */
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return softleaf_is_device_private(softleaf_from_pmd(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return false;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/**
+ * pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
+ * @pmd: PMD entry.
+ *
+ * Returns: true if the PMD encodes a migration entry, otherwise false.
+ */
+static inline bool pmd_is_migration_entry(pmd_t pmd)
+{
+ return softleaf_is_migration(softleaf_from_pmd(pmd));
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * @pmd: PMD entry.
+ *
+ * PMD leaf entries are valid only if they are device private or migration
+ * entries. This function checks that a PMD leaf entry is valid in this
+ * respect.
+ *
+ * Returns: true if the PMD entry is a valid leaf entry, otherwise false.
+ */
+static inline bool pmd_is_valid_softleaf(pmd_t pmd)
+{
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ /* Only device private, migration entries valid for PMD. */
+ return softleaf_is_device_private(entry) ||
+ softleaf_is_migration(entry);
+}
+
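Illustrative sketch (example_handle_nonpresent_pmd() is a hypothetical helper): the PMD-level wrappers let callers dispatch on a non-present PMD without decoding the leaf entry themselves, as the huge-memory fault and split paths later in this series do.

/* Hypothetical sketch, not part of this series. */
static inline bool example_handle_nonpresent_pmd(pmd_t pmd)
{
        /* Only device-private and migration entries are valid at PMD level. */
        if (!pmd_is_valid_softleaf(pmd))
                return false;

        if (pmd_is_device_private_entry(pmd))
                return true;    /* device-private THP mapping */

        return pmd_is_migration_entry(pmd);
}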
#endif /* CONFIG_MMU */
#endif /* _LINUX_LEAFOPS_H */
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
return entry;
}
-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
- if (migration_entry_supports_ad())
- return swp_offset(entry) & SWP_MIG_YOUNG;
- /* Keep the old behavior of aging page after migration */
- return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return entry;
}
-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
- if (migration_entry_supports_ad())
- return swp_offset(entry) & SWP_MIG_DIRTY;
- /* Keep the old behavior of clean page after migration */
- return false;
-}
-
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
return entry;
}
-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
- return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
return entry;
}
-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
- return false;
-}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- swp_entry_t arch_entry;
-
- if (pmd_swp_soft_dirty(pmd))
- pmd = pmd_swp_clear_soft_dirty(pmd);
- if (pmd_swp_uffd_wp(pmd))
- pmd = pmd_swp_clear_uffd_wp(pmd);
- arch_entry = __pmd_to_swp_entry(pmd);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
swp_entry_t arch_entry;
return __swp_entry_to_pmd(arch_entry);
}
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- swp_entry_t entry;
-
- if (pmd_present(pmd))
- return 0;
-
- entry = pmd_to_swp_entry(pmd);
- return is_migration_entry(entry);
-}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
- struct page *page)
-{
- BUILD_BUG();
-}
-
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
struct page *new)
{
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- return swp_entry(0, 0);
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
return __pmd(0);
}
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- return 0;
-}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
-
-/**
- * is_pmd_device_private_entry() - Check if PMD contains a device private swap entry
- * @pmd: The PMD to check
- *
- * Returns true if the PMD contains a swap entry that represents a device private
- * page mapping. This is used for zone device private pages that have been
- * swapped out but still need special handling during various memory management
- * operations.
- *
- * Return: 1 if PMD contains device private entry, 0 otherwise
- */
-static inline int is_pmd_device_private_entry(pmd_t pmd)
-{
- swp_entry_t entry;
-
- if (pmd_present(pmd))
- return 0;
-
- entry = pmd_to_swp_entry(pmd);
- return is_device_private_entry(entry);
-}
-
-#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
-
-static inline int is_pmd_device_private_entry(pmd_t pmd)
-{
- return 0;
-}
-
-#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
-
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
}
-static inline int is_pmd_non_present_folio_entry(pmd_t pmd)
-{
- return is_pmd_migration_entry(pmd) || is_pmd_device_private_entry(pmd);
-}
-
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include "../internal.h"
#include "ops-common.h"
if (likely(pte_present(pteval)))
pfn = pte_pfn(pteval);
else
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
folio = damon_get_folio(pfn);
if (!folio)
if (likely(pmd_present(pmdval)))
pfn = pmd_pfn(pmdval);
else
- pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+ pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
folio = damon_get_folio(pfn);
if (!folio)
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
* This follows the same logic as folio_wait_bit_common() so see the comments
* there.
*/
-void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl)
{
struct wait_page_queue wait_page;
unsigned long pflags;
bool in_thrashing;
wait_queue_head_t *q;
- struct folio *folio = pfn_swap_entry_folio(entry);
+ struct folio *folio = softleaf_to_folio(entry);
q = folio_waitqueue(folio);
if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
unsigned long npages = (end - start) >> PAGE_SHIFT;
+ const softleaf_t entry = softleaf_from_pmd(pmd);
unsigned long addr = start;
- swp_entry_t entry = pmd_to_swp_entry(pmd);
unsigned int required_fault;
- if (is_device_private_entry(entry) &&
- pfn_swap_entry_folio(entry)->pgmap->owner ==
+ if (softleaf_is_device_private(entry) &&
+ softleaf_to_folio(entry)->pgmap->owner ==
range->dev_private_owner) {
unsigned long cpu_flags = HMM_PFN_VALID |
hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
- unsigned long pfn = swp_offset_pfn(entry);
+ unsigned long pfn = softleaf_to_pfn(entry);
unsigned long i;
- if (is_writable_device_private_entry(entry))
+ if (softleaf_is_device_private_write(entry))
cpu_flags |= HMM_PFN_WRITE;
/*
required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
npages, 0);
if (required_fault) {
- if (is_device_private_entry(entry))
+ if (softleaf_is_device_private(entry))
return hmm_vma_fault(addr, end, required_fault, walk);
else
return -EFAULT;
if (pmd_none(pmd))
return hmm_vma_walk_hole(start, end, -1, walk);
- if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
+ if (thp_migration_supported() && pmd_is_migration_entry(pmd)) {
if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
hmm_vma_walk->last = addr;
pmd_migration_entry_wait(walk->mm, pmdp);
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
spinlock_t *ptl;
- swp_entry_t swp_entry;
+ softleaf_t entry;
struct page *page;
struct folio *folio;
return 0;
}
- swp_entry = pmd_to_swp_entry(vmf->orig_pmd);
- page = pfn_swap_entry_to_page(swp_entry);
+ entry = softleaf_from_pmd(vmf->orig_pmd);
+ page = softleaf_to_page(entry);
folio = page_folio(page);
vmf->page = page;
vmf->pte = NULL;
struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pmd_t pmd, pgtable_t pgtable)
{
- swp_entry_t entry = pmd_to_swp_entry(pmd);
+ softleaf_t entry = softleaf_from_pmd(pmd);
struct folio *src_folio;
- VM_WARN_ON(!is_pmd_non_present_folio_entry(pmd));
+ VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
- if (is_writable_migration_entry(entry) ||
- is_readable_exclusive_migration_entry(entry)) {
+ if (softleaf_is_migration_write(entry) ||
+ softleaf_is_migration_read_exclusive(entry)) {
entry = make_readable_migration_entry(swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*src_pmd))
if (pmd_swp_uffd_wp(*src_pmd))
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
- } else if (is_device_private_entry(entry)) {
+ } else if (softleaf_is_device_private(entry)) {
/*
* For device private entries, since there are no
* read exclusive entries, writable = !readable
*/
- if (is_writable_device_private_entry(entry)) {
+ if (softleaf_is_device_private_write(entry)) {
entry = make_readable_device_private_entry(swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
- src_folio = pfn_swap_entry_folio(entry);
+ src_folio = softleaf_to_folio(entry);
VM_WARN_ON(!folio_test_large(src_folio));
folio_get(src_folio);
if (unlikely(!pmd_present(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(orig_pmd));
+ !pmd_is_migration_entry(orig_pmd));
goto out;
}
folio_remove_rmap_pmd(folio, page, vma);
WARN_ON_ONCE(folio_mapcount(folio) < 0);
VM_BUG_ON_PAGE(!PageHead(page), page);
- } else if (is_pmd_non_present_folio_entry(orig_pmd)) {
- swp_entry_t entry;
+ } else if (pmd_is_valid_softleaf(orig_pmd)) {
+ const softleaf_t entry = softleaf_from_pmd(orig_pmd);
- entry = pmd_to_swp_entry(orig_pmd);
- folio = pfn_swap_entry_folio(entry);
+ folio = softleaf_to_folio(entry);
flush_needed = 0;
if (!thp_migration_supported())
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
- if (unlikely(is_pmd_migration_entry(pmd)))
+ if (unlikely(pmd_is_migration_entry(pmd)))
pmd = pmd_swp_mksoft_dirty(pmd);
else if (pmd_present(pmd))
pmd = pmd_mksoft_dirty(pmd);
unsigned long addr, pmd_t *pmd, bool uffd_wp,
bool uffd_wp_resolve)
{
- swp_entry_t entry = pmd_to_swp_entry(*pmd);
- struct folio *folio = pfn_swap_entry_folio(entry);
+ softleaf_t entry = softleaf_from_pmd(*pmd);
+ const struct folio *folio = softleaf_to_folio(entry);
pmd_t newpmd;
- VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd));
- if (is_writable_migration_entry(entry)) {
+ VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
+ if (softleaf_is_migration_write(entry)) {
/*
* A protection check is difficult so
* just be safe and disable write
newpmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pmd))
newpmd = pmd_swp_mksoft_dirty(newpmd);
- } else if (is_writable_device_private_entry(entry)) {
+ } else if (softleaf_is_device_private_write(entry)) {
entry = make_readable_device_private_entry(swp_offset(entry));
newpmd = swp_entry_to_pmd(entry);
} else {
if (!pmd_trans_huge(src_pmdval)) {
spin_unlock(src_ptl);
- if (is_pmd_migration_entry(src_pmdval)) {
+ if (pmd_is_migration_entry(src_pmdval)) {
pmd_migration_entry_wait(mm, &src_pmdval);
return -EAGAIN;
}
unsigned long addr;
pte_t *pte;
int i;
- swp_entry_t entry;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
- VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd) && !pmd_trans_huge(*pmd));
+ VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
count_vm_event(THP_SPLIT_PMD);
zap_deposited_table(mm, pmd);
if (!vma_is_dax(vma) && vma_is_special_huge(vma))
return;
- if (unlikely(is_pmd_migration_entry(old_pmd))) {
- swp_entry_t entry;
+ if (unlikely(pmd_is_migration_entry(old_pmd))) {
+ const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
- entry = pmd_to_swp_entry(old_pmd);
- folio = pfn_swap_entry_folio(entry);
+ folio = softleaf_to_folio(old_entry);
} else if (is_huge_zero_pmd(old_pmd)) {
return;
} else {
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
- if (is_pmd_migration_entry(*pmd)) {
+ if (pmd_is_migration_entry(*pmd)) {
+ softleaf_t entry;
+
old_pmd = *pmd;
- entry = pmd_to_swp_entry(old_pmd);
- page = pfn_swap_entry_to_page(entry);
+ entry = softleaf_from_pmd(old_pmd);
+ page = softleaf_to_page(entry);
folio = page_folio(page);
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
- write = is_writable_migration_entry(entry);
+ write = softleaf_is_migration_write(entry);
if (PageAnon(page))
- anon_exclusive = is_readable_exclusive_migration_entry(entry);
- young = is_migration_entry_young(entry);
- dirty = is_migration_entry_dirty(entry);
- } else if (is_pmd_device_private_entry(*pmd)) {
+ anon_exclusive = softleaf_is_migration_read_exclusive(entry);
+ young = softleaf_is_migration_young(entry);
+ dirty = softleaf_is_migration_dirty(entry);
+ } else if (pmd_is_device_private_entry(*pmd)) {
+ softleaf_t entry;
+
old_pmd = *pmd;
- entry = pmd_to_swp_entry(old_pmd);
- page = pfn_swap_entry_to_page(entry);
+ entry = softleaf_from_pmd(old_pmd);
+ page = softleaf_to_page(entry);
folio = page_folio(page);
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
- write = is_writable_device_private_entry(entry);
+ write = softleaf_is_device_private_write(entry);
anon_exclusive = PageAnonExclusive(page);
/*
* Note that NUMA hinting access restrictions are not transferred to
* avoid any possibility of altering permissions across VMAs.
*/
- if (freeze || is_pmd_migration_entry(old_pmd)) {
+ if (freeze || pmd_is_migration_entry(old_pmd)) {
pte_t entry;
swp_entry_t swp_entry;
VM_WARN_ON(!pte_none(ptep_get(pte + i)));
set_pte_at(mm, addr, pte + i, entry);
}
- } else if (is_pmd_device_private_entry(old_pmd)) {
+ } else if (pmd_is_device_private_entry(old_pmd)) {
pte_t entry;
swp_entry_t swp_entry;
}
pte_unmap(pte);
- if (!is_pmd_migration_entry(*pmd))
+ if (!pmd_is_migration_entry(*pmd))
folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
put_page(page);
pmd_t *pmd, bool freeze)
{
VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
- if (pmd_trans_huge(*pmd) || is_pmd_non_present_folio_entry(*pmd))
+ if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
__split_huge_pmd_locked(vma, pmd, address, freeze);
}
unsigned long address = pvmw->address;
unsigned long haddr = address & HPAGE_PMD_MASK;
pmd_t pmde;
- swp_entry_t entry;
+ softleaf_t entry;
if (!(pvmw->pmd && !pvmw->pte))
return;
- entry = pmd_to_swp_entry(*pvmw->pmd);
+ entry = softleaf_from_pmd(*pvmw->pmd);
folio_get(folio);
pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
- if (is_writable_migration_entry(entry))
+ if (softleaf_is_migration_write(entry))
pmde = pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_mkuffd_wp(pmde);
- if (!is_migration_entry_young(entry))
+ if (!softleaf_is_migration_young(entry))
pmde = pmd_mkold(pmde);
/* NOTE: this may contain setting soft-dirty on some archs */
- if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+ if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
pmde = pmd_mkdirty(pmde);
if (folio_is_device_private(folio)) {
if (folio_test_anon(folio)) {
rmap_t rmap_flags = RMAP_NONE;
- if (!is_readable_migration_entry(entry))
+ if (!softleaf_is_migration_read(entry))
rmap_flags |= RMAP_EXCLUSIVE;
folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate_wait.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/dax.h>
#include <linux/ksm.h>
* collapse it. Migration success or failure will eventually end
* up with a present PMD mapping a folio again.
*/
- if (is_pmd_migration_entry(pmde))
+ if (pmd_is_migration_entry(pmde))
return SCAN_PMD_MAPPED;
if (!pmd_present(pmde))
return SCAN_PMD_NULL;
if (unlikely(!pmd_present(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(orig_pmd));
+ !pmd_is_migration_entry(orig_pmd));
goto huge_unlock;
}
goto fallback;
if (unlikely(!pmd_present(vmf.orig_pmd))) {
- if (is_pmd_device_private_entry(vmf.orig_pmd))
+ if (pmd_is_device_private_entry(vmf.orig_pmd))
return do_huge_pmd_device_private(&vmf);
- if (is_pmd_migration_entry(vmf.orig_pmd))
+ if (pmd_is_migration_entry(vmf.orig_pmd))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/gcd.h>
#include <asm/tlbflush.h>
struct folio *folio;
struct queue_pages *qp = walk->private;
- if (unlikely(is_pmd_migration_entry(*pmd))) {
+ if (unlikely(pmd_is_migration_entry(*pmd))) {
qp->nr_failed++;
return;
}
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
rmap_t rmap_flags = RMAP_NONE;
pte_t old_pte;
pte_t pte;
- swp_entry_t entry;
+ softleaf_t entry;
struct page *new;
unsigned long idx = 0;
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
- entry = pte_to_swp_entry(old_pte);
- if (!is_migration_entry_young(entry))
+ entry = softleaf_from_pte(old_pte);
+ if (!softleaf_is_migration_young(entry))
pte = pte_mkold(pte);
- if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+ if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
pte = pte_mkdirty(pte);
if (pte_swp_soft_dirty(old_pte))
pte = pte_mksoft_dirty(pte);
else
pte = pte_clear_soft_dirty(pte);
- if (is_writable_migration_entry(entry))
+ if (softleaf_is_migration_write(entry))
pte = pte_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(old_pte))
pte = pte_mkuffd_wp(pte);
- if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
+ if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
rmap_flags |= RMAP_EXCLUSIVE;
if (unlikely(is_device_private_page(new))) {
else
entry = make_readable_device_private_entry(
page_to_pfn(new));
- pte = swp_entry_to_pte(entry);
+ pte = softleaf_to_pte(entry);
if (pte_swp_soft_dirty(old_pte))
pte = pte_swp_mksoft_dirty(pte);
if (pte_swp_uffd_wp(old_pte))
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
- if (!is_pmd_migration_entry(*pmd))
+ if (!pmd_is_migration_entry(*pmd))
goto unlock;
- migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
+ migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
return;
unlock:
spin_unlock(ptl);
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
struct folio *folio;
struct migrate_vma *migrate = walk->private;
spinlock_t *ptl;
- swp_entry_t entry;
int ret;
unsigned long write = 0;
if (pmd_write(*pmdp))
write = MIGRATE_PFN_WRITE;
} else if (!pmd_present(*pmdp)) {
- entry = pmd_to_swp_entry(*pmdp);
- folio = pfn_swap_entry_folio(entry);
+ const softleaf_t entry = softleaf_from_pmd(*pmdp);
- if (!is_device_private_entry(entry) ||
+ folio = softleaf_to_folio(entry);
+
+ if (!softleaf_is_device_private(entry) ||
!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
(folio->pgmap->owner != migrate->pgmap_owner)) {
spin_unlock(ptl);
return migrate_vma_collect_skip(start, end, walk);
}
- if (is_migration_entry(entry)) {
+ if (softleaf_is_migration(entry)) {
migration_entry_wait_on_locked(entry, ptl);
spin_unlock(ptl);
return -EAGAIN;
}
- if (is_writable_device_private_entry(entry))
+ if (softleaf_is_device_private_write(entry))
write = MIGRATE_PFN_WRITE;
} else {
spin_unlock(ptl);
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt
EXPORT_SYMBOL(__page_table_check_pud_clear);
/* Whether the swap entry cached writable information */
-static inline bool swap_cached_writable(swp_entry_t entry)
+static inline bool softleaf_cached_writable(softleaf_t entry)
{
- return is_writable_device_private_entry(entry) ||
- is_writable_migration_entry(entry);
+ return softleaf_is_device_private_write(entry) ||
+ softleaf_is_migration_write(entry);
}
static void page_table_check_pte_flags(pte_t pte)
if (pte_present(pte)) {
WARN_ON_ONCE(pte_uffd_wp(pte) && pte_write(pte));
} else if (pte_swp_uffd_wp(pte)) {
- const swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
- WARN_ON_ONCE(swap_cached_writable(entry));
+ WARN_ON_ONCE(softleaf_cached_writable(entry));
}
}
if (pmd_uffd_wp(pmd))
WARN_ON_ONCE(pmd_write(pmd));
} else if (pmd_swp_uffd_wp(pmd)) {
- swp_entry_t entry = pmd_to_swp_entry(pmd);
+ const softleaf_t entry = softleaf_from_pmd(pmd);
- WARN_ON_ONCE(swap_cached_writable(entry));
+ WARN_ON_ONCE(softleaf_cached_writable(entry));
}
}
*/
pmde = pmdp_get_lockless(pvmw->pmd);
- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+ if (pmd_trans_huge(pmde) || pmd_is_migration_entry(pmde)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
pmde = *pvmw->pmd;
if (!pmd_present(pmde)) {
- swp_entry_t entry;
+ softleaf_t entry;
if (!thp_migration_supported() ||
!(pvmw->flags & PVMW_MIGRATION))
return not_found(pvmw);
- entry = pmd_to_swp_entry(pmde);
- if (!is_migration_entry(entry) ||
- !check_pmd(swp_offset_pfn(entry), pvmw))
+ entry = softleaf_from_pmd(pmde);
+
+ if (!softleaf_is_migration(entry) ||
+ !check_pmd(softleaf_to_pfn(entry), pvmw))
return not_found(pvmw);
return true;
}
* cannot return prematurely, while zap_huge_pmd() has
* cleared *pmd but not decremented compound_mapcount().
*/
- swp_entry_t entry = pmd_to_swp_entry(pmde);
+ const softleaf_t entry = softleaf_from_pmd(pmde);
- if (is_device_private_entry(entry)) {
+ if (softleaf_is_device_private(entry)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
return true;
}
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <asm/tlbflush.h>
goto found;
}
} else if ((flags & FW_MIGRATION) &&
- is_pmd_migration_entry(pmd)) {
- swp_entry_t entry = pmd_to_swp_entry(pmd);
+ pmd_is_migration_entry(pmd)) {
+ const softleaf_t entry = softleaf_from_pmd(pmd);
- page = pfn_swap_entry_to_page(entry);
+ page = softleaf_to_page(entry);
expose_page = false;
goto found;
}
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
if (likely(pmd_present(pmdval)))
pfn = pmd_pfn(pmdval);
else
- pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+ pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
subpage = folio_page(folio, pfn - folio_pfn(folio));