return max_nr_accesses;
}
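+/*
+ * Check whether a young type filter matches the given folio, using the page
+ * table entry that the caller already holds.
+ */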
+static bool damos_va_filter_young_match(struct damos_filter *filter,
+ struct folio *folio, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, pmd_t *pmdp)
+{
+ bool young = false;
+
+ if (ptep)
+ young = pte_young(ptep_get(ptep));
+ else if (pmdp)
+ young = pmd_young(pmdp_get(pmdp));
+
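+	/* The folio idle flag and secondary MMUs may also report accesses. */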
+ young = young || !folio_test_idle(folio) ||
+ mmu_notifier_test_young(vma->vm_mm, addr);
+
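+	/* Reset the accessed state, as the folio based young filter does. */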
+ if (young && ptep)
+ damon_ptep_mkold(ptep, vma, addr);
+ else if (young && pmdp)
+ damon_pmdp_mkold(pmdp, vma, addr);
+
+ return young == filter->matching;
+}
+
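+/*
+ * Return whether the given folio should be skipped, i.e. filtered out, by
+ * the ops layer filters of the given scheme.
+ */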
+static bool damos_va_filter_out(struct damos *scheme, struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pmd_t *pmdp)
+{
+ struct damos_filter *filter;
+ bool matched;
+
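+	/* If a core layer filter explicitly allowed this region, skip ops filters. */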
+ if (scheme->core_filters_allowed)
+ return false;
+
+ damos_for_each_ops_filter(filter, scheme) {
+		/*
+		 * damos_folio_filter_match() checks the young filter by doing
+		 * an rmap walk on the folio to find its page table entries.
+		 * Since this is the vaddr scheme, however, we already have the
+		 * page table entries at hand, so use them directly instead.
+		 */
+ if (filter->type == DAMOS_FILTER_TYPE_YOUNG)
+ matched = damos_va_filter_young_match(filter, folio,
+ vma, addr, ptep, pmdp);
+ else
+ matched = damos_folio_filter_match(filter, folio);
+
+ if (matched)
+ return !filter->allow;
+ }
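+	/* No ops filter matched; fall back to the scheme's default decision. */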
+ return scheme->ops_filters_default_reject;
+}
+
struct damos_va_migrate_private {
struct list_head *migration_lists;
- struct damos_migrate_dests *dests;
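+	/* Scheme being applied; provides both migration dests and ops filters. */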
+ struct damos *scheme;
};
static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
struct damos_va_migrate_private *priv = walk->private;
struct list_head *migration_lists = priv->migration_lists;
- struct damos_migrate_dests *dests = priv->dests;
+ struct damos *s = priv->scheme;
+ struct damos_migrate_dests *dests = &s->migrate_dests;
struct folio *folio;
spinlock_t *ptl;
pmd_t pmde;
if (!folio)
goto unlock;
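+	/* Skip folios that this scheme's ops filters reject. */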
+ if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+ goto put_folio;
+
damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
migration_lists);
+put_folio:
folio_put(folio);
unlock:
spin_unlock(ptl);
static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
struct damos_va_migrate_private *priv = walk->private;
struct list_head *migration_lists = priv->migration_lists;
- struct damos_migrate_dests *dests = priv->dests;
+ struct damos *s = priv->scheme;
+ struct damos_migrate_dests *dests = &s->migrate_dests;
struct folio *folio;
pte_t ptent;
if (!folio)
return 0;
+ if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+ goto put_folio;
+
damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
migration_lists);
+put_folio:
folio_put(folio);
return 0;
}
use_target_nid = dests->nr_dests == 0;
nr_dests = use_target_nid ? 1 : dests->nr_dests;
- priv.dests = dests;
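+	/* The walk callbacks take dests and ops filters from the scheme. */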
+ priv.scheme = s;
priv.migration_lists = kmalloc_array(nr_dests,
sizeof(*priv.migration_lists), GFP_KERNEL);
if (!priv.migration_lists)