return max_nr_accesses;
}
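+/*
+ * Private data for damos_va_migrate()'s page table walk: one migration list
+ * per destination node, plus the scheme's configured destination set.
+ */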
+struct damos_va_migrate_private {
+ struct list_head *migration_lists;
+ struct damos_migrate_dests *dests;
+};
+
+/*
+ * Place the given folio in the migration_list corresponding to where the folio
+ * should be migrated.
+ *
+ * The algorithm used here is similar to weighted_interleave_nid().
+ */
+static void damos_va_migrate_dests_add(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct damos_migrate_dests *dests,
+ struct list_head *migration_lists)
+{
+ pgoff_t ilx;
+ int order;
+ unsigned int target;
+ unsigned int weight_total = 0;
+ int i;
+
+ /*
+ * If dests is empty, there is only one migration list corresponding
+ * to s->target_nid.
+ */
+ if (!dests->nr_dests) {
+ i = 0;
+ goto isolate;
+ }
+
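+ /*
+ * Interleave index of the folio within the mapping, counted in
+ * folio-sized chunks (cf. weighted interleaving in mempolicy).
+ */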
+ order = folio_order(folio);
+ ilx = vma->vm_pgoff >> order;
+ ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
+
+ for (i = 0; i < dests->nr_dests; i++)
+ weight_total += dests->weight_arr[i];
+
+ /* If the total weight is somehow 0, don't migrate at all */
+ if (!weight_total)
+ return;
+
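+ /*
+ * Pick the destination whose cumulative weight range contains
+ * ilx % weight_total, so folios are spread across the dests in
+ * proportion to their weights.
+ */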
+ target = ilx % weight_total;
+ for (i = 0; i < dests->nr_dests; i++) {
+ if (target < dests->weight_arr[i])
+ break;
+ target -= dests->weight_arr[i];
+ }
+
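+ /* Folios that cannot be isolated from the LRU are left in place. */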
+isolate:
+ if (!folio_isolate_lru(folio))
+ return;
+
+ list_add(&folio->lru, &migration_lists[i]);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- struct list_head *migration_list = walk->private;
+ struct damos_va_migrate_private *priv = walk->private;
+ struct list_head *migration_lists = priv->migration_lists;
+ struct damos_migrate_dests *dests = priv->dests;
struct folio *folio;
spinlock_t *ptl;
pmd_t pmde;
if (!folio)
goto unlock;
- if (!folio_isolate_lru(folio))
- goto put_folio;
-
- list_add(&folio->lru, migration_list);
+ damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+ migration_lists);
-put_folio:
folio_put(folio);
unlock:
spin_unlock(ptl);
static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- struct list_head *migration_list = walk->private;
+ struct damos_va_migrate_private *priv = walk->private;
+ struct list_head *migration_lists = priv->migration_lists;
+ struct damos_migrate_dests *dests = priv->dests;
struct folio *folio;
pte_t ptent;
if (!folio)
return 0;
- if (!folio_isolate_lru(folio))
- goto out;
+ damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+ migration_lists);
- list_add(&folio->lru, migration_list);
-
-out:
folio_put(folio);
return 0;
}
unsigned long *sz_filter_passed)
{
- LIST_HEAD(folio_list);
+ struct damos_va_migrate_private priv;
struct mm_struct *mm;
+ int nr_dests;
+ int nid;
+ bool use_target_nid;
unsigned long applied = 0;
+ struct damos_migrate_dests *dests = &s->migrate_dests;
struct mm_walk_ops walk_ops = {
.pmd_entry = damos_va_migrate_pmd_entry,
.pte_entry = damos_va_migrate_pte_entry,
.walk_lock = PGWALK_RDLOCK,
};
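+ /*
+ * If no destinations are configured, use a single migration list
+ * that will be migrated to s->target_nid.
+ */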
+ use_target_nid = dests->nr_dests == 0;
+ nr_dests = use_target_nid ? 1 : dests->nr_dests;
+ priv.dests = dests;
+ priv.migration_lists = kmalloc_array(nr_dests,
+ sizeof(*priv.migration_lists), GFP_KERNEL);
+ if (!priv.migration_lists)
+ return 0;
+
+ for (int i = 0; i < nr_dests; i++)
+ INIT_LIST_HEAD(&priv.migration_lists[i]);
+
mm = damon_get_mm(target);
if (!mm)
- return 0;
+ goto free_lists;
mmap_read_lock(mm);
- walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
+ walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
mmap_read_unlock(mm);
mmput(mm);
- applied = damon_migrate_pages(&folio_list, s->target_nid);
- cond_resched();
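+ /* Migrate each destination's list to its node, tallying pages moved. */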
+ for (int i = 0; i < nr_dests; i++) {
+ nid = use_target_nid ? s->target_nid : dests->node_id_arr[i];
+ applied += damon_migrate_pages(&priv.migration_lists[i], nid);
+ cond_resched();
+ }
+free_lists:
+ kfree(priv.migration_lists);
return applied * PAGE_SIZE;
}