]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/damon/lru_sort: support active:inactive memory ratio based auto-tuning
authorSeongJae Park <sj@kernel.org>
Tue, 13 Jan 2026 15:27:13 +0000 (07:27 -0800)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 27 Jan 2026 04:02:31 +0000 (20:02 -0800)
Doing DAMOS_LRU_[DE]PRIO with DAMOS_QUOTA_[IN]ACTIVE_MEM_BP based quota
auto-tuning can be easy and intuitive, compared to manually tuning the
[de]prioritization target access pattern thresholds.  For example,
users can ask DAMON to "find hot/cold pages and activate/deactivate those,
aiming at a 50:50 active:inactive memory size ratio."  But DAMON_LRU_SORT has no
interface to do that.  Add a module parameter for setting the target
ratio.

[sj@kernel.org: add inactive mem ratio quota goal to cold_scheme]
Link: https://lkml.kernel.org/r/20260114055308.79884-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20260113152717.70459-9-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/lru_sort.c

index f1fdb37b9b47c8d8fe93df1aa0fae6398db826b6..8af97642912a5d869300a43b44bc8d954bee1bcf 100644 (file)
@@ -41,6 +41,20 @@ static bool enabled __read_mostly;
 static bool commit_inputs __read_mostly;
 module_param(commit_inputs, bool, 0600);
 
+/*
+ * Desired active to [in]active memory ratio in bp (1/10,000).
+ *
+ * While keeping the caps that are set by other quotas, DAMON_LRU_SORT
+ * automatically increases and decreases the effective level of the quota,
+ * aiming for LRU [de]prioritizations of the hot and cold memory that result
+ * in this active to [in]active memory ratio.  A value of zero disables this
+ * auto-tuning feature.
+ *
+ * Disabled by default.
+ */
+static unsigned long active_mem_bp __read_mostly;
+module_param(active_mem_bp, ulong, 0600);
+
 /*
  * Filter [non-]young pages accordingly for LRU [de]prioritizations.
  *
@@ -208,6 +222,26 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
        return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }
 
+static int damon_lru_sort_add_quota_goals(struct damos *hot_scheme,
+               struct damos *cold_scheme)
+{
+       struct damos_quota_goal *goal;
+
+       if (!active_mem_bp)
+               return 0;
+       goal = damos_new_quota_goal(DAMOS_QUOTA_ACTIVE_MEM_BP, active_mem_bp);
+       if (!goal)
+               return -ENOMEM;
+       damos_add_quota_goal(&hot_scheme->quota, goal);
+       /* aim 0.02 % (2 bp) goal conflict, to keep ping pong small */
+       goal = damos_new_quota_goal(DAMOS_QUOTA_INACTIVE_MEM_BP,
+                       10000 - active_mem_bp + 2);
+       if (!goal)
+               return -ENOMEM;
+       damos_add_quota_goal(&cold_scheme->quota, goal);
+       return 0;
+}
+
 static int damon_lru_sort_add_filters(struct damos *hot_scheme,
                struct damos *cold_scheme)
 {
@@ -277,6 +311,9 @@ static int damon_lru_sort_apply_parameters(void)
        damon_set_schemes(param_ctx, &hot_scheme, 1);
        damon_add_scheme(param_ctx, cold_scheme);
 
+       err = damon_lru_sort_add_quota_goals(hot_scheme, cold_scheme);
+       if (err)
+               goto out;
        err = damon_lru_sort_add_filters(hot_scheme, cold_scheme);
        if (err)
                goto out;