From: SeongJae Park <sj@kernel.org>
Date: Sat, 12 Jul 2025 19:50:11 +0000 (-0700)
Subject: mm/damon/core: add cleanup_target() ops callback
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7114bc5e01cf393e1fdc97e10399eb9451b6af45;p=thirdparty%2Flinux.git

mm/damon/core: add cleanup_target() ops callback

Some DAMON operation sets may need additional cleanup per target.  For
example, [f]vaddr needs to put the pid of each target.  Each user and the
core logic currently do that redundantly.  Add another DAMON ops callback
that will be used for doing such cleanups in the operations set layer.

[sj@kernel.org: add kernel-doc comment for damon_operations->cleanup_target]
  Link: https://lkml.kernel.org/r/20250715185239.89152-2-sj@kernel.org
[sj@kernel.org: remove damon_ctx->callback kernel-doc comment]
  Link: https://lkml.kernel.org/r/20250715185239.89152-3-sj@kernel.org
Link: https://lkml.kernel.org/r/20250712195016.151108-10-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/include/linux/damon.h b/include/linux/damon.h
index b83987275ff9d..8c765e36623aa 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -576,6 +576,7 @@ enum damon_ops_id {
  * @get_scheme_score:	Get the score of a region for a scheme.
  * @apply_scheme:	Apply a DAMON-based operation scheme.
  * @target_valid:	Determine if the target is valid.
+ * @cleanup_target:	Clean up each target before deallocation.
  * @cleanup:		Clean up the context.
  *
  * DAMON can be extended for various address spaces and usages.  For this,
@@ -608,6 +609,7 @@ enum damon_ops_id {
  * filters (&struct damos_filter) that handled by itself.
  * @target_valid should check whether the target is still valid for the
  * monitoring.
+ * @cleanup_target is called before the target will be deallocated.
  * @cleanup is called from @kdamond just before its termination.
  */
 struct damon_operations {
@@ -623,6 +625,7 @@ struct damon_operations {
 			struct damon_target *t, struct damon_region *r,
 			struct damos *scheme, unsigned long *sz_filter_passed);
 	bool (*target_valid)(struct damon_target *t);
+	void (*cleanup_target)(struct damon_target *t);
 	void (*cleanup)(struct damon_ctx *context);
 };
 
@@ -771,7 +774,6 @@ struct damon_attrs {
  * Accesses to other fields must be protected by themselves.
  *
  * @ops:	Set of monitoring operations for given use cases.
- * @callback:	Set of callbacks for monitoring events notifications.
  *
  * @adaptive_targets:	Head of monitoring targets (&damon_target) list.
  * @schemes:	Head of schemes (&damos) list.
@@ -933,7 +935,7 @@ struct damon_target *damon_new_target(void);
 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
 bool damon_targets_empty(struct damon_ctx *ctx);
 void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
 unsigned int damon_nr_regions(struct damon_target *t);
 
 struct damon_ctx *damon_new_ctx(void);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index b82a838b5a0e8..678c9b4e038cd 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -502,8 +502,12 @@ void damon_free_target(struct damon_target *t)
 	kfree(t);
 }
 
-void damon_destroy_target(struct damon_target *t)
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
 {
+
+	if (ctx && ctx->ops.cleanup_target)
+		ctx->ops.cleanup_target(t);
+
 	damon_del_target(t);
 	damon_free_target(t);
 }
@@ -551,7 +555,7 @@ static void damon_destroy_targets(struct damon_ctx *ctx)
 	struct damon_target *t, *next_t;
 
 	damon_for_each_target_safe(t, next_t, ctx)
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 }
 
 void damon_destroy_ctx(struct damon_ctx *ctx)
@@ -1137,7 +1141,7 @@ static int damon_commit_targets(
 
 		if (damon_target_has_pid(dst))
 			put_pid(dst_target->pid);
-		damon_destroy_target(dst_target);
+		damon_destroy_target(dst_target, dst);
 		damon_for_each_scheme(s, dst) {
 			if (s->quota.charge_target_from == dst_target) {
 				s->quota.charge_target_from = NULL;
@@ -1156,7 +1160,7 @@
 		err = damon_commit_target(new_target, false,
 				src_target, damon_target_has_pid(src));
 		if (err) {
-			damon_destroy_target(new_target);
+			damon_destroy_target(new_target, NULL);
 			return err;
 		}
 		damon_add_target(dst, new_target);
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index c0193de6fb9af..f2f9f756f5a2b 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1303,7 +1303,7 @@ static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
 
 	damon_for_each_target_safe(t, next, ctx) {
 		if (has_pid)
 			put_pid(t->pid);
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 	}
 }
@@ -1389,7 +1389,7 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 
 	damon_for_each_target_safe(t, next, ctx) {
 		put_pid(t->pid);
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 	}
 }
 
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index 298c67557fae4..dfedfff19940e 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -58,7 +58,7 @@ static void damon_test_target(struct kunit *test)
 	damon_add_target(c, t);
 	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
 
-	damon_destroy_target(t);
+	damon_destroy_target(t, c);
 	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
 
 	damon_destroy_ctx(c);
@@ -310,7 +310,7 @@ static void damon_test_set_regions(struct kunit *test)
 		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
 		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
 	}
-	damon_destroy_target(t);
+	damon_destroy_target(t, NULL);
 }
 
 static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index 7cd944266a928..d2b37ccf2cc06 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -149,7 +149,7 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
 	}
 
-	damon_destroy_target(t);
+	damon_destroy_target(t, NULL);
 }
 
 /*
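
For illustration, this is roughly how an operations set could use the new
callback.  The sketch below is not part of the patch: the helper name
damon_example_cleanup_target() and the damon_example_ops structure are
hypothetical, but they follow the [f]vaddr pattern the commit message
describes (dropping the per-target pid reference that the operations set
took when the target was set up).

	#include <linux/damon.h>
	#include <linux/pid.h>

	/* Hypothetical cleanup_target implementation for a pid-based ops set. */
	static void damon_example_cleanup_target(struct damon_target *t)
	{
		/* Drop the reference taken when the target's pid was resolved. */
		put_pid(t->pid);
	}

	static struct damon_operations damon_example_ops = {
		/* ... id, init, update and other callbacks elided ... */
		.cleanup_target = damon_example_cleanup_target,
	};

With such a callback registered, damon_destroy_target(t, ctx) runs the
operations-set cleanup just before freeing the target.  Note that this
patch only introduces the hook: as the diff shows, the sysfs and core
callers still do their own put_pid(), and removing that redundancy is
left to the operations sets that implement the callback.  Passing a NULL
ctx, as the kunit tests do for targets that were never associated with a
context, simply skips the cleanup.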