* @get_scheme_score: Get the score of a region for a scheme.
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
* users should register the low level operations for their target address
* space and usage via &struct damon_ctx.ops. @apply_scheme should also
* report how many bytes of the region passed the filters (&struct
* damos_filter) that it handled by itself, via @sz_filter_passed.
* @target_valid should check whether the target is still valid for the
* monitoring.
+ * @cleanup_target is called just before each target is deallocated.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
	unsigned long (*apply_scheme)(struct damon_ctx *context,
			struct damon_target *t, struct damon_region *r,
			struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
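
/*
 * Illustrative sketch only, not part of this patch: one way an operations
 * set could wire up the new callback.  The names example_cleanup_target and
 * example_ops, and the pid handling, are assumptions for illustration.
 */
static void example_cleanup_target(struct damon_target *t)
{
	/* drop whatever per-target resource this ops set attached, e.g. a pid */
	if (t->pid)
		put_pid(t->pid);
}

static struct damon_operations example_ops = {
	/* ... other callbacks elided ... */
	.cleanup_target = example_cleanup_target,
};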
* Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
*
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
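
/*
 * Caller-side sketch (an assumption, not from this patch): with the new
 * signature, pass the owning context so its cleanup_target callback runs,
 * or NULL for a target that was never attached to a context.
 */
static inline void example_drop_target(struct damon_ctx *ctx,
		struct damon_target *t)
{
	damon_destroy_target(t, ctx);	/* runs ctx->ops.cleanup_target if set */
}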
kfree(t);
}
-void damon_destroy_target(struct damon_target *t)
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
+	/* give the operations set a chance to release per-target resources */
+	if (ctx && ctx->ops.cleanup_target)
+		ctx->ops.cleanup_target(t);
+
damon_del_target(t);
damon_free_target(t);
}
struct damon_target *t, *next_t;
damon_for_each_target_safe(t, next_t, ctx)
- damon_destroy_target(t);
+ damon_destroy_target(t, ctx);
}
void damon_destroy_ctx(struct damon_ctx *ctx)
if (damon_target_has_pid(dst))
put_pid(dst_target->pid);
- damon_destroy_target(dst_target);
+ damon_destroy_target(dst_target, dst);
damon_for_each_scheme(s, dst) {
if (s->quota.charge_target_from == dst_target) {
s->quota.charge_target_from = NULL;
err = damon_commit_target(new_target, false,
src_target, damon_target_has_pid(src));
if (err) {
-			damon_destroy_target(new_target);
+			/* new_target was never committed; NULL skips cleanup_target() */
+			damon_destroy_target(new_target, NULL);
return err;
}
damon_add_target(dst, new_target);
damon_for_each_target_safe(t, next, ctx) {
if (has_pid)
put_pid(t->pid);
- damon_destroy_target(t);
+ damon_destroy_target(t, ctx);
}
}
damon_for_each_target_safe(t, next, ctx) {
put_pid(t->pid);
- damon_destroy_target(t);
+ damon_destroy_target(t, ctx);
}
}
damon_add_target(c, t);
KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
- damon_destroy_target(t);
+ damon_destroy_target(t, c);
KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_destroy_ctx(c);
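
/*
 * Illustrative KUnit sketch, not part of this patch: check that destroying
 * a target through its context invokes cleanup_target exactly once.  The
 * test and helper names below are assumptions.
 */
static int example_nr_cleanups;

static void example_count_cleanups(struct damon_target *t)
{
	example_nr_cleanups++;
}

static void damon_test_cleanup_target_sketch(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t = damon_new_target();

	c->ops.cleanup_target = example_count_cleanups;
	damon_add_target(c, t);
	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 1, example_nr_cleanups);
	damon_destroy_ctx(c);
}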
KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
}
- damon_destroy_target(t);
+ damon_destroy_target(t, NULL);
}
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
- damon_destroy_target(t);
+ damon_destroy_target(t, NULL);
}
/*