// SPDX-License-Identifier: GPL-2.0
/*
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}
/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}
/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);

	return err;
}
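
/*
 * Illustrative sketch (not part of the upstream file): an operations set
 * provider would typically fill a &struct damon_operations with its id and
 * callbacks, register it once at init time, and a context user would later
 * bind it with damon_select_ops().  The initializer below is a placeholder
 * for this sketch.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		...
 *	};
 *
 *	err = damon_register_ops(&my_ops);
 *	...
 *	err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 */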
/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->last_nr_accesses = 0;

	return region;
}
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}
/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}
/**
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
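
/*
 * Illustrative sketch (not part of the upstream file): a caller that wants a
 * target to monitor two address ranges could describe them with an array of
 * &struct damon_addr_range and hand it to damon_set_regions(); the addresses
 * below are arbitrary.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000, .end = 0x140000000, },
 *		{ .start = 0x200000000, .end = 0x210000000, },
 *	};
 *	int err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */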
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}
/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}
static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}
/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
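
/*
 * Illustrative sketch (not part of the upstream file): the usual construction
 * sequence pairs a fresh context with an operations set and at least one
 * target before monitoring is started.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	if (!ctx || !t)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		return -EINVAL;
 *	damon_add_target(ctx, t);
 */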
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}
static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}
static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->nr_accesses_bp = r->nr_accesses * 10000;
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals that its access pattern
 * has been maintained.  For this reason, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of the given damon_ctx's regions
 * for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply forgive conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should be called while the kdamond is not running, or an
 * access check results aggregation is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
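
/*
 * Illustrative sketch (not part of the upstream file): tuning the intervals
 * and region counts goes through damon_set_attrs(), which also rescales the
 * existing monitoring results for the new intervals.  The numbers are
 * arbitrary (all intervals in microseconds).
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5 * 1000,
 *		.aggr_interval = 100 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err = damon_set_attrs(ctx, &attrs);
 */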
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}
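
/*
 * Illustrative sketch (not part of the upstream file): a scheme is built from
 * an access pattern, an action, optional quotas and watermarks, and then
 * installed on a context.  The pattern values are arbitrary and DAMOS_PAGEOUT
 * is used only as an example action.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = DAMON_MIN_REGION,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 50,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = { .reset_interval = 1000, };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT,
 *			0, &quota, &wmarks);
 *
 *	if (scheme)
 *		damon_set_schemes(ctx, &scheme, 1);
 */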
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}
/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);
/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}
/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}
/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
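
/*
 * Illustrative sketch (not part of the upstream file): once a context is
 * fully configured, monitoring is started and later stopped on an array of
 * contexts; a single-context caller just passes an array of one.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *	int err = damon_start(ctxs, 1, true);
 *
 *	...
 *
 *	if (!err)
 *		err = damon_stop(ctxs, 1);
 */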
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}
/**
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If a quota of a scheme has exceeded in a quota charge window, the scheme's
 * action would be applied to only a part of the target access pattern
 * fulfilling regions.  To avoid applying the scheme action to only already
 * applied regions, DAMON skips applying the scheme action to the regions that
 * were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMON action only to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}
static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}
static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		break;
	}

	return matched == filter->matching;
}
static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as DAMON sysfs
	 * implies with 'nr_contexts' file.  Nevertheless, only a single
	 * context per kdamond is supported for now.  So, we can simply use
	 * '0' context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err) {
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		}
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}
/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
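
/*
 * Worked example (not part of the upstream file) of the estimation above,
 * with arbitrary numbers: if 10 MiB were charged over a total of one second
 * (10^9 ns) of apply time, the estimated throughput is
 * 10485760 * 1000000 / 1000000000 = 10485 bytes per millisecond, so a time
 * quota of quota->ms == 100 yields an esz of roughly 1 MiB, further capped by
 * quota->sz when that is smaller.
 */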
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;

		s->next_apply_sis +=
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}
/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}
/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring needs to stop.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}
static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		return global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
	default:
		break;
	}
	return -EINVAL;
}
/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}
/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}
static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
	}
}
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, new values set by damon_set_attrs()
		 * are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		 */
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}
/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}
/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
			!damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
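
/*
 * Illustrative sketch (not part of the upstream file): physical address space
 * monitoring users often pass zeroed @start/@end so that the biggest
 * 'System RAM' resource is picked automatically and reported back.
 *
 *	unsigned long start = 0, end = 0;
 *	int err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *
 *	if (!err)
 *		pr_debug("monitoring [%lx, %lx)\n", start, end);
 */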
/**
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.  This
 * function implements a lightweight pseudo moving sum function that doesn't
 * keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and we
 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values.  Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got value 5 for
 * each of the last ten times.  Based on the assumption, when the next value is
 * measured, it drops the assumed past value, 5, from the current sum, and adds
 * the new value to get the updated pseudo-moving average.
 *
 * This means the value could have errors, but the errors will disappear for
 * every @len_window aligned calls.  For example, if @len_window is 10, the
 * pseudo moving sum with 11th value to 19th value would have an error.  But
 * the sum with 20th value will not have the error.
 *
 * Return: Pseudo-moving average after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
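
/*
 * Worked example (not part of the upstream file) matching the description
 * above: with len_window == 10, nomvsum == 50 and mvsum == 60, a new value of
 * 20 gives 60 - 50 / 10 + 20 == 75, i.e. the assumed past per-interval value
 * of 5 is dropped and the new measurement is added.
 */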
/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}
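
/*
 * Illustrative sketch (not part of the upstream file): an operations set's
 * check_accesses implementation would typically call this once per region per
 * sampling interval, e.g.:
 *
 *	damon_update_region_access_rate(r, my_ops_region_was_accessed(r),
 *			&ctx->attrs);
 *
 * where my_ops_region_was_accessed() is a placeholder for the operations
 * set's own accessed-bit check.
 */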
static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"