1 // SPDX-License-Identifier: GPL-2.0
3 * DAMON sysfs Interface
5 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
9 #include <linux/sched.h>
10 #include <linux/slab.h>
12 #include "sysfs-common.h"
15 * init region directory
18 struct damon_sysfs_region
{
20 struct damon_addr_range ar
;
23 static struct damon_sysfs_region
*damon_sysfs_region_alloc(void)
25 return kzalloc(sizeof(struct damon_sysfs_region
), GFP_KERNEL
);
28 static ssize_t
start_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
31 struct damon_sysfs_region
*region
= container_of(kobj
,
32 struct damon_sysfs_region
, kobj
);
34 return sysfs_emit(buf
, "%lu\n", region
->ar
.start
);
37 static ssize_t
start_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
38 const char *buf
, size_t count
)
40 struct damon_sysfs_region
*region
= container_of(kobj
,
41 struct damon_sysfs_region
, kobj
);
42 int err
= kstrtoul(buf
, 0, ®ion
->ar
.start
);
44 return err
? err
: count
;
47 static ssize_t
end_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
50 struct damon_sysfs_region
*region
= container_of(kobj
,
51 struct damon_sysfs_region
, kobj
);
53 return sysfs_emit(buf
, "%lu\n", region
->ar
.end
);
56 static ssize_t
end_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
57 const char *buf
, size_t count
)
59 struct damon_sysfs_region
*region
= container_of(kobj
,
60 struct damon_sysfs_region
, kobj
);
61 int err
= kstrtoul(buf
, 0, ®ion
->ar
.end
);
63 return err
? err
: count
;
66 static void damon_sysfs_region_release(struct kobject
*kobj
)
68 kfree(container_of(kobj
, struct damon_sysfs_region
, kobj
));
71 static struct kobj_attribute damon_sysfs_region_start_attr
=
72 __ATTR_RW_MODE(start
, 0600);
74 static struct kobj_attribute damon_sysfs_region_end_attr
=
75 __ATTR_RW_MODE(end
, 0600);
77 static struct attribute
*damon_sysfs_region_attrs
[] = {
78 &damon_sysfs_region_start_attr
.attr
,
79 &damon_sysfs_region_end_attr
.attr
,
82 ATTRIBUTE_GROUPS(damon_sysfs_region
);
84 static const struct kobj_type damon_sysfs_region_ktype
= {
85 .release
= damon_sysfs_region_release
,
86 .sysfs_ops
= &kobj_sysfs_ops
,
87 .default_groups
= damon_sysfs_region_groups
,
91 * init_regions directory
94 struct damon_sysfs_regions
{
96 struct damon_sysfs_region
**regions_arr
;
100 static struct damon_sysfs_regions
*damon_sysfs_regions_alloc(void)
102 return kzalloc(sizeof(struct damon_sysfs_regions
), GFP_KERNEL
);
105 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions
*regions
)
107 struct damon_sysfs_region
**regions_arr
= regions
->regions_arr
;
110 for (i
= 0; i
< regions
->nr
; i
++)
111 kobject_put(®ions_arr
[i
]->kobj
);
114 regions
->regions_arr
= NULL
;
117 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions
*regions
,
120 struct damon_sysfs_region
**regions_arr
, *region
;
123 damon_sysfs_regions_rm_dirs(regions
);
127 regions_arr
= kmalloc_array(nr_regions
, sizeof(*regions_arr
),
128 GFP_KERNEL
| __GFP_NOWARN
);
131 regions
->regions_arr
= regions_arr
;
133 for (i
= 0; i
< nr_regions
; i
++) {
134 region
= damon_sysfs_region_alloc();
136 damon_sysfs_regions_rm_dirs(regions
);
140 err
= kobject_init_and_add(®ion
->kobj
,
141 &damon_sysfs_region_ktype
, ®ions
->kobj
,
144 kobject_put(®ion
->kobj
);
145 damon_sysfs_regions_rm_dirs(regions
);
149 regions_arr
[i
] = region
;
155 static ssize_t
nr_regions_show(struct kobject
*kobj
,
156 struct kobj_attribute
*attr
, char *buf
)
158 struct damon_sysfs_regions
*regions
= container_of(kobj
,
159 struct damon_sysfs_regions
, kobj
);
161 return sysfs_emit(buf
, "%d\n", regions
->nr
);
164 static ssize_t
nr_regions_store(struct kobject
*kobj
,
165 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
167 struct damon_sysfs_regions
*regions
;
168 int nr
, err
= kstrtoint(buf
, 0, &nr
);
175 regions
= container_of(kobj
, struct damon_sysfs_regions
, kobj
);
177 if (!mutex_trylock(&damon_sysfs_lock
))
179 err
= damon_sysfs_regions_add_dirs(regions
, nr
);
180 mutex_unlock(&damon_sysfs_lock
);
187 static void damon_sysfs_regions_release(struct kobject
*kobj
)
189 kfree(container_of(kobj
, struct damon_sysfs_regions
, kobj
));
192 static struct kobj_attribute damon_sysfs_regions_nr_attr
=
193 __ATTR_RW_MODE(nr_regions
, 0600);
195 static struct attribute
*damon_sysfs_regions_attrs
[] = {
196 &damon_sysfs_regions_nr_attr
.attr
,
199 ATTRIBUTE_GROUPS(damon_sysfs_regions
);
201 static const struct kobj_type damon_sysfs_regions_ktype
= {
202 .release
= damon_sysfs_regions_release
,
203 .sysfs_ops
= &kobj_sysfs_ops
,
204 .default_groups
= damon_sysfs_regions_groups
,
211 struct damon_sysfs_target
{
213 struct damon_sysfs_regions
*regions
;
217 static struct damon_sysfs_target
*damon_sysfs_target_alloc(void)
219 return kzalloc(sizeof(struct damon_sysfs_target
), GFP_KERNEL
);
222 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target
*target
)
224 struct damon_sysfs_regions
*regions
= damon_sysfs_regions_alloc();
230 err
= kobject_init_and_add(®ions
->kobj
, &damon_sysfs_regions_ktype
,
231 &target
->kobj
, "regions");
233 kobject_put(®ions
->kobj
);
235 target
->regions
= regions
;
239 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target
*target
)
241 damon_sysfs_regions_rm_dirs(target
->regions
);
242 kobject_put(&target
->regions
->kobj
);
245 static ssize_t
pid_target_show(struct kobject
*kobj
,
246 struct kobj_attribute
*attr
, char *buf
)
248 struct damon_sysfs_target
*target
= container_of(kobj
,
249 struct damon_sysfs_target
, kobj
);
251 return sysfs_emit(buf
, "%d\n", target
->pid
);
254 static ssize_t
pid_target_store(struct kobject
*kobj
,
255 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
257 struct damon_sysfs_target
*target
= container_of(kobj
,
258 struct damon_sysfs_target
, kobj
);
259 int err
= kstrtoint(buf
, 0, &target
->pid
);
266 static void damon_sysfs_target_release(struct kobject
*kobj
)
268 kfree(container_of(kobj
, struct damon_sysfs_target
, kobj
));
271 static struct kobj_attribute damon_sysfs_target_pid_attr
=
272 __ATTR_RW_MODE(pid_target
, 0600);
274 static struct attribute
*damon_sysfs_target_attrs
[] = {
275 &damon_sysfs_target_pid_attr
.attr
,
278 ATTRIBUTE_GROUPS(damon_sysfs_target
);
280 static const struct kobj_type damon_sysfs_target_ktype
= {
281 .release
= damon_sysfs_target_release
,
282 .sysfs_ops
= &kobj_sysfs_ops
,
283 .default_groups
= damon_sysfs_target_groups
,
290 struct damon_sysfs_targets
{
292 struct damon_sysfs_target
**targets_arr
;
296 static struct damon_sysfs_targets
*damon_sysfs_targets_alloc(void)
298 return kzalloc(sizeof(struct damon_sysfs_targets
), GFP_KERNEL
);
301 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets
*targets
)
303 struct damon_sysfs_target
**targets_arr
= targets
->targets_arr
;
306 for (i
= 0; i
< targets
->nr
; i
++) {
307 damon_sysfs_target_rm_dirs(targets_arr
[i
]);
308 kobject_put(&targets_arr
[i
]->kobj
);
312 targets
->targets_arr
= NULL
;
315 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets
*targets
,
318 struct damon_sysfs_target
**targets_arr
, *target
;
321 damon_sysfs_targets_rm_dirs(targets
);
325 targets_arr
= kmalloc_array(nr_targets
, sizeof(*targets_arr
),
326 GFP_KERNEL
| __GFP_NOWARN
);
329 targets
->targets_arr
= targets_arr
;
331 for (i
= 0; i
< nr_targets
; i
++) {
332 target
= damon_sysfs_target_alloc();
334 damon_sysfs_targets_rm_dirs(targets
);
338 err
= kobject_init_and_add(&target
->kobj
,
339 &damon_sysfs_target_ktype
, &targets
->kobj
,
344 err
= damon_sysfs_target_add_dirs(target
);
348 targets_arr
[i
] = target
;
354 damon_sysfs_targets_rm_dirs(targets
);
355 kobject_put(&target
->kobj
);
359 static ssize_t
nr_targets_show(struct kobject
*kobj
,
360 struct kobj_attribute
*attr
, char *buf
)
362 struct damon_sysfs_targets
*targets
= container_of(kobj
,
363 struct damon_sysfs_targets
, kobj
);
365 return sysfs_emit(buf
, "%d\n", targets
->nr
);
368 static ssize_t
nr_targets_store(struct kobject
*kobj
,
369 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
371 struct damon_sysfs_targets
*targets
;
372 int nr
, err
= kstrtoint(buf
, 0, &nr
);
379 targets
= container_of(kobj
, struct damon_sysfs_targets
, kobj
);
381 if (!mutex_trylock(&damon_sysfs_lock
))
383 err
= damon_sysfs_targets_add_dirs(targets
, nr
);
384 mutex_unlock(&damon_sysfs_lock
);
391 static void damon_sysfs_targets_release(struct kobject
*kobj
)
393 kfree(container_of(kobj
, struct damon_sysfs_targets
, kobj
));
396 static struct kobj_attribute damon_sysfs_targets_nr_attr
=
397 __ATTR_RW_MODE(nr_targets
, 0600);
399 static struct attribute
*damon_sysfs_targets_attrs
[] = {
400 &damon_sysfs_targets_nr_attr
.attr
,
403 ATTRIBUTE_GROUPS(damon_sysfs_targets
);
405 static const struct kobj_type damon_sysfs_targets_ktype
= {
406 .release
= damon_sysfs_targets_release
,
407 .sysfs_ops
= &kobj_sysfs_ops
,
408 .default_groups
= damon_sysfs_targets_groups
,
412 * intervals directory
415 struct damon_sysfs_intervals
{
417 unsigned long sample_us
;
418 unsigned long aggr_us
;
419 unsigned long update_us
;
422 static struct damon_sysfs_intervals
*damon_sysfs_intervals_alloc(
423 unsigned long sample_us
, unsigned long aggr_us
,
424 unsigned long update_us
)
426 struct damon_sysfs_intervals
*intervals
= kmalloc(sizeof(*intervals
),
432 intervals
->kobj
= (struct kobject
){};
433 intervals
->sample_us
= sample_us
;
434 intervals
->aggr_us
= aggr_us
;
435 intervals
->update_us
= update_us
;
439 static ssize_t
sample_us_show(struct kobject
*kobj
,
440 struct kobj_attribute
*attr
, char *buf
)
442 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
443 struct damon_sysfs_intervals
, kobj
);
445 return sysfs_emit(buf
, "%lu\n", intervals
->sample_us
);
448 static ssize_t
sample_us_store(struct kobject
*kobj
,
449 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
451 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
452 struct damon_sysfs_intervals
, kobj
);
454 int err
= kstrtoul(buf
, 0, &us
);
459 intervals
->sample_us
= us
;
463 static ssize_t
aggr_us_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
466 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
467 struct damon_sysfs_intervals
, kobj
);
469 return sysfs_emit(buf
, "%lu\n", intervals
->aggr_us
);
472 static ssize_t
aggr_us_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
473 const char *buf
, size_t count
)
475 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
476 struct damon_sysfs_intervals
, kobj
);
478 int err
= kstrtoul(buf
, 0, &us
);
483 intervals
->aggr_us
= us
;
487 static ssize_t
update_us_show(struct kobject
*kobj
,
488 struct kobj_attribute
*attr
, char *buf
)
490 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
491 struct damon_sysfs_intervals
, kobj
);
493 return sysfs_emit(buf
, "%lu\n", intervals
->update_us
);
496 static ssize_t
update_us_store(struct kobject
*kobj
,
497 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
499 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
500 struct damon_sysfs_intervals
, kobj
);
502 int err
= kstrtoul(buf
, 0, &us
);
507 intervals
->update_us
= us
;
511 static void damon_sysfs_intervals_release(struct kobject
*kobj
)
513 kfree(container_of(kobj
, struct damon_sysfs_intervals
, kobj
));
516 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr
=
517 __ATTR_RW_MODE(sample_us
, 0600);
519 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr
=
520 __ATTR_RW_MODE(aggr_us
, 0600);
522 static struct kobj_attribute damon_sysfs_intervals_update_us_attr
=
523 __ATTR_RW_MODE(update_us
, 0600);
525 static struct attribute
*damon_sysfs_intervals_attrs
[] = {
526 &damon_sysfs_intervals_sample_us_attr
.attr
,
527 &damon_sysfs_intervals_aggr_us_attr
.attr
,
528 &damon_sysfs_intervals_update_us_attr
.attr
,
531 ATTRIBUTE_GROUPS(damon_sysfs_intervals
);
533 static const struct kobj_type damon_sysfs_intervals_ktype
= {
534 .release
= damon_sysfs_intervals_release
,
535 .sysfs_ops
= &kobj_sysfs_ops
,
536 .default_groups
= damon_sysfs_intervals_groups
,
540 * monitoring_attrs directory
543 struct damon_sysfs_attrs
{
545 struct damon_sysfs_intervals
*intervals
;
546 struct damon_sysfs_ul_range
*nr_regions_range
;
549 static struct damon_sysfs_attrs
*damon_sysfs_attrs_alloc(void)
551 struct damon_sysfs_attrs
*attrs
= kmalloc(sizeof(*attrs
), GFP_KERNEL
);
555 attrs
->kobj
= (struct kobject
){};
559 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs
*attrs
)
561 struct damon_sysfs_intervals
*intervals
;
562 struct damon_sysfs_ul_range
*nr_regions_range
;
565 intervals
= damon_sysfs_intervals_alloc(5000, 100000, 60000000);
569 err
= kobject_init_and_add(&intervals
->kobj
,
570 &damon_sysfs_intervals_ktype
, &attrs
->kobj
,
573 goto put_intervals_out
;
574 attrs
->intervals
= intervals
;
576 nr_regions_range
= damon_sysfs_ul_range_alloc(10, 1000);
577 if (!nr_regions_range
) {
579 goto put_intervals_out
;
582 err
= kobject_init_and_add(&nr_regions_range
->kobj
,
583 &damon_sysfs_ul_range_ktype
, &attrs
->kobj
,
586 goto put_nr_regions_intervals_out
;
587 attrs
->nr_regions_range
= nr_regions_range
;
590 put_nr_regions_intervals_out
:
591 kobject_put(&nr_regions_range
->kobj
);
592 attrs
->nr_regions_range
= NULL
;
594 kobject_put(&intervals
->kobj
);
595 attrs
->intervals
= NULL
;
599 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs
*attrs
)
601 kobject_put(&attrs
->nr_regions_range
->kobj
);
602 kobject_put(&attrs
->intervals
->kobj
);
605 static void damon_sysfs_attrs_release(struct kobject
*kobj
)
607 kfree(container_of(kobj
, struct damon_sysfs_attrs
, kobj
));
610 static struct attribute
*damon_sysfs_attrs_attrs
[] = {
613 ATTRIBUTE_GROUPS(damon_sysfs_attrs
);
615 static const struct kobj_type damon_sysfs_attrs_ktype
= {
616 .release
= damon_sysfs_attrs_release
,
617 .sysfs_ops
= &kobj_sysfs_ops
,
618 .default_groups
= damon_sysfs_attrs_groups
,
625 /* This should match with enum damon_ops_id */
626 static const char * const damon_sysfs_ops_strs
[] = {
632 struct damon_sysfs_context
{
634 enum damon_ops_id ops_id
;
635 struct damon_sysfs_attrs
*attrs
;
636 struct damon_sysfs_targets
*targets
;
637 struct damon_sysfs_schemes
*schemes
;
640 static struct damon_sysfs_context
*damon_sysfs_context_alloc(
641 enum damon_ops_id ops_id
)
643 struct damon_sysfs_context
*context
= kmalloc(sizeof(*context
),
648 context
->kobj
= (struct kobject
){};
649 context
->ops_id
= ops_id
;
653 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context
*context
)
655 struct damon_sysfs_attrs
*attrs
= damon_sysfs_attrs_alloc();
660 err
= kobject_init_and_add(&attrs
->kobj
, &damon_sysfs_attrs_ktype
,
661 &context
->kobj
, "monitoring_attrs");
664 err
= damon_sysfs_attrs_add_dirs(attrs
);
667 context
->attrs
= attrs
;
671 kobject_put(&attrs
->kobj
);
675 static int damon_sysfs_context_set_targets(struct damon_sysfs_context
*context
)
677 struct damon_sysfs_targets
*targets
= damon_sysfs_targets_alloc();
682 err
= kobject_init_and_add(&targets
->kobj
, &damon_sysfs_targets_ktype
,
683 &context
->kobj
, "targets");
685 kobject_put(&targets
->kobj
);
688 context
->targets
= targets
;
692 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context
*context
)
694 struct damon_sysfs_schemes
*schemes
= damon_sysfs_schemes_alloc();
699 err
= kobject_init_and_add(&schemes
->kobj
, &damon_sysfs_schemes_ktype
,
700 &context
->kobj
, "schemes");
702 kobject_put(&schemes
->kobj
);
705 context
->schemes
= schemes
;
709 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context
*context
)
713 err
= damon_sysfs_context_set_attrs(context
);
717 err
= damon_sysfs_context_set_targets(context
);
721 err
= damon_sysfs_context_set_schemes(context
);
723 goto put_targets_attrs_out
;
726 put_targets_attrs_out
:
727 kobject_put(&context
->targets
->kobj
);
728 context
->targets
= NULL
;
730 kobject_put(&context
->attrs
->kobj
);
731 context
->attrs
= NULL
;
735 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context
*context
)
737 damon_sysfs_attrs_rm_dirs(context
->attrs
);
738 kobject_put(&context
->attrs
->kobj
);
739 damon_sysfs_targets_rm_dirs(context
->targets
);
740 kobject_put(&context
->targets
->kobj
);
741 damon_sysfs_schemes_rm_dirs(context
->schemes
);
742 kobject_put(&context
->schemes
->kobj
);
745 static ssize_t
avail_operations_show(struct kobject
*kobj
,
746 struct kobj_attribute
*attr
, char *buf
)
748 enum damon_ops_id id
;
751 for (id
= 0; id
< NR_DAMON_OPS
; id
++) {
752 if (!damon_is_registered_ops(id
))
754 len
+= sysfs_emit_at(buf
, len
, "%s\n",
755 damon_sysfs_ops_strs
[id
]);
760 static ssize_t
operations_show(struct kobject
*kobj
,
761 struct kobj_attribute
*attr
, char *buf
)
763 struct damon_sysfs_context
*context
= container_of(kobj
,
764 struct damon_sysfs_context
, kobj
);
766 return sysfs_emit(buf
, "%s\n", damon_sysfs_ops_strs
[context
->ops_id
]);
769 static ssize_t
operations_store(struct kobject
*kobj
,
770 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
772 struct damon_sysfs_context
*context
= container_of(kobj
,
773 struct damon_sysfs_context
, kobj
);
774 enum damon_ops_id id
;
776 for (id
= 0; id
< NR_DAMON_OPS
; id
++) {
777 if (sysfs_streq(buf
, damon_sysfs_ops_strs
[id
])) {
778 context
->ops_id
= id
;
785 static void damon_sysfs_context_release(struct kobject
*kobj
)
787 kfree(container_of(kobj
, struct damon_sysfs_context
, kobj
));
790 static struct kobj_attribute damon_sysfs_context_avail_operations_attr
=
791 __ATTR_RO_MODE(avail_operations
, 0400);
793 static struct kobj_attribute damon_sysfs_context_operations_attr
=
794 __ATTR_RW_MODE(operations
, 0600);
796 static struct attribute
*damon_sysfs_context_attrs
[] = {
797 &damon_sysfs_context_avail_operations_attr
.attr
,
798 &damon_sysfs_context_operations_attr
.attr
,
801 ATTRIBUTE_GROUPS(damon_sysfs_context
);
803 static const struct kobj_type damon_sysfs_context_ktype
= {
804 .release
= damon_sysfs_context_release
,
805 .sysfs_ops
= &kobj_sysfs_ops
,
806 .default_groups
= damon_sysfs_context_groups
,
813 struct damon_sysfs_contexts
{
815 struct damon_sysfs_context
**contexts_arr
;
819 static struct damon_sysfs_contexts
*damon_sysfs_contexts_alloc(void)
821 return kzalloc(sizeof(struct damon_sysfs_contexts
), GFP_KERNEL
);
824 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts
*contexts
)
826 struct damon_sysfs_context
**contexts_arr
= contexts
->contexts_arr
;
829 for (i
= 0; i
< contexts
->nr
; i
++) {
830 damon_sysfs_context_rm_dirs(contexts_arr
[i
]);
831 kobject_put(&contexts_arr
[i
]->kobj
);
835 contexts
->contexts_arr
= NULL
;
838 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts
*contexts
,
841 struct damon_sysfs_context
**contexts_arr
, *context
;
844 damon_sysfs_contexts_rm_dirs(contexts
);
848 contexts_arr
= kmalloc_array(nr_contexts
, sizeof(*contexts_arr
),
849 GFP_KERNEL
| __GFP_NOWARN
);
852 contexts
->contexts_arr
= contexts_arr
;
854 for (i
= 0; i
< nr_contexts
; i
++) {
855 context
= damon_sysfs_context_alloc(DAMON_OPS_VADDR
);
857 damon_sysfs_contexts_rm_dirs(contexts
);
861 err
= kobject_init_and_add(&context
->kobj
,
862 &damon_sysfs_context_ktype
, &contexts
->kobj
,
867 err
= damon_sysfs_context_add_dirs(context
);
871 contexts_arr
[i
] = context
;
877 damon_sysfs_contexts_rm_dirs(contexts
);
878 kobject_put(&context
->kobj
);
882 static ssize_t
nr_contexts_show(struct kobject
*kobj
,
883 struct kobj_attribute
*attr
, char *buf
)
885 struct damon_sysfs_contexts
*contexts
= container_of(kobj
,
886 struct damon_sysfs_contexts
, kobj
);
888 return sysfs_emit(buf
, "%d\n", contexts
->nr
);
891 static ssize_t
nr_contexts_store(struct kobject
*kobj
,
892 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
894 struct damon_sysfs_contexts
*contexts
;
897 err
= kstrtoint(buf
, 0, &nr
);
900 /* TODO: support multiple contexts per kdamond */
901 if (nr
< 0 || 1 < nr
)
904 contexts
= container_of(kobj
, struct damon_sysfs_contexts
, kobj
);
905 if (!mutex_trylock(&damon_sysfs_lock
))
907 err
= damon_sysfs_contexts_add_dirs(contexts
, nr
);
908 mutex_unlock(&damon_sysfs_lock
);
915 static void damon_sysfs_contexts_release(struct kobject
*kobj
)
917 kfree(container_of(kobj
, struct damon_sysfs_contexts
, kobj
));
920 static struct kobj_attribute damon_sysfs_contexts_nr_attr
921 = __ATTR_RW_MODE(nr_contexts
, 0600);
923 static struct attribute
*damon_sysfs_contexts_attrs
[] = {
924 &damon_sysfs_contexts_nr_attr
.attr
,
927 ATTRIBUTE_GROUPS(damon_sysfs_contexts
);
929 static const struct kobj_type damon_sysfs_contexts_ktype
= {
930 .release
= damon_sysfs_contexts_release
,
931 .sysfs_ops
= &kobj_sysfs_ops
,
932 .default_groups
= damon_sysfs_contexts_groups
,
939 struct damon_sysfs_kdamond
{
941 struct damon_sysfs_contexts
*contexts
;
942 struct damon_ctx
*damon_ctx
;
945 static struct damon_sysfs_kdamond
*damon_sysfs_kdamond_alloc(void)
947 return kzalloc(sizeof(struct damon_sysfs_kdamond
), GFP_KERNEL
);
950 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond
*kdamond
)
952 struct damon_sysfs_contexts
*contexts
;
955 contexts
= damon_sysfs_contexts_alloc();
959 err
= kobject_init_and_add(&contexts
->kobj
,
960 &damon_sysfs_contexts_ktype
, &kdamond
->kobj
,
963 kobject_put(&contexts
->kobj
);
966 kdamond
->contexts
= contexts
;
971 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond
*kdamond
)
973 damon_sysfs_contexts_rm_dirs(kdamond
->contexts
);
974 kobject_put(&kdamond
->contexts
->kobj
);
977 static bool damon_sysfs_ctx_running(struct damon_ctx
*ctx
)
981 mutex_lock(&ctx
->kdamond_lock
);
982 running
= ctx
->kdamond
!= NULL
;
983 mutex_unlock(&ctx
->kdamond_lock
);
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};

/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
};

/*
 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
 * @cmd:	The command that needs to be handled by the callback.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This structure represents a sysfs command request that need to access some
 * DAMON context-internal data.  Because DAMON context-internal data can be
 * safely accessed from DAMON callbacks without additional synchronization, the
 * request will be handled by the DAMON callback.  None-``NULL`` @kdamond means
 * the request is valid.
 */
struct damon_sysfs_cmd_request {
	enum damon_sysfs_cmd cmd;
	struct damon_sysfs_kdamond *kdamond;
};

/* Current DAMON callback request.  Protected by damon_sysfs_lock. */
static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
1053 static ssize_t
state_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
1056 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1057 struct damon_sysfs_kdamond
, kobj
);
1058 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1064 running
= damon_sysfs_ctx_running(ctx
);
1066 return sysfs_emit(buf
, "%s\n", running
?
1067 damon_sysfs_cmd_strs
[DAMON_SYSFS_CMD_ON
] :
1068 damon_sysfs_cmd_strs
[DAMON_SYSFS_CMD_OFF
]);
1071 static int damon_sysfs_set_attrs(struct damon_ctx
*ctx
,
1072 struct damon_sysfs_attrs
*sys_attrs
)
1074 struct damon_sysfs_intervals
*sys_intervals
= sys_attrs
->intervals
;
1075 struct damon_sysfs_ul_range
*sys_nr_regions
=
1076 sys_attrs
->nr_regions_range
;
1077 struct damon_attrs attrs
= {
1078 .sample_interval
= sys_intervals
->sample_us
,
1079 .aggr_interval
= sys_intervals
->aggr_us
,
1080 .ops_update_interval
= sys_intervals
->update_us
,
1081 .min_nr_regions
= sys_nr_regions
->min
,
1082 .max_nr_regions
= sys_nr_regions
->max
,
1084 return damon_set_attrs(ctx
, &attrs
);
1087 static void damon_sysfs_destroy_targets(struct damon_ctx
*ctx
)
1089 struct damon_target
*t
, *next
;
1090 bool has_pid
= damon_target_has_pid(ctx
);
1092 damon_for_each_target_safe(t
, next
, ctx
) {
1095 damon_destroy_target(t
);
1099 static int damon_sysfs_set_regions(struct damon_target
*t
,
1100 struct damon_sysfs_regions
*sysfs_regions
)
1102 struct damon_addr_range
*ranges
= kmalloc_array(sysfs_regions
->nr
,
1103 sizeof(*ranges
), GFP_KERNEL
| __GFP_NOWARN
);
1104 int i
, err
= -EINVAL
;
1108 for (i
= 0; i
< sysfs_regions
->nr
; i
++) {
1109 struct damon_sysfs_region
*sys_region
=
1110 sysfs_regions
->regions_arr
[i
];
1112 if (sys_region
->ar
.start
> sys_region
->ar
.end
)
1115 ranges
[i
].start
= sys_region
->ar
.start
;
1116 ranges
[i
].end
= sys_region
->ar
.end
;
1119 if (ranges
[i
- 1].end
> ranges
[i
].start
)
1122 err
= damon_set_regions(t
, ranges
, sysfs_regions
->nr
);
1129 static int damon_sysfs_add_target(struct damon_sysfs_target
*sys_target
,
1130 struct damon_ctx
*ctx
)
1132 struct damon_target
*t
= damon_new_target();
1137 damon_add_target(ctx
, t
);
1138 if (damon_target_has_pid(ctx
)) {
1139 t
->pid
= find_get_pid(sys_target
->pid
);
1141 goto destroy_targets_out
;
1143 err
= damon_sysfs_set_regions(t
, sys_target
->regions
);
1145 goto destroy_targets_out
;
1148 destroy_targets_out
:
1149 damon_sysfs_destroy_targets(ctx
);
1153 static int damon_sysfs_update_target_pid(struct damon_target
*target
, int pid
)
1155 struct pid
*pid_new
;
1157 pid_new
= find_get_pid(pid
);
1161 if (pid_new
== target
->pid
) {
1166 put_pid(target
->pid
);
1167 target
->pid
= pid_new
;
1171 static int damon_sysfs_update_target(struct damon_target
*target
,
1172 struct damon_ctx
*ctx
,
1173 struct damon_sysfs_target
*sys_target
)
1177 if (damon_target_has_pid(ctx
)) {
1178 err
= damon_sysfs_update_target_pid(target
, sys_target
->pid
);
1184 * Do monitoring target region boundary update only if one or more
1185 * regions are set by the user. This is for keeping current monitoring
1186 * target results and range easier, especially for dynamic monitoring
1187 * target regions update ops like 'vaddr'.
1189 if (sys_target
->regions
->nr
)
1190 err
= damon_sysfs_set_regions(target
, sys_target
->regions
);
1194 static int damon_sysfs_set_targets(struct damon_ctx
*ctx
,
1195 struct damon_sysfs_targets
*sysfs_targets
)
1197 struct damon_target
*t
, *next
;
1200 /* Multiple physical address space monitoring targets makes no sense */
1201 if (ctx
->ops
.id
== DAMON_OPS_PADDR
&& sysfs_targets
->nr
> 1)
1204 damon_for_each_target_safe(t
, next
, ctx
) {
1205 if (i
< sysfs_targets
->nr
) {
1206 err
= damon_sysfs_update_target(t
, ctx
,
1207 sysfs_targets
->targets_arr
[i
]);
1211 if (damon_target_has_pid(ctx
))
1213 damon_destroy_target(t
);
1218 for (; i
< sysfs_targets
->nr
; i
++) {
1219 struct damon_sysfs_target
*st
= sysfs_targets
->targets_arr
[i
];
1221 err
= damon_sysfs_add_target(st
, ctx
);
1228 static bool damon_sysfs_schemes_regions_updating
;
1230 static void damon_sysfs_before_terminate(struct damon_ctx
*ctx
)
1232 struct damon_target
*t
, *next
;
1233 struct damon_sysfs_kdamond
*kdamond
;
1234 enum damon_sysfs_cmd cmd
;
1236 /* damon_sysfs_schemes_update_regions_stop() might not yet called */
1237 kdamond
= damon_sysfs_cmd_request
.kdamond
;
1238 cmd
= damon_sysfs_cmd_request
.cmd
;
1239 if (kdamond
&& ctx
== kdamond
->damon_ctx
&&
1240 (cmd
== DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS
||
1241 cmd
== DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES
) &&
1242 damon_sysfs_schemes_regions_updating
) {
1243 damon_sysfs_schemes_update_regions_stop(ctx
);
1244 damon_sysfs_schemes_regions_updating
= false;
1245 mutex_unlock(&damon_sysfs_lock
);
1248 if (!damon_target_has_pid(ctx
))
1251 mutex_lock(&ctx
->kdamond_lock
);
1252 damon_for_each_target_safe(t
, next
, ctx
) {
1254 damon_destroy_target(t
);
1256 mutex_unlock(&ctx
->kdamond_lock
);
1260 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
1261 * @kdamond: The kobject wrapper that associated to the kdamond thread.
1263 * This function reads the schemes stats of specific kdamond and update the
1264 * related values for sysfs files. This function should be called from DAMON
1265 * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON
1266 * contexts-internal data and DAMON sysfs variables.
1268 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond
*kdamond
)
1270 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1274 damon_sysfs_schemes_update_stats(
1275 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
);
1279 static int damon_sysfs_upd_schemes_regions_start(
1280 struct damon_sysfs_kdamond
*kdamond
, bool total_bytes_only
)
1282 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1286 return damon_sysfs_schemes_update_regions_start(
1287 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
,
1291 static int damon_sysfs_upd_schemes_regions_stop(
1292 struct damon_sysfs_kdamond
*kdamond
)
1294 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1298 return damon_sysfs_schemes_update_regions_stop(ctx
);
1301 static int damon_sysfs_clear_schemes_regions(
1302 struct damon_sysfs_kdamond
*kdamond
)
1304 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1308 return damon_sysfs_schemes_clear_regions(
1309 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
);
1312 static inline bool damon_sysfs_kdamond_running(
1313 struct damon_sysfs_kdamond
*kdamond
)
1315 return kdamond
->damon_ctx
&&
1316 damon_sysfs_ctx_running(kdamond
->damon_ctx
);
1319 static int damon_sysfs_apply_inputs(struct damon_ctx
*ctx
,
1320 struct damon_sysfs_context
*sys_ctx
)
1324 err
= damon_select_ops(ctx
, sys_ctx
->ops_id
);
1327 err
= damon_sysfs_set_attrs(ctx
, sys_ctx
->attrs
);
1330 err
= damon_sysfs_set_targets(ctx
, sys_ctx
->targets
);
1333 return damon_sysfs_set_schemes(ctx
, sys_ctx
->schemes
);
1337 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
1338 * @kdamond: The kobject wrapper for the associated kdamond.
1340 * If the sysfs input is wrong, the kdamond will be terminated.
1342 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond
*kdamond
)
1344 if (!damon_sysfs_kdamond_running(kdamond
))
1346 /* TODO: Support multiple contexts per kdamond */
1347 if (kdamond
->contexts
->nr
!= 1)
1350 return damon_sysfs_apply_inputs(kdamond
->damon_ctx
,
1351 kdamond
->contexts
->contexts_arr
[0]);
1355 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
1356 * @c: The DAMON context of the callback.
1357 * @active: Whether @c is not deactivated due to watermarks.
1359 * This function is periodically called back from the kdamond thread for @c.
1360 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
1362 static int damon_sysfs_cmd_request_callback(struct damon_ctx
*c
, bool active
)
1364 struct damon_sysfs_kdamond
*kdamond
;
1365 bool total_bytes_only
= false;
1368 /* avoid deadlock due to concurrent state_store('off') */
1369 if (!damon_sysfs_schemes_regions_updating
&&
1370 !mutex_trylock(&damon_sysfs_lock
))
1372 kdamond
= damon_sysfs_cmd_request
.kdamond
;
1373 if (!kdamond
|| kdamond
->damon_ctx
!= c
)
1375 switch (damon_sysfs_cmd_request
.cmd
) {
1376 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS
:
1377 err
= damon_sysfs_upd_schemes_stats(kdamond
);
1379 case DAMON_SYSFS_CMD_COMMIT
:
1380 err
= damon_sysfs_commit_input(kdamond
);
1382 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES
:
1383 total_bytes_only
= true;
1385 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS
:
1386 if (!damon_sysfs_schemes_regions_updating
) {
1387 err
= damon_sysfs_upd_schemes_regions_start(kdamond
,
1390 damon_sysfs_schemes_regions_updating
= true;
1395 * Continue regions updating if DAMON is till
1396 * active and the update for all schemes is not
1399 if (active
&& !damos_sysfs_regions_upd_done())
1401 err
= damon_sysfs_upd_schemes_regions_stop(kdamond
);
1402 damon_sysfs_schemes_regions_updating
= false;
1405 case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS
:
1406 err
= damon_sysfs_clear_schemes_regions(kdamond
);
1411 /* Mark the request as invalid now. */
1412 damon_sysfs_cmd_request
.kdamond
= NULL
;
1414 if (!damon_sysfs_schemes_regions_updating
)
1415 mutex_unlock(&damon_sysfs_lock
);
1420 static int damon_sysfs_after_wmarks_check(struct damon_ctx
*c
)
1423 * after_wmarks_check() is called back while the context is deactivated
1426 return damon_sysfs_cmd_request_callback(c
, false);
1429 static int damon_sysfs_after_aggregation(struct damon_ctx
*c
)
1432 * after_aggregation() is called back only while the context is not
1433 * deactivated by watermarks.
1435 return damon_sysfs_cmd_request_callback(c
, true);
1438 static struct damon_ctx
*damon_sysfs_build_ctx(
1439 struct damon_sysfs_context
*sys_ctx
)
1441 struct damon_ctx
*ctx
= damon_new_ctx();
1445 return ERR_PTR(-ENOMEM
);
1447 err
= damon_sysfs_apply_inputs(ctx
, sys_ctx
);
1449 damon_destroy_ctx(ctx
);
1450 return ERR_PTR(err
);
1453 ctx
->callback
.after_wmarks_check
= damon_sysfs_after_wmarks_check
;
1454 ctx
->callback
.after_aggregation
= damon_sysfs_after_aggregation
;
1455 ctx
->callback
.before_terminate
= damon_sysfs_before_terminate
;
1459 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond
*kdamond
)
1461 struct damon_ctx
*ctx
;
1464 if (damon_sysfs_kdamond_running(kdamond
))
1466 if (damon_sysfs_cmd_request
.kdamond
== kdamond
)
1468 /* TODO: support multiple contexts per kdamond */
1469 if (kdamond
->contexts
->nr
!= 1)
1472 if (kdamond
->damon_ctx
)
1473 damon_destroy_ctx(kdamond
->damon_ctx
);
1474 kdamond
->damon_ctx
= NULL
;
1476 ctx
= damon_sysfs_build_ctx(kdamond
->contexts
->contexts_arr
[0]);
1478 return PTR_ERR(ctx
);
1479 err
= damon_start(&ctx
, 1, false);
1481 damon_destroy_ctx(ctx
);
1484 kdamond
->damon_ctx
= ctx
;
1488 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond
*kdamond
)
1490 if (!kdamond
->damon_ctx
)
1492 return damon_stop(&kdamond
->damon_ctx
, 1);
1494 * To allow users show final monitoring results of already turned-off
1495 * DAMON, we free kdamond->damon_ctx in next
1496 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
1501 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
1502 * @cmd: The command to handle.
1503 * @kdamond: The kobject wrapper for the associated kdamond.
1505 * This function handles a DAMON sysfs command for a kdamond. For commands
1506 * that need to access running DAMON context-internal data, it requests
1507 * handling of the command to the DAMON callback
1508 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
1509 * or the context is completed.
1511 * Return: 0 on success, negative error code otherwise.
1513 static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd
,
1514 struct damon_sysfs_kdamond
*kdamond
)
1516 bool need_wait
= true;
1518 /* Handle commands that doesn't access DAMON context-internal data */
1520 case DAMON_SYSFS_CMD_ON
:
1521 return damon_sysfs_turn_damon_on(kdamond
);
1522 case DAMON_SYSFS_CMD_OFF
:
1523 return damon_sysfs_turn_damon_off(kdamond
);
1528 /* Pass the command to DAMON callback for safe DAMON context access */
1529 if (damon_sysfs_cmd_request
.kdamond
)
1531 if (!damon_sysfs_kdamond_running(kdamond
))
1533 damon_sysfs_cmd_request
.cmd
= cmd
;
1534 damon_sysfs_cmd_request
.kdamond
= kdamond
;
1537 * wait until damon_sysfs_cmd_request_callback() handles the request
1538 * from kdamond context
1540 mutex_unlock(&damon_sysfs_lock
);
1542 schedule_timeout_idle(msecs_to_jiffies(100));
1543 if (!mutex_trylock(&damon_sysfs_lock
))
1545 if (!damon_sysfs_cmd_request
.kdamond
) {
1546 /* damon_sysfs_cmd_request_callback() handled */
1548 } else if (!damon_sysfs_kdamond_running(kdamond
)) {
1549 /* kdamond has already finished */
1551 damon_sysfs_cmd_request
.kdamond
= NULL
;
1553 mutex_unlock(&damon_sysfs_lock
);
1555 mutex_lock(&damon_sysfs_lock
);
1559 static ssize_t
state_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
1560 const char *buf
, size_t count
)
1562 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1563 struct damon_sysfs_kdamond
, kobj
);
1564 enum damon_sysfs_cmd cmd
;
1565 ssize_t ret
= -EINVAL
;
1567 if (!mutex_trylock(&damon_sysfs_lock
))
1569 for (cmd
= 0; cmd
< NR_DAMON_SYSFS_CMDS
; cmd
++) {
1570 if (sysfs_streq(buf
, damon_sysfs_cmd_strs
[cmd
])) {
1571 ret
= damon_sysfs_handle_cmd(cmd
, kdamond
);
1575 mutex_unlock(&damon_sysfs_lock
);
1581 static ssize_t
pid_show(struct kobject
*kobj
,
1582 struct kobj_attribute
*attr
, char *buf
)
1584 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1585 struct damon_sysfs_kdamond
, kobj
);
1586 struct damon_ctx
*ctx
;
1589 if (!mutex_trylock(&damon_sysfs_lock
))
1591 ctx
= kdamond
->damon_ctx
;
1595 mutex_lock(&ctx
->kdamond_lock
);
1597 pid
= ctx
->kdamond
->pid
;
1598 mutex_unlock(&ctx
->kdamond_lock
);
1600 mutex_unlock(&damon_sysfs_lock
);
1601 return sysfs_emit(buf
, "%d\n", pid
);
1604 static void damon_sysfs_kdamond_release(struct kobject
*kobj
)
1606 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1607 struct damon_sysfs_kdamond
, kobj
);
1609 if (kdamond
->damon_ctx
)
1610 damon_destroy_ctx(kdamond
->damon_ctx
);
1614 static struct kobj_attribute damon_sysfs_kdamond_state_attr
=
1615 __ATTR_RW_MODE(state
, 0600);
1617 static struct kobj_attribute damon_sysfs_kdamond_pid_attr
=
1618 __ATTR_RO_MODE(pid
, 0400);
1620 static struct attribute
*damon_sysfs_kdamond_attrs
[] = {
1621 &damon_sysfs_kdamond_state_attr
.attr
,
1622 &damon_sysfs_kdamond_pid_attr
.attr
,
1625 ATTRIBUTE_GROUPS(damon_sysfs_kdamond
);
1627 static const struct kobj_type damon_sysfs_kdamond_ktype
= {
1628 .release
= damon_sysfs_kdamond_release
,
1629 .sysfs_ops
= &kobj_sysfs_ops
,
1630 .default_groups
= damon_sysfs_kdamond_groups
,
1634 * kdamonds directory
1637 struct damon_sysfs_kdamonds
{
1638 struct kobject kobj
;
1639 struct damon_sysfs_kdamond
**kdamonds_arr
;
1643 static struct damon_sysfs_kdamonds
*damon_sysfs_kdamonds_alloc(void)
1645 return kzalloc(sizeof(struct damon_sysfs_kdamonds
), GFP_KERNEL
);
1648 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds
*kdamonds
)
1650 struct damon_sysfs_kdamond
**kdamonds_arr
= kdamonds
->kdamonds_arr
;
1653 for (i
= 0; i
< kdamonds
->nr
; i
++) {
1654 damon_sysfs_kdamond_rm_dirs(kdamonds_arr
[i
]);
1655 kobject_put(&kdamonds_arr
[i
]->kobj
);
1658 kfree(kdamonds_arr
);
1659 kdamonds
->kdamonds_arr
= NULL
;
1662 static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond
**kdamonds
,
1667 for (i
= 0; i
< nr_kdamonds
; i
++) {
1668 if (damon_sysfs_kdamond_running(kdamonds
[i
]) ||
1669 damon_sysfs_cmd_request
.kdamond
== kdamonds
[i
])
1676 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds
*kdamonds
,
1679 struct damon_sysfs_kdamond
**kdamonds_arr
, *kdamond
;
1682 if (damon_sysfs_kdamonds_busy(kdamonds
->kdamonds_arr
, kdamonds
->nr
))
1685 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1689 kdamonds_arr
= kmalloc_array(nr_kdamonds
, sizeof(*kdamonds_arr
),
1690 GFP_KERNEL
| __GFP_NOWARN
);
1693 kdamonds
->kdamonds_arr
= kdamonds_arr
;
1695 for (i
= 0; i
< nr_kdamonds
; i
++) {
1696 kdamond
= damon_sysfs_kdamond_alloc();
1698 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1702 err
= kobject_init_and_add(&kdamond
->kobj
,
1703 &damon_sysfs_kdamond_ktype
, &kdamonds
->kobj
,
1708 err
= damon_sysfs_kdamond_add_dirs(kdamond
);
1712 kdamonds_arr
[i
] = kdamond
;
1718 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1719 kobject_put(&kdamond
->kobj
);
1723 static ssize_t
nr_kdamonds_show(struct kobject
*kobj
,
1724 struct kobj_attribute
*attr
, char *buf
)
1726 struct damon_sysfs_kdamonds
*kdamonds
= container_of(kobj
,
1727 struct damon_sysfs_kdamonds
, kobj
);
1729 return sysfs_emit(buf
, "%d\n", kdamonds
->nr
);
1732 static ssize_t
nr_kdamonds_store(struct kobject
*kobj
,
1733 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
1735 struct damon_sysfs_kdamonds
*kdamonds
;
1738 err
= kstrtoint(buf
, 0, &nr
);
1744 kdamonds
= container_of(kobj
, struct damon_sysfs_kdamonds
, kobj
);
1746 if (!mutex_trylock(&damon_sysfs_lock
))
1748 err
= damon_sysfs_kdamonds_add_dirs(kdamonds
, nr
);
1749 mutex_unlock(&damon_sysfs_lock
);
1756 static void damon_sysfs_kdamonds_release(struct kobject
*kobj
)
1758 kfree(container_of(kobj
, struct damon_sysfs_kdamonds
, kobj
));
1761 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr
=
1762 __ATTR_RW_MODE(nr_kdamonds
, 0600);
1764 static struct attribute
*damon_sysfs_kdamonds_attrs
[] = {
1765 &damon_sysfs_kdamonds_nr_attr
.attr
,
1768 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds
);
1770 static const struct kobj_type damon_sysfs_kdamonds_ktype
= {
1771 .release
= damon_sysfs_kdamonds_release
,
1772 .sysfs_ops
= &kobj_sysfs_ops
,
1773 .default_groups
= damon_sysfs_kdamonds_groups
,
1777 * damon user interface directory
1780 struct damon_sysfs_ui_dir
{
1781 struct kobject kobj
;
1782 struct damon_sysfs_kdamonds
*kdamonds
;
1785 static struct damon_sysfs_ui_dir
*damon_sysfs_ui_dir_alloc(void)
1787 return kzalloc(sizeof(struct damon_sysfs_ui_dir
), GFP_KERNEL
);
1790 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir
*ui_dir
)
1792 struct damon_sysfs_kdamonds
*kdamonds
;
1795 kdamonds
= damon_sysfs_kdamonds_alloc();
1799 err
= kobject_init_and_add(&kdamonds
->kobj
,
1800 &damon_sysfs_kdamonds_ktype
, &ui_dir
->kobj
,
1803 kobject_put(&kdamonds
->kobj
);
1806 ui_dir
->kdamonds
= kdamonds
;
1810 static void damon_sysfs_ui_dir_release(struct kobject
*kobj
)
1812 kfree(container_of(kobj
, struct damon_sysfs_ui_dir
, kobj
));
1815 static struct attribute
*damon_sysfs_ui_dir_attrs
[] = {
1818 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir
);
1820 static const struct kobj_type damon_sysfs_ui_dir_ktype
= {
1821 .release
= damon_sysfs_ui_dir_release
,
1822 .sysfs_ops
= &kobj_sysfs_ops
,
1823 .default_groups
= damon_sysfs_ui_dir_groups
,
1826 static int __init
damon_sysfs_init(void)
1828 struct kobject
*damon_sysfs_root
;
1829 struct damon_sysfs_ui_dir
*admin
;
1832 damon_sysfs_root
= kobject_create_and_add("damon", mm_kobj
);
1833 if (!damon_sysfs_root
)
1836 admin
= damon_sysfs_ui_dir_alloc();
1838 kobject_put(damon_sysfs_root
);
1841 err
= kobject_init_and_add(&admin
->kobj
, &damon_sysfs_ui_dir_ktype
,
1842 damon_sysfs_root
, "admin");
1845 err
= damon_sysfs_ui_dir_add_dirs(admin
);
1851 kobject_put(&admin
->kobj
);
1852 kobject_put(damon_sysfs_root
);
1855 subsys_initcall(damon_sysfs_init
);
1857 #include "sysfs-test.h"