// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON sysfs Interface
 *
 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "sysfs-common.h"
/*
 * init region directory
 */

struct damon_sysfs_region {
	struct kobject kobj;
	struct damon_addr_range ar;
};

static struct damon_sysfs_region *damon_sysfs_region_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL);
}
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.start);
}

static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->ar.start);

	return err ? err : count;
}
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.end);
}

static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->ar.end);

	return err ? err : count;
}
static void damon_sysfs_region_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
}

static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

static const struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
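/*
 * Example (hypothetical paths; assumes a kdamond, a context, a target, and
 * one region directory were already created via the nr_* files below):
 *
 *   # cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/targets/0
 *   # echo 4096 > regions/0/start
 *   # echo 8192 > regions/0/end
 *
 * Each region directory simply holds the [start, end) address range that
 * will be passed to DAMON as an initial monitoring region.
 */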
/*
 * init_regions directory
 */

struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;
	int nr;
};

static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
}
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc();
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
static ssize_t nr_regions_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_regions *regions = container_of(kobj,
			struct damon_sysfs_regions, kobj);

	return sysfs_emit(buf, "%d\n", regions->nr);
}

static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	regions = container_of(kobj, struct damon_sysfs_regions, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}

static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

static const struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
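/*
 * Example (hypothetical path): writing N to nr_regions creates region
 * directories named 0 .. N-1 under this directory, and writing 0 removes
 * them again.  Note that the store handler only trylocks damon_sysfs_lock,
 * so concurrent writers may see -EBUSY.
 *
 *   # echo 3 > .../targets/0/regions/nr_regions
 *   # ls .../targets/0/regions
 *   0  1  2  nr_regions
 */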
/*
 * target directory
 */

struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;
	int pid;
};

static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
}
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}

static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
static ssize_t pid_target_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);

	return sysfs_emit(buf, "%d\n", target->pid);
}

static ssize_t pid_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);
	int err = kstrtoint(buf, 0, &target->pid);

	if (err)
		return -EINVAL;
	return count;
}
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}

static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

static const struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
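/*
 * Note: pid_target is only meaningful for operations sets that monitor
 * per-process address spaces (e.g. vaddr); the physical address space
 * operations set (paddr) ignores it.  The value is resolved to a struct pid
 * via find_get_pid() only when the kdamond is turned on (see
 * damon_sysfs_add_target() below).
 */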
/*
 * targets directory
 */

struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;
	int nr;
};

static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
}
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
static ssize_t nr_targets_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_targets *targets = container_of(kobj,
			struct damon_sysfs_targets, kobj);

	return sysfs_emit(buf, "%d\n", targets->nr);
}

static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	targets = container_of(kobj, struct damon_sysfs_targets, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}

static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

static const struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
/*
 * intervals directory
 */

struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;
	unsigned long aggr_us;
	unsigned long update_us;
};

static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
		unsigned long sample_us, unsigned long aggr_us,
		unsigned long update_us)
{
	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
			GFP_KERNEL);

	if (!intervals)
		return NULL;

	intervals->kobj = (struct kobject){};
	intervals->sample_us = sample_us;
	intervals->aggr_us = aggr_us;
	intervals->update_us = update_us;
	return intervals;
}
static ssize_t sample_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
}

static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->sample_us = us;
	return count;
}
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
}

static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->aggr_us = us;
	return count;
}
static ssize_t update_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->update_us);
}

static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->update_us = us;
	return count;
}
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}

static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

static const struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
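/*
 * Example (hypothetical path): all three files take values in microseconds.
 * The defaults set by damon_sysfs_attrs_add_dirs() below are 5 ms sampling,
 * 100 ms aggregation, and 60 s operations update.
 *
 *   # echo 10000 > .../contexts/0/monitoring_attrs/intervals/sample_us
 */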
/*
 * monitoring_attrs directory
 */

struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
};

static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
{
	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);

	if (!attrs)
		return NULL;
	attrs->kobj = (struct kobject){};
	return attrs;
}
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}

static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}

static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

static const struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
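/*
 * Resulting layout (a sketch; hypothetical path):
 *
 *   .../contexts/0/monitoring_attrs/
 *   |-- intervals/{sample_us,aggr_us,update_us}
 *   `-- nr_regions/{min,max}
 *
 * nr_regions/ reuses the generic damon_sysfs_ul_range directory from
 * sysfs-common.h, initialized here to the [10, 1000] default range.
 */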
/*
 * context directory
 */

/* This should match with enum damon_ops_id */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"fvaddr",
	"paddr",
};

struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;
	struct damon_sysfs_attrs *attrs;
	struct damon_sysfs_targets *targets;
	struct damon_sysfs_schemes *schemes;
};
static struct damon_sysfs_context *damon_sysfs_context_alloc(
		enum damon_ops_id ops_id)
{
	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
				GFP_KERNEL);

	if (!context)
		return NULL;
	context->kobj = (struct kobject){};
	context->ops_id = ops_id;
	return context;
}
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
static ssize_t avail_operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	enum damon_ops_id id;
	int len = 0;

	for (id = 0; id < NR_DAMON_OPS; id++) {
		if (!damon_is_registered_ops(id))
			continue;
		len += sysfs_emit_at(buf, len, "%s\n",
				damon_sysfs_ops_strs[id]);
	}
	return len;
}
static ssize_t operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);

	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
}
static ssize_t operations_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);
	enum damon_ops_id id;

	for (id = 0; id < NR_DAMON_OPS; id++) {
		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
			context->ops_id = id;
			return count;
		}
	}
	return -EINVAL;
}
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}

static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

static const struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
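/*
 * Example (hypothetical path): avail_operations lists only the operations
 * sets that are actually registered on this kernel, while operations selects
 * the one this context will use.
 *
 *   # cat .../kdamonds/0/contexts/0/avail_operations
 *   vaddr
 *   paddr
 *   # echo paddr > .../kdamonds/0/contexts/0/operations
 */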
/*
 * contexts directory
 */

struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;
	int nr;
};

static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
}
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
static ssize_t nr_contexts_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);

	return sysfs_emit(buf, "%d\n", contexts->nr);
}

static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}

static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

static const struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
/*
 * kdamond directory
 */

struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;
	struct damon_ctx *damon_ctx;
};

static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
}
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}

static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
	 * to DAMON.
	 */
	DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
	 * effective size quota of the scheme in bytes.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};
/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"commit_schemes_quota_goals",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
	"update_schemes_effective_quotas",
};
/*
 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
 * @cmd:	The command that needs to be handled by the callback.
 * @kdamond:	The kobject wrapper that is associated with the kdamond thread.
 *
 * This structure represents a sysfs command request that needs to access some
 * DAMON context-internal data.  Because DAMON context-internal data can be
 * safely accessed from DAMON callbacks without additional synchronization,
 * the request will be handled by the DAMON callback.  A non-``NULL`` @kdamond
 * means the request is valid.
 */
struct damon_sysfs_cmd_request {
	enum damon_sysfs_cmd cmd;
	struct damon_sysfs_kdamond *kdamond;
};

/* Current DAMON callback request. Protected by damon_sysfs_lock. */
static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
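/*
 * Request lifecycle, as implemented below: state_store() fills this
 * structure under damon_sysfs_lock, then damon_sysfs_handle_cmd() polls
 * until damon_sysfs_cmd_request_callback(), running in the kdamond thread,
 * has consumed the request and reset @kdamond back to NULL.
 */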
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx = kdamond->damon_ctx;
	bool running;

	if (!ctx)
		running = false;
	else
		running = damon_sysfs_ctx_running(ctx);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;
	struct damon_attrs attrs = {
		.sample_interval = sys_intervals->sample_us,
		.aggr_interval = sys_intervals->aggr_us,
		.ops_update_interval = sys_intervals->update_us,
		.min_nr_regions = sys_nr_regions->min,
		.max_nr_regions = sys_nr_regions->max,
	};

	return damon_set_attrs(ctx, &attrs);
}
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;
	bool has_pid = damon_target_has_pid(ctx);

	damon_for_each_target_safe(t, next, ctx) {
		if (has_pid)
			put_pid(t->pid);
		damon_destroy_target(t);
	}
}
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->ar.start > sys_region->ar.end)
			goto out;

		ranges[i].start = sys_region->ar.start;
		ranges[i].end = sys_region->ar.end;
		if (i == 0)
			continue;
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr);
out:
	kfree(ranges);
	return err;
}
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();
	int err = -EINVAL;

	if (!t)
		return -ENOMEM;
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			goto destroy_targets_out;
	}
	err = damon_sysfs_set_regions(t, sys_target->regions);
	if (err)
		goto destroy_targets_out;
	return 0;

destroy_targets_out:
	damon_sysfs_destroy_targets(ctx);
	return err;
}
static int damon_sysfs_add_targets(struct damon_ctx *ctx,
		struct damon_sysfs_targets *sysfs_targets)
{
	int i, err;

	/* Multiple physical address space monitoring targets make no sense */
	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
		return -EINVAL;

	for (i = 0; i < sysfs_targets->nr; i++) {
		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];

		err = damon_sysfs_add_target(st, ctx);
		if (err)
			return err;
	}
	return 0;
}
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @data:	The kobject wrapper that is associated with the kdamond thread.
 *
 * This function reads the schemes stats of a specific kdamond and updates the
 * related values for sysfs files.  This function should be called from the
 * DAMON worker thread, to safely access the DAMON context-internal data.
 * The caller should also ensure holding ``damon_sysfs_lock``, and that
 * ->damon_ctx of @data is not NULL but a valid pointer, to safely access
 * DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
static inline bool damon_sysfs_kdamond_running(
		struct damon_sysfs_kdamond *kdamond)
{
	return kdamond->damon_ctx &&
		damon_sysfs_ctx_running(kdamond->damon_ctx);
}
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_add_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_add_schemes(ctx, sys_ctx->schemes);
}

static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx);
/*
 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * If the sysfs input is wrong, the kdamond will be terminated.
 */
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *param_ctx;
	int err;

	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(param_ctx))
		return PTR_ERR(param_ctx);
	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
	damon_sysfs_destroy_targets(param_ctx);
	damon_destroy_ctx(param_ctx);
	return err;
}
static int damon_sysfs_commit_schemes_quota_goals(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;
	struct damon_ctx *ctx;
	struct damon_sysfs_context *sysfs_ctx;

	if (!damon_sysfs_kdamond_running(sysfs_kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (sysfs_kdamond->contexts->nr != 1)
		return -EINVAL;

	ctx = sysfs_kdamond->damon_ctx;
	sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
	return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
}
/*
 * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
 * sysfs files.
 * @data:	The kobject wrapper that is associated with the kdamond thread.
 *
 * This function reads the schemes' effective quotas of a specific kdamond and
 * updates the related values for sysfs files.  This function should be called
 * from DAMON callbacks while holding ``damon_sysfs_lock``, to safely access
 * the DAMON context-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_effective_quotas(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damos_sysfs_update_effective_quotas(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
/*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
 * @c:			The DAMON context of the callback.
 * @active:		Whether @c is not deactivated due to watermarks.
 * @after_aggregation:	Whether this is called from the after_aggregation()
 *			callback.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 */
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active,
		bool after_aggregation)
{
	struct damon_sysfs_kdamond *kdamond;
	int err = 0;

	/* avoid deadlock due to concurrent state_store('off') */
	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	kdamond = damon_sysfs_cmd_request.kdamond;
	if (!kdamond || kdamond->damon_ctx != c)
		goto out;
	switch (damon_sysfs_cmd_request.cmd) {
	case DAMON_SYSFS_CMD_COMMIT:
		if (!after_aggregation)
			goto out;
		err = damon_sysfs_commit_input(kdamond);
		break;
	default:
		break;
	}
	/* Mark the request as invalid now. */
	damon_sysfs_cmd_request.kdamond = NULL;
out:
	mutex_unlock(&damon_sysfs_lock);
	return err;
}
static int damon_sysfs_after_wmarks_check(struct damon_ctx *c)
{
	/*
	 * after_wmarks_check() is called back while the context is
	 * deactivated by watermarks.
	 */
	return damon_sysfs_cmd_request_callback(c, false, false);
}

static int damon_sysfs_after_sampling(struct damon_ctx *c)
{
	/*
	 * after_sampling() is called back only while the context is not
	 * deactivated by watermarks.
	 */
	return damon_sysfs_cmd_request_callback(c, true, false);
}

static int damon_sysfs_after_aggregation(struct damon_ctx *c)
{
	/*
	 * after_aggregation() is called back only while the context is not
	 * deactivated by watermarks.
	 */
	return damon_sysfs_cmd_request_callback(c, true, true);
}
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx)
{
	struct damon_ctx *ctx = damon_new_ctx();
	int err;

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
	if (err) {
		damon_destroy_ctx(ctx);
		return ERR_PTR(err);
	}

	ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check;
	ctx->callback.after_sampling = damon_sysfs_after_sampling;
	ctx->callback.after_aggregation = damon_sysfs_after_aggregation;
	ctx->callback.before_terminate = damon_sysfs_before_terminate;
	return ctx;
}
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	if (damon_sysfs_cmd_request.kdamond == kdamond)
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;
	return err;
}
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users to read the final monitoring results of an already
	 * turned-off DAMON, we free kdamond->damon_ctx in the next
	 * damon_sysfs_turn_damon_on(), or nr_kdamonds_store().
	 */
}
static int damon_sysfs_damon_call(int (*fn)(void *data),
		struct damon_sysfs_kdamond *kdamond)
{
	struct damon_call_control call_control = {};

	if (!kdamond->damon_ctx)
		return -EINVAL;
	call_control.fn = fn;
	call_control.data = kdamond;
	return damon_call(kdamond->damon_ctx, &call_control);
}
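/*
 * damon_call() (see include/linux/damon.h) makes the kdamond worker thread
 * invoke @fn with @data and waits for its completion, so the handlers passed
 * here run in the same context as the monitoring itself and can access
 * context-internal data without extra locking.
 */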
struct damon_sysfs_schemes_walk_data {
	struct damon_sysfs_kdamond *sysfs_kdamond;
	bool total_bytes_only;
};

/* populate the region directory */
static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s, unsigned long sz_filter_passed)
{
	struct damon_sysfs_schemes_walk_data *walk_data = data;
	struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond;

	damos_sysfs_populate_region_dir(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes,
			ctx, t, r, s, walk_data->total_bytes_only,
			sz_filter_passed);
}
static int damon_sysfs_update_schemes_tried_regions(
		struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only)
{
	struct damon_sysfs_schemes_walk_data walk_data = {
		.sysfs_kdamond = sysfs_kdamond,
		.total_bytes_only = total_bytes_only,
	};
	struct damos_walk_control control = {
		.walk_fn = damon_sysfs_schemes_tried_regions_upd_one,
		.data = &walk_data,
	};
	struct damon_ctx *ctx = sysfs_kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;

	damon_sysfs_schemes_clear_regions(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes);
	return damos_walk(ctx, &control);
}
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and waits until it is properly
 * handled, or the context is completed.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_schemes_quota_goals,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_stats, kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
		return damon_sysfs_update_schemes_tried_regions(kdamond, true);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_update_schemes_tried_regions(kdamond, false);
	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_schemes_clear_regions(
				kdamond->contexts->contexts_arr[0]->schemes);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_effective_quotas,
				kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	mutex_lock(&damon_sysfs_lock);
	return 0;
}
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}
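/*
 * Example (hypothetical path): the state file accepts any of the command
 * strings in damon_sysfs_cmd_strs[], and reads back as "on" or "off".
 *
 *   # echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *   # cat /sys/kernel/mm/damon/admin/kdamonds/0/state
 *   on
 *   # echo update_schemes_stats > /sys/kernel/mm/damon/admin/kdamonds/0/state
 */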
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}

static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static const struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
/*
 * kdamonds directory
 */

struct damon_sysfs_kdamonds {
	struct kobject kobj;
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};

static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
}
static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
	int i;

	for (i = 0; i < kdamonds->nr; i++) {
		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
		kobject_put(&kdamonds_arr[i]->kobj);
	}
	kdamonds->nr = 0;
	kfree(kdamonds_arr);
	kdamonds->kdamonds_arr = NULL;
}
static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
		int nr_kdamonds)
{
	int i;

	for (i = 0; i < nr_kdamonds; i++) {
		if (damon_sysfs_kdamond_running(kdamonds[i]) ||
		    damon_sysfs_cmd_request.kdamond == kdamonds[i])
			return true;
	}

	return false;
}
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
static ssize_t nr_kdamonds_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
			struct damon_sysfs_kdamonds, kobj);

	return sysfs_emit(buf, "%d\n", kdamonds->nr);
}

static ssize_t nr_kdamonds_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
static void damon_sysfs_kdamonds_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
}

static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static const struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
/*
 * damon user interface directory
 */

struct damon_sysfs_ui_dir {
	struct kobject kobj;
	struct damon_sysfs_kdamonds *kdamonds;
};

static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
}
static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int err;

	kdamonds = damon_sysfs_kdamonds_alloc();
	if (!kdamonds)
		return -ENOMEM;

	err = kobject_init_and_add(&kdamonds->kobj,
			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
			"kdamonds");
	if (err) {
		kobject_put(&kdamonds->kobj);
		return err;
	}
	ui_dir->kdamonds = kdamonds;
	return err;
}
static void damon_sysfs_ui_dir_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
}

static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static const struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
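/*
 * The resulting hierarchy rooted at /sys/kernel/mm/damon looks roughly like
 * the following (a sketch; see Documentation/admin-guide/mm/damon/usage.rst
 * for the authoritative layout):
 *
 *   damon/admin/kdamonds/nr_kdamonds
 *   damon/admin/kdamonds/0/{state,pid}
 *   damon/admin/kdamonds/0/contexts/nr_contexts
 *   damon/admin/kdamonds/0/contexts/0/{avail_operations,operations}
 *   damon/admin/kdamonds/0/contexts/0/monitoring_attrs/...
 *   damon/admin/kdamonds/0/contexts/0/targets/0/pid_target
 *   damon/admin/kdamonds/0/contexts/0/targets/0/regions/0/{start,end}
 *   damon/admin/kdamonds/0/contexts/0/schemes/...
 */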
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);

#include "tests/sysfs-kunit.h"