1 // SPDX-License-Identifier: GPL-2.0
3 * DAMON sysfs Interface
5 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
9 #include <linux/sched.h>
10 #include <linux/slab.h>
12 #include "sysfs-common.h"
15 * init region directory
18 struct damon_sysfs_region
{
20 struct damon_addr_range ar
;
23 static struct damon_sysfs_region
*damon_sysfs_region_alloc(void)
25 return kzalloc(sizeof(struct damon_sysfs_region
), GFP_KERNEL
);
28 static ssize_t
start_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
31 struct damon_sysfs_region
*region
= container_of(kobj
,
32 struct damon_sysfs_region
, kobj
);
34 return sysfs_emit(buf
, "%lu\n", region
->ar
.start
);
37 static ssize_t
start_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
38 const char *buf
, size_t count
)
40 struct damon_sysfs_region
*region
= container_of(kobj
,
41 struct damon_sysfs_region
, kobj
);
42 int err
= kstrtoul(buf
, 0, ®ion
->ar
.start
);
44 return err
? err
: count
;
47 static ssize_t
end_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
50 struct damon_sysfs_region
*region
= container_of(kobj
,
51 struct damon_sysfs_region
, kobj
);
53 return sysfs_emit(buf
, "%lu\n", region
->ar
.end
);
56 static ssize_t
end_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
57 const char *buf
, size_t count
)
59 struct damon_sysfs_region
*region
= container_of(kobj
,
60 struct damon_sysfs_region
, kobj
);
61 int err
= kstrtoul(buf
, 0, ®ion
->ar
.end
);
63 return err
? err
: count
;
66 static void damon_sysfs_region_release(struct kobject
*kobj
)
68 kfree(container_of(kobj
, struct damon_sysfs_region
, kobj
));
71 static struct kobj_attribute damon_sysfs_region_start_attr
=
72 __ATTR_RW_MODE(start
, 0600);
74 static struct kobj_attribute damon_sysfs_region_end_attr
=
75 __ATTR_RW_MODE(end
, 0600);
77 static struct attribute
*damon_sysfs_region_attrs
[] = {
78 &damon_sysfs_region_start_attr
.attr
,
79 &damon_sysfs_region_end_attr
.attr
,
82 ATTRIBUTE_GROUPS(damon_sysfs_region
);
84 static const struct kobj_type damon_sysfs_region_ktype
= {
85 .release
= damon_sysfs_region_release
,
86 .sysfs_ops
= &kobj_sysfs_ops
,
87 .default_groups
= damon_sysfs_region_groups
,
91 * init_regions directory
94 struct damon_sysfs_regions
{
96 struct damon_sysfs_region
**regions_arr
;
100 static struct damon_sysfs_regions
*damon_sysfs_regions_alloc(void)
102 return kzalloc(sizeof(struct damon_sysfs_regions
), GFP_KERNEL
);
105 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions
*regions
)
107 struct damon_sysfs_region
**regions_arr
= regions
->regions_arr
;
110 for (i
= 0; i
< regions
->nr
; i
++)
111 kobject_put(®ions_arr
[i
]->kobj
);
114 regions
->regions_arr
= NULL
;
117 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions
*regions
,
120 struct damon_sysfs_region
**regions_arr
, *region
;
123 damon_sysfs_regions_rm_dirs(regions
);
127 regions_arr
= kmalloc_array(nr_regions
, sizeof(*regions_arr
),
128 GFP_KERNEL
| __GFP_NOWARN
);
131 regions
->regions_arr
= regions_arr
;
133 for (i
= 0; i
< nr_regions
; i
++) {
134 region
= damon_sysfs_region_alloc();
136 damon_sysfs_regions_rm_dirs(regions
);
140 err
= kobject_init_and_add(®ion
->kobj
,
141 &damon_sysfs_region_ktype
, ®ions
->kobj
,
144 kobject_put(®ion
->kobj
);
145 damon_sysfs_regions_rm_dirs(regions
);
149 regions_arr
[i
] = region
;
155 static ssize_t
nr_regions_show(struct kobject
*kobj
,
156 struct kobj_attribute
*attr
, char *buf
)
158 struct damon_sysfs_regions
*regions
= container_of(kobj
,
159 struct damon_sysfs_regions
, kobj
);
161 return sysfs_emit(buf
, "%d\n", regions
->nr
);
164 static ssize_t
nr_regions_store(struct kobject
*kobj
,
165 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
167 struct damon_sysfs_regions
*regions
;
168 int nr
, err
= kstrtoint(buf
, 0, &nr
);
175 regions
= container_of(kobj
, struct damon_sysfs_regions
, kobj
);
177 if (!mutex_trylock(&damon_sysfs_lock
))
179 err
= damon_sysfs_regions_add_dirs(regions
, nr
);
180 mutex_unlock(&damon_sysfs_lock
);
187 static void damon_sysfs_regions_release(struct kobject
*kobj
)
189 kfree(container_of(kobj
, struct damon_sysfs_regions
, kobj
));
192 static struct kobj_attribute damon_sysfs_regions_nr_attr
=
193 __ATTR_RW_MODE(nr_regions
, 0600);
195 static struct attribute
*damon_sysfs_regions_attrs
[] = {
196 &damon_sysfs_regions_nr_attr
.attr
,
199 ATTRIBUTE_GROUPS(damon_sysfs_regions
);
201 static const struct kobj_type damon_sysfs_regions_ktype
= {
202 .release
= damon_sysfs_regions_release
,
203 .sysfs_ops
= &kobj_sysfs_ops
,
204 .default_groups
= damon_sysfs_regions_groups
,
211 struct damon_sysfs_target
{
213 struct damon_sysfs_regions
*regions
;
217 static struct damon_sysfs_target
*damon_sysfs_target_alloc(void)
219 return kzalloc(sizeof(struct damon_sysfs_target
), GFP_KERNEL
);
222 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target
*target
)
224 struct damon_sysfs_regions
*regions
= damon_sysfs_regions_alloc();
230 err
= kobject_init_and_add(®ions
->kobj
, &damon_sysfs_regions_ktype
,
231 &target
->kobj
, "regions");
233 kobject_put(®ions
->kobj
);
235 target
->regions
= regions
;
239 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target
*target
)
241 damon_sysfs_regions_rm_dirs(target
->regions
);
242 kobject_put(&target
->regions
->kobj
);
245 static ssize_t
pid_target_show(struct kobject
*kobj
,
246 struct kobj_attribute
*attr
, char *buf
)
248 struct damon_sysfs_target
*target
= container_of(kobj
,
249 struct damon_sysfs_target
, kobj
);
251 return sysfs_emit(buf
, "%d\n", target
->pid
);
254 static ssize_t
pid_target_store(struct kobject
*kobj
,
255 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
257 struct damon_sysfs_target
*target
= container_of(kobj
,
258 struct damon_sysfs_target
, kobj
);
259 int err
= kstrtoint(buf
, 0, &target
->pid
);
266 static void damon_sysfs_target_release(struct kobject
*kobj
)
268 kfree(container_of(kobj
, struct damon_sysfs_target
, kobj
));
271 static struct kobj_attribute damon_sysfs_target_pid_attr
=
272 __ATTR_RW_MODE(pid_target
, 0600);
274 static struct attribute
*damon_sysfs_target_attrs
[] = {
275 &damon_sysfs_target_pid_attr
.attr
,
278 ATTRIBUTE_GROUPS(damon_sysfs_target
);
280 static const struct kobj_type damon_sysfs_target_ktype
= {
281 .release
= damon_sysfs_target_release
,
282 .sysfs_ops
= &kobj_sysfs_ops
,
283 .default_groups
= damon_sysfs_target_groups
,
290 struct damon_sysfs_targets
{
292 struct damon_sysfs_target
**targets_arr
;
296 static struct damon_sysfs_targets
*damon_sysfs_targets_alloc(void)
298 return kzalloc(sizeof(struct damon_sysfs_targets
), GFP_KERNEL
);
301 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets
*targets
)
303 struct damon_sysfs_target
**targets_arr
= targets
->targets_arr
;
306 for (i
= 0; i
< targets
->nr
; i
++) {
307 damon_sysfs_target_rm_dirs(targets_arr
[i
]);
308 kobject_put(&targets_arr
[i
]->kobj
);
312 targets
->targets_arr
= NULL
;
315 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets
*targets
,
318 struct damon_sysfs_target
**targets_arr
, *target
;
321 damon_sysfs_targets_rm_dirs(targets
);
325 targets_arr
= kmalloc_array(nr_targets
, sizeof(*targets_arr
),
326 GFP_KERNEL
| __GFP_NOWARN
);
329 targets
->targets_arr
= targets_arr
;
331 for (i
= 0; i
< nr_targets
; i
++) {
332 target
= damon_sysfs_target_alloc();
334 damon_sysfs_targets_rm_dirs(targets
);
338 err
= kobject_init_and_add(&target
->kobj
,
339 &damon_sysfs_target_ktype
, &targets
->kobj
,
344 err
= damon_sysfs_target_add_dirs(target
);
348 targets_arr
[i
] = target
;
354 damon_sysfs_targets_rm_dirs(targets
);
355 kobject_put(&target
->kobj
);
359 static ssize_t
nr_targets_show(struct kobject
*kobj
,
360 struct kobj_attribute
*attr
, char *buf
)
362 struct damon_sysfs_targets
*targets
= container_of(kobj
,
363 struct damon_sysfs_targets
, kobj
);
365 return sysfs_emit(buf
, "%d\n", targets
->nr
);
368 static ssize_t
nr_targets_store(struct kobject
*kobj
,
369 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
371 struct damon_sysfs_targets
*targets
;
372 int nr
, err
= kstrtoint(buf
, 0, &nr
);
379 targets
= container_of(kobj
, struct damon_sysfs_targets
, kobj
);
381 if (!mutex_trylock(&damon_sysfs_lock
))
383 err
= damon_sysfs_targets_add_dirs(targets
, nr
);
384 mutex_unlock(&damon_sysfs_lock
);
391 static void damon_sysfs_targets_release(struct kobject
*kobj
)
393 kfree(container_of(kobj
, struct damon_sysfs_targets
, kobj
));
396 static struct kobj_attribute damon_sysfs_targets_nr_attr
=
397 __ATTR_RW_MODE(nr_targets
, 0600);
399 static struct attribute
*damon_sysfs_targets_attrs
[] = {
400 &damon_sysfs_targets_nr_attr
.attr
,
403 ATTRIBUTE_GROUPS(damon_sysfs_targets
);
405 static const struct kobj_type damon_sysfs_targets_ktype
= {
406 .release
= damon_sysfs_targets_release
,
407 .sysfs_ops
= &kobj_sysfs_ops
,
408 .default_groups
= damon_sysfs_targets_groups
,
412 * intervals directory
415 struct damon_sysfs_intervals
{
417 unsigned long sample_us
;
418 unsigned long aggr_us
;
419 unsigned long update_us
;
422 static struct damon_sysfs_intervals
*damon_sysfs_intervals_alloc(
423 unsigned long sample_us
, unsigned long aggr_us
,
424 unsigned long update_us
)
426 struct damon_sysfs_intervals
*intervals
= kmalloc(sizeof(*intervals
),
432 intervals
->kobj
= (struct kobject
){};
433 intervals
->sample_us
= sample_us
;
434 intervals
->aggr_us
= aggr_us
;
435 intervals
->update_us
= update_us
;
439 static ssize_t
sample_us_show(struct kobject
*kobj
,
440 struct kobj_attribute
*attr
, char *buf
)
442 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
443 struct damon_sysfs_intervals
, kobj
);
445 return sysfs_emit(buf
, "%lu\n", intervals
->sample_us
);
448 static ssize_t
sample_us_store(struct kobject
*kobj
,
449 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
451 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
452 struct damon_sysfs_intervals
, kobj
);
454 int err
= kstrtoul(buf
, 0, &us
);
459 intervals
->sample_us
= us
;
463 static ssize_t
aggr_us_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
466 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
467 struct damon_sysfs_intervals
, kobj
);
469 return sysfs_emit(buf
, "%lu\n", intervals
->aggr_us
);
472 static ssize_t
aggr_us_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
473 const char *buf
, size_t count
)
475 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
476 struct damon_sysfs_intervals
, kobj
);
478 int err
= kstrtoul(buf
, 0, &us
);
483 intervals
->aggr_us
= us
;
487 static ssize_t
update_us_show(struct kobject
*kobj
,
488 struct kobj_attribute
*attr
, char *buf
)
490 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
491 struct damon_sysfs_intervals
, kobj
);
493 return sysfs_emit(buf
, "%lu\n", intervals
->update_us
);
496 static ssize_t
update_us_store(struct kobject
*kobj
,
497 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
499 struct damon_sysfs_intervals
*intervals
= container_of(kobj
,
500 struct damon_sysfs_intervals
, kobj
);
502 int err
= kstrtoul(buf
, 0, &us
);
507 intervals
->update_us
= us
;
511 static void damon_sysfs_intervals_release(struct kobject
*kobj
)
513 kfree(container_of(kobj
, struct damon_sysfs_intervals
, kobj
));
516 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr
=
517 __ATTR_RW_MODE(sample_us
, 0600);
519 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr
=
520 __ATTR_RW_MODE(aggr_us
, 0600);
522 static struct kobj_attribute damon_sysfs_intervals_update_us_attr
=
523 __ATTR_RW_MODE(update_us
, 0600);
525 static struct attribute
*damon_sysfs_intervals_attrs
[] = {
526 &damon_sysfs_intervals_sample_us_attr
.attr
,
527 &damon_sysfs_intervals_aggr_us_attr
.attr
,
528 &damon_sysfs_intervals_update_us_attr
.attr
,
531 ATTRIBUTE_GROUPS(damon_sysfs_intervals
);
533 static const struct kobj_type damon_sysfs_intervals_ktype
= {
534 .release
= damon_sysfs_intervals_release
,
535 .sysfs_ops
= &kobj_sysfs_ops
,
536 .default_groups
= damon_sysfs_intervals_groups
,
540 * monitoring_attrs directory
543 struct damon_sysfs_attrs
{
545 struct damon_sysfs_intervals
*intervals
;
546 struct damon_sysfs_ul_range
*nr_regions_range
;
549 static struct damon_sysfs_attrs
*damon_sysfs_attrs_alloc(void)
551 struct damon_sysfs_attrs
*attrs
= kmalloc(sizeof(*attrs
), GFP_KERNEL
);
555 attrs
->kobj
= (struct kobject
){};
559 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs
*attrs
)
561 struct damon_sysfs_intervals
*intervals
;
562 struct damon_sysfs_ul_range
*nr_regions_range
;
565 intervals
= damon_sysfs_intervals_alloc(5000, 100000, 60000000);
569 err
= kobject_init_and_add(&intervals
->kobj
,
570 &damon_sysfs_intervals_ktype
, &attrs
->kobj
,
573 goto put_intervals_out
;
574 attrs
->intervals
= intervals
;
576 nr_regions_range
= damon_sysfs_ul_range_alloc(10, 1000);
577 if (!nr_regions_range
) {
579 goto put_intervals_out
;
582 err
= kobject_init_and_add(&nr_regions_range
->kobj
,
583 &damon_sysfs_ul_range_ktype
, &attrs
->kobj
,
586 goto put_nr_regions_intervals_out
;
587 attrs
->nr_regions_range
= nr_regions_range
;
590 put_nr_regions_intervals_out
:
591 kobject_put(&nr_regions_range
->kobj
);
592 attrs
->nr_regions_range
= NULL
;
594 kobject_put(&intervals
->kobj
);
595 attrs
->intervals
= NULL
;
599 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs
*attrs
)
601 kobject_put(&attrs
->nr_regions_range
->kobj
);
602 kobject_put(&attrs
->intervals
->kobj
);
605 static void damon_sysfs_attrs_release(struct kobject
*kobj
)
607 kfree(container_of(kobj
, struct damon_sysfs_attrs
, kobj
));
610 static struct attribute
*damon_sysfs_attrs_attrs
[] = {
613 ATTRIBUTE_GROUPS(damon_sysfs_attrs
);
615 static const struct kobj_type damon_sysfs_attrs_ktype
= {
616 .release
= damon_sysfs_attrs_release
,
617 .sysfs_ops
= &kobj_sysfs_ops
,
618 .default_groups
= damon_sysfs_attrs_groups
,
625 /* This should match with enum damon_ops_id */
626 static const char * const damon_sysfs_ops_strs
[] = {
632 struct damon_sysfs_context
{
634 enum damon_ops_id ops_id
;
635 struct damon_sysfs_attrs
*attrs
;
636 struct damon_sysfs_targets
*targets
;
637 struct damon_sysfs_schemes
*schemes
;
640 static struct damon_sysfs_context
*damon_sysfs_context_alloc(
641 enum damon_ops_id ops_id
)
643 struct damon_sysfs_context
*context
= kmalloc(sizeof(*context
),
648 context
->kobj
= (struct kobject
){};
649 context
->ops_id
= ops_id
;
653 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context
*context
)
655 struct damon_sysfs_attrs
*attrs
= damon_sysfs_attrs_alloc();
660 err
= kobject_init_and_add(&attrs
->kobj
, &damon_sysfs_attrs_ktype
,
661 &context
->kobj
, "monitoring_attrs");
664 err
= damon_sysfs_attrs_add_dirs(attrs
);
667 context
->attrs
= attrs
;
671 kobject_put(&attrs
->kobj
);
675 static int damon_sysfs_context_set_targets(struct damon_sysfs_context
*context
)
677 struct damon_sysfs_targets
*targets
= damon_sysfs_targets_alloc();
682 err
= kobject_init_and_add(&targets
->kobj
, &damon_sysfs_targets_ktype
,
683 &context
->kobj
, "targets");
685 kobject_put(&targets
->kobj
);
688 context
->targets
= targets
;
692 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context
*context
)
694 struct damon_sysfs_schemes
*schemes
= damon_sysfs_schemes_alloc();
699 err
= kobject_init_and_add(&schemes
->kobj
, &damon_sysfs_schemes_ktype
,
700 &context
->kobj
, "schemes");
702 kobject_put(&schemes
->kobj
);
705 context
->schemes
= schemes
;
709 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context
*context
)
713 err
= damon_sysfs_context_set_attrs(context
);
717 err
= damon_sysfs_context_set_targets(context
);
721 err
= damon_sysfs_context_set_schemes(context
);
723 goto put_targets_attrs_out
;
726 put_targets_attrs_out
:
727 kobject_put(&context
->targets
->kobj
);
728 context
->targets
= NULL
;
730 kobject_put(&context
->attrs
->kobj
);
731 context
->attrs
= NULL
;
735 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context
*context
)
737 damon_sysfs_attrs_rm_dirs(context
->attrs
);
738 kobject_put(&context
->attrs
->kobj
);
739 damon_sysfs_targets_rm_dirs(context
->targets
);
740 kobject_put(&context
->targets
->kobj
);
741 damon_sysfs_schemes_rm_dirs(context
->schemes
);
742 kobject_put(&context
->schemes
->kobj
);
745 static ssize_t
avail_operations_show(struct kobject
*kobj
,
746 struct kobj_attribute
*attr
, char *buf
)
748 enum damon_ops_id id
;
751 for (id
= 0; id
< NR_DAMON_OPS
; id
++) {
752 if (!damon_is_registered_ops(id
))
754 len
+= sysfs_emit_at(buf
, len
, "%s\n",
755 damon_sysfs_ops_strs
[id
]);
760 static ssize_t
operations_show(struct kobject
*kobj
,
761 struct kobj_attribute
*attr
, char *buf
)
763 struct damon_sysfs_context
*context
= container_of(kobj
,
764 struct damon_sysfs_context
, kobj
);
766 return sysfs_emit(buf
, "%s\n", damon_sysfs_ops_strs
[context
->ops_id
]);
769 static ssize_t
operations_store(struct kobject
*kobj
,
770 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
772 struct damon_sysfs_context
*context
= container_of(kobj
,
773 struct damon_sysfs_context
, kobj
);
774 enum damon_ops_id id
;
776 for (id
= 0; id
< NR_DAMON_OPS
; id
++) {
777 if (sysfs_streq(buf
, damon_sysfs_ops_strs
[id
])) {
778 context
->ops_id
= id
;
785 static void damon_sysfs_context_release(struct kobject
*kobj
)
787 kfree(container_of(kobj
, struct damon_sysfs_context
, kobj
));
790 static struct kobj_attribute damon_sysfs_context_avail_operations_attr
=
791 __ATTR_RO_MODE(avail_operations
, 0400);
793 static struct kobj_attribute damon_sysfs_context_operations_attr
=
794 __ATTR_RW_MODE(operations
, 0600);
796 static struct attribute
*damon_sysfs_context_attrs
[] = {
797 &damon_sysfs_context_avail_operations_attr
.attr
,
798 &damon_sysfs_context_operations_attr
.attr
,
801 ATTRIBUTE_GROUPS(damon_sysfs_context
);
803 static const struct kobj_type damon_sysfs_context_ktype
= {
804 .release
= damon_sysfs_context_release
,
805 .sysfs_ops
= &kobj_sysfs_ops
,
806 .default_groups
= damon_sysfs_context_groups
,
813 struct damon_sysfs_contexts
{
815 struct damon_sysfs_context
**contexts_arr
;
819 static struct damon_sysfs_contexts
*damon_sysfs_contexts_alloc(void)
821 return kzalloc(sizeof(struct damon_sysfs_contexts
), GFP_KERNEL
);
824 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts
*contexts
)
826 struct damon_sysfs_context
**contexts_arr
= contexts
->contexts_arr
;
829 for (i
= 0; i
< contexts
->nr
; i
++) {
830 damon_sysfs_context_rm_dirs(contexts_arr
[i
]);
831 kobject_put(&contexts_arr
[i
]->kobj
);
835 contexts
->contexts_arr
= NULL
;
838 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts
*contexts
,
841 struct damon_sysfs_context
**contexts_arr
, *context
;
844 damon_sysfs_contexts_rm_dirs(contexts
);
848 contexts_arr
= kmalloc_array(nr_contexts
, sizeof(*contexts_arr
),
849 GFP_KERNEL
| __GFP_NOWARN
);
852 contexts
->contexts_arr
= contexts_arr
;
854 for (i
= 0; i
< nr_contexts
; i
++) {
855 context
= damon_sysfs_context_alloc(DAMON_OPS_VADDR
);
857 damon_sysfs_contexts_rm_dirs(contexts
);
861 err
= kobject_init_and_add(&context
->kobj
,
862 &damon_sysfs_context_ktype
, &contexts
->kobj
,
867 err
= damon_sysfs_context_add_dirs(context
);
871 contexts_arr
[i
] = context
;
877 damon_sysfs_contexts_rm_dirs(contexts
);
878 kobject_put(&context
->kobj
);
882 static ssize_t
nr_contexts_show(struct kobject
*kobj
,
883 struct kobj_attribute
*attr
, char *buf
)
885 struct damon_sysfs_contexts
*contexts
= container_of(kobj
,
886 struct damon_sysfs_contexts
, kobj
);
888 return sysfs_emit(buf
, "%d\n", contexts
->nr
);
891 static ssize_t
nr_contexts_store(struct kobject
*kobj
,
892 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
894 struct damon_sysfs_contexts
*contexts
;
897 err
= kstrtoint(buf
, 0, &nr
);
900 /* TODO: support multiple contexts per kdamond */
901 if (nr
< 0 || 1 < nr
)
904 contexts
= container_of(kobj
, struct damon_sysfs_contexts
, kobj
);
905 if (!mutex_trylock(&damon_sysfs_lock
))
907 err
= damon_sysfs_contexts_add_dirs(contexts
, nr
);
908 mutex_unlock(&damon_sysfs_lock
);
915 static void damon_sysfs_contexts_release(struct kobject
*kobj
)
917 kfree(container_of(kobj
, struct damon_sysfs_contexts
, kobj
));
920 static struct kobj_attribute damon_sysfs_contexts_nr_attr
921 = __ATTR_RW_MODE(nr_contexts
, 0600);
923 static struct attribute
*damon_sysfs_contexts_attrs
[] = {
924 &damon_sysfs_contexts_nr_attr
.attr
,
927 ATTRIBUTE_GROUPS(damon_sysfs_contexts
);
929 static const struct kobj_type damon_sysfs_contexts_ktype
= {
930 .release
= damon_sysfs_contexts_release
,
931 .sysfs_ops
= &kobj_sysfs_ops
,
932 .default_groups
= damon_sysfs_contexts_groups
,
939 struct damon_sysfs_kdamond
{
941 struct damon_sysfs_contexts
*contexts
;
942 struct damon_ctx
*damon_ctx
;
945 static struct damon_sysfs_kdamond
*damon_sysfs_kdamond_alloc(void)
947 return kzalloc(sizeof(struct damon_sysfs_kdamond
), GFP_KERNEL
);
950 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond
*kdamond
)
952 struct damon_sysfs_contexts
*contexts
;
955 contexts
= damon_sysfs_contexts_alloc();
959 err
= kobject_init_and_add(&contexts
->kobj
,
960 &damon_sysfs_contexts_ktype
, &kdamond
->kobj
,
963 kobject_put(&contexts
->kobj
);
966 kdamond
->contexts
= contexts
;
971 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond
*kdamond
)
973 damon_sysfs_contexts_rm_dirs(kdamond
->contexts
);
974 kobject_put(&kdamond
->contexts
->kobj
);
977 static bool damon_sysfs_ctx_running(struct damon_ctx
*ctx
)
981 mutex_lock(&ctx
->kdamond_lock
);
982 running
= ctx
->kdamond
!= NULL
;
983 mutex_unlock(&ctx
->kdamond_lock
);
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
	 * to DAMON.
	 */
	DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions.
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
	 * effective size quota of the scheme in bytes.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};

/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"commit_schemes_quota_goals",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
	"update_schemes_effective_quotas",
};

/*
 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
 * @cmd:	The command that needs to be handled by the callback.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This structure represents a sysfs command request that need to access some
 * DAMON context-internal data.  Because DAMON context-internal data can be
 * safely accessed from DAMON callbacks without additional synchronization, the
 * request will be handled by the DAMON callback.  None-``NULL`` @kdamond means
 * the request is valid.
 */
struct damon_sysfs_cmd_request {
	enum damon_sysfs_cmd cmd;
	struct damon_sysfs_kdamond *kdamond;
};

/* Current DAMON callback request.  Protected by damon_sysfs_lock. */
static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
1065 static ssize_t
state_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
1068 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1069 struct damon_sysfs_kdamond
, kobj
);
1070 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1076 running
= damon_sysfs_ctx_running(ctx
);
1078 return sysfs_emit(buf
, "%s\n", running
?
1079 damon_sysfs_cmd_strs
[DAMON_SYSFS_CMD_ON
] :
1080 damon_sysfs_cmd_strs
[DAMON_SYSFS_CMD_OFF
]);
1083 static int damon_sysfs_set_attrs(struct damon_ctx
*ctx
,
1084 struct damon_sysfs_attrs
*sys_attrs
)
1086 struct damon_sysfs_intervals
*sys_intervals
= sys_attrs
->intervals
;
1087 struct damon_sysfs_ul_range
*sys_nr_regions
=
1088 sys_attrs
->nr_regions_range
;
1089 struct damon_attrs attrs
= {
1090 .sample_interval
= sys_intervals
->sample_us
,
1091 .aggr_interval
= sys_intervals
->aggr_us
,
1092 .ops_update_interval
= sys_intervals
->update_us
,
1093 .min_nr_regions
= sys_nr_regions
->min
,
1094 .max_nr_regions
= sys_nr_regions
->max
,
1096 return damon_set_attrs(ctx
, &attrs
);
1099 static void damon_sysfs_destroy_targets(struct damon_ctx
*ctx
)
1101 struct damon_target
*t
, *next
;
1102 bool has_pid
= damon_target_has_pid(ctx
);
1104 damon_for_each_target_safe(t
, next
, ctx
) {
1107 damon_destroy_target(t
);
1111 static int damon_sysfs_set_regions(struct damon_target
*t
,
1112 struct damon_sysfs_regions
*sysfs_regions
)
1114 struct damon_addr_range
*ranges
= kmalloc_array(sysfs_regions
->nr
,
1115 sizeof(*ranges
), GFP_KERNEL
| __GFP_NOWARN
);
1116 int i
, err
= -EINVAL
;
1120 for (i
= 0; i
< sysfs_regions
->nr
; i
++) {
1121 struct damon_sysfs_region
*sys_region
=
1122 sysfs_regions
->regions_arr
[i
];
1124 if (sys_region
->ar
.start
> sys_region
->ar
.end
)
1127 ranges
[i
].start
= sys_region
->ar
.start
;
1128 ranges
[i
].end
= sys_region
->ar
.end
;
1131 if (ranges
[i
- 1].end
> ranges
[i
].start
)
1134 err
= damon_set_regions(t
, ranges
, sysfs_regions
->nr
);
1141 static int damon_sysfs_add_target(struct damon_sysfs_target
*sys_target
,
1142 struct damon_ctx
*ctx
)
1144 struct damon_target
*t
= damon_new_target();
1149 damon_add_target(ctx
, t
);
1150 if (damon_target_has_pid(ctx
)) {
1151 t
->pid
= find_get_pid(sys_target
->pid
);
1153 goto destroy_targets_out
;
1155 err
= damon_sysfs_set_regions(t
, sys_target
->regions
);
1157 goto destroy_targets_out
;
1160 destroy_targets_out
:
1161 damon_sysfs_destroy_targets(ctx
);
1165 static int damon_sysfs_add_targets(struct damon_ctx
*ctx
,
1166 struct damon_sysfs_targets
*sysfs_targets
)
1170 /* Multiple physical address space monitoring targets makes no sense */
1171 if (ctx
->ops
.id
== DAMON_OPS_PADDR
&& sysfs_targets
->nr
> 1)
1174 for (i
= 0; i
< sysfs_targets
->nr
; i
++) {
1175 struct damon_sysfs_target
*st
= sysfs_targets
->targets_arr
[i
];
1177 err
= damon_sysfs_add_target(st
, ctx
);
1184 static bool damon_sysfs_schemes_regions_updating
;
1186 static void damon_sysfs_before_terminate(struct damon_ctx
*ctx
)
1188 struct damon_target
*t
, *next
;
1189 struct damon_sysfs_kdamond
*kdamond
;
1190 enum damon_sysfs_cmd cmd
;
1192 /* damon_sysfs_schemes_update_regions_stop() might not yet called */
1193 kdamond
= damon_sysfs_cmd_request
.kdamond
;
1194 cmd
= damon_sysfs_cmd_request
.cmd
;
1195 if (kdamond
&& ctx
== kdamond
->damon_ctx
&&
1196 (cmd
== DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS
||
1197 cmd
== DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES
) &&
1198 damon_sysfs_schemes_regions_updating
) {
1199 damon_sysfs_schemes_update_regions_stop(ctx
);
1200 damon_sysfs_schemes_regions_updating
= false;
1201 mutex_unlock(&damon_sysfs_lock
);
1204 if (!damon_target_has_pid(ctx
))
1207 mutex_lock(&ctx
->kdamond_lock
);
1208 damon_for_each_target_safe(t
, next
, ctx
) {
1210 damon_destroy_target(t
);
1212 mutex_unlock(&ctx
->kdamond_lock
);
1216 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
1217 * @kdamond: The kobject wrapper that associated to the kdamond thread.
1219 * This function reads the schemes stats of specific kdamond and update the
1220 * related values for sysfs files. This function should be called from DAMON
1221 * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON
1222 * contexts-internal data and DAMON sysfs variables.
1224 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond
*kdamond
)
1226 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1230 damon_sysfs_schemes_update_stats(
1231 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
);
1235 static int damon_sysfs_upd_schemes_regions_start(
1236 struct damon_sysfs_kdamond
*kdamond
, bool total_bytes_only
)
1238 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1242 return damon_sysfs_schemes_update_regions_start(
1243 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
,
1247 static int damon_sysfs_upd_schemes_regions_stop(
1248 struct damon_sysfs_kdamond
*kdamond
)
1250 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1254 return damon_sysfs_schemes_update_regions_stop(ctx
);
1257 static int damon_sysfs_clear_schemes_regions(
1258 struct damon_sysfs_kdamond
*kdamond
)
1260 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1264 return damon_sysfs_schemes_clear_regions(
1265 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
);
1268 static inline bool damon_sysfs_kdamond_running(
1269 struct damon_sysfs_kdamond
*kdamond
)
1271 return kdamond
->damon_ctx
&&
1272 damon_sysfs_ctx_running(kdamond
->damon_ctx
);
1275 static int damon_sysfs_apply_inputs(struct damon_ctx
*ctx
,
1276 struct damon_sysfs_context
*sys_ctx
)
1280 err
= damon_select_ops(ctx
, sys_ctx
->ops_id
);
1283 err
= damon_sysfs_set_attrs(ctx
, sys_ctx
->attrs
);
1286 err
= damon_sysfs_add_targets(ctx
, sys_ctx
->targets
);
1289 return damon_sysfs_add_schemes(ctx
, sys_ctx
->schemes
);
1292 static struct damon_ctx
*damon_sysfs_build_ctx(
1293 struct damon_sysfs_context
*sys_ctx
);
1296 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
1297 * @kdamond: The kobject wrapper for the associated kdamond.
1299 * If the sysfs input is wrong, the kdamond will be terminated.
1301 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond
*kdamond
)
1303 struct damon_ctx
*param_ctx
;
1306 if (!damon_sysfs_kdamond_running(kdamond
))
1308 /* TODO: Support multiple contexts per kdamond */
1309 if (kdamond
->contexts
->nr
!= 1)
1312 param_ctx
= damon_sysfs_build_ctx(kdamond
->contexts
->contexts_arr
[0]);
1313 if (IS_ERR(param_ctx
))
1314 return PTR_ERR(param_ctx
);
1315 err
= damon_commit_ctx(kdamond
->damon_ctx
, param_ctx
);
1316 damon_sysfs_destroy_targets(param_ctx
);
1317 damon_destroy_ctx(param_ctx
);
1321 static int damon_sysfs_commit_schemes_quota_goals(
1322 struct damon_sysfs_kdamond
*sysfs_kdamond
)
1324 struct damon_ctx
*ctx
;
1325 struct damon_sysfs_context
*sysfs_ctx
;
1327 if (!damon_sysfs_kdamond_running(sysfs_kdamond
))
1329 /* TODO: Support multiple contexts per kdamond */
1330 if (sysfs_kdamond
->contexts
->nr
!= 1)
1333 ctx
= sysfs_kdamond
->damon_ctx
;
1334 sysfs_ctx
= sysfs_kdamond
->contexts
->contexts_arr
[0];
1335 return damos_sysfs_set_quota_scores(sysfs_ctx
->schemes
, ctx
);
1339 * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
1341 * @kdamond: The kobject wrapper that associated to the kdamond thread.
1343 * This function reads the schemes' effective quotas of specific kdamond and
1344 * update the related values for sysfs files. This function should be called
1345 * from DAMON callbacks while holding ``damon_syfs_lock``, to safely access the
1346 * DAMON contexts-internal data and DAMON sysfs variables.
1348 static int damon_sysfs_upd_schemes_effective_quotas(
1349 struct damon_sysfs_kdamond
*kdamond
)
1351 struct damon_ctx
*ctx
= kdamond
->damon_ctx
;
1355 damos_sysfs_update_effective_quotas(
1356 kdamond
->contexts
->contexts_arr
[0]->schemes
, ctx
);
1362 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
1363 * @c: The DAMON context of the callback.
1364 * @active: Whether @c is not deactivated due to watermarks.
1365 * @after_aggr: Whether this is called from after_aggregation() callback.
1367 * This function is periodically called back from the kdamond thread for @c.
1368 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
1370 static int damon_sysfs_cmd_request_callback(struct damon_ctx
*c
, bool active
,
1371 bool after_aggregation
)
1373 struct damon_sysfs_kdamond
*kdamond
;
1374 bool total_bytes_only
= false;
1377 /* avoid deadlock due to concurrent state_store('off') */
1378 if (!damon_sysfs_schemes_regions_updating
&&
1379 !mutex_trylock(&damon_sysfs_lock
))
1381 kdamond
= damon_sysfs_cmd_request
.kdamond
;
1382 if (!kdamond
|| kdamond
->damon_ctx
!= c
)
1384 switch (damon_sysfs_cmd_request
.cmd
) {
1385 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS
:
1386 err
= damon_sysfs_upd_schemes_stats(kdamond
);
1388 case DAMON_SYSFS_CMD_COMMIT
:
1389 if (!after_aggregation
)
1391 err
= damon_sysfs_commit_input(kdamond
);
1393 case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS
:
1394 err
= damon_sysfs_commit_schemes_quota_goals(kdamond
);
1396 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES
:
1397 total_bytes_only
= true;
1399 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS
:
1400 if (!damon_sysfs_schemes_regions_updating
) {
1401 err
= damon_sysfs_upd_schemes_regions_start(kdamond
,
1404 damon_sysfs_schemes_regions_updating
= true;
1408 damos_sysfs_mark_finished_regions_updates(c
);
1410 * Continue regions updating if DAMON is till
1411 * active and the update for all schemes is not
1414 if (active
&& !damos_sysfs_regions_upd_done())
1416 err
= damon_sysfs_upd_schemes_regions_stop(kdamond
);
1417 damon_sysfs_schemes_regions_updating
= false;
1420 case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS
:
1421 err
= damon_sysfs_clear_schemes_regions(kdamond
);
1423 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS
:
1424 err
= damon_sysfs_upd_schemes_effective_quotas(kdamond
);
1429 /* Mark the request as invalid now. */
1430 damon_sysfs_cmd_request
.kdamond
= NULL
;
1432 if (!damon_sysfs_schemes_regions_updating
)
1433 mutex_unlock(&damon_sysfs_lock
);
1438 static int damon_sysfs_after_wmarks_check(struct damon_ctx
*c
)
1441 * after_wmarks_check() is called back while the context is deactivated
1444 return damon_sysfs_cmd_request_callback(c
, false, false);
1447 static int damon_sysfs_after_sampling(struct damon_ctx
*c
)
1450 * after_sampling() is called back only while the context is not
1451 * deactivated by watermarks.
1453 return damon_sysfs_cmd_request_callback(c
, true, false);
1456 static int damon_sysfs_after_aggregation(struct damon_ctx
*c
)
1459 * after_aggregation() is called back only while the context is not
1460 * deactivated by watermarks.
1462 return damon_sysfs_cmd_request_callback(c
, true, true);
1465 static struct damon_ctx
*damon_sysfs_build_ctx(
1466 struct damon_sysfs_context
*sys_ctx
)
1468 struct damon_ctx
*ctx
= damon_new_ctx();
1472 return ERR_PTR(-ENOMEM
);
1474 err
= damon_sysfs_apply_inputs(ctx
, sys_ctx
);
1476 damon_destroy_ctx(ctx
);
1477 return ERR_PTR(err
);
1480 ctx
->callback
.after_wmarks_check
= damon_sysfs_after_wmarks_check
;
1481 ctx
->callback
.after_sampling
= damon_sysfs_after_sampling
;
1482 ctx
->callback
.after_aggregation
= damon_sysfs_after_aggregation
;
1483 ctx
->callback
.before_terminate
= damon_sysfs_before_terminate
;
1487 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond
*kdamond
)
1489 struct damon_ctx
*ctx
;
1492 if (damon_sysfs_kdamond_running(kdamond
))
1494 if (damon_sysfs_cmd_request
.kdamond
== kdamond
)
1496 /* TODO: support multiple contexts per kdamond */
1497 if (kdamond
->contexts
->nr
!= 1)
1500 if (kdamond
->damon_ctx
)
1501 damon_destroy_ctx(kdamond
->damon_ctx
);
1502 kdamond
->damon_ctx
= NULL
;
1504 ctx
= damon_sysfs_build_ctx(kdamond
->contexts
->contexts_arr
[0]);
1506 return PTR_ERR(ctx
);
1507 err
= damon_start(&ctx
, 1, false);
1509 damon_destroy_ctx(ctx
);
1512 kdamond
->damon_ctx
= ctx
;
1516 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond
*kdamond
)
1518 if (!kdamond
->damon_ctx
)
1520 return damon_stop(&kdamond
->damon_ctx
, 1);
1522 * To allow users show final monitoring results of already turned-off
1523 * DAMON, we free kdamond->damon_ctx in next
1524 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
1529 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
1530 * @cmd: The command to handle.
1531 * @kdamond: The kobject wrapper for the associated kdamond.
1533 * This function handles a DAMON sysfs command for a kdamond. For commands
1534 * that need to access running DAMON context-internal data, it requests
1535 * handling of the command to the DAMON callback
1536 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
1537 * or the context is completed.
1539 * Return: 0 on success, negative error code otherwise.
1541 static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd
,
1542 struct damon_sysfs_kdamond
*kdamond
)
1544 bool need_wait
= true;
1546 /* Handle commands that doesn't access DAMON context-internal data */
1548 case DAMON_SYSFS_CMD_ON
:
1549 return damon_sysfs_turn_damon_on(kdamond
);
1550 case DAMON_SYSFS_CMD_OFF
:
1551 return damon_sysfs_turn_damon_off(kdamond
);
1556 /* Pass the command to DAMON callback for safe DAMON context access */
1557 if (damon_sysfs_cmd_request
.kdamond
)
1559 if (!damon_sysfs_kdamond_running(kdamond
))
1561 damon_sysfs_cmd_request
.cmd
= cmd
;
1562 damon_sysfs_cmd_request
.kdamond
= kdamond
;
1565 * wait until damon_sysfs_cmd_request_callback() handles the request
1566 * from kdamond context
1568 mutex_unlock(&damon_sysfs_lock
);
1570 schedule_timeout_idle(msecs_to_jiffies(100));
1571 if (!mutex_trylock(&damon_sysfs_lock
))
1573 if (!damon_sysfs_cmd_request
.kdamond
) {
1574 /* damon_sysfs_cmd_request_callback() handled */
1576 } else if (!damon_sysfs_kdamond_running(kdamond
)) {
1577 /* kdamond has already finished */
1579 damon_sysfs_cmd_request
.kdamond
= NULL
;
1581 mutex_unlock(&damon_sysfs_lock
);
1583 mutex_lock(&damon_sysfs_lock
);
1587 static ssize_t
state_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
1588 const char *buf
, size_t count
)
1590 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1591 struct damon_sysfs_kdamond
, kobj
);
1592 enum damon_sysfs_cmd cmd
;
1593 ssize_t ret
= -EINVAL
;
1595 if (!mutex_trylock(&damon_sysfs_lock
))
1597 for (cmd
= 0; cmd
< NR_DAMON_SYSFS_CMDS
; cmd
++) {
1598 if (sysfs_streq(buf
, damon_sysfs_cmd_strs
[cmd
])) {
1599 ret
= damon_sysfs_handle_cmd(cmd
, kdamond
);
1603 mutex_unlock(&damon_sysfs_lock
);
1609 static ssize_t
pid_show(struct kobject
*kobj
,
1610 struct kobj_attribute
*attr
, char *buf
)
1612 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1613 struct damon_sysfs_kdamond
, kobj
);
1614 struct damon_ctx
*ctx
;
1617 if (!mutex_trylock(&damon_sysfs_lock
))
1619 ctx
= kdamond
->damon_ctx
;
1623 mutex_lock(&ctx
->kdamond_lock
);
1625 pid
= ctx
->kdamond
->pid
;
1626 mutex_unlock(&ctx
->kdamond_lock
);
1628 mutex_unlock(&damon_sysfs_lock
);
1629 return sysfs_emit(buf
, "%d\n", pid
);
1632 static void damon_sysfs_kdamond_release(struct kobject
*kobj
)
1634 struct damon_sysfs_kdamond
*kdamond
= container_of(kobj
,
1635 struct damon_sysfs_kdamond
, kobj
);
1637 if (kdamond
->damon_ctx
)
1638 damon_destroy_ctx(kdamond
->damon_ctx
);
1642 static struct kobj_attribute damon_sysfs_kdamond_state_attr
=
1643 __ATTR_RW_MODE(state
, 0600);
1645 static struct kobj_attribute damon_sysfs_kdamond_pid_attr
=
1646 __ATTR_RO_MODE(pid
, 0400);
1648 static struct attribute
*damon_sysfs_kdamond_attrs
[] = {
1649 &damon_sysfs_kdamond_state_attr
.attr
,
1650 &damon_sysfs_kdamond_pid_attr
.attr
,
1653 ATTRIBUTE_GROUPS(damon_sysfs_kdamond
);
1655 static const struct kobj_type damon_sysfs_kdamond_ktype
= {
1656 .release
= damon_sysfs_kdamond_release
,
1657 .sysfs_ops
= &kobj_sysfs_ops
,
1658 .default_groups
= damon_sysfs_kdamond_groups
,
1662 * kdamonds directory
1665 struct damon_sysfs_kdamonds
{
1666 struct kobject kobj
;
1667 struct damon_sysfs_kdamond
**kdamonds_arr
;
1671 static struct damon_sysfs_kdamonds
*damon_sysfs_kdamonds_alloc(void)
1673 return kzalloc(sizeof(struct damon_sysfs_kdamonds
), GFP_KERNEL
);
1676 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds
*kdamonds
)
1678 struct damon_sysfs_kdamond
**kdamonds_arr
= kdamonds
->kdamonds_arr
;
1681 for (i
= 0; i
< kdamonds
->nr
; i
++) {
1682 damon_sysfs_kdamond_rm_dirs(kdamonds_arr
[i
]);
1683 kobject_put(&kdamonds_arr
[i
]->kobj
);
1686 kfree(kdamonds_arr
);
1687 kdamonds
->kdamonds_arr
= NULL
;
1690 static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond
**kdamonds
,
1695 for (i
= 0; i
< nr_kdamonds
; i
++) {
1696 if (damon_sysfs_kdamond_running(kdamonds
[i
]) ||
1697 damon_sysfs_cmd_request
.kdamond
== kdamonds
[i
])
1704 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds
*kdamonds
,
1707 struct damon_sysfs_kdamond
**kdamonds_arr
, *kdamond
;
1710 if (damon_sysfs_kdamonds_busy(kdamonds
->kdamonds_arr
, kdamonds
->nr
))
1713 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1717 kdamonds_arr
= kmalloc_array(nr_kdamonds
, sizeof(*kdamonds_arr
),
1718 GFP_KERNEL
| __GFP_NOWARN
);
1721 kdamonds
->kdamonds_arr
= kdamonds_arr
;
1723 for (i
= 0; i
< nr_kdamonds
; i
++) {
1724 kdamond
= damon_sysfs_kdamond_alloc();
1726 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1730 err
= kobject_init_and_add(&kdamond
->kobj
,
1731 &damon_sysfs_kdamond_ktype
, &kdamonds
->kobj
,
1736 err
= damon_sysfs_kdamond_add_dirs(kdamond
);
1740 kdamonds_arr
[i
] = kdamond
;
1746 damon_sysfs_kdamonds_rm_dirs(kdamonds
);
1747 kobject_put(&kdamond
->kobj
);
1751 static ssize_t
nr_kdamonds_show(struct kobject
*kobj
,
1752 struct kobj_attribute
*attr
, char *buf
)
1754 struct damon_sysfs_kdamonds
*kdamonds
= container_of(kobj
,
1755 struct damon_sysfs_kdamonds
, kobj
);
1757 return sysfs_emit(buf
, "%d\n", kdamonds
->nr
);
1760 static ssize_t
nr_kdamonds_store(struct kobject
*kobj
,
1761 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
1763 struct damon_sysfs_kdamonds
*kdamonds
;
1766 err
= kstrtoint(buf
, 0, &nr
);
1772 kdamonds
= container_of(kobj
, struct damon_sysfs_kdamonds
, kobj
);
1774 if (!mutex_trylock(&damon_sysfs_lock
))
1776 err
= damon_sysfs_kdamonds_add_dirs(kdamonds
, nr
);
1777 mutex_unlock(&damon_sysfs_lock
);
1784 static void damon_sysfs_kdamonds_release(struct kobject
*kobj
)
1786 kfree(container_of(kobj
, struct damon_sysfs_kdamonds
, kobj
));
1789 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr
=
1790 __ATTR_RW_MODE(nr_kdamonds
, 0600);
1792 static struct attribute
*damon_sysfs_kdamonds_attrs
[] = {
1793 &damon_sysfs_kdamonds_nr_attr
.attr
,
1796 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds
);
1798 static const struct kobj_type damon_sysfs_kdamonds_ktype
= {
1799 .release
= damon_sysfs_kdamonds_release
,
1800 .sysfs_ops
= &kobj_sysfs_ops
,
1801 .default_groups
= damon_sysfs_kdamonds_groups
,
1805 * damon user interface directory
1808 struct damon_sysfs_ui_dir
{
1809 struct kobject kobj
;
1810 struct damon_sysfs_kdamonds
*kdamonds
;
1813 static struct damon_sysfs_ui_dir
*damon_sysfs_ui_dir_alloc(void)
1815 return kzalloc(sizeof(struct damon_sysfs_ui_dir
), GFP_KERNEL
);
1818 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir
*ui_dir
)
1820 struct damon_sysfs_kdamonds
*kdamonds
;
1823 kdamonds
= damon_sysfs_kdamonds_alloc();
1827 err
= kobject_init_and_add(&kdamonds
->kobj
,
1828 &damon_sysfs_kdamonds_ktype
, &ui_dir
->kobj
,
1831 kobject_put(&kdamonds
->kobj
);
1834 ui_dir
->kdamonds
= kdamonds
;
1838 static void damon_sysfs_ui_dir_release(struct kobject
*kobj
)
1840 kfree(container_of(kobj
, struct damon_sysfs_ui_dir
, kobj
));
1843 static struct attribute
*damon_sysfs_ui_dir_attrs
[] = {
1846 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir
);
1848 static const struct kobj_type damon_sysfs_ui_dir_ktype
= {
1849 .release
= damon_sysfs_ui_dir_release
,
1850 .sysfs_ops
= &kobj_sysfs_ops
,
1851 .default_groups
= damon_sysfs_ui_dir_groups
,
1854 static int __init
damon_sysfs_init(void)
1856 struct kobject
*damon_sysfs_root
;
1857 struct damon_sysfs_ui_dir
*admin
;
1860 damon_sysfs_root
= kobject_create_and_add("damon", mm_kobj
);
1861 if (!damon_sysfs_root
)
1864 admin
= damon_sysfs_ui_dir_alloc();
1866 kobject_put(damon_sysfs_root
);
1869 err
= kobject_init_and_add(&admin
->kobj
, &damon_sysfs_ui_dir_ktype
,
1870 damon_sysfs_root
, "admin");
1873 err
= damon_sysfs_ui_dir_add_dirs(admin
);
1879 kobject_put(&admin
->kobj
);
1880 kobject_put(damon_sysfs_root
);
1883 subsys_initcall(damon_sysfs_init
);
1885 #include "tests/sysfs-kunit.h"