// SPDX-License-Identifier: GPL-2.0
 * Author: SeongJae Park <sj@kernel.org>
#define pr_fmt(fmt) "damon: " fmt
#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>
#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;
static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
static struct kmem_cache *damon_region_cache __ro_after_init;
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
	struct damon_operations empty_ops = {};
	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id: Id of the damon_operations to check if registered.
 * Return: true if the ops is set, false otherwise.
bool damon_is_registered_ops(enum damon_ops_id id)
	if (id >= NR_DAMON_OPS)
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops: monitoring operations set to register.
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 * Return: 0 on success, negative error code otherwise.
int damon_register_ops(struct damon_operations *ops)
	if (ops->id >= NR_DAMON_OPS)
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
	damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx: monitoring context to use the operations.
 * @id: id of the registered monitoring operations to select.
 * This function finds the registered monitoring operations set of @id and makes
 * Return: 0 on success, negative error code otherwise.
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
	if (id >= NR_DAMON_OPS)
	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
	ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
 * Construct a damon_region struct
 * Returns the pointer to the new struct if success, or NULL otherwise
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
	struct damon_region *region;
	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);
	region->last_nr_accesses = 0;
void damon_add_region(struct damon_region *r, struct damon_target *t)
	list_add_tail(&r->list, &t->regions_list);
static void damon_del_region(struct damon_region *r, struct damon_target *t)
static void damon_free_region(struct damon_region *r)
	kmem_cache_free(damon_region_cache, r);
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
	damon_del_region(r, t);
	damon_free_region(r);
 * Check whether a region is intersecting an address range
 * Returns true if it is.
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
 * Fill holes in regions with new regions.
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
	struct damon_region *r = first;
	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			damon_insert_region(newr, r, next, t);
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t: the given target.
 * @ranges: array of new monitoring target ranges.
 * @nr_ranges: length of @ranges.
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 * Return: 0 if success, or negative error code otherwise.
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
	struct damon_region *r, *next;
	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
			damon_destroy_region(r, t);
	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
			if (r->ar.start >= range->end)
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
					ALIGN(range->end, DAMON_MIN_REGION));
			damon_insert_region(newr, damon_prev_region(r), r, t);
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
	struct damos_filter *filter;
	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
void damos_add_filter(struct damos *s, struct damos_filter *f)
	list_add_tail(&f->list, &s->filters);
static void damos_del_filter(struct damos_filter *f)
static void damos_free_filter(struct damos_filter *f)
void damos_destroy_filter(struct damos_filter *f)
	damos_free_filter(f);
struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
	struct damos_quota_goal *goal;
	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
	list_add_tail(&g->list, &q->goals);
static void damos_del_quota_goal(struct damos_quota_goal *g)
static void damos_free_quota_goal(struct damos_quota_goal *g)
void damos_destroy_quota_goal(struct damos_quota_goal *g)
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
	struct damos *scheme;
	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	scheme->next_apply_sis = 0;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);
	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);
	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;
	scheme->target_nid = target_nid;
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;
	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
static void damon_del_scheme(struct damos *s)
static void damon_free_scheme(struct damos *s)
void damon_destroy_scheme(struct damos *s)
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;
	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);
	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_free_scheme(s);
 * Construct a damon_target struct
 * Returns the pointer to the new struct if success, or NULL otherwise
struct damon_target *damon_new_target(void)
	struct damon_target *t;
	t = kmalloc(sizeof(*t), GFP_KERNEL);
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
	list_add_tail(&t->list, &ctx->adaptive_targets);
bool damon_targets_empty(struct damon_ctx *ctx)
	return list_empty(&ctx->adaptive_targets);
static void damon_del_target(struct damon_target *t)
void damon_free_target(struct damon_target *t)
	struct damon_region *r, *next;
	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
void damon_destroy_target(struct damon_target *t)
	damon_free_target(t);
unsigned int damon_nr_regions(struct damon_target *t)
	return t->nr_regions;
struct damon_ctx *damon_new_ctx(void)
	struct damon_ctx *ctx;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	init_completion(&ctx->kdamond_started);
	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;
	mutex_init(&ctx->kdamond_lock);
	mutex_init(&ctx->call_control_lock);
	mutex_init(&ctx->walk_control_lock);
	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;
	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);
static void damon_destroy_targets(struct damon_ctx *ctx)
	struct damon_target *t, *next_t;
	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
void damon_destroy_ctx(struct damon_ctx *ctx)
	struct damos *s, *next_s;
	damon_destroy_targets(ctx);
	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);
static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
 * Convert nr_accesses to access ratio in bp (per 10,000).
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->nr_accesses_bp = r->nr_accesses * 10000;
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval that access to the region has found, and region->age is
 * the number of aggregation intervals that its access pattern has maintained.
 * For this reason, the real meaning of the two fields depends on the current
 * sampling interval and aggregation interval.  This function updates
 * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;
	/* if any interval is zero, simply forgive conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
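/*
 * Illustrative arithmetic for the conversion above (hypothetical numbers,
 * not from the source): with sample_interval = 5 ms and aggr_interval =
 * 100 ms, a region can be found accessed in at most 20 sampling intervals
 * per aggregation interval.  If new attrs double aggr_interval to 200 ms,
 * damon_max_nr_accesses() becomes 40, so a region with nr_accesses = 10
 * (50% of the old maximum) is rescaled to 10 * 10000 / 20 = 5000 bp and
 * then back to 5000 * 40 / 10000 = 20, and its age in aggregation
 * intervals is halved by damon_age_for_new_attrs().
 */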
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx: monitoring context
 * @attrs: monitoring attributes
 * This function should be called while the kdamond is not running, or an
 * access check results aggregation is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 * Every time interval is in micro-seconds.
 * Return: 0 on success, negative error code otherwise.
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	if (attrs->min_nr_regions < 3)
	if (attrs->min_nr_regions > attrs->max_nr_regions)
	if (attrs->sample_interval > attrs->aggr_interval)
	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;
	damon_update_monitoring_results(ctx, attrs);
	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx: monitoring context
 * @schemes: array of the schemes
 * @nr_schemes: number of entries in @schemes
 * This function should not be called while the kdamond of the context is
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
	struct damos *s, *next;
	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
	struct damos_quota_goal *goal;
	damos_for_each_quota_goal(goal, q) {
static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst: The commit destination DAMOS quota.
 * @src: The commit source DAMOS quota.
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals-level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 * This function should be called from parameters-update safe context, like
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
			damos_commit_quota_goal(dst_goal, src_goal);
		damos_destroy_quota_goal(dst_goal);
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		damos_add_quota_goal(dst, new_goal);
static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
	dst->reset_interval = src->reset_interval;
	err = damos_commit_quota_goals(dst, src);
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
static struct damos_filter *damos_nth_filter(int n, struct damos *s)
	struct damos_filter *filter;
	damos_for_each_filter(filter, s) {
static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
	dst->type = src->type;
	dst->matching = src->matching;
	damos_commit_filter_arg(dst, src);
static int damos_commit_filters(struct damos *dst, struct damos *src)
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	damos_for_each_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_filter(i++, src);
			damos_commit_filter(dst_filter, src_filter);
		damos_destroy_filter(dst_filter);
	damos_for_each_filter_safe(src_filter, next, src) {
		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
	damon_for_each_scheme(s, ctx) {
static int damos_commit(struct damos *dst, struct damos *src)
	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;
	err = damos_commit_quota(&dst->quota, &src->quota);
	dst->wmarks = src->wmarks;
	err = damos_commit_filters(dst, src);
static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;
	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
			err = damos_commit(dst_scheme, src_scheme);
		damon_destroy_scheme(dst_scheme);
	damon_for_each_scheme_safe(src_scheme, next, src) {
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
		err = damos_commit(new_scheme, src_scheme);
			damon_destroy_scheme(new_scheme);
		damon_add_scheme(dst, new_scheme);
static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
	struct damon_target *t;
	damon_for_each_target(t, ctx) {
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 * If @src has no region, @dst keeps current regions.
static int damon_commit_target_regions(
		struct damon_target *dst, struct damon_target *src)
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	damon_for_each_region(src_region, src)
	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i);
static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid)
	err = damon_commit_target_regions(dst, src);
static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;
	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src));
			if (damon_target_has_pid(dst))
				put_pid(dst_target->pid);
			damon_destroy_target(dst_target);
	damon_for_each_target_safe(src_target, next, src) {
		new_target = damon_new_target();
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src));
			damon_destroy_target(new_target);
		damon_add_target(dst, new_target);
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst: The commit destination DAMON context.
 * @src: The commit source DAMON context.
 * This function copies user-specified parameters from @src to @dst and updates
 * the internal status and results accordingly.  Users should use this function
 * for context-level parameters update of a running context, instead of manual
 * This function should be called from parameters-update safe context, like
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
	err = damon_commit_schemes(dst, src);
	err = damon_commit_targets(dst, src);
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing requires putting pids).
	err = damon_set_attrs(dst, &src->attrs);
	dst->ops = src->ops;
 * damon_nr_running_ctxs() - Return number of currently running contexts.
int damon_nr_running_ctxs(void)
	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);
/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;
	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;
static int kdamond_fn(void *data);
 * __damon_start() - Starts monitoring with given context.
 * @ctx: monitoring context
 * This function should be called while damon_lock is held.
 * Return: 0 on success, negative error code otherwise.
static int __damon_start(struct damon_ctx *ctx)
	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
			wait_for_completion(&ctx->kdamond_started);
	mutex_unlock(&ctx->kdamond_lock);
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to start monitoring
 * @nr_ctxs: size of @ctxs
 * @exclusive: exclusiveness of this contexts group
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * Return: 0 on success, negative error code otherwise.
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);
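/*
 * Illustrative sketch (not from the kernel tree): a minimal start/stop
 * sequence for one context.  Error handling is elided and the ops id is an
 * assumption; see damon_select_ops() for the registered ids.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */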
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx: monitoring context
 * Return: 0 on success, negative error code otherwise.
static int __damon_stop(struct damon_ctx *ctx)
	struct task_struct *tsk;
	mutex_lock(&ctx->kdamond_lock);
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
	mutex_unlock(&ctx->kdamond_lock);
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to stop monitoring
 * @nr_ctxs: size of @ctxs
 * Return: 0 on success, negative error code otherwise.
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
static bool damon_is_running(struct damon_ctx *ctx)
	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx: DAMON context to call the function for.
 * @control: Control variable of the call request.
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control, and wait until the kdamond finishes
 * handling of the request.
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 * Return: 0 on success, negative error code otherwise.
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->call_control_lock);
	if (ctx->call_control) {
		mutex_unlock(&ctx->call_control_lock);
	ctx->call_control = control;
	mutex_unlock(&ctx->call_control_lock);
	if (!damon_is_running(ctx))
	wait_for_completion(&control->completion);
	if (control->canceled)
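/*
 * Illustrative sketch (not from the kernel tree): asking the kdamond to run
 * a function in its context.  The callback and its data are hypothetical;
 * only the fields used by damon_call() and kdamond_call() are assumed.
 *
 *	static int dump_nr_regions(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *
 *		damon_for_each_target(t, ctx)
 *			pr_info("%u regions\n", damon_nr_regions(t));
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = dump_nr_regions,
 *		.data = ctx,
 *	};
 *
 *	damon_call(ctx, &control);
 */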
 * damos_walk() - Invoke a given function while DAMOS walks regions.
 * @ctx: DAMON context to call the functions for.
 * @control: Control variable of the walk request.
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
 * that the kdamond will apply DAMOS action to, and wait until the kdamond
 * finishes handling of the request.
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of &struct damon_ctx and &struct damon_region that
 * each of the schemes will apply the action to in the next interval, without
 * additional synchronizations against the kdamond.  If every scheme of @ctx
 * passed at least one &damos->apply_interval_us, kdamond marks the request as
 * completed so that damos_walk() can wake up and return.
 * Return: 0 on success, negative error code otherwise.
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx))
	wait_for_completion(&control->completion);
	if (control->canceled)
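/*
 * Illustrative sketch (not from the kernel tree): walking the regions that
 * the schemes will apply their actions to.  The walk_fn signature follows
 * the damos_walk_call_walk() invocation below; the accounting done in the
 * callback is a hypothetical example.
 *
 *	static void count_bytes(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		*(unsigned long *)data += damon_sz_region(r);
 *	}
 *
 *	unsigned long total = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = count_bytes,
 *		.data = &total,
 *	};
 *
 *	damos_walk(ctx, &control);
 */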
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
static void kdamond_reset_aggregated(struct damon_ctx *c)
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */
	damon_for_each_target(t, c) {
		struct damon_region *r;
		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
	bool ret = __damos_valid_target(r, s);
	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t: The target of the region.
 * @rp: The pointer to the region.
 * @s: The scheme to be applied.
 * If a quota of a scheme has exceeded in a quota charge window, the scheme's
 * action would be applied to only a part of the target access pattern
 * fulfilling regions.  To avoid applying the scheme action to only already
 * applied regions, DAMON skips applying the scheme action to the regions that
 * were charged in the previous charge window.
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window and
 * saves the second region in *rp and returns false, so that the caller can
 * apply DAMON action to the second one.
 * Return: true if the region should be entirely skipped, false otherwise.
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;
	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (damon_sz_region(r) <= DAMON_MIN_REGION)
				sz_to_skip = DAMON_MIN_REGION;
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied,
		unsigned long sz_ops_filter_passed)
	s->stat.sz_tried += sz_tried;
	s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
	bool matched = false;
	struct damon_target *ti;
	unsigned long start, end;
	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
		matched = target_idx == filter->target_idx;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
	return matched == filter->matching;
static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
	struct damos_filter *filter;
	damos_for_each_filter(filter, s) {
		if (damos_filter_match(ctx, t, r, filter))
			return !filter->allow;
 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
 * @ctx: The context of &damon_ctx->walk_control.
 * @t: The monitoring target of @r that @s will be applied to.
 * @r: The region of @t that @s will be applied to.
 * @s: The scheme of @ctx that will be applied to @r.
 * This function is called from kdamond whenever it asked the operation set to
 * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
 * installed by damos_walk() and not yet uninstalled, invoke it.
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s,
		unsigned long sz_filter_passed)
	struct damos_walk_control *control;
	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);
	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx: The context of &damon_ctx->walk_control.
 * @s: A scheme of @ctx that all walks are now done.
 * This function is called when kdamond finished applying the action of a DAMOS
 * scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
	struct damos *siter;
	struct damos_walk_control *control;
	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);
	s->walk_completed = true;
	/* if all schemes completed, signal completion to walker */
	damon_for_each_scheme(siter, ctx) {
		if (!siter->walk_completed)
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx: The context of &damon_ctx->walk_control.
 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
 * is already out of the main loop and therefore going to be terminated, and
 * hence cannot continue the walks.  This function therefore marks the walk
 * request as canceled, so that damos_walk() can wake up and return.
static void damos_walk_cancel(struct damon_ctx *ctx)
	struct damos_walk_control *control;
	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);
	control->canceled = true;
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	 * We plan to support multiple contexts per kdamond, as DAMON sysfs
	 * implies with 'nr_contexts' file.  Nevertheless, only a single context
	 * per kdamond is supported for now.  So, we can simply use '0' context
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;
	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
		damon_for_each_target(titer, c) {
	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
			damon_split_region_at(t, r, sz);
		if (damos_filter_out(c, t, r, s))
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
		sz_applied = c->ops.apply_scheme(c, t, r, s,
				&sz_ops_filter_passed);
		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
	if (s->action != DAMOS_STAT)
	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		if (c->passed_sample_intervals < s->next_apply_sis)
		if (!s->wmarks.activated)
		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
		if (damos_skip_charged_region(t, &r, s))
		if (!damos_valid_target(c, t, r, s))
		damos_apply_scheme(c, t, r, s);
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input	The last input.
 * @score	Current score that was made with @last_input.
 * Calculate next input to achieve the target score, based on the last input
 * and current score.  Assuming the input and the score are positively
 * proportional, calculate how much compensation should be added to or
 * subtracted from the last input as a proportion of the last input.  Avoid
 * next input always being zero by setting it non-zero always.  In short form
 * (assuming support of float and signed calculations), the algorithm is as
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 * For simple implementation, we assume the target score is always 10,000.  The
 * caller should adjust @score for this.
 * Returns next input that is assumed to achieve the target score.
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
	const unsigned long goal = 10000;
	/* Set minimum input as 10000 to avoid the compensation being zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;
	if (score >= goal * 2)
		score_goal_diff = score - goal;
		score_goal_diff = goal - score;
	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
		compensation = last_input / goal * score_goal_diff;
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
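/*
 * Illustrative arithmetic for the feedback loop above (hypothetical numbers,
 * not from the source): with last_input = 1000000 and score = 5000 (half of
 * the 10,000 goal), score_goal_diff = 5000, compensation = 1000000 * 5000 /
 * 10000 = 500000, and the under-achieving branch returns 1000000 + 500000 =
 * 1500000.  With score = 12500, score_goal_diff = 2500, compensation =
 * 250000, and the over-achieving branch returns max(1000000 - 250000, 10000)
 * = 750000.
 */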
static u64 damos_get_some_mem_psi_total(void)
	if (static_branch_likely(&psi_disabled))
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
#else	/* CONFIG_PSI */
static inline u64 damos_get_some_mem_psi_total(void)
#endif	/* CONFIG_PSI */
static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;
	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	return highest_score;
 * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
static void damos_set_effective_quota(struct damos_quota *quota)
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;
	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);
		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
		esz = quota->esz_bp / 10000;
	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
		throughput = PAGE_SIZE * 1024;
	esz = min(throughput * quota->ms, esz);
	if (quota->sz && quota->sz < esz)
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;
	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	if (!c->ops.get_scheme_score)
	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
			if (score > max_score)
	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
	quota->min_score = score;
static void kdamond_apply_schemes(struct damon_ctx *c)
	struct damon_target *t;
	struct damon_region *r, *next_r;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;
	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
		if (!s->wmarks.activated)
		has_schemes_to_apply = true;
		damos_adjust_quota(c, s);
	if (!has_schemes_to_apply)
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			c->attrs.aggr_interval) / sample_interval;
 * Merge two adjacent regions into one region
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
 * Merge adjacent regions having similar access frequencies
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
	struct damon_region *r, *prev = NULL, *next;
	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
 * Merge adjacent regions having similar access frequencies
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, for some cases.  For example, the user can update
 * max_nr_regions to a number that is lower than the current number of regions
 * while DAMON is running.  For such a case, repeat merging until the limit is
 * met while increasing @threshold up to the possible maximum level.
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;
	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
 * Split a region in two
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
	struct damon_region *new;
	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	r->ar.end = new->ar.start;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;
	damon_insert_region(new, r, damon_next_region(r), t);
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);
		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
			damon_split_region_at(t, r, sz_sub);
 * Split every target region into randomly-sized small regions
 * This function splits every target region into random-sized small regions if
 * current total number of the regions is equal or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
static void kdamond_split_regions(struct damon_ctx *ctx)
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;
	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);
	if (nr_regions > ctx->attrs.max_nr_regions / 2)
	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);
	last_nr_regions = nr_regions;
 * Check whether current monitoring should be stopped
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 * Returns true if the current monitoring needs to be stopped.
static bool kdamond_need_stop(struct damon_ctx *ctx)
	struct damon_target *t;
	if (kthread_should_stop())
	if (!ctx->ops.target_valid)
	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
static unsigned long damos_wmark_wait_us(struct damos *scheme)
	unsigned long metric;
	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;
	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
static void kdamond_usleep(unsigned long usecs)
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
		usleep_range_idle(usecs, usecs + 1);
 * kdamond_call() - handle damon_call_control.
 * @ctx: The &struct damon_ctx of the kdamond.
 * @cancel: Whether to cancel the invocation of the function.
 * If there is a &struct damon_call_control request that is registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function depending
 * on @cancel.  @cancel is set when the kdamond is deactivated by DAMOS
 * watermarks, or the kdamond is already out of the main loop and therefore
 * will be terminated.
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
	struct damon_call_control *control;
	mutex_lock(&ctx->call_control_lock);
	control = ctx->call_control;
	mutex_unlock(&ctx->call_control_lock);
		control->canceled = true;
		ret = control->fn(control->data);
		control->return_code = ret;
	complete(&control->completion);
	mutex_lock(&ctx->call_control_lock);
	ctx->call_control = NULL;
	mutex_unlock(&ctx->call_control_lock);
/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;
	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
		kdamond_usleep(min_wait_time);
		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
		kdamond_call(ctx, true);
		damos_walk_cancel(ctx);
static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;
	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
 * The monitoring daemon that runs as a kernel thread
static int kdamond_fn(void *data)
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;
	pr_debug("kdamond (%d) starts\n", current->pid);
	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
	sz_limit = damon_region_sz_limit(ctx);
	while (!kdamond_need_stop(ctx)) {
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, new values updated by damon_set_attrs()
		 * are respected from the next iteration.
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;
		if (kdamond_wait_activation(ctx))
		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
		kdamond_call(ctx, false);
		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;
		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
			damos_walk_cancel(ctx);
		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);
	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);
	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);
	mutex_lock(&damon_lock);
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);
/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};
static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}
/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}
/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
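/*
 * Illustrative sketch (not compiled): a physical address space setup could use
 * the function above roughly like below, passing zeroed addresses to let it
 * pick the biggest 'System RAM' resource.  The variable names are
 * hypothetical.
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(target, &start, &end);
 *	if (!err)
 *		pr_info("monitoring [%lu, %lu)\n", start, end);
 */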
/**
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.  This
 * function implements a lightweight pseudo moving sum function that doesn't
 * keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and we
 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values.  Hence, this function simply drops @nomvsum / @len_window from the
 * given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got value 5 for
 * each of the last ten times.  Based on the assumption, when the next value is
 * measured, it drops the assumed past value, 5, from the current sum, and adds
 * the new value to get the updated pseudo-moving average.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window aligned calls.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th to the 19th value would have an error, but the sum
 * with the 20th value will not have the error.
 *
 * Return: Pseudo-moving average after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
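/*
 * Worked example for the formula above, with assumed numbers: if @len_window
 * is 10, @mvsum is 55, @nomvsum is 50 and @new_value is 20, the result is
 * 55 - 50 / 10 + 20 = 70.  The assumed per-slot past value that gets dropped
 * is 50 / 10 = 5, regardless of how the actual past values were distributed.
 */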
/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region has been accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}
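/*
 * Illustrative numbers for the update above, under the assumed attributes
 * sample_interval=5000 us and aggr_interval=100000 us, for which
 * damon_max_nr_accesses() gives len_window = 20.  For a region with
 * last_nr_accesses=10 and nr_accesses_bp=100000 that was found accessed:
 *
 *	nr_accesses_bp = 100000 - (10 * 10000) / 20 + 10000 = 105000
 *
 * The new sample adds 10000 while the assumed past per-sample contribution,
 * (10 * 10000) / 20 = 5000, is dropped.  The numbers are examples only.
 */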
static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#ifdef CONFIG_DAMON_KUNIT_TEST
#include "tests/core-kunit.h"
#endif