/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>
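
/* Test creation, addition, and destruction of DAMON regions. */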
static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}
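
/* Return the number of monitoring targets of the given context. */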
static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}
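
/* Test addition and destruction of DAMON monitoring targets. */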
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks access to each region and aggregates this information as the
 * access frequency of each region.  In detail, it increases '->nr_accesses'
 * of regions that an access has confirmed.  'kdamond_reset_aggregated()'
 * flushes the aggregated information ('->nr_accesses' of each region) to the
 * result buffer.  As a result of the flushing, the '->nr_accesses' of regions
 * are initialized to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}
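
/*
 * Test damon_split_region_at(): the two resulting regions should cover the
 * original address range and inherit its access statistics.
 */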
static void damon_test_split_at(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);

	damon_free_target(t);
	damon_destroy_ctx(c);
}
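
/*
 * Test damon_merge_two_regions(): the two regions should be merged into one
 * having the size-weighted average of their access frequencies.
 */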
static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}
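
/* Return the @idx-th region of @t, or NULL if there is no such region. */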
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}
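
/*
 * Test damon_merge_regions_of(): adjacent regions with access frequency
 * differences not larger than the given threshold should be merged.
 */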
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}
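
/*
 * Test damon_split_regions_of(): each region should be split into at most
 * the given number of sub-regions.
 */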
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 22);
	damon_add_region(r, t);
	damon_split_regions_of(t, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	r = damon_new_region(0, 220);
	damon_add_region(r, t);
	damon_split_regions_of(t, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
	damon_destroy_ctx(c);
}
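
/*
 * Test monitoring operations set registration and selection, including
 * double-registration and unknown-id failures.
 */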
static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
				(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}
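
/*
 * Test damon_set_regions(): regions should be trimmed and gaps filled so
 * that the result exactly covers the requested address range.
 */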
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1 = damon_new_region(4, 16);
	struct damon_region *r2 = damon_new_region(24, 32);
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t);
}
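
/* Test damon_nr_accesses_to_accesses_bp() with a huge aggregation interval. */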
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero.  Calling
	 * damon_nr_accesses_to_accesses_bp() in the case will cause
	 * divide-by-zero.  Such case is prohibited in normal execution since
	 * the caution is documented on the comment for the function, and
	 * damon_update_monitoring_results() does the check.  Skip the test in
	 * the case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
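
/*
 * Test damon_update_monitoring_result(): access rates and the age of a
 * region should be rescaled for new sampling/aggregation intervals.
 */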
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}
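
/* Test damon_set_attrs() rejecting invalid monitoring attributes. */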
static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}
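
/*
 * Test damon_moving_sum(): each call should decay the moving sum by
 * nomvsum / len_window and add the new value.
 */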
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
		45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}
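
/* Test creation and initialization of a DAMOS filter. */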
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}
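
/*
 * Test __damos_filter_out() with an address range filter: regions fully
 * inside the range should be filtered out, regions fully outside should not,
 * and regions crossing a boundary of the range should be split at it first.
 */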
static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
	f->addr_range = (struct damon_addr_range){
		.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};

	t = damon_new_target();
	r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 2;
	KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = DAMON_MIN_REGION * 6;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 4;
	KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = DAMON_MIN_REGION * 2;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}
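
/*
 * Test damon_feed_loop_next_input(): the feedback loop should increase the
 * next input when the score is below the goal, and decrease it when above.
 */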
static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If current score is lower than the goal, which is always 10,000
	 * (read the comment of damon_feed_loop_next_input()), next input
	 * should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If current score is higher than the goal, next input should be lower
	 * than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score and
	 * the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif	/* _DAMON_CORE_TEST_H */

#endif	/* CONFIG_DAMON_KUNIT_TEST */