// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"
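
/*
 * The KUnit tests in tests/vaddr-kunit.h exercise the region construction and
 * splitting helpers with small, artificial address ranges, so the minimum
 * region size is relaxed to a single byte when the tests are built in.
 */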
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}
/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}
/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;
	unsigned int i;

	if (!r || !nr_pieces)
		return -EINVAL;

	if (nr_pieces == 1)
		return 0;

	orig_end = r->ar.end;
	sz_orig = damon_sz_region(r);
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}
/*
 * Find three regions separated by two biggest unmapped regions
 *
 * mm		'mm_struct' of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below for why this is
 * necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range first_gap = {0}, second_gap = {0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	unsigned long start;

	/*
	 * Find the two biggest gaps so that first_gap > second_gap > others.
	 * If this is too slow, it can be optimised to examine the maple
	 * tree gaps.
	 */
	rcu_read_lock();
	for_each_vma(vmi, vma) {
		unsigned long gap;

		if (!prev) {
			start = vma->vm_start;
			goto next;
		}
		gap = vma->vm_start - prev->vm_end;

		if (gap > sz_range(&first_gap)) {
			second_gap = first_gap;
			first_gap.start = prev->vm_end;
			first_gap.end = vma->vm_start;
		} else if (gap > sz_range(&second_gap)) {
			second_gap.start = prev->vm_end;
			second_gap.end = vma->vm_start;
		}
next:
		prev = vma;
	}
	rcu_read_unlock();

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

	return 0;
}
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}
/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only small portions of the entire address space are actually mapped
 * to memory and accessed, monitoring the unmapped regions is wasteful.  On the
 * other hand, because we can tolerate small noise, tracking every mapping is
 * not strictly required, and doing so could even incur a high overhead if the
 * mappings change frequently or the number of mappings is high.  The adaptive
 * regions adjustment mechanism further helps to deal with the noise by simply
 * identifying unmapped areas as regions that show no access.  Moreover, using
 * the real mappings, which would contain many unmapped areas, would make the
 * adaptive mechanism quite complex.  Nonetheless, overly large unmapped areas
 * inside the monitoring target should be removed so that the adaptive
 * mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings into three distinct regions
 * that together cover every mapped area of the address space.  The two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of a process looks like the below, the gap between
 * the heap and the uppermost mmap()-ed region, and the gap between the
 * lowermost mmap()-ed region and the stack, are usually the two biggest
 * unmapped regions.  Because these gaps are exceptionally huge in usual
 * address spaces, excluding only these two is a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}
/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}
/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}
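
/*
 * mm_walk callback for damon_va_mkold(): clear the accessed bit of the page
 * table entry (PMD for a transparent huge page, PTE otherwise) that maps the
 * sampling address, so that the following access check can detect whether the
 * page was touched again.
 */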
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pmd_t pmde;
	spinlock_t *ptl;

	if (pmd_trans_huge(pmdp_get(pmd))) {
		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(pmde)) {
			damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	if (!pte_present(ptep_get(pte)))
		goto out;
	damon_ptep_mkold(pte, walk->vma, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
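
/* Same accessed-bit clearing, for hugetlb mappings. */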
#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(mm, addr, pte);
	struct folio *folio = pfn_folio(pte_pfn(entry));
	unsigned long psize = huge_page_size(hstate_vma(vma));

	folio_get(folio);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry, psize);
	}

	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;

	if (referenced)
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
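
/*
 * Page table walk callbacks used by damon_va_mkold().  The walk covers only
 * the single sampling address and runs with the mmap read lock held
 * (PGWALK_RDLOCK).
 */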
static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}
/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
					struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}
static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(mm, r);
		mmput(mm);
	}
}
struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};
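
/*
 * mm_walk callback for damon_va_young(): check whether the PMD/PTE mapping
 * the sampling address was accessed since the last damon_va_mkold(), and
 * report the result and the mapped folio size via damon_young_walk_private.
 */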
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pte_t ptent;
	spinlock_t *ptl;
	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdp_get(pmd))) {
		pmd_t pmde;

		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(pmde)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		folio = damon_get_folio(pmd_pfn(pmde));
		if (!folio)
			goto huge_out;
		if (pmd_young(pmde) || !folio_test_idle(folio) ||
					mmu_notifier_test_young(walk->mm,
						addr))
			priv->young = true;
		*priv->folio_sz = HPAGE_PMD_SIZE;
		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	ptent = ptep_get(pte);
	if (!pte_present(ptent))
		goto out;
	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		goto out;
	if (pte_young(ptent) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = folio_size(folio);
	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	folio = pfn_folio(pte_pfn(entry));
	folio_get(folio);

	if (pte_young(entry) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = huge_page_size(h);

	folio_put(folio);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}
/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target,
				struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	if (!mm) {
		damon_update_region_access_rate(r, false, attrs);
		return;
	}

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target,
					&ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		if (mm)
			mmput(mm);
	}

	return max_nr_accesses;
}
/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(damon_sz_region(r));
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */
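
/*
 * Apply the given DAMOS scheme action to the region by translating it to the
 * matching madvise() behavior.  Returns the number of bytes the action was
 * applied to.
 */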
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		/*
		 * DAMOS actions that are not yet supported by 'vaddr'.
		 */
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}
static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);
#include "tests/vaddr-kunit.h"