mm/damon/paddr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

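/*
 * Pick a random physical address in the region as its sampling address and
 * clear the accessed bits of the folio backing it, so the following access
 * check can tell whether the region was touched during the sampling interval.
 */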
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

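/*
 * rmap_walk() callback: report via @arg whether any mapping of the folio
 * shows an access, i.e., a young PTE/PMD, a cleared page-idle bit, or a
 * positive mmu_notifier_test_young() result.
 */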
static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			return false;
		else
			return true;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

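/*
 * Check whether the folio containing the region's sampling address was
 * accessed, and update the region's access rate.  The result for the last
 * checked folio is cached in the static variables below, so regions whose
 * sampling addresses fall in the same folio reuse it instead of repeating
 * the rmap walk.
 */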
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

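/*
 * Return whether the folio satisfies the filter: the type-specific check
 * (anon, memcg, or young) must agree with the filter's 'matching' setting.
 */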
static bool damos_pa_filter_match(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (damos_pa_filter_match(filter, folio))
			return !filter->allow;
	}
	return false;
}

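/*
 * DAMOS_PAGEOUT: isolate the filter-passing folios of the region and reclaim
 * them via reclaim_pages().  Unless the scheme already has a 'young' filter,
 * one is installed for the duration of the call so that folios accessed after
 * the region-level decision are skipped.
 */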
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;

	/* check access in page level again by default */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

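/*
 * Common implementation of DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO: mark every
 * filter-passing folio in the region as accessed, or deactivate it, and
 * return the total size of the handled folios in bytes.
 */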
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false,
			sz_filter_passed);
}

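/*
 * Migrate the already-isolated folios on @migrate_folios to @target_nid.  The
 * allocation mask is chosen to fail quickly and quietly instead of triggering
 * reclaim on the target node.  Returns the number of successful migrations.
 */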
static unsigned int __damon_pa_migrate_folio_list(
		struct list_head *migrate_folios, struct pglist_data *pgdat,
		int target_nid)
{
	unsigned int nr_succeeded = 0;
	nodemask_t allowed_mask = NODE_MASK_NONE;
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
		return 0;

	if (list_empty(migrate_folios))
		return 0;

	/* Migration ignores all cpuset and mempolicy settings */
	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
		      &nr_succeeded);

	return nr_succeeded;
}

static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
						struct pglist_data *pgdat,
						int target_nid)
{
	unsigned int nr_migrated = 0;
	struct folio *folio;
	LIST_HEAD(ret_folios);
	LIST_HEAD(migrate_folios);

	while (!list_empty(folio_list)) {
		struct folio *folio;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		/* Relocate its contents to another node. */
		list_add(&folio->lru, &migrate_folios);
		folio_unlock(folio);
		continue;
keep:
		list_add(&folio->lru, &ret_folios);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for migration */
	nr_migrated += __damon_pa_migrate_folio_list(
			&migrate_folios, pgdat, target_nid);
	/*
	 * Folios that could not be migrated are still in @migrate_folios. Add
	 * those back on @folio_list
	 */
	if (!list_empty(&migrate_folios))
		list_splice_init(&migrate_folios, folio_list);

	try_to_unmap_flush();

	list_splice(&ret_folios, folio_list);

	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
		folio_putback_lru(folio);
	}

	return nr_migrated;
}

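/*
 * Migrate the folios on @folio_list to @target_nid, batching folios that
 * currently reside on the same node so each batch shares one source pgdat.
 * Reclaim is suppressed for the whole operation via memalloc_noreclaim_save().
 */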
static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
					    int target_nid)
{
	int nid;
	unsigned long nr_migrated = 0;
	LIST_HEAD(node_folio_list);
	unsigned int noreclaim_flag;

	if (list_empty(folio_list))
		return nr_migrated;

	noreclaim_flag = memalloc_noreclaim_save();

	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
							   NODE_DATA(nid),
							   target_nid);
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
						   NODE_DATA(nid),
						   target_nid);

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_migrated;
}

static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	return applied * PAGE_SIZE;
}

static bool damon_pa_scheme_has_filter(struct damos *s)
{
	struct damos_filter *f;

	damos_for_each_filter(f, s)
		return true;
	return false;
}

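/*
 * DAMOS_STAT: modify no page, but walk the region to account how many bytes
 * pass the scheme's filters.  Skipped when the scheme has no filter, since
 * there would be nothing to account.
 */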
static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr;
	LIST_HEAD(folio_list);

	if (!damon_pa_scheme_has_filter(s))
		return 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio);
		addr += folio_size(folio);
		folio_put(folio);
	}
	return 0;
}

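/*
 * Dispatch the scheme's action to its 'paddr' implementation.  Actions that
 * are not supported by this operations set fall through to the default case
 * and apply nothing.
 */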
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

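/*
 * Register the 'paddr' operations set.  Callbacks that this variant does not
 * need, such as per-target initialization and cleanup, are left NULL.
 */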
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);