// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/mm_inline.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>
#ifdef CONFIG_NUMA
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define DO_NUMA(x)	do { } while (0)
#endif

typedef u8 rmap_age_t;
/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
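/*
 * Illustrative sketch, not part of the original comment: using the helpers
 * defined later in this file, one scan iteration for a candidate page
 * roughly proceeds as
 *
 *	checksum = calc_checksum(page);
 *	if (stable_tree_search(page) finds identical content)
 *		merge with that existing KSM page;
 *	else if (checksum == rmap_item->oldchecksum &&
 *		 unstable_tree_search_insert() finds a match)
 *		merge the two pages and insert the result into the stable tree;
 *	else
 *		remember the checksum and retry on the next full scan.
 */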
/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};
/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};
/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};
/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 * @age: number of scan iterations since creation
 * @remaining_skips: how many scans to skip
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	rmap_age_t age;
	rmap_age_t remaining_skips;
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
/* Default number of pages to scan per batch */
#define DEFAULT_PAGES_TO_SCAN 100

/* The number of pages scanned */
static unsigned long ksm_pages_scanned;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* Skip pages that couldn't be de-duplicated previously */
/* Default to true at least temporarily, for testing */
static bool ksm_smart_scan = true;

/* The number of zero pages which are placed by KSM */
atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);

/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;

/* Don't scan more than max pages per batch. */
static unsigned long ksm_advisor_max_pages_to_scan = 30000;

/* Min CPU for scanning pages per scan */
#define KSM_ADVISOR_MIN_CPU 10

/* Max CPU for scanning pages per scan */
static unsigned int ksm_advisor_max_cpu = 70;

/* Target scan time in seconds to analyze all KSM candidate pages. */
static unsigned long ksm_advisor_target_scan_time = 200;

/* Exponentially weighted moving average. */
#define EWMA_WEIGHT 30
/**
 * struct advisor_ctx - metadata for KSM advisor
 * @start_scan: start time of the current scan
 * @scan_time: scan time of previous scan
 * @change: change in percent to pages_to_scan parameter
 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
 */
struct advisor_ctx {
	ktime_t start_scan;
	unsigned long scan_time;
	unsigned long change;
	unsigned long long cpu_time;
};
static struct advisor_ctx advisor_ctx;

/* Define different advisors */
enum ksm_advisor_type {
	KSM_ADVISOR_NONE,
	KSM_ADVISOR_SCAN_TIME,
};
static enum ksm_advisor_type ksm_advisor;
#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */

/* At least scan this many pages per batch. */
static unsigned long ksm_advisor_min_pages_to_scan = 500;

static void set_advisor_defaults(void)
{
	if (ksm_advisor == KSM_ADVISOR_NONE) {
		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
		advisor_ctx = (const struct advisor_ctx){ 0 };
		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
	}
}
#endif /* CONFIG_SYSFS */
static inline void advisor_start_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		advisor_ctx.start_scan = ktime_get();
}

/*
 * Use previous scan time if available, otherwise use current scan time as an
 * approximation for the previous scan time.
 */
static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
					   unsigned long scan_time)
{
	return ctx->scan_time ? ctx->scan_time : scan_time;
}
/* Calculate exponential weighted moving average */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}
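/*
 * Illustrative example, not part of the original source: with
 * EWMA_WEIGHT == 30, ewma(100, 200) = (70 * 100 + 30 * 200) / 100 = 130,
 * i.e. each new sample pulls the average 30% of the way towards itself.
 */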
/*
 * The scan time advisor is based on the current scan rate and the target
 * scan rate.
 *
 *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
 *
 * To avoid perturbations it calculates a change factor of previous changes.
 * A new change factor is calculated for each iteration and it uses an
 * exponentially weighted moving average.  The new pages_to_scan value is
 * multiplied with that change factor:
 *
 *	new_pages_to_scan *= change factor
 *
 * The new_pages_to_scan value is limited by the cpu min and max values.  It
 * calculates the cpu percent for the last scan and calculates the new
 * estimated cpu percent cost for the next scan.  That value is capped by the
 * cpu min and max setting.
 *
 * In addition the new pages_to_scan value is capped by the max and min
 * limits.
 */
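/*
 * Worked example (illustrative, not from the original source): with the
 * default ksm_advisor_target_scan_time of 200 seconds and a measured
 * scan_time of 400 seconds, factor = 200 * 100 / 400 = 50, so
 * pages = pages_to_scan * 100 / 50 doubles the scan rate; that result is
 * then scaled by the EWMA change factor and clamped by the CPU and
 * pages_to_scan limits applied in scan_time_advisor() below.
 */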
static void scan_time_advisor(void)
{
	unsigned int cpu_percent;
	unsigned long cpu_time;
	unsigned long cpu_time_diff;
	unsigned long cpu_time_diff_ms;
	unsigned long pages;
	unsigned long per_page_cost;
	unsigned long factor;
	unsigned long change;
	unsigned long last_scan_time;
	unsigned long scan_time;

	/* Convert scan time to seconds */
	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
			    MSEC_PER_SEC);
	scan_time = scan_time ? scan_time : 1;

	/* Calculate CPU consumption of ksmd background thread */
	cpu_time = task_sched_runtime(current);
	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;

	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
	cpu_percent = cpu_percent ? cpu_percent : 1;
	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);

	/* Calculate scan time as percentage of target scan time */
	factor = ksm_advisor_target_scan_time * 100 / scan_time;
	factor = factor ? factor : 1;

	/*
	 * Calculate scan time as percentage of last scan time and use
	 * exponentially weighted average to smooth it
	 */
	change = scan_time * 100 / last_scan_time;
	change = change ? change : 1;
	change = ewma(advisor_ctx.change, change);

	/* Calculate new scan rate based on target scan rate. */
	pages = ksm_thread_pages_to_scan * 100 / factor;
	/* Update pages_to_scan by weighted change percentage. */
	pages = pages * change / 100;

	/* Cap new pages_to_scan value */
	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
	per_page_cost = per_page_cost ? per_page_cost : 1;

	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
	pages = min(pages, ksm_advisor_max_pages_to_scan);

	/* Update advisor context */
	advisor_ctx.change = change;
	advisor_ctx.scan_time = scan_time;
	advisor_ctx.cpu_time = cpu_time;

	ksm_thread_pages_to_scan = pages;
	trace_ksm_advisor(scan_time, pages, cpu_percent);
}
static void advisor_stop_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		scan_time_advisor();
}
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);
static int __init ksm_slab_init(void)
{
	rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}
static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}
static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}
static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}
static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}
/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
	vm_fault_t ret = 0;

	if (lock_vma)
		vma_start_write(vma);

	do {
		bool ksm_page = false;
		struct folio_walk fw;
		struct folio *folio;

		cond_resched();
		folio = folio_walk_start(&fw, vma, addr,
					 FW_MIGRATION | FW_ZEROPAGE);
		if (folio) {
			/* Small folio implies FW_LEVEL_PTE. */
			if (!folio_test_large(folio) &&
			    (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
				ksm_page = true;
			folio_walk_end(&fw, vma);
		}

		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the KSM page for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED  | VM_MAYSHARE | VM_PFNMAP  |
			     VM_IO      | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP | VM_DROPPABLE))
		return false;		/* just ignore the advice */

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;

	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}
static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr, false);
	mmap_read_unlock(mm);
}
static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page = NULL;
	struct folio_walk fw;
	struct folio *folio;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	folio = folio_walk_start(&fw, vma, addr, 0);
	if (folio) {
		if (!folio_is_zone_device(folio) &&
		    folio_test_anon(folio)) {
			folio_get(folio);
			page = folio_page(folio, 0);
		}
		folio_walk_end(&fw, vma);
	}
out:
	if (page) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	}
	mmap_read_unlock(mm);
	return page;
}
/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
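/*
 * Illustrative note, not part of the original source: with the default
 * merge_across_nodes == 1 every kpfn maps to index 0, i.e. one shared
 * stable/unstable tree pair; with merge_across_nodes == 0 on a NUMA build,
 * a page residing on node 2 would be placed in root_stable_tree[2].
 */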
static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						       struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE; /* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}
static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next) {
			ksm_pages_sharing--;
			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
		} else {
			ksm_pages_shared--;
		}

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes. This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	trace_ksm_remove_ksm_page(stable_node->kpfn);
	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}
enum ksm_get_folio_flags {
	KSM_GET_FOLIO_NOLOCK,
	KSM_GET_FOLIO_LOCK,
	KSM_GET_FOLIO_TRYLOCK
};
/*
 * ksm_get_folio: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
				   enum ksm_get_folio_flags flags)
{
	struct folio *folio;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	folio = pfn_folio(kpfn);
	if (READ_ONCE(folio->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is in reuse_ksm_page() case; but if page is swapcache
	 * in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!folio_try_get(folio)) {
		/*
		 * Another check for folio->mapping != expected_mapping
		 * would work here too.  We have chosen to test the
		 * swapcache flag to optimize the common case, when the
		 * folio is or is about to be freed: the swapcache flag
		 * is cleared (under spin_lock_irq) in the ref_freeze
		 * section of __remove_mapping(); but anon folio->mapping
		 * is reset to NULL later, in free_pages_prepare().
		 */
		if (!folio_test_swapcache(folio))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(folio->mapping) != expected_mapping) {
		folio_put(folio);
		goto stale;
	}

	if (flags == KSM_GET_FOLIO_TRYLOCK) {
		if (!folio_trylock(folio)) {
			folio_put(folio);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == KSM_GET_FOLIO_LOCK)
		folio_lock(folio);

	if (flags != KSM_GET_FOLIO_NOLOCK) {
		if (READ_ONCE(folio->mapping) != expected_mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto stale;
		}
	}
	return folio;

stale:
	/*
	 * We come here from above when folio->mapping or the swapcache flag
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct folio *folio;

		stable_node = rmap_item->head;
		folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
		if (!folio)
			goto out;

		hlist_del(&rmap_item->hlist);
		folio_unlock(folio);
		folio_put(folio);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}
/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end, bool lock_vma)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr, lock_vma);
	}
	return err;
}
struct ksm_stable_node *folio_stable_node(const struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void folio_set_stable_node(struct folio *folio,
					 struct ksm_stable_node *stable_node)
{
	VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
	folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct folio *folio;
	int err;

	folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
	if (!folio) {
		/*
		 * ksm_get_folio did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap(). Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!folio_mapped(folio)) {
		/*
		 * The stable node did not yet appear stale to ksm_get_folio(),
		 * since that allows for an unmapped ksm folio to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This folio might be in an LRU cache waiting to be freed,
		 * or it might be in the swapcache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		folio_set_stable_node(folio, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	folio_unlock(folio);
	folio_put(folio);
	return err;
}

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
				    struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}
static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}
static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);

		/*
		 * Exit right away if mm is exiting to avoid lockdep issue in
		 * the maple tree
		 */
		if (ksm_test_exit(mm))
			goto mm_exiting;

		for_each_vma(vmi, vma) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end, false);
			if (err)
				goto error;
		}

mm_exiting:
		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */
static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_local_page(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_local(addr);
	return checksum;
}
static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;
	bool anon_exclusive;
	pte_t entry;

	if (WARN_ON_ONCE(folio_test_large(folio)))
		return err;

	pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
	if (pvmw.address == -EFAULT)
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	anon_exclusive = PageAnonExclusive(&folio->page);
	entry = ptep_get(pvmw.pte);
	if (pte_write(entry) || pte_dirty(entry) ||
	    anon_exclusive || mm_tlb_flush_pending(mm)) {
		swapped = folio_test_swapcache(folio);
		flush_cache_page(vma, pvmw.address, folio_pfn(folio));
		/*
		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check
		 * this assure us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
		if (anon_exclusive &&
		    folio_try_share_anon_rmap_pte(folio, &folio->page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		if (pte_dirty(entry))
			folio_mark_dirty(folio);
		entry = pte_mkclean(entry);

		if (pte_write(entry))
			entry = pte_wrprotect(entry);

		set_pte_at(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = entry;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}
/*
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct folio *kfolio = page_folio(kpage);
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio = page_folio(page);
	pmd_t *pmd;
	pmd_t pmde;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	addr = page_address_in_vma(folio, page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = pmdp_get_lockless(pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out_mn;
	if (!pte_same(ptep_get(ptep), orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}
	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
	VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
			kfolio);

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		folio_get(kfolio);
		folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		/*
		 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
		 * we can easily track all KSM-placed zero pages by checking if
		 * the dirty bit in zero page's PTE is set.
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
		ksm_map_zero_page(mm);
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
		 * will get wrong values in /proc, and a BUG message in dmesg
		 * when tearing down the mm.
		 */
		dec_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
	/*
	 * No need to notify as we are replacing a read only page with another
	 * read only page with the same content.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at(mm, addr, ptep, newpte);

	folio_remove_rmap_pte(folio, page, vma);
	if (!folio_mapped(folio))
		folio_free_swap(folio);
	folio_put(folio);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}
/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the KSM page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	struct folio *folio = page_folio(page);
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!folio_test_anon(folio))
		goto out;

	/*
	 * We need the folio lock to read a stable swapcache flag in
	 * write_protect_page().  We trylock because we don't want to wait
	 * here - we prefer to continue scanning and merging different
	 * pages, then come back to this page when it is unlocked.
	 */
	if (!folio_trylock(folio))
		goto out;

	if (folio_test_large(folio)) {
		if (split_huge_page(page))
			goto out_unlock;
		folio = page_folio(page);
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, folio, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold folio lock, upgrade folio from
			 * anon to a NULL stable_node with the KSM flag set:
			 * stable_tree_insert() will update stable_node.
			 */
			folio_set_stable_node(folio, NULL);
			folio_mark_accessed(folio);
			/*
			 * Page reclaim just frees a clean folio with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!folio_test_dirty(folio))
				folio_mark_dirty(folio);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

out_unlock:
	folio_unlock(folio);
out:
	return err;
}
/*
 * This function returns 0 if the pages were merged or if they are
 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
 */
static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
				       struct page *page)
{
	struct mm_struct *mm = rmap_item->mm;
	int err = -EFAULT;

	/*
	 * Same checksum as an empty page. We attempt to merge it with the
	 * appropriate zero page if the user enabled this via sysfs.
	 */
	if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_mergeable_vma(mm, rmap_item->address);
		if (vma) {
			err = try_to_merge_one_page(vma, page,
						    ZERO_PAGE(rmap_item->address));
			trace_ksm_merge_one_page(
				page_to_pfn(ZERO_PAGE(rmap_item->address)),
				rmap_item, mm, err);
		} else {
			/*
			 * If the vma is out of date, we do not need to
			 * continue.
			 */
			err = 0;
		}
		mmap_read_unlock(mm);
	}
	return err;
}
/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_lock */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	mmap_read_unlock(mm);
	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
				      rmap_item, mm, err);
	return err;
}
/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
					    struct page *page,
					    struct ksm_rmap_item *tree_rmap_item,
					    struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page_folio(page);
}
static __always_inline
bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
	/*
	 * Check that at least one mapping still exists, otherwise
	 * there's not much point to merge and share with this
	 * stable_node, as the underlying tree_page of the other
	 * sharer is going to be freed soon.
	 */
	return stable_node->rmap_hlist_len &&
		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
	return __is_page_sharing_candidate(stable_node, 0);
}
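/*
 * Illustrative example, not part of the original source: with the default
 * ksm_max_page_sharing of 256, a dup with rmap_hlist_len == 255 still
 * passes __is_page_sharing_candidate(node, 0) but fails with offset == 1,
 * so a caller that needs room for one extra mapping will skip it.
 */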
static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
				     struct ksm_stable_node **_stable_node,
				     struct rb_root *root,
				     bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
	struct hlist_node *hlist_safe;
	struct folio *folio, *tree_folio = NULL;
	int found_rmap_hlist_len;

	if (!prune_stale_stable_nodes ||
	    time_before(jiffies, stable_node->chain_prune_time +
			msecs_to_jiffies(
				ksm_stable_node_chains_prune_millisecs)))
		prune_stale_stable_nodes = false;
	else
		stable_node->chain_prune_time = jiffies;

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		cond_resched();
		/*
		 * We must walk all stable_node_dup to prune the stale
		 * stable nodes during lookup.
		 *
		 * ksm_get_folio can drop the nodes from the
		 * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk). The "dup"
		 * stable_node parameter itself will be freed from
		 * under us if it returns NULL.
		 */
		folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
		if (!folio)
			continue;
		/* Pick the best candidate if possible. */
		if (!found || (is_page_sharing_candidate(dup) &&
			       (!is_page_sharing_candidate(found) ||
				dup->rmap_hlist_len > found_rmap_hlist_len))) {
			if (found)
				folio_put(tree_folio);
			found = dup;
			found_rmap_hlist_len = found->rmap_hlist_len;
			tree_folio = folio;
			/* skip put_page for found candidate */
			if (!prune_stale_stable_nodes &&
			    is_page_sharing_candidate(found))
				break;
			continue;
		}
		folio_put(folio);
	}

	if (found) {
		if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
			/*
			 * If there's not just one entry it would
			 * corrupt memory, better BUG_ON. In KSM
			 * context with no lock held it's not even
			 * fatal.
			 */
			BUG_ON(stable_node->hlist.first->next);

			/*
			 * There's just one entry and it is below the
			 * deduplication limit so drop the chain.
			 */
			rb_replace_node(&stable_node->node, &found->node,
					root);
			free_stable_node(stable_node);
			ksm_stable_node_chains--;
			ksm_stable_node_dups--;
			/*
			 * NOTE: the caller depends on the stable_node
			 * to be equal to stable_node_dup if the chain
			 * was collapsed.
			 */
			*_stable_node = found;
			/*
			 * Just for robustness, as stable_node is
			 * otherwise left as a stable pointer, the
			 * compiler shall optimize it away at build
			 * time.
			 */
			stable_node = NULL;
		} else if (stable_node->hlist.first != &found->hlist_dup &&
			   __is_page_sharing_candidate(found, 1)) {
			/*
			 * If the found stable_node dup can accept one
			 * more future merge (in addition to the one
			 * that is underway) and is not at the head of
			 * the chain, put it there so next search will
			 * be quicker in the !prune_stale_stable_nodes
			 * case.
			 *
			 * NOTE: it would be inaccurate to use nr > 1
			 * instead of checking the hlist.first pointer
			 * directly, because in the
			 * prune_stale_stable_nodes case "nr" isn't
			 * the position of the found dup in the chain,
			 * but the total number of dups in the chain.
			 */
			hlist_del(&found->hlist_dup);
			hlist_add_head(&found->hlist_dup,
				       &stable_node->hlist);
		}
	} else {
		/* Its hlist must be empty if no one found. */
		free_stable_node_chain(stable_node, root);
	}

	*_stable_node_dup = found;
	return tree_folio;
}
/*
 * Like for ksm_get_folio, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup like if the chain
 * never existed). It's up to the caller to verify tree_page is not
 * NULL before dereferencing *_stable_node or *_stable_node_dup.
 *
 * *_stable_node_dup is really a second output parameter of this
 * function and will be overwritten in all cases, the caller doesn't
 * need to initialize it.
 */
static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
					 struct ksm_stable_node **_stable_node,
					 struct rb_root *root,
					 bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *stable_node = *_stable_node;

	if (!is_stable_node_chain(stable_node)) {
		*_stable_node_dup = stable_node;
		return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
	}
	return stable_node_dup(_stable_node_dup, _stable_node, root,
			       prune_stale_stable_nodes);
}

static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
						 struct ksm_stable_node **s_n,
						 struct rb_root *root)
{
	return __stable_node_chain(s_n_d, s_n, root, true);
}

static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
					   struct ksm_stable_node **s_n,
					   struct rb_root *root)
{
	return __stable_node_chain(s_n_d, s_n, root, false);
}
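/*
 * Usage note, not part of the original source: as can be seen below,
 * chain_prune() is used on the lookup path in stable_tree_search(), where
 * it is worth also pruning stale dups, while chain() is used on the insert
 * path in stable_tree_insert(), where pruning is skipped.
 */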
/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * -EBUSY if the stable node's page is being migrated, NULL otherwise.
 */
static struct folio *stable_tree_search(struct page *page)
{
	int nid;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct ksm_stable_node *stable_node, *stable_node_dup;
	struct ksm_stable_node *page_node;
	struct folio *folio;

	folio = page_folio(page);
	page_node = folio_stable_node(folio);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		folio_get(folio);
		return folio;
	}

	nid = get_kpfn_nid(folio_pfn(folio));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct folio *tree_folio;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct ksm_stable_node, node);
		tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
		if (!tree_folio) {
			/*
			 * If we walked over a stale stable_node,
			 * ksm_get_folio() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, &tree_folio->page);
		folio_put(tree_folio);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			if (page_node) {
				VM_BUG_ON(page_node->head != &migrate_nodes);
				/*
				 * If the mapcount of our migrated KSM folio is
				 * at most 1, we can merge it with another
				 * KSM folio where we know that we have space
				 * for one more mapping without exceeding the
				 * ksm_max_page_sharing limit: see
				 * chain_prune(). This way, we can avoid adding
				 * this stable node to the chain.
				 */
				if (folio_mapcount(folio) > 1)
					goto chain_append;
			}

			if (!is_page_sharing_candidate(stable_node_dup)) {
				/*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
				 * but we cannot merge the scanned
				 * page in any of the existing
				 * stable_node dups because they're
				 * all full, we need to wait the
				 * scanned page to find itself a match
				 * in the unstable tree to create a
				 * brand new KSM page to add later to
				 * the dups of this stable_node.
				 */
				return NULL;
			}

			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_folio = ksm_get_folio(stable_node_dup,
						   KSM_GET_FOLIO_TRYLOCK);

			if (PTR_ERR(tree_folio) == -EBUSY)
				return ERR_PTR(-EBUSY);

			if (unlikely(!tree_folio))
				/*
				 * The tree may have been rebalanced,
				 * so re-evaluate parent and new.
				 */
				goto again;
			folio_unlock(tree_folio);

			if (get_kpfn_nid(stable_node_dup->kpfn) !=
			    NUMA(stable_node_dup->nid)) {
				folio_put(tree_folio);
				goto replace;
			}
			return tree_folio;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
out:
	if (is_page_sharing_candidate(page_node)) {
		folio_get(folio);
		return folio;
	} else
		return NULL;

replace:
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* there is no chain */
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			rb_replace_node(&stable_node_dup->node,
					&page_node->node,
					root);
			if (is_page_sharing_candidate(page_node))
				folio_get(folio);
			else
				folio = NULL;
		} else {
			rb_erase(&stable_node_dup->node, root);
			folio = NULL;
		}
	} else {
		VM_BUG_ON(!is_stable_node_chain(stable_node));
		__stable_node_dup_del(stable_node_dup);
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			stable_node_chain_add_dup(page_node, stable_node);
			if (is_page_sharing_candidate(page_node))
				folio_get(folio);
			else
				folio = NULL;
		} else {
			folio = NULL;
		}
	}
	stable_node_dup->head = &migrate_nodes;
	list_add(&stable_node_dup->list, stable_node_dup->head);
	return folio;

chain_append:
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* chain is missing so create it */
		stable_node = alloc_stable_node_chain(stable_node_dup,
						      root);
		if (!stable_node)
			return NULL;
	}
	/*
	 * Add this stable_node dup that was
	 * migrated to the stable_node chain
	 * of the current nid for this page
	 * content.
	 */
	VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
	VM_BUG_ON(page_node->head != &migrate_nodes);
	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	stable_node_chain_add_dup(page_node, stable_node);
	goto out;
}
/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
{
	int nid;
	unsigned long kpfn;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct ksm_stable_node *stable_node, *stable_node_dup;
	bool need_chain = false;

	kpfn = folio_pfn(kfolio);
	nid = get_kpfn_nid(kpfn);
	root = root_stable_tree + nid;
again:
	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		struct folio *tree_folio;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct ksm_stable_node, node);
		tree_folio = chain(&stable_node_dup, &stable_node, root);
		if (!tree_folio) {
			/*
			 * If we walked over a stale stable_node,
			 * ksm_get_folio() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(&kfolio->page, &tree_folio->page);
		folio_put(tree_folio);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			need_chain = true;
			break;
		}
	}

	stable_node_dup = alloc_stable_node();
	if (!stable_node_dup)
		return NULL;

	INIT_HLIST_HEAD(&stable_node_dup->hlist);
	stable_node_dup->kpfn = kpfn;
	stable_node_dup->rmap_hlist_len = 0;
	DO_NUMA(stable_node_dup->nid = nid);
	if (!need_chain) {
		rb_link_node(&stable_node_dup->node, parent, new);
		rb_insert_color(&stable_node_dup->node, root);
	} else {
		if (!is_stable_node_chain(stable_node)) {
			struct ksm_stable_node *orig = stable_node;
			/* chain is missing so create it */
			stable_node = alloc_stable_node_chain(orig, root);
			if (!stable_node) {
				free_stable_node(stable_node_dup);
				return NULL;
			}
		}
		stable_node_chain_add_dup(stable_node_dup, stable_node);
	}

	folio_set_stable_node(kfolio, stable_node_dup);

	return stable_node_dup;
}
/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_unstable_tree + nid;
	new = &root->rb_node;

	while (*new) {
		struct ksm_rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else if (!ksm_merge_across_nodes &&
			   page_to_nid(tree_page) != nid) {
			/*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put in the right unstable
			 * tree next time: only merge with it when across_nodes.
			 */
			put_page(tree_page);
			return NULL;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}
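/*
 * Illustrative sketch (not part of this file): the descend-or-link rbtree
 * walk that the stable and unstable tree functions above share.  When the
 * search falls off the tree, "new" is left pointing at the empty child slot
 * where the item belongs, so the same loop serves both lookup and insertion.
 * The struct and compare() below are hypothetical; only rb_entry(),
 * rb_link_node() and rb_insert_color() are the real <linux/rbtree.h> API.
 *
 *	struct rb_node **new = &root->rb_node, *parent = NULL;
 *
 *	while (*new) {
 *		struct thing *t = rb_entry(*new, struct thing, node);
 *		int ret = compare(key, t);
 *
 *		parent = *new;
 *		if (ret < 0)
 *			new = &parent->rb_left;
 *		else if (ret > 0)
 *			new = &parent->rb_right;
 *		else
 *			return t;			// found a match
 *	}
 *	rb_link_node(&item->node, parent, new);		// no match: insert here
 *	rb_insert_color(&item->node, root);
 */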
/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct ksm_rmap_item *rmap_item,
			       struct ksm_stable_node *stable_node,
			       bool max_page_sharing_bypass)
{
	/*
	 * rmap won't find this mapping if we don't insert the
	 * rmap_item in the right stable_node
	 * duplicate. page_migration could break later if rmap breaks,
	 * so we can as well crash here. We really need to check for
	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
	 * for other negative values as an underflow if detected here
	 * for the first time (and not when decreasing rmap_hlist_len)
	 * would be sign of memory corruption in the stable_node.
	 */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	stable_node->rmap_hlist_len++;
	if (!max_page_sharing_bypass)
		/* possibly non fatal but unexpected overflow, only warn */
		WARN_ON_ONCE(stable_node->rmap_hlist_len >
			     ksm_max_page_sharing);

	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;

	rmap_item->mm->ksm_merging_pages++;
}
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
{
	struct ksm_rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct ksm_stable_node *stable_node;
	struct folio *kfolio;
	unsigned int checksum;
	int err;
	bool max_page_sharing_bypass = false;

	stable_node = page_stable_node(page);
	if (stable_node) {
		if (stable_node->head != &migrate_nodes &&
		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
		    NUMA(stable_node->nid)) {
			stable_node_dup_del(stable_node);
			stable_node->head = &migrate_nodes;
			list_add(&stable_node->list, stable_node->head);
		}
		if (stable_node->head != &migrate_nodes &&
		    rmap_item->head == stable_node)
			return;
		/*
		 * If it's a KSM fork, allow it to go over the sharing limit
		 * without warnings.
		 */
		if (!is_page_sharing_candidate(stable_node))
			max_page_sharing_bypass = true;
	} else
		remove_rmap_item_from_tree(rmap_item);

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	if (!try_to_merge_with_zero_page(rmap_item, page))
		return;

	/* Start by searching for the folio in the stable tree */
	kfolio = stable_tree_search(page);
	if (&kfolio->page == page && rmap_item->head == stable_node) {
		folio_put(kfolio);
		return;
	}

	remove_rmap_item_from_tree(rmap_item);

	if (kfolio) {
		if (kfolio == ERR_PTR(-EBUSY))
			return;

		err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			folio_lock(kfolio);
			stable_tree_append(rmap_item, folio_stable_node(kfolio),
					   max_page_sharing_bypass);
			folio_unlock(kfolio);
		}
		folio_put(kfolio);
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		bool split;

		kfolio = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		/*
		 * If both pages we tried to merge belong to the same compound
		 * page, then we actually ended up increasing the reference
		 * count of the same compound page twice, and split_huge_page
		 * failed.
		 * Here we set a flag if that happened, and we use it later to
		 * try split_huge_page again. Since we call put_page right
		 * afterwards, the reference count will be correct and
		 * split_huge_page should succeed.
		 */
		split = PageTransCompound(page)
			&& compound_head(page) == compound_head(tree_page);
		put_page(tree_page);
		if (kfolio) {
			/*
			 * The pages were successfully merged: insert new
			 * node in the stable tree and add both rmap_items.
			 */
			folio_lock(kfolio);
			stable_node = stable_tree_insert(kfolio);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node,
						   false);
				stable_tree_append(rmap_item, stable_node,
						   false);
			}
			folio_unlock(kfolio);
			folio_put(kfolio);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		} else if (split) {
			/*
			 * We are here if we tried to merge two pages and
			 * failed because they both belonged to the same
			 * compound page. We will split the page now, but no
			 * merging will take place.
			 * We do not want to add the cost of a full lock; if
			 * the page is locked, it is better to skip it and
			 * perhaps try again later.
			 */
			if (!trylock_page(page))
				return;
			split_huge_page(page);
			unlock_page(page);
		}
	}
}
static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
					    struct ksm_rmap_item **rmap_list,
					    unsigned long addr)
{
	struct ksm_rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->slot.mm;
		rmap_item->mm->ksm_rmap_items++;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}
/*
 * Calculate skip age for the ksm page age. The age determines how often
 * de-duplicating has already been tried unsuccessfully. If the age is
 * smaller, the scanning of this page is skipped for less scans.
 *
 * @age: rmap_item age of page
 */
static unsigned int skip_age(rmap_age_t age)

/*
 * Determines if a page should be skipped for the current scan.
 *
 * @folio: folio containing the page to check
 * @rmap_item: associated rmap_item of page
 */
static bool should_skip_rmap_item(struct folio *folio,
				  struct ksm_rmap_item *rmap_item)
{
	rmap_age_t age;

	if (!ksm_smart_scan)
		return false;

	/*
	 * Never skip pages that are already KSM; pages cmp_and_merge_page()
	 * will essentially ignore them, but we still have to process them
	 * properly.
	 */
	if (folio_test_ksm(folio))
		return false;

	age = rmap_item->age;
	if (age != U8_MAX)
		rmap_item->age++;

	/*
	 * Smaller ages are not skipped, they need to get a chance to go
	 * through the different phases of the KSM merging.
	 */
	if (age < 3)
		return false;

	/*
	 * Are we still allowed to skip? If not, then don't skip it
	 * and determine how much more often we are allowed to skip next.
	 */
	if (!rmap_item->remaining_skips) {
		rmap_item->remaining_skips = skip_age(age);
		return false;
	}

	/* Skip this page */
	ksm_pages_skipped++;
	rmap_item->remaining_skips--;
	remove_rmap_item_from_tree(rmap_item);
	return true;
}
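/*
 * Illustrative trace (not part of this file) of the smart-scan bookkeeping
 * above: a page that keeps failing to merge ages, and once it is old enough
 * skip_age(age) scans are skipped before it is examined again.  The numbers
 * below are hypothetical, chosen only to show the shape of the backoff, not
 * the kernel's actual thresholds:
 *
 *	scan N:   remaining_skips == 0 -> scanned, remaining_skips = skip_age(age)
 *	scan N+1: remaining_skips == 2 -> skipped, ksm_pages_skipped++
 *	scan N+2: remaining_skips == 1 -> skipped, ksm_pages_skipped++
 *	scan N+3: remaining_skips == 0 -> scanned again, backoff recomputed
 */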
static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct ksm_rmap_item *rmap_item;
	struct vma_iterator vmi;
	int nid;

	if (list_empty(&ksm_mm_head.slot.mm_node))
		return NULL;

	mm_slot = ksm_scan.mm_slot;
	if (mm_slot == &ksm_mm_head) {
		advisor_start_scan();
		trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);

		/*
		 * A number of pages can hang around indefinitely in per-cpu
		 * LRU cache, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		/*
		 * Whereas stale stable_nodes on the stable_tree itself
		 * get pruned in the regular course of stable_tree_search(),
		 * those moved out to the migrate_nodes list can accumulate:
		 * so prune them once before each full scan.
		 */
		if (!ksm_merge_across_nodes) {
			struct ksm_stable_node *stable_node, *next;
			struct folio *folio;

			list_for_each_entry_safe(stable_node, next,
						 &migrate_nodes, list) {
				folio = ksm_get_folio(stable_node,
						      KSM_GET_FOLIO_NOLOCK);
				if (folio)
					folio_put(folio);
				cond_resched();
			}
		}

		for (nid = 0; nid < ksm_nr_node_ids; nid++)
			root_unstable_tree[nid] = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		ksm_scan.mm_slot = mm_slot;
		spin_unlock(&ksm_mmlist_lock);
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
		 */
		if (mm_slot == &ksm_mm_head)
			return NULL;
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &mm_slot->rmap_list;
	}

	slot = &mm_slot->slot;
	mm = slot->mm;
	vma_iter_init(&vmi, mm, ksm_scan.address);

	mmap_read_lock(mm);
	if (ksm_test_exit(mm))
		goto no_vmas;

	for_each_vma(vmi, vma) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			struct page *tmp_page = NULL;
			struct folio_walk fw;
			struct folio *folio;

			if (ksm_test_exit(mm))
				break;

			folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
			if (folio) {
				if (!folio_is_zone_device(folio) &&
				    folio_test_anon(folio)) {
					folio_get(folio);
					tmp_page = fw.page;
				}
				folio_walk_end(&fw, vma);
			}

			if (tmp_page) {
				flush_anon_page(vma, tmp_page, ksm_scan.address);
				flush_dcache_page(tmp_page);
				rmap_item = get_next_rmap_item(mm_slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;

					if (should_skip_rmap_item(folio, rmap_item)) {
						folio_put(folio);
						goto next_page;
					}

					ksm_scan.address += PAGE_SIZE;
					*page = tmp_page;
				} else
					folio_put(folio);
				mmap_read_unlock(mm);
				return rmap_item;
			}
next_page:
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
no_vmas:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &mm_slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(mm_slot->slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_lock
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_lock then protects against race with MADV_MERGEABLE).
		 */
		hash_del(&mm_slot->slot.hash);
		list_del(&mm_slot->slot.mm_node);
		spin_unlock(&ksm_mmlist_lock);

		mm_slot_free(mm_slot_cache, mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
		mmap_read_unlock(mm);
		mmdrop(mm);
	} else {
		mmap_read_unlock(mm);
		/*
		 * mmap_read_unlock(mm) first because after
		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
		 * already have been freed under us by __ksm_exit()
		 * because the "mm_slot" is still hashed and
		 * ksm_scan.mm_slot doesn't point to it anymore.
		 */
		spin_unlock(&ksm_mmlist_lock);
	}

	/* Repeat until we've completed scanning the whole list */
	mm_slot = ksm_scan.mm_slot;
	if (mm_slot != &ksm_mm_head)
		goto next_mm;

	advisor_stop_scan();

	trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
	ksm_scan.seqnr++;
	return NULL;
}
/**
 * ksm_do_scan  - the ksm scanner main worker function.
 * @scan_npages:  number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct ksm_rmap_item *rmap_item;
	struct page *page;

	while (scan_npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		cmp_and_merge_page(page, rmap_item);
		put_page(page);
		ksm_pages_scanned++;
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
}
static int ksm_scan_thread(void *nothing)
{
	unsigned int sleep_ms;

	set_freezable();
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		wait_while_offlining();
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		if (ksmd_should_run()) {
			sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
			wait_event_freezable_timeout(ksm_iter_wait,
				sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
				msecs_to_jiffies(sleep_ms));
		} else {
			wait_event_freezable(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}
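/*
 * Rough arithmetic sketch (not part of this file): with the advisor off,
 * each ksmd wakeup processes at most pages_to_scan pages and then sleeps
 * for sleep_millisecs, so the steady-state scan rate is approximately
 *
 *	pages_to_scan * (1000 / sleep_millisecs)	pages per second
 *
 * Assuming, purely for illustration, 100 pages per batch and a 20ms sleep,
 * that is about 5000 pages/s, i.e. roughly 20MB of memory walked per second
 * with 4KiB pages, ignoring the time the scan itself takes.
 */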
static void __ksm_add_vma(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;

	if (vm_flags & VM_MERGEABLE)
		return;

	if (vma_ksm_compatible(vma))
		vm_flags_set(vma, VM_MERGEABLE);
}

static int __ksm_del_vma(struct vm_area_struct *vma)
{
	int err;

	if (!(vma->vm_flags & VM_MERGEABLE))
		return 0;

	if (vma->anon_vma) {
		err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
		if (err)
			return err;
	}

	vm_flags_clear(vma, VM_MERGEABLE);
	return 0;
}

/**
 * ksm_add_vma - Mark vma as mergeable if compatible
 *
 * @vma:  Pointer to vma
 */
void ksm_add_vma(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		__ksm_add_vma(vma);
}

static void ksm_add_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);
	for_each_vma(vmi, vma)
		__ksm_add_vma(vma);
}

static int ksm_del_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err;

	VMA_ITERATOR(vmi, mm, 0);
	for_each_vma(vmi, vma) {
		err = __ksm_del_vma(vma);
		if (err)
			return err;
	}
	return 0;
}
/**
 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
 *                        compatible VMA's
 *
 * @mm:  Pointer to mm
 *
 * Returns 0 on success, otherwise error code
 */
int ksm_enable_merge_any(struct mm_struct *mm)
{
	int err;

	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return 0;

	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
		err = __ksm_enter(mm);
		if (err)
			return err;
	}

	set_bit(MMF_VM_MERGE_ANY, &mm->flags);
	ksm_add_vmas(mm);

	return 0;
}

/**
 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
 *			   previously enabled via ksm_enable_merge_any().
 *
 * Disabling merging implies unmerging any merged pages, like setting
 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
 * merging on all compatible VMA's remains enabled.
 *
 * @mm: Pointer to mm
 *
 * Returns 0 on success, otherwise error code
 */
int ksm_disable_merge_any(struct mm_struct *mm)
{
	int err;

	if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return 0;

	err = ksm_del_vmas(mm);
	if (err) {
		ksm_add_vmas(mm);
		return err;
	}

	clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
	return 0;
}
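/*
 * Userspace sketch (not part of this file): ksm_enable_merge_any() and
 * ksm_disable_merge_any() back the PR_SET_MEMORY_MERGE prctl, so a process
 * can opt its whole address space in or out of KSM (CAP_SYS_RESOURCE may be
 * required):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))	// opt in
 *		perror("PR_SET_MEMORY_MERGE");
 *	...
 *	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);		// opt out, unmerges
 *
 * PR_GET_MEMORY_MERGE reports the current setting for the calling process.
 */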
int ksm_disable(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);

	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
		return 0;
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return ksm_disable_merge_any(mm);
	return ksm_del_vmas(mm);
}
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		if (vma->vm_flags & VM_MERGEABLE)
			return 0;
		if (!vma_ksm_compatible(vma))
			return 0;

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end, true);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ksm_madvise);
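/*
 * Userspace sketch (not part of this file): ksm_madvise() is what
 * madvise(MADV_MERGEABLE) reaches for an anonymous mapping, so the usual
 * per-region opt-in from an application looks like:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("MADV_MERGEABLE");	// e.g. EINVAL if CONFIG_KSM=n
 *
 * madvise(buf, len, MADV_UNMERGEABLE) undoes it, breaking COW on any pages
 * that were already merged.
 */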
int __ksm_enter(struct mm_struct *mm)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	int needs_wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return -ENOMEM;

	slot = &mm_slot->slot;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);

	spin_lock(&ksm_mmlist_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
	 * insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 *
	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
	 * scanning cursor, otherwise KSM pages in newly forked mms will be
	 * missed: then we might as well insert at the end of the list.
	 */
	if (ksm_run & KSM_RUN_UNMERGE)
		list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
	else
		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	mmgrab(mm);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	trace_ksm_enter(mm);
	return 0;
}
void __ksm_exit(struct mm_struct *mm)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_lock to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hash_del(&slot->hash);
			list_del(&slot->mm_node);
			easy_to_free = 1;
		} else {
			list_move(&slot->mm_node,
				  &ksm_scan.mm_slot->slot.mm_node);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		mm_slot_free(mm_slot_cache, mm_slot);
		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}

	trace_ksm_exit(mm);
}
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page = folio_page(folio, 0);
	struct anon_vma *anon_vma = folio_anon_vma(folio);
	struct folio *new_folio;

	if (folio_test_large(folio))
		return folio;

	if (folio_test_ksm(folio)) {
		if (folio_stable_node(folio) &&
		    !(ksm_run & KSM_RUN_UNMERGE))
			return folio;	/* no need to copy it */
	} else if (!anon_vma) {
		return folio;		/* no need to copy it */
	} else if (folio->index == linear_page_index(vma, addr) &&
			anon_vma->root == vma->anon_vma->root) {
		return folio;		/* still no need to copy it */
	}
	if (PageHWPoison(page))
		return ERR_PTR(-EHWPOISON);
	if (!folio_test_uptodate(folio))
		return folio;		/* let do_swap_page report the error */

	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
	if (new_folio &&
	    mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		new_folio = NULL;
	}
	if (new_folio) {
		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
								addr, vma)) {
			folio_put(new_folio);
			return ERR_PTR(-EHWPOISON);
		}
		folio_set_dirty(new_folio);
		__folio_mark_uptodate(new_folio);
		__folio_set_locked(new_folio);
		count_vm_event(KSM_SWPIN_COPY);
	}

	return new_folio;
}
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
	struct ksm_stable_node *stable_node;
	struct ksm_rmap_item *rmap_item;
	int search_new_forks = 0;

	VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	stable_node = folio_stable_node(folio);
	if (!stable_node)
		return;
again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		cond_resched();
		if (!anon_vma_trylock_read(anon_vma)) {
			if (rwc->try_lock) {
				rwc->contended = true;
				return;
			}
			anon_vma_lock_read(anon_vma);
		}
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			unsigned long addr;

			cond_resched();
			vma = vmac->vma;

			/* Ignore the stable/unstable/sqnr flags */
			addr = rmap_item->address & PAGE_MASK;

			if (addr < vma->vm_start || addr >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
			if (rwc->done && rwc->done(folio)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
		}
		anon_vma_unlock_read(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
}
3066 #ifdef CONFIG_MEMORY_FAILURE
3068 * Collect processes when the error hit an ksm page.
3070 void collect_procs_ksm(const struct folio
*folio
, const struct page
*page
,
3071 struct list_head
*to_kill
, int force_early
)
3073 struct ksm_stable_node
*stable_node
;
3074 struct ksm_rmap_item
*rmap_item
;
3075 struct vm_area_struct
*vma
;
3076 struct task_struct
*tsk
;
3078 stable_node
= folio_stable_node(folio
);
3081 hlist_for_each_entry(rmap_item
, &stable_node
->hlist
, hlist
) {
3082 struct anon_vma
*av
= rmap_item
->anon_vma
;
3084 anon_vma_lock_read(av
);
3086 for_each_process(tsk
) {
3087 struct anon_vma_chain
*vmac
;
3089 struct task_struct
*t
=
3090 task_early_kill(tsk
, force_early
);
3093 anon_vma_interval_tree_foreach(vmac
, &av
->rb_root
, 0,
3097 if (vma
->vm_mm
== t
->mm
) {
3098 addr
= rmap_item
->address
& PAGE_MASK
;
3099 add_to_kill_ksm(t
, page
, vma
, to_kill
,
3105 anon_vma_unlock_read(av
);
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
	struct ksm_stable_node *stable_node;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);

	stable_node = folio_stable_node(folio);
	if (stable_node) {
		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
		stable_node->kpfn = folio_pfn(newfolio);
		/*
		 * newfolio->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to ksm_get_folio() before it can see that folio->mapping
		 * has gone stale (or that the swapcache flag has been cleared).
		 */
		smp_wmb();
		folio_set_stable_node(folio, NULL);
	}
}
#endif /* CONFIG_MIGRATION */
3135 #ifdef CONFIG_MEMORY_HOTREMOVE
3136 static void wait_while_offlining(void)
3138 while (ksm_run
& KSM_RUN_OFFLINE
) {
3139 mutex_unlock(&ksm_thread_mutex
);
3140 wait_on_bit(&ksm_run
, ilog2(KSM_RUN_OFFLINE
),
3141 TASK_UNINTERRUPTIBLE
);
3142 mutex_lock(&ksm_thread_mutex
);
3146 static bool stable_node_dup_remove_range(struct ksm_stable_node
*stable_node
,
3147 unsigned long start_pfn
,
3148 unsigned long end_pfn
)
3150 if (stable_node
->kpfn
>= start_pfn
&&
3151 stable_node
->kpfn
< end_pfn
) {
3153 * Don't ksm_get_folio, page has already gone:
3154 * which is why we keep kpfn instead of page*
3156 remove_node_from_stable_tree(stable_node
);
3162 static bool stable_node_chain_remove_range(struct ksm_stable_node
*stable_node
,
3163 unsigned long start_pfn
,
3164 unsigned long end_pfn
,
3165 struct rb_root
*root
)
3167 struct ksm_stable_node
*dup
;
3168 struct hlist_node
*hlist_safe
;
3170 if (!is_stable_node_chain(stable_node
)) {
3171 VM_BUG_ON(is_stable_node_dup(stable_node
));
3172 return stable_node_dup_remove_range(stable_node
, start_pfn
,
3176 hlist_for_each_entry_safe(dup
, hlist_safe
,
3177 &stable_node
->hlist
, hlist_dup
) {
3178 VM_BUG_ON(!is_stable_node_dup(dup
));
3179 stable_node_dup_remove_range(dup
, start_pfn
, end_pfn
);
3181 if (hlist_empty(&stable_node
->hlist
)) {
3182 free_stable_node_chain(stable_node
, root
);
3183 return true; /* notify caller that tree was rebalanced */
3188 static void ksm_check_stable_tree(unsigned long start_pfn
,
3189 unsigned long end_pfn
)
3191 struct ksm_stable_node
*stable_node
, *next
;
3192 struct rb_node
*node
;
3195 for (nid
= 0; nid
< ksm_nr_node_ids
; nid
++) {
3196 node
= rb_first(root_stable_tree
+ nid
);
3198 stable_node
= rb_entry(node
, struct ksm_stable_node
, node
);
3199 if (stable_node_chain_remove_range(stable_node
,
3203 node
= rb_first(root_stable_tree
+ nid
);
3205 node
= rb_next(node
);
3209 list_for_each_entry_safe(stable_node
, next
, &migrate_nodes
, list
) {
3210 if (stable_node
->kpfn
>= start_pfn
&&
3211 stable_node
->kpfn
< end_pfn
)
3212 remove_node_from_stable_tree(stable_node
);
3217 static int ksm_memory_callback(struct notifier_block
*self
,
3218 unsigned long action
, void *arg
)
3220 struct memory_notify
*mn
= arg
;
3223 case MEM_GOING_OFFLINE
:
3225 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
3226 * and remove_all_stable_nodes() while memory is going offline:
3227 * it is unsafe for them to touch the stable tree at this time.
3228 * But unmerge_ksm_pages(), rmap lookups and other entry points
3229 * which do not need the ksm_thread_mutex are all safe.
3231 mutex_lock(&ksm_thread_mutex
);
3232 ksm_run
|= KSM_RUN_OFFLINE
;
3233 mutex_unlock(&ksm_thread_mutex
);
3238 * Most of the work is done by page migration; but there might
3239 * be a few stable_nodes left over, still pointing to struct
3240 * pages which have been offlined: prune those from the tree,
3241 * otherwise ksm_get_folio() might later try to access a
3242 * non-existent struct page.
3244 ksm_check_stable_tree(mn
->start_pfn
,
3245 mn
->start_pfn
+ mn
->nr_pages
);
3247 case MEM_CANCEL_OFFLINE
:
3248 mutex_lock(&ksm_thread_mutex
);
3249 ksm_run
&= ~KSM_RUN_OFFLINE
;
3250 mutex_unlock(&ksm_thread_mutex
);
3252 smp_mb(); /* wake_up_bit advises this */
3253 wake_up_bit(&ksm_run
, ilog2(KSM_RUN_OFFLINE
));
3259 static void wait_while_offlining(void)
3262 #endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
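/*
 * Userspace sketch (not part of this file): ksm_process_profit() implements
 *
 *	profit = (ksm_merging_pages + ksm_zero_pages) * PAGE_SIZE
 *		 - ksm_rmap_items * sizeof(struct ksm_rmap_item)
 *
 * and is exposed (assuming CONFIG_PROC_FS) as the "ksm_process_profit" line
 * of /proc/<pid>/ksm_stat, so reading it back is just:
 *
 *	FILE *f = fopen("/proc/self/ksm_stat", "r");
 *	char line[128];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		if (!strncmp(line, "ksm_process_profit", 18))
 *			fputs(line, stdout);
 */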
#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;
	wake_up_interruptible(&ksm_iter_wait);

	return count;
}
KSM_ATTR(sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int nr_pages;
	int err;

	if (ksm_advisor != KSM_ADVISOR_NONE)
		return -EINVAL;

	err = kstrtouint(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);
static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int flags;
	int err;

	err = kstrtouint(buf, 10, &flags);
	if (err)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
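/*
 * Userspace sketch (not part of this file): the knobs defined in this
 * section live under /sys/kernel/mm/ksm/, so starting the scanner and
 * tuning its batch size amounts to writing those files (as root).
 * ksm_write() below is a hypothetical helper, not a kernel API:
 *
 *	static int ksm_write(const char *name, const char *val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	ksm_write("pages_to_scan", "256");
 *	ksm_write("sleep_millisecs", "20");
 *	ksm_write("run", "1");		// KSM_RUN_MERGE: start ksmd
 *	...
 *	ksm_write("run", "2");		// KSM_RUN_UNMERGE: break COW, unmerge
 */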
3381 static ssize_t
merge_across_nodes_show(struct kobject
*kobj
,
3382 struct kobj_attribute
*attr
, char *buf
)
3384 return sysfs_emit(buf
, "%u\n", ksm_merge_across_nodes
);
3387 static ssize_t
merge_across_nodes_store(struct kobject
*kobj
,
3388 struct kobj_attribute
*attr
,
3389 const char *buf
, size_t count
)
3394 err
= kstrtoul(buf
, 10, &knob
);
3400 mutex_lock(&ksm_thread_mutex
);
3401 wait_while_offlining();
3402 if (ksm_merge_across_nodes
!= knob
) {
3403 if (ksm_pages_shared
|| remove_all_stable_nodes())
3405 else if (root_stable_tree
== one_stable_tree
) {
3406 struct rb_root
*buf
;
3408 * This is the first time that we switch away from the
3409 * default of merging across nodes: must now allocate
3410 * a buffer to hold as many roots as may be needed.
3411 * Allocate stable and unstable together:
3412 * MAXSMP NODES_SHIFT 10 will use 16kB.
3414 buf
= kcalloc(nr_node_ids
+ nr_node_ids
, sizeof(*buf
),
3416 /* Let us assume that RB_ROOT is NULL is zero */
3420 root_stable_tree
= buf
;
3421 root_unstable_tree
= buf
+ nr_node_ids
;
3422 /* Stable tree is empty but not the unstable */
3423 root_unstable_tree
[0] = one_unstable_tree
[0];
3427 ksm_merge_across_nodes
= knob
;
3428 ksm_nr_node_ids
= knob
? 1 : nr_node_ids
;
3431 mutex_unlock(&ksm_thread_mutex
);
3433 return err
? err
: count
;
3435 KSM_ATTR(merge_across_nodes
);
3438 static ssize_t
use_zero_pages_show(struct kobject
*kobj
,
3439 struct kobj_attribute
*attr
, char *buf
)
3441 return sysfs_emit(buf
, "%u\n", ksm_use_zero_pages
);
3443 static ssize_t
use_zero_pages_store(struct kobject
*kobj
,
3444 struct kobj_attribute
*attr
,
3445 const char *buf
, size_t count
)
3450 err
= kstrtobool(buf
, &value
);
3454 ksm_use_zero_pages
= value
;
3458 KSM_ATTR(use_zero_pages
);
3460 static ssize_t
max_page_sharing_show(struct kobject
*kobj
,
3461 struct kobj_attribute
*attr
, char *buf
)
3463 return sysfs_emit(buf
, "%u\n", ksm_max_page_sharing
);
3466 static ssize_t
max_page_sharing_store(struct kobject
*kobj
,
3467 struct kobj_attribute
*attr
,
3468 const char *buf
, size_t count
)
3473 err
= kstrtoint(buf
, 10, &knob
);
3477 * When a KSM page is created it is shared by 2 mappings. This
3478 * being a signed comparison, it implicitly verifies it's not
3484 if (READ_ONCE(ksm_max_page_sharing
) == knob
)
3487 mutex_lock(&ksm_thread_mutex
);
3488 wait_while_offlining();
3489 if (ksm_max_page_sharing
!= knob
) {
3490 if (ksm_pages_shared
|| remove_all_stable_nodes())
3493 ksm_max_page_sharing
= knob
;
3495 mutex_unlock(&ksm_thread_mutex
);
3497 return err
? err
: count
;
3499 KSM_ATTR(max_page_sharing
);
3501 static ssize_t
pages_scanned_show(struct kobject
*kobj
,
3502 struct kobj_attribute
*attr
, char *buf
)
3504 return sysfs_emit(buf
, "%lu\n", ksm_pages_scanned
);
3506 KSM_ATTR_RO(pages_scanned
);
3508 static ssize_t
pages_shared_show(struct kobject
*kobj
,
3509 struct kobj_attribute
*attr
, char *buf
)
3511 return sysfs_emit(buf
, "%lu\n", ksm_pages_shared
);
3513 KSM_ATTR_RO(pages_shared
);
3515 static ssize_t
pages_sharing_show(struct kobject
*kobj
,
3516 struct kobj_attribute
*attr
, char *buf
)
3518 return sysfs_emit(buf
, "%lu\n", ksm_pages_sharing
);
3520 KSM_ATTR_RO(pages_sharing
);
3522 static ssize_t
pages_unshared_show(struct kobject
*kobj
,
3523 struct kobj_attribute
*attr
, char *buf
)
3525 return sysfs_emit(buf
, "%lu\n", ksm_pages_unshared
);
3527 KSM_ATTR_RO(pages_unshared
);
3529 static ssize_t
pages_volatile_show(struct kobject
*kobj
,
3530 struct kobj_attribute
*attr
, char *buf
)
3532 long ksm_pages_volatile
;
3534 ksm_pages_volatile
= ksm_rmap_items
- ksm_pages_shared
3535 - ksm_pages_sharing
- ksm_pages_unshared
;
3537 * It was not worth any locking to calculate that statistic,
3538 * but it might therefore sometimes be negative: conceal that.
3540 if (ksm_pages_volatile
< 0)
3541 ksm_pages_volatile
= 0;
3542 return sysfs_emit(buf
, "%ld\n", ksm_pages_volatile
);
3544 KSM_ATTR_RO(pages_volatile
);
3546 static ssize_t
pages_skipped_show(struct kobject
*kobj
,
3547 struct kobj_attribute
*attr
, char *buf
)
3549 return sysfs_emit(buf
, "%lu\n", ksm_pages_skipped
);
3551 KSM_ATTR_RO(pages_skipped
);
3553 static ssize_t
ksm_zero_pages_show(struct kobject
*kobj
,
3554 struct kobj_attribute
*attr
, char *buf
)
3556 return sysfs_emit(buf
, "%ld\n", atomic_long_read(&ksm_zero_pages
));
3558 KSM_ATTR_RO(ksm_zero_pages
);
3560 static ssize_t
general_profit_show(struct kobject
*kobj
,
3561 struct kobj_attribute
*attr
, char *buf
)
3563 long general_profit
;
3565 general_profit
= (ksm_pages_sharing
+ atomic_long_read(&ksm_zero_pages
)) * PAGE_SIZE
-
3566 ksm_rmap_items
* sizeof(struct ksm_rmap_item
);
3568 return sysfs_emit(buf
, "%ld\n", general_profit
);
3570 KSM_ATTR_RO(general_profit
);
3572 static ssize_t
stable_node_dups_show(struct kobject
*kobj
,
3573 struct kobj_attribute
*attr
, char *buf
)
3575 return sysfs_emit(buf
, "%lu\n", ksm_stable_node_dups
);
3577 KSM_ATTR_RO(stable_node_dups
);
3579 static ssize_t
stable_node_chains_show(struct kobject
*kobj
,
3580 struct kobj_attribute
*attr
, char *buf
)
3582 return sysfs_emit(buf
, "%lu\n", ksm_stable_node_chains
);
3584 KSM_ATTR_RO(stable_node_chains
);
3587 stable_node_chains_prune_millisecs_show(struct kobject
*kobj
,
3588 struct kobj_attribute
*attr
,
3591 return sysfs_emit(buf
, "%u\n", ksm_stable_node_chains_prune_millisecs
);
3595 stable_node_chains_prune_millisecs_store(struct kobject
*kobj
,
3596 struct kobj_attribute
*attr
,
3597 const char *buf
, size_t count
)
3602 err
= kstrtouint(buf
, 10, &msecs
);
3606 ksm_stable_node_chains_prune_millisecs
= msecs
;
3610 KSM_ATTR(stable_node_chains_prune_millisecs
);
3612 static ssize_t
full_scans_show(struct kobject
*kobj
,
3613 struct kobj_attribute
*attr
, char *buf
)
3615 return sysfs_emit(buf
, "%lu\n", ksm_scan
.seqnr
);
3617 KSM_ATTR_RO(full_scans
);
3619 static ssize_t
smart_scan_show(struct kobject
*kobj
,
3620 struct kobj_attribute
*attr
, char *buf
)
3622 return sysfs_emit(buf
, "%u\n", ksm_smart_scan
);
3625 static ssize_t
smart_scan_store(struct kobject
*kobj
,
3626 struct kobj_attribute
*attr
,
3627 const char *buf
, size_t count
)
3632 err
= kstrtobool(buf
, &value
);
3636 ksm_smart_scan
= value
;
3639 KSM_ATTR(smart_scan
);
3641 static ssize_t
advisor_mode_show(struct kobject
*kobj
,
3642 struct kobj_attribute
*attr
, char *buf
)
3646 if (ksm_advisor
== KSM_ADVISOR_NONE
)
3647 output
= "[none] scan-time";
3648 else if (ksm_advisor
== KSM_ADVISOR_SCAN_TIME
)
3649 output
= "none [scan-time]";
3651 return sysfs_emit(buf
, "%s\n", output
);
3654 static ssize_t
advisor_mode_store(struct kobject
*kobj
,
3655 struct kobj_attribute
*attr
, const char *buf
,
3658 enum ksm_advisor_type curr_advisor
= ksm_advisor
;
3660 if (sysfs_streq("scan-time", buf
))
3661 ksm_advisor
= KSM_ADVISOR_SCAN_TIME
;
3662 else if (sysfs_streq("none", buf
))
3663 ksm_advisor
= KSM_ADVISOR_NONE
;
3667 /* Set advisor default values */
3668 if (curr_advisor
!= ksm_advisor
)
3669 set_advisor_defaults();
3673 KSM_ATTR(advisor_mode
);
3675 static ssize_t
advisor_max_cpu_show(struct kobject
*kobj
,
3676 struct kobj_attribute
*attr
, char *buf
)
3678 return sysfs_emit(buf
, "%u\n", ksm_advisor_max_cpu
);
3681 static ssize_t
advisor_max_cpu_store(struct kobject
*kobj
,
3682 struct kobj_attribute
*attr
,
3683 const char *buf
, size_t count
)
3686 unsigned long value
;
3688 err
= kstrtoul(buf
, 10, &value
);
3692 ksm_advisor_max_cpu
= value
;
3695 KSM_ATTR(advisor_max_cpu
);
3697 static ssize_t
advisor_min_pages_to_scan_show(struct kobject
*kobj
,
3698 struct kobj_attribute
*attr
, char *buf
)
3700 return sysfs_emit(buf
, "%lu\n", ksm_advisor_min_pages_to_scan
);
3703 static ssize_t
advisor_min_pages_to_scan_store(struct kobject
*kobj
,
3704 struct kobj_attribute
*attr
,
3705 const char *buf
, size_t count
)
3708 unsigned long value
;
3710 err
= kstrtoul(buf
, 10, &value
);
3714 ksm_advisor_min_pages_to_scan
= value
;
3717 KSM_ATTR(advisor_min_pages_to_scan
);
3719 static ssize_t
advisor_max_pages_to_scan_show(struct kobject
*kobj
,
3720 struct kobj_attribute
*attr
, char *buf
)
3722 return sysfs_emit(buf
, "%lu\n", ksm_advisor_max_pages_to_scan
);
3725 static ssize_t
advisor_max_pages_to_scan_store(struct kobject
*kobj
,
3726 struct kobj_attribute
*attr
,
3727 const char *buf
, size_t count
)
3730 unsigned long value
;
3732 err
= kstrtoul(buf
, 10, &value
);
3736 ksm_advisor_max_pages_to_scan
= value
;
3739 KSM_ATTR(advisor_max_pages_to_scan
);
3741 static ssize_t
advisor_target_scan_time_show(struct kobject
*kobj
,
3742 struct kobj_attribute
*attr
, char *buf
)
3744 return sysfs_emit(buf
, "%lu\n", ksm_advisor_target_scan_time
);
3747 static ssize_t
advisor_target_scan_time_store(struct kobject
*kobj
,
3748 struct kobj_attribute
*attr
,
3749 const char *buf
, size_t count
)
3752 unsigned long value
;
3754 err
= kstrtoul(buf
, 10, &value
);
3760 ksm_advisor_target_scan_time
= value
;
3763 KSM_ATTR(advisor_target_scan_time
);
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);