/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
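
/*
 * Illustration only (not part of the original header): userspace opts a
 * mapping into merging via madvise(2); the MADV_MERGEABLE and
 * MADV_UNMERGEABLE paths of madvise() are what end up calling
 * ksm_madvise().  A minimal sketch:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * Merging only happens once ksmd is running, i.e. after writing 1 to
 * /sys/kernel/mm/ksm/run.
 */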

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
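
/*
 * Illustration only (not part of the original header):
 * ksm_enable_merge_any() and ksm_disable_merge_any() back the per-process
 * opt-in and opt-out reached from userspace through prctl(2):
 *
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
 *	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
 */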

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}
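
/*
 * Sketch only (not part of the original header): these counters feed the
 * KSM zero-page statistics reported to userspace, e.g. a /proc reader such
 * as /proc/<pid>/ksm_stat is expected to sample the per-mm value through
 * the helper above, roughly:
 *
 *	seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 */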

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
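
/*
 * Sketch of the expected calling pattern (illustration only, based on the
 * do_swap_page() behaviour described above; local names are hypothetical):
 *
 *	new = ksm_might_need_to_copy(folio, vma, addr);
 *	if (!new || IS_ERR(new))
 *		...fail the fault: allocation failed or the page is poisoned...
 *	else if (new != folio)
 *		...map the private copy instead of the KSM/swapcache folio...
 */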

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */