// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
/* default scan 8*512 pte (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
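
/*
 * The knobs above are exported via sysfs under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ (see the attribute group
 * named "khugepaged" below). Illustrative shell example, value not a
 * recommendation:
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */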
/*
 * By default, collapse a hugepage if there is at least one pte mapped
 * the way it would have been mapped had the vma been large enough
 * during the page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
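
/*
 * The "defrag" knob below toggles TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG:
 * when set, alloc_hugepage_khugepaged_gfpmask() uses GFP_TRANSHUGE (direct
 * reclaim/compaction allowed), otherwise GFP_TRANSHUGE_LIGHT.
 */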
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
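
/*
 * Illustrative numbers (x86-64, 4 KiB base pages, 2 MiB PMD): HPAGE_PMD_NR is
 * 512, so max_ptes_none can be at most 511; the boot-time defaults set in
 * khugepaged_init() are max_ptes_none = 511 and max_ptes_swap = 64.
 */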
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
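/*
 * khugepaged never scans VMAs that carry any of the VM_NO_KHUGEPAGED flags,
 * i.e. special/IO mappings and hugetlbfs mappings.
 */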
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
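
/*
 * Note: the shmem branch above only accepts VMAs whose start address is
 * hugepage-aligned relative to the file offset, i.e. where a PMD could map an
 * aligned HPAGE_PMD_NR-page extent of the file.
 */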
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged does not yet work on non-shmem files or special
	 * mappings. And file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}
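
/*
 * __collapse_huge_page_isolate() below walks the HPAGE_PMD_NR ptes under the
 * pte lock. As implied by its callers, on success every mapped page is left
 * locked and isolated from the LRU; on failure everything already taken is
 * handed back through release_pte_pages().
 */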
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
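
/*
 * GFP_TRANSHUGE_LIGHT avoids direct reclaim and compaction entirely, so with
 * defrag disabled a failed hugepage allocation simply makes khugepaged back
 * off (see khugepaged_alloc_sleep()) instead of stalling to free memory.
 */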
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
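
/*
 * Example (numbers illustrative): if a scan found 300 of the 512 ptes on node
 * 0 and 212 on node 1, khugepaged_node_load[] ends up {300, 212, ...} and the
 * hugepage is allocated on node 0; ties are rotated between nodes across
 * calls via last_khugepaged_target_node.
 */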
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	}
	return true;
}
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif /* CONFIG_NUMA */
/*
 * If mmap_sem was temporarily dropped, revalidate the vma
 * before touching it again.
 * Returns 0 if it succeeds, otherwise returns a non-zero
 * value (scan code).
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}
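
/*
 * Both callers in this file (__collapse_huge_page_swapin() and
 * collapse_huge_page()) hold mmap_sem around this call; the returned *vmap is
 * only stable for as long as that lock is held.
 */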
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
	     vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
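
/*
 * collapse_huge_page() below drops mmap_sem to allocate the new hugepage,
 * retakes it for reading to swap in and revalidate, and finally upgrades to
 * the write side (plus the anon_vma lock and the PMD/PTE locks) for the
 * actual collapse, as can be traced through the code that follows.
 */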
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmu_notifier_range_init(&range, mm, address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from becoming
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		}
	}
	i_mmap_unlock_write(mapping);
}
/**
 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *new_page;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			mem_cgroup_cancel_charge(new_page, memcg, true);
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (!page) {
			/*
			 * Stop if extent has been truncated or hole-punched,
			 * and is now completely empty.
			 */
			if (index == start) {
				if (!xas_next_entry(&xas, end - 1)) {
					result = SCAN_TRUNCATED;
					break;
				}
				xas_set(&xas, index);
			}
			if (!shmem_charge(mapping->host, 1)) {
				result = SCAN_FAIL;
				break;
			}
			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
			nr_none++;
			continue;
		}

		if (xa_is_value(page) || !PageUptodate(page)) {
			xas_unlock_irq(&xas);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
					  SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto xa_unlocked;
			}
		} else if (trylock_page(page)) {
			get_page(page);
			xas_unlock_irq(&xas);
		} else {
			result = SCAN_PAGE_LOCK;
			goto xa_locked;
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	__inc_node_page_state(new_page, NR_SHMEM_THPS);
	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		set_page_dirty(new_page);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
		lru_cache_add_anon(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) != 1 + page_mapcount(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_shmem(mm, mapping, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (shmem_file(vma->vm_file)) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);
				if (!shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_shmem(mm, file->f_mapping,
						pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT - 10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
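
/*
 * Worked example (illustrative, x86-64 with 2 MiB pageblocks = 512 pages and
 * a single qualifying zone): 512 * 1 * 2 + 512 * 1 * 3 * 3 = 5632 pages, i.e.
 * 22528 kB, which is then capped at 5% of lowmem before being compared with
 * the current min_free_kbytes.
 */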
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}