/*
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}
static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}
void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}
static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}
static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
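
/*
 * Lifecycle sketch (derived from the code above, for orientation only):
 * get_huge_zero_page() installs the zero page with huge_zero_refcount == 2,
 * one reference for the caller and one held back for the shrinker.  User
 * references come and go via mm_get/put_huge_zero_page() but never reach
 * zero.  Once only the held-back reference remains (refcount == 1),
 * shrink_huge_zero_page_count() reports HPAGE_PMD_NR freeable pages and,
 * under memory pressure, shrink_huge_zero_page_scan() cmpxchg()es the
 * count 1 -> 0 and frees the page.
 */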
#ifdef CONFIG_SYSFS

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
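
/*
 * Illustrative userspace usage (not part of this file): the attribute
 * defined above is exported as /sys/kernel/mm/transparent_hugepage/enabled,
 * e.g.
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *	# echo never > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * A successful write also starts or stops khugepaged through
 * start_stop_khugepaged() in enabled_store() above.
 */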
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}
ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
		    min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
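
/*
 * Illustrative only: the boot-time equivalent of the sysfs "enabled" knob
 * is the kernel command line, e.g.
 *
 *	transparent_hugepage=madvise
 *
 * which is parsed by setup_transparent_hugepage() above and flips the same
 * TRANSPARENT_HUGEPAGE_*_FLAG bits before userspace starts.
 */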
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}
void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	unsigned long addr;
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
					      off >> PAGE_SHIFT, flags);
	if (IS_ERR_VALUE(addr))
		return 0;

	addr += (off - addr) & (size - 1);
	return addr;
}
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (addr)
		goto out;
	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
	if (addr)
		return addr;

 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
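
/*
 * Illustrative only (a sketch, not code from this file): a DAX-capable
 * filesystem can route mmap placement through the helper above so that
 * PMD-sized mappings receive PMD-aligned addresses.  Assuming a
 * hypothetical "foo" filesystem, the wiring would look like:
 *
 *	const struct file_operations foo_file_operations = {
 *		.mmap		   = foo_file_mmap,
 *		.get_unmapped_area = thp_get_unmapped_area,
 *	};
 *
 * Real DAX filesystems hook it up the same way in their file_operations.
 */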
547 static int __do_huge_pmd_anonymous_page(struct vm_fault
*vmf
, struct page
*page
,
550 struct vm_area_struct
*vma
= vmf
->vma
;
551 struct mem_cgroup
*memcg
;
553 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
556 VM_BUG_ON_PAGE(!PageCompound(page
), page
);
558 if (mem_cgroup_try_charge(page
, vma
->vm_mm
, gfp
| __GFP_NORETRY
, &memcg
,
561 count_vm_event(THP_FAULT_FALLBACK
);
562 return VM_FAULT_FALLBACK
;
565 pgtable
= pte_alloc_one(vma
->vm_mm
, haddr
);
566 if (unlikely(!pgtable
)) {
571 clear_huge_page(page
, vmf
->address
, HPAGE_PMD_NR
);
573 * The memory barrier inside __SetPageUptodate makes sure that
574 * clear_huge_page writes become visible before the set_pmd_at()
577 __SetPageUptodate(page
);
579 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
580 if (unlikely(!pmd_none(*vmf
->pmd
))) {
585 ret
= check_stable_address_space(vma
->vm_mm
);
589 /* Deliver the page fault to userland */
590 if (userfaultfd_missing(vma
)) {
593 spin_unlock(vmf
->ptl
);
594 mem_cgroup_cancel_charge(page
, memcg
, true);
596 pte_free(vma
->vm_mm
, pgtable
);
597 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
598 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
602 entry
= mk_huge_pmd(page
, vma
->vm_page_prot
);
603 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
604 page_add_new_anon_rmap(page
, vma
, haddr
, true);
605 mem_cgroup_commit_charge(page
, memcg
, false, true);
606 lru_cache_add_active_or_unevictable(page
, vma
);
607 pgtable_trans_huge_deposit(vma
->vm_mm
, vmf
->pmd
, pgtable
);
608 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
609 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
610 mm_inc_nr_ptes(vma
->vm_mm
);
611 spin_unlock(vmf
->ptl
);
612 count_vm_event(THP_FAULT_ALLOC
);
617 spin_unlock(vmf
->ptl
);
620 pte_free(vma
->vm_mm
, pgtable
);
621 mem_cgroup_cancel_charge(page
, memcg
, true);
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     __GFP_KSWAPD_RECLAIM);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     0);
	return GFP_TRANSHUGE_LIGHT;
}
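
/*
 * Worked example of the policy implemented above (derived from the code,
 * not an additional interface):
 *
 *	defrag setting	MADV_HUGEPAGE vma		other vmas
 *	always		GFP_TRANSHUGE			GFP_TRANSHUGE | __GFP_NORETRY
 *	defer		GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM (both)
 *	defer+madvise	..._LIGHT | __GFP_DIRECT_RECLAIM	..._LIGHT | __GFP_KSWAPD_RECLAIM
 *	madvise		..._LIGHT | __GFP_DIRECT_RECLAIM	..._LIGHT
 *	never		GFP_TRANSHUGE_LIGHT (both)
 */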
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}
670 int do_huge_pmd_anonymous_page(struct vm_fault
*vmf
)
672 struct vm_area_struct
*vma
= vmf
->vma
;
675 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
677 if (haddr
< vma
->vm_start
|| haddr
+ HPAGE_PMD_SIZE
> vma
->vm_end
)
678 return VM_FAULT_FALLBACK
;
679 if (unlikely(anon_vma_prepare(vma
)))
681 if (unlikely(khugepaged_enter(vma
, vma
->vm_flags
)))
683 if (!(vmf
->flags
& FAULT_FLAG_WRITE
) &&
684 !mm_forbids_zeropage(vma
->vm_mm
) &&
685 transparent_hugepage_use_zero_page()) {
687 struct page
*zero_page
;
690 pgtable
= pte_alloc_one(vma
->vm_mm
, haddr
);
691 if (unlikely(!pgtable
))
693 zero_page
= mm_get_huge_zero_page(vma
->vm_mm
);
694 if (unlikely(!zero_page
)) {
695 pte_free(vma
->vm_mm
, pgtable
);
696 count_vm_event(THP_FAULT_FALLBACK
);
697 return VM_FAULT_FALLBACK
;
699 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
702 if (pmd_none(*vmf
->pmd
)) {
703 ret
= check_stable_address_space(vma
->vm_mm
);
705 spin_unlock(vmf
->ptl
);
706 } else if (userfaultfd_missing(vma
)) {
707 spin_unlock(vmf
->ptl
);
708 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
709 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
711 set_huge_zero_page(pgtable
, vma
->vm_mm
, vma
,
712 haddr
, vmf
->pmd
, zero_page
);
713 spin_unlock(vmf
->ptl
);
717 spin_unlock(vmf
->ptl
);
719 pte_free(vma
->vm_mm
, pgtable
);
722 gfp
= alloc_hugepage_direct_gfpmask(vma
);
723 page
= alloc_hugepage_vma(gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
724 if (unlikely(!page
)) {
725 count_vm_event(THP_FAULT_FALLBACK
);
726 return VM_FAULT_FALLBACK
;
728 prep_transhuge_page(page
);
729 return __do_huge_pmd_anonymous_page(vmf
, page
, gfp
);
732 static void insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
733 pmd_t
*pmd
, pfn_t pfn
, pgprot_t prot
, bool write
,
736 struct mm_struct
*mm
= vma
->vm_mm
;
740 ptl
= pmd_lock(mm
, pmd
);
741 entry
= pmd_mkhuge(pfn_t_pmd(pfn
, prot
));
742 if (pfn_t_devmap(pfn
))
743 entry
= pmd_mkdevmap(entry
);
745 entry
= pmd_mkyoung(pmd_mkdirty(entry
));
746 entry
= maybe_pmd_mkwrite(entry
, vma
);
750 pgtable_trans_huge_deposit(mm
, pmd
, pgtable
);
754 set_pmd_at(mm
, addr
, pmd
, entry
);
755 update_mmu_cache_pmd(vma
, addr
, pmd
);
759 int vmf_insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
760 pmd_t
*pmd
, pfn_t pfn
, bool write
)
762 pgprot_t pgprot
= vma
->vm_page_prot
;
763 pgtable_t pgtable
= NULL
;
765 * If we had pmd_special, we could avoid all these restrictions,
766 * but we need to be consistent with PTEs and architectures that
767 * can't support a 'special' bit.
769 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)));
770 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
771 (VM_PFNMAP
|VM_MIXEDMAP
));
772 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
773 BUG_ON(!pfn_t_devmap(pfn
));
775 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
776 return VM_FAULT_SIGBUS
;
778 if (arch_needs_pgtable_deposit()) {
779 pgtable
= pte_alloc_one(vma
->vm_mm
, addr
);
784 track_pfn_insert(vma
, &pgprot
, pfn
);
786 insert_pfn_pmd(vma
, addr
, pmd
, pfn
, pgprot
, write
, pgtable
);
787 return VM_FAULT_NOPAGE
;
789 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd
);
791 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}
799 static void insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
800 pud_t
*pud
, pfn_t pfn
, pgprot_t prot
, bool write
)
802 struct mm_struct
*mm
= vma
->vm_mm
;
806 ptl
= pud_lock(mm
, pud
);
807 entry
= pud_mkhuge(pfn_t_pud(pfn
, prot
));
808 if (pfn_t_devmap(pfn
))
809 entry
= pud_mkdevmap(entry
);
811 entry
= pud_mkyoung(pud_mkdirty(entry
));
812 entry
= maybe_pud_mkwrite(entry
, vma
);
814 set_pud_at(mm
, addr
, pud
, entry
);
815 update_mmu_cache_pud(vma
, addr
, pud
);
819 int vmf_insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
820 pud_t
*pud
, pfn_t pfn
, bool write
)
822 pgprot_t pgprot
= vma
->vm_page_prot
;
824 * If we had pud_special, we could avoid all these restrictions,
825 * but we need to be consistent with PTEs and architectures that
826 * can't support a 'special' bit.
828 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)));
829 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
830 (VM_PFNMAP
|VM_MIXEDMAP
));
831 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
832 BUG_ON(!pfn_t_devmap(pfn
));
834 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
835 return VM_FAULT_SIGBUS
;
837 track_pfn_insert(vma
, &pgprot
, pfn
);
839 insert_pfn_pud(vma
, addr
, pud
, pfn
, pgprot
, write
);
840 return VM_FAULT_NOPAGE
;
842 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud
);
843 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}
858 struct page
*follow_devmap_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
859 pmd_t
*pmd
, int flags
)
861 unsigned long pfn
= pmd_pfn(*pmd
);
862 struct mm_struct
*mm
= vma
->vm_mm
;
863 struct dev_pagemap
*pgmap
;
866 assert_spin_locked(pmd_lockptr(mm
, pmd
));
869 * When we COW a devmap PMD entry, we split it into PTEs, so we should
870 * not be in this function with `flags & FOLL_COW` set.
872 WARN_ONCE(flags
& FOLL_COW
, "mm: In follow_devmap_pmd with FOLL_COW set");
874 if (flags
& FOLL_WRITE
&& !pmd_write(*pmd
))
877 if (pmd_present(*pmd
) && pmd_devmap(*pmd
))
882 if (flags
& FOLL_TOUCH
)
883 touch_pmd(vma
, addr
, pmd
, flags
);
886 * device mapped pages can only be returned if the
887 * caller will manage the page reference count.
889 if (!(flags
& FOLL_GET
))
890 return ERR_PTR(-EEXIST
);
892 pfn
+= (addr
& ~PMD_MASK
) >> PAGE_SHIFT
;
893 pgmap
= get_dev_pagemap(pfn
, NULL
);
895 return ERR_PTR(-EFAULT
);
896 page
= pfn_to_page(pfn
);
898 put_dev_pagemap(pgmap
);
903 int copy_huge_pmd(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
904 pmd_t
*dst_pmd
, pmd_t
*src_pmd
, unsigned long addr
,
905 struct vm_area_struct
*vma
)
907 spinlock_t
*dst_ptl
, *src_ptl
;
908 struct page
*src_page
;
910 pgtable_t pgtable
= NULL
;
913 /* Skip if can be re-fill on fault */
914 if (!vma_is_anonymous(vma
))
917 pgtable
= pte_alloc_one(dst_mm
, addr
);
918 if (unlikely(!pgtable
))
921 dst_ptl
= pmd_lock(dst_mm
, dst_pmd
);
922 src_ptl
= pmd_lockptr(src_mm
, src_pmd
);
923 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
928 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
929 if (unlikely(is_swap_pmd(pmd
))) {
930 swp_entry_t entry
= pmd_to_swp_entry(pmd
);
932 VM_BUG_ON(!is_pmd_migration_entry(pmd
));
933 if (is_write_migration_entry(entry
)) {
934 make_migration_entry_read(&entry
);
935 pmd
= swp_entry_to_pmd(entry
);
936 if (pmd_swp_soft_dirty(*src_pmd
))
937 pmd
= pmd_swp_mksoft_dirty(pmd
);
938 set_pmd_at(src_mm
, addr
, src_pmd
, pmd
);
940 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
941 mm_inc_nr_ptes(dst_mm
);
942 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
943 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
949 if (unlikely(!pmd_trans_huge(pmd
))) {
950 pte_free(dst_mm
, pgtable
);
954 * When page table lock is held, the huge zero pmd should not be
955 * under splitting since we don't split the page itself, only pmd to
958 if (is_huge_zero_pmd(pmd
)) {
959 struct page
*zero_page
;
961 * get_huge_zero_page() will never allocate a new page here,
962 * since we already have a zero page to copy. It just takes a
965 zero_page
= mm_get_huge_zero_page(dst_mm
);
966 set_huge_zero_page(pgtable
, dst_mm
, vma
, addr
, dst_pmd
,
972 src_page
= pmd_page(pmd
);
973 VM_BUG_ON_PAGE(!PageHead(src_page
), src_page
);
975 page_dup_rmap(src_page
, true);
976 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
977 mm_inc_nr_ptes(dst_mm
);
978 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
980 pmdp_set_wrprotect(src_mm
, addr
, src_pmd
);
981 pmd
= pmd_mkold(pmd_wrprotect(pmd
));
982 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
986 spin_unlock(src_ptl
);
987 spin_unlock(dst_ptl
);
992 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
993 static void touch_pud(struct vm_area_struct
*vma
, unsigned long addr
,
994 pud_t
*pud
, int flags
)
998 _pud
= pud_mkyoung(*pud
);
999 if (flags
& FOLL_WRITE
)
1000 _pud
= pud_mkdirty(_pud
);
1001 if (pudp_set_access_flags(vma
, addr
& HPAGE_PUD_MASK
,
1002 pud
, _pud
, flags
& FOLL_WRITE
))
1003 update_mmu_cache_pud(vma
, addr
, pud
);
1006 struct page
*follow_devmap_pud(struct vm_area_struct
*vma
, unsigned long addr
,
1007 pud_t
*pud
, int flags
)
1009 unsigned long pfn
= pud_pfn(*pud
);
1010 struct mm_struct
*mm
= vma
->vm_mm
;
1011 struct dev_pagemap
*pgmap
;
1014 assert_spin_locked(pud_lockptr(mm
, pud
));
1016 if (flags
& FOLL_WRITE
&& !pud_write(*pud
))
1019 if (pud_present(*pud
) && pud_devmap(*pud
))
1024 if (flags
& FOLL_TOUCH
)
1025 touch_pud(vma
, addr
, pud
, flags
);
1028 * device mapped pages can only be returned if the
1029 * caller will manage the page reference count.
1031 if (!(flags
& FOLL_GET
))
1032 return ERR_PTR(-EEXIST
);
1034 pfn
+= (addr
& ~PUD_MASK
) >> PAGE_SHIFT
;
1035 pgmap
= get_dev_pagemap(pfn
, NULL
);
1037 return ERR_PTR(-EFAULT
);
1038 page
= pfn_to_page(pfn
);
1040 put_dev_pagemap(pgmap
);
1045 int copy_huge_pud(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1046 pud_t
*dst_pud
, pud_t
*src_pud
, unsigned long addr
,
1047 struct vm_area_struct
*vma
)
1049 spinlock_t
*dst_ptl
, *src_ptl
;
1053 dst_ptl
= pud_lock(dst_mm
, dst_pud
);
1054 src_ptl
= pud_lockptr(src_mm
, src_pud
);
1055 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1059 if (unlikely(!pud_trans_huge(pud
) && !pud_devmap(pud
)))
1063 * When page table lock is held, the huge zero pud should not be
1064 * under splitting since we don't split the page itself, only pud to
1067 if (is_huge_zero_pud(pud
)) {
1068 /* No huge zero pud yet */
1071 pudp_set_wrprotect(src_mm
, addr
, src_pud
);
1072 pud
= pud_mkold(pud_wrprotect(pud
));
1073 set_pud_at(dst_mm
, addr
, dst_pud
, pud
);
1077 spin_unlock(src_ptl
);
1078 spin_unlock(dst_ptl
);
1082 void huge_pud_set_accessed(struct vm_fault
*vmf
, pud_t orig_pud
)
1085 unsigned long haddr
;
1086 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1088 vmf
->ptl
= pud_lock(vmf
->vma
->vm_mm
, vmf
->pud
);
1089 if (unlikely(!pud_same(*vmf
->pud
, orig_pud
)))
1092 entry
= pud_mkyoung(orig_pud
);
1094 entry
= pud_mkdirty(entry
);
1095 haddr
= vmf
->address
& HPAGE_PUD_MASK
;
1096 if (pudp_set_access_flags(vmf
->vma
, haddr
, vmf
->pud
, entry
, write
))
1097 update_mmu_cache_pud(vmf
->vma
, vmf
->address
, vmf
->pud
);
1100 spin_unlock(vmf
->ptl
);
1102 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1104 void huge_pmd_set_accessed(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1107 unsigned long haddr
;
1108 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1110 vmf
->ptl
= pmd_lock(vmf
->vma
->vm_mm
, vmf
->pmd
);
1111 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1114 entry
= pmd_mkyoung(orig_pmd
);
1116 entry
= pmd_mkdirty(entry
);
1117 haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1118 if (pmdp_set_access_flags(vmf
->vma
, haddr
, vmf
->pmd
, entry
, write
))
1119 update_mmu_cache_pmd(vmf
->vma
, vmf
->address
, vmf
->pmd
);
1122 spin_unlock(vmf
->ptl
);
1125 static int do_huge_pmd_wp_page_fallback(struct vm_fault
*vmf
, pmd_t orig_pmd
,
1128 struct vm_area_struct
*vma
= vmf
->vma
;
1129 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1130 struct mem_cgroup
*memcg
;
1134 struct page
**pages
;
1135 unsigned long mmun_start
; /* For mmu_notifiers */
1136 unsigned long mmun_end
; /* For mmu_notifiers */
1138 pages
= kmalloc(sizeof(struct page
*) * HPAGE_PMD_NR
,
1140 if (unlikely(!pages
)) {
1141 ret
|= VM_FAULT_OOM
;
1145 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1146 pages
[i
] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE
, vma
,
1147 vmf
->address
, page_to_nid(page
));
1148 if (unlikely(!pages
[i
] ||
1149 mem_cgroup_try_charge(pages
[i
], vma
->vm_mm
,
1150 GFP_KERNEL
, &memcg
, false))) {
1154 memcg
= (void *)page_private(pages
[i
]);
1155 set_page_private(pages
[i
], 0);
1156 mem_cgroup_cancel_charge(pages
[i
], memcg
,
1161 ret
|= VM_FAULT_OOM
;
1164 set_page_private(pages
[i
], (unsigned long)memcg
);
1167 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1168 copy_user_highpage(pages
[i
], page
+ i
,
1169 haddr
+ PAGE_SIZE
* i
, vma
);
1170 __SetPageUptodate(pages
[i
]);
1175 mmun_end
= haddr
+ HPAGE_PMD_SIZE
;
1176 mmu_notifier_invalidate_range_start(vma
->vm_mm
, mmun_start
, mmun_end
);
1178 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1179 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1180 goto out_free_pages
;
1181 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1184 * Leave pmd empty until pte is filled note we must notify here as
1185 * concurrent CPU thread might write to new page before the call to
1186 * mmu_notifier_invalidate_range_end() happens which can lead to a
1187 * device seeing memory write in different order than CPU.
1189 * See Documentation/vm/mmu_notifier.txt
1191 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1193 pgtable
= pgtable_trans_huge_withdraw(vma
->vm_mm
, vmf
->pmd
);
1194 pmd_populate(vma
->vm_mm
, &_pmd
, pgtable
);
1196 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
1198 entry
= mk_pte(pages
[i
], vma
->vm_page_prot
);
1199 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
1200 memcg
= (void *)page_private(pages
[i
]);
1201 set_page_private(pages
[i
], 0);
1202 page_add_new_anon_rmap(pages
[i
], vmf
->vma
, haddr
, false);
1203 mem_cgroup_commit_charge(pages
[i
], memcg
, false, false);
1204 lru_cache_add_active_or_unevictable(pages
[i
], vma
);
1205 vmf
->pte
= pte_offset_map(&_pmd
, haddr
);
1206 VM_BUG_ON(!pte_none(*vmf
->pte
));
1207 set_pte_at(vma
->vm_mm
, haddr
, vmf
->pte
, entry
);
1208 pte_unmap(vmf
->pte
);
1212 smp_wmb(); /* make pte visible before pmd */
1213 pmd_populate(vma
->vm_mm
, vmf
->pmd
, pgtable
);
1214 page_remove_rmap(page
, true);
1215 spin_unlock(vmf
->ptl
);
1218 * No need to double call mmu_notifier->invalidate_range() callback as
1219 * the above pmdp_huge_clear_flush_notify() did already call it.
1221 mmu_notifier_invalidate_range_only_end(vma
->vm_mm
, mmun_start
,
1224 ret
|= VM_FAULT_WRITE
;
1231 spin_unlock(vmf
->ptl
);
1232 mmu_notifier_invalidate_range_end(vma
->vm_mm
, mmun_start
, mmun_end
);
1233 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1234 memcg
= (void *)page_private(pages
[i
]);
1235 set_page_private(pages
[i
], 0);
1236 mem_cgroup_cancel_charge(pages
[i
], memcg
, false);
1243 int do_huge_pmd_wp_page(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1245 struct vm_area_struct
*vma
= vmf
->vma
;
1246 struct page
*page
= NULL
, *new_page
;
1247 struct mem_cgroup
*memcg
;
1248 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1249 unsigned long mmun_start
; /* For mmu_notifiers */
1250 unsigned long mmun_end
; /* For mmu_notifiers */
1251 gfp_t huge_gfp
; /* for allocation and charge */
1254 vmf
->ptl
= pmd_lockptr(vma
->vm_mm
, vmf
->pmd
);
1255 VM_BUG_ON_VMA(!vma
->anon_vma
, vma
);
1256 if (is_huge_zero_pmd(orig_pmd
))
1258 spin_lock(vmf
->ptl
);
1259 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1262 page
= pmd_page(orig_pmd
);
1263 VM_BUG_ON_PAGE(!PageCompound(page
) || !PageHead(page
), page
);
1265 * We can only reuse the page if nobody else maps the huge page or it's
1268 if (!trylock_page(page
)) {
1270 spin_unlock(vmf
->ptl
);
1272 spin_lock(vmf
->ptl
);
1273 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1280 if (reuse_swap_page(page
, NULL
)) {
1282 entry
= pmd_mkyoung(orig_pmd
);
1283 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1284 if (pmdp_set_access_flags(vma
, haddr
, vmf
->pmd
, entry
, 1))
1285 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1286 ret
|= VM_FAULT_WRITE
;
1292 spin_unlock(vmf
->ptl
);
1294 if (transparent_hugepage_enabled(vma
) &&
1295 !transparent_hugepage_debug_cow()) {
1296 huge_gfp
= alloc_hugepage_direct_gfpmask(vma
);
1297 new_page
= alloc_hugepage_vma(huge_gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
1301 if (likely(new_page
)) {
1302 prep_transhuge_page(new_page
);
1305 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1306 ret
|= VM_FAULT_FALLBACK
;
1308 ret
= do_huge_pmd_wp_page_fallback(vmf
, orig_pmd
, page
);
1309 if (ret
& VM_FAULT_OOM
) {
1310 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1311 ret
|= VM_FAULT_FALLBACK
;
1315 count_vm_event(THP_FAULT_FALLBACK
);
1319 if (unlikely(mem_cgroup_try_charge(new_page
, vma
->vm_mm
,
1320 huge_gfp
| __GFP_NORETRY
, &memcg
, true))) {
1322 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1325 ret
|= VM_FAULT_FALLBACK
;
1326 count_vm_event(THP_FAULT_FALLBACK
);
1330 count_vm_event(THP_FAULT_ALLOC
);
1333 clear_huge_page(new_page
, vmf
->address
, HPAGE_PMD_NR
);
1335 copy_user_huge_page(new_page
, page
, haddr
, vma
, HPAGE_PMD_NR
);
1336 __SetPageUptodate(new_page
);
1339 mmun_end
= haddr
+ HPAGE_PMD_SIZE
;
1340 mmu_notifier_invalidate_range_start(vma
->vm_mm
, mmun_start
, mmun_end
);
1342 spin_lock(vmf
->ptl
);
1345 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1346 spin_unlock(vmf
->ptl
);
1347 mem_cgroup_cancel_charge(new_page
, memcg
, true);
1352 entry
= mk_huge_pmd(new_page
, vma
->vm_page_prot
);
1353 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1354 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1355 page_add_new_anon_rmap(new_page
, vma
, haddr
, true);
1356 mem_cgroup_commit_charge(new_page
, memcg
, false, true);
1357 lru_cache_add_active_or_unevictable(new_page
, vma
);
1358 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
1359 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1361 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1363 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1364 page_remove_rmap(page
, true);
1367 ret
|= VM_FAULT_WRITE
;
1369 spin_unlock(vmf
->ptl
);
1372 * No need to double call mmu_notifier->invalidate_range() callback as
1373 * the above pmdp_huge_clear_flush_notify() did already call it.
1375 mmu_notifier_invalidate_range_only_end(vma
->vm_mm
, mmun_start
,
1380 spin_unlock(vmf
->ptl
);
/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
1394 struct page
*follow_trans_huge_pmd(struct vm_area_struct
*vma
,
1399 struct mm_struct
*mm
= vma
->vm_mm
;
1400 struct page
*page
= NULL
;
1402 assert_spin_locked(pmd_lockptr(mm
, pmd
));
1404 if (flags
& FOLL_WRITE
&& !can_follow_write_pmd(*pmd
, flags
))
1407 /* Avoid dumping huge zero page */
1408 if ((flags
& FOLL_DUMP
) && is_huge_zero_pmd(*pmd
))
1409 return ERR_PTR(-EFAULT
);
1411 /* Full NUMA hinting faults to serialise migration in fault paths */
1412 if ((flags
& FOLL_NUMA
) && pmd_protnone(*pmd
))
1415 page
= pmd_page(*pmd
);
1416 VM_BUG_ON_PAGE(!PageHead(page
) && !is_zone_device_page(page
), page
);
1417 if (flags
& FOLL_TOUCH
)
1418 touch_pmd(vma
, addr
, pmd
, flags
);
1419 if ((flags
& FOLL_MLOCK
) && (vma
->vm_flags
& VM_LOCKED
)) {
1421 * We don't mlock() pte-mapped THPs. This way we can avoid
1422 * leaking mlocked pages into non-VM_LOCKED VMAs.
1426 * In most cases the pmd is the only mapping of the page as we
1427 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1428 * writable private mappings in populate_vma_page_range().
1430 * The only scenario when we have the page shared here is if we
1431 * mlocking read-only mapping shared over fork(). We skip
1432 * mlocking such pages.
1436 * We can expect PageDoubleMap() to be stable under page lock:
1437 * for file pages we set it in page_add_file_rmap(), which
1438 * requires page to be locked.
1441 if (PageAnon(page
) && compound_mapcount(page
) != 1)
1443 if (PageDoubleMap(page
) || !page
->mapping
)
1445 if (!trylock_page(page
))
1448 if (page
->mapping
&& !PageDoubleMap(page
))
1449 mlock_vma_page(page
);
1453 page
+= (addr
& ~HPAGE_PMD_MASK
) >> PAGE_SHIFT
;
1454 VM_BUG_ON_PAGE(!PageCompound(page
) && !is_zone_device_page(page
), page
);
1455 if (flags
& FOLL_GET
)
1462 /* NUMA hinting page fault entry point for trans huge pmds */
1463 int do_huge_pmd_numa_page(struct vm_fault
*vmf
, pmd_t pmd
)
1465 struct vm_area_struct
*vma
= vmf
->vma
;
1466 struct anon_vma
*anon_vma
= NULL
;
1468 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1469 int page_nid
= -1, this_nid
= numa_node_id();
1470 int target_nid
, last_cpupid
= -1;
1472 bool migrated
= false;
1476 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1477 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
)))
1481 * If there are potential migrations, wait for completion and retry
1482 * without disrupting NUMA hinting information. Do not relock and
1483 * check_same as the page may no longer be mapped.
1485 if (unlikely(pmd_trans_migrating(*vmf
->pmd
))) {
1486 page
= pmd_page(*vmf
->pmd
);
1487 if (!get_page_unless_zero(page
))
1489 spin_unlock(vmf
->ptl
);
1490 wait_on_page_locked(page
);
1495 page
= pmd_page(pmd
);
1496 BUG_ON(is_huge_zero_page(page
));
1497 page_nid
= page_to_nid(page
);
1498 last_cpupid
= page_cpupid_last(page
);
1499 count_vm_numa_event(NUMA_HINT_FAULTS
);
1500 if (page_nid
== this_nid
) {
1501 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL
);
1502 flags
|= TNF_FAULT_LOCAL
;
1505 /* See similar comment in do_numa_page for explanation */
1506 if (!pmd_savedwrite(pmd
))
1507 flags
|= TNF_NO_GROUP
;
1510 * Acquire the page lock to serialise THP migrations but avoid dropping
1511 * page_table_lock if at all possible
1513 page_locked
= trylock_page(page
);
1514 target_nid
= mpol_misplaced(page
, vma
, haddr
);
1515 if (target_nid
== -1) {
1516 /* If the page was locked, there are no parallel migrations */
1521 /* Migration could have started since the pmd_trans_migrating check */
1524 if (!get_page_unless_zero(page
))
1526 spin_unlock(vmf
->ptl
);
1527 wait_on_page_locked(page
);
1533 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1534 * to serialises splits
1537 spin_unlock(vmf
->ptl
);
1538 anon_vma
= page_lock_anon_vma_read(page
);
1540 /* Confirm the PMD did not change while page_table_lock was released */
1541 spin_lock(vmf
->ptl
);
1542 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
))) {
1549 /* Bail if we fail to protect against THP splits for any reason */
1550 if (unlikely(!anon_vma
)) {
1557 * Since we took the NUMA fault, we must have observed the !accessible
1558 * bit. Make sure all other CPUs agree with that, to avoid them
1559 * modifying the page we're about to migrate.
1561 * Must be done under PTL such that we'll observe the relevant
1562 * inc_tlb_flush_pending().
1564 * We are not sure a pending tlb flush here is for a huge page
1565 * mapping or not. Hence use the tlb range variant
1567 if (mm_tlb_flush_pending(vma
->vm_mm
))
1568 flush_tlb_range(vma
, haddr
, haddr
+ HPAGE_PMD_SIZE
);
1571 * Migrate the THP to the requested node, returns with page unlocked
1572 * and access rights restored.
1574 spin_unlock(vmf
->ptl
);
1576 migrated
= migrate_misplaced_transhuge_page(vma
->vm_mm
, vma
,
1577 vmf
->pmd
, pmd
, vmf
->address
, page
, target_nid
);
1579 flags
|= TNF_MIGRATED
;
1580 page_nid
= target_nid
;
1582 flags
|= TNF_MIGRATE_FAIL
;
1586 BUG_ON(!PageLocked(page
));
1587 was_writable
= pmd_savedwrite(pmd
);
1588 pmd
= pmd_modify(pmd
, vma
->vm_page_prot
);
1589 pmd
= pmd_mkyoung(pmd
);
1591 pmd
= pmd_mkwrite(pmd
);
1592 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, pmd
);
1593 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1596 spin_unlock(vmf
->ptl
);
1600 page_unlock_anon_vma_read(anon_vma
);
1603 task_numa_fault(last_cpupid
, page_nid
, HPAGE_PMD_NR
,
1610 * Return true if we do MADV_FREE successfully on entire pmd page.
1611 * Otherwise, return false.
1613 bool madvise_free_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1614 pmd_t
*pmd
, unsigned long addr
, unsigned long next
)
1619 struct mm_struct
*mm
= tlb
->mm
;
1622 tlb_remove_check_page_size_change(tlb
, HPAGE_PMD_SIZE
);
1624 ptl
= pmd_trans_huge_lock(pmd
, vma
);
1629 if (is_huge_zero_pmd(orig_pmd
))
1632 if (unlikely(!pmd_present(orig_pmd
))) {
1633 VM_BUG_ON(thp_migration_supported() &&
1634 !is_pmd_migration_entry(orig_pmd
));
1638 page
= pmd_page(orig_pmd
);
1640 * If other processes are mapping this page, we couldn't discard
1641 * the page unless they all do MADV_FREE so let's skip the page.
1643 if (page_mapcount(page
) != 1)
1646 if (!trylock_page(page
))
1650 * If user want to discard part-pages of THP, split it so MADV_FREE
1651 * will deactivate only them.
1653 if (next
- addr
!= HPAGE_PMD_SIZE
) {
1656 split_huge_page(page
);
1662 if (PageDirty(page
))
1663 ClearPageDirty(page
);
1666 if (pmd_young(orig_pmd
) || pmd_dirty(orig_pmd
)) {
1667 pmdp_invalidate(vma
, addr
, pmd
);
1668 orig_pmd
= pmd_mkold(orig_pmd
);
1669 orig_pmd
= pmd_mkclean(orig_pmd
);
1671 set_pmd_at(mm
, addr
, pmd
, orig_pmd
);
1672 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1675 mark_page_lazyfree(page
);
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}
1692 int zap_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1693 pmd_t
*pmd
, unsigned long addr
)
1698 tlb_remove_check_page_size_change(tlb
, HPAGE_PMD_SIZE
);
1700 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1704 * For architectures like ppc64 we look at deposited pgtable
1705 * when calling pmdp_huge_get_and_clear. So do the
1706 * pgtable_trans_huge_withdraw after finishing pmdp related
1709 orig_pmd
= pmdp_huge_get_and_clear_full(tlb
->mm
, addr
, pmd
,
1711 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1712 if (vma_is_dax(vma
)) {
1713 if (arch_needs_pgtable_deposit())
1714 zap_deposited_table(tlb
->mm
, pmd
);
1716 if (is_huge_zero_pmd(orig_pmd
))
1717 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1718 } else if (is_huge_zero_pmd(orig_pmd
)) {
1719 zap_deposited_table(tlb
->mm
, pmd
);
1721 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1723 struct page
*page
= NULL
;
1724 int flush_needed
= 1;
1726 if (pmd_present(orig_pmd
)) {
1727 page
= pmd_page(orig_pmd
);
1728 page_remove_rmap(page
, true);
1729 VM_BUG_ON_PAGE(page_mapcount(page
) < 0, page
);
1730 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1731 } else if (thp_migration_supported()) {
1734 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd
));
1735 entry
= pmd_to_swp_entry(orig_pmd
);
1736 page
= pfn_to_page(swp_offset(entry
));
1739 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1741 if (PageAnon(page
)) {
1742 zap_deposited_table(tlb
->mm
, pmd
);
1743 add_mm_counter(tlb
->mm
, MM_ANONPAGES
, -HPAGE_PMD_NR
);
1745 if (arch_needs_pgtable_deposit())
1746 zap_deposited_table(tlb
->mm
, pmd
);
1747 add_mm_counter(tlb
->mm
, MM_FILEPAGES
, -HPAGE_PMD_NR
);
1752 tlb_remove_page_size(tlb
, page
, HPAGE_PMD_SIZE
);
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif
move_soft_dirty_pmd(pmd_t pmd
)
1774 #ifdef CONFIG_MEM_SOFT_DIRTY
1775 if (unlikely(is_pmd_migration_entry(pmd
)))
1776 pmd
= pmd_swp_mksoft_dirty(pmd
);
1777 else if (pmd_present(pmd
))
1778 pmd
= pmd_mksoft_dirty(pmd
);
1783 bool move_huge_pmd(struct vm_area_struct
*vma
, unsigned long old_addr
,
1784 unsigned long new_addr
, unsigned long old_end
,
1785 pmd_t
*old_pmd
, pmd_t
*new_pmd
, bool *need_flush
)
1787 spinlock_t
*old_ptl
, *new_ptl
;
1789 struct mm_struct
*mm
= vma
->vm_mm
;
1790 bool force_flush
= false;
1792 if ((old_addr
& ~HPAGE_PMD_MASK
) ||
1793 (new_addr
& ~HPAGE_PMD_MASK
) ||
1794 old_end
- old_addr
< HPAGE_PMD_SIZE
)
1798 * The destination pmd shouldn't be established, free_pgtables()
1799 * should have release it.
1801 if (WARN_ON(!pmd_none(*new_pmd
))) {
1802 VM_BUG_ON(pmd_trans_huge(*new_pmd
));
1807 * We don't have to worry about the ordering of src and dst
1808 * ptlocks because exclusive mmap_sem prevents deadlock.
1810 old_ptl
= __pmd_trans_huge_lock(old_pmd
, vma
);
1812 new_ptl
= pmd_lockptr(mm
, new_pmd
);
1813 if (new_ptl
!= old_ptl
)
1814 spin_lock_nested(new_ptl
, SINGLE_DEPTH_NESTING
);
1815 pmd
= pmdp_huge_get_and_clear(mm
, old_addr
, old_pmd
);
1816 if (pmd_present(pmd
) && pmd_dirty(pmd
))
1818 VM_BUG_ON(!pmd_none(*new_pmd
));
1820 if (pmd_move_must_withdraw(new_ptl
, old_ptl
, vma
)) {
1822 pgtable
= pgtable_trans_huge_withdraw(mm
, old_pmd
);
1823 pgtable_trans_huge_deposit(mm
, new_pmd
, pgtable
);
1825 pmd
= move_soft_dirty_pmd(pmd
);
1826 set_pmd_at(mm
, new_addr
, new_pmd
, pmd
);
1827 if (new_ptl
!= old_ptl
)
1828 spin_unlock(new_ptl
);
1830 flush_tlb_range(vma
, old_addr
, old_addr
+ PMD_SIZE
);
1833 spin_unlock(old_ptl
);
/*
 * Returns:
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */
1845 int change_huge_pmd(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1846 unsigned long addr
, pgprot_t newprot
, int prot_numa
)
1848 struct mm_struct
*mm
= vma
->vm_mm
;
1851 bool preserve_write
;
1854 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1858 preserve_write
= prot_numa
&& pmd_write(*pmd
);
1861 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1862 if (is_swap_pmd(*pmd
)) {
1863 swp_entry_t entry
= pmd_to_swp_entry(*pmd
);
1865 VM_BUG_ON(!is_pmd_migration_entry(*pmd
));
1866 if (is_write_migration_entry(entry
)) {
1869 * A protection check is difficult so
1870 * just be safe and disable write
1872 make_migration_entry_read(&entry
);
1873 newpmd
= swp_entry_to_pmd(entry
);
1874 if (pmd_swp_soft_dirty(*pmd
))
1875 newpmd
= pmd_swp_mksoft_dirty(newpmd
);
1876 set_pmd_at(mm
, addr
, pmd
, newpmd
);
1883 * Avoid trapping faults against the zero page. The read-only
1884 * data is likely to be read-cached on the local CPU and
1885 * local/remote hits to the zero page are not interesting.
1887 if (prot_numa
&& is_huge_zero_pmd(*pmd
))
1890 if (prot_numa
&& pmd_protnone(*pmd
))
1894 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1895 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1896 * which is also under down_read(mmap_sem):
1899 * change_huge_pmd(prot_numa=1)
1900 * pmdp_huge_get_and_clear_notify()
1901 * madvise_dontneed()
1903 * pmd_trans_huge(*pmd) == 0 (without ptl)
1906 * // pmd is re-established
1908 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1909 * which may break userspace.
1911 * pmdp_invalidate() is required to make sure we don't miss
1912 * dirty/young flags set by hardware.
1914 entry
= pmdp_invalidate(vma
, addr
, pmd
);
1916 entry
= pmd_modify(entry
, newprot
);
1918 entry
= pmd_mk_savedwrite(entry
);
1920 set_pmd_at(mm
, addr
, pmd
, entry
);
1921 BUG_ON(vma_is_anonymous(vma
) && !preserve_write
&& pmd_write(entry
));
1928 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1930 * Note that if it returns page table lock pointer, this routine returns without
1931 * unlocking page table lock. So callers must unlock it.
1933 spinlock_t
*__pmd_trans_huge_lock(pmd_t
*pmd
, struct vm_area_struct
*vma
)
1936 ptl
= pmd_lock(vma
->vm_mm
, pmd
);
1937 if (likely(is_swap_pmd(*pmd
) || pmd_trans_huge(*pmd
) ||
1945 * Returns true if a given pud maps a thp, false otherwise.
1947 * Note that if it returns true, this routine returns without unlocking page
1948 * table lock. So callers must unlock it.
1950 spinlock_t
*__pud_trans_huge_lock(pud_t
*pud
, struct vm_area_struct
*vma
)
1954 ptl
= pud_lock(vma
->vm_mm
, pud
);
1955 if (likely(pud_trans_huge(*pud
) || pud_devmap(*pud
)))
1961 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1962 int zap_huge_pud(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1963 pud_t
*pud
, unsigned long addr
)
1968 ptl
= __pud_trans_huge_lock(pud
, vma
);
1972 * For architectures like ppc64 we look at deposited pgtable
1973 * when calling pudp_huge_get_and_clear. So do the
1974 * pgtable_trans_huge_withdraw after finishing pudp related
1977 orig_pud
= pudp_huge_get_and_clear_full(tlb
->mm
, addr
, pud
,
1979 tlb_remove_pud_tlb_entry(tlb
, pud
, addr
);
1980 if (vma_is_dax(vma
)) {
1982 /* No zero page support yet */
1984 /* No support for anonymous PUD pages yet */
1990 static void __split_huge_pud_locked(struct vm_area_struct
*vma
, pud_t
*pud
,
1991 unsigned long haddr
)
1993 VM_BUG_ON(haddr
& ~HPAGE_PUD_MASK
);
1994 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
1995 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PUD_SIZE
, vma
);
1996 VM_BUG_ON(!pud_trans_huge(*pud
) && !pud_devmap(*pud
));
1998 count_vm_event(THP_SPLIT_PUD
);
2000 pudp_huge_clear_flush_notify(vma
, haddr
, pud
);
2003 void __split_huge_pud(struct vm_area_struct
*vma
, pud_t
*pud
,
2004 unsigned long address
)
2007 struct mm_struct
*mm
= vma
->vm_mm
;
2008 unsigned long haddr
= address
& HPAGE_PUD_MASK
;
2010 mmu_notifier_invalidate_range_start(mm
, haddr
, haddr
+ HPAGE_PUD_SIZE
);
2011 ptl
= pud_lock(mm
, pud
);
2012 if (unlikely(!pud_trans_huge(*pud
) && !pud_devmap(*pud
)))
2014 __split_huge_pud_locked(vma
, pud
, haddr
);
2019 * No need to double call mmu_notifier->invalidate_range() callback as
2020 * the above pudp_huge_clear_flush_notify() did already call it.
2022 mmu_notifier_invalidate_range_only_end(mm
, haddr
, haddr
+
2025 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2027 static void __split_huge_zero_page_pmd(struct vm_area_struct
*vma
,
2028 unsigned long haddr
, pmd_t
*pmd
)
2030 struct mm_struct
*mm
= vma
->vm_mm
;
2036 * Leave pmd empty until pte is filled note that it is fine to delay
2037 * notification until mmu_notifier_invalidate_range_end() as we are
2038 * replacing a zero pmd write protected page with a zero pte write
2041 * See Documentation/vm/mmu_notifier.txt
2043 pmdp_huge_clear_flush(vma
, haddr
, pmd
);
2045 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
2046 pmd_populate(mm
, &_pmd
, pgtable
);
2048 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
2050 entry
= pfn_pte(my_zero_pfn(haddr
), vma
->vm_page_prot
);
2051 entry
= pte_mkspecial(entry
);
2052 pte
= pte_offset_map(&_pmd
, haddr
);
2053 VM_BUG_ON(!pte_none(*pte
));
2054 set_pte_at(mm
, haddr
, pte
, entry
);
2057 smp_wmb(); /* make pte visible before pmd */
2058 pmd_populate(mm
, pmd
, pgtable
);
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t old_pmd, _pmd;
	bool young, write, soft_dirty, pmd_migration = false;
	unsigned long addr;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_dax(vma))
			return;
		page = pmd_page(_pmd);
		if (!PageReferenced(page) && pmd_young(_pmd))
			SetPageReferenced(page);
		page_remove_rmap(page, true);
		put_page(page);
		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
		return;
	} else if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate the secondary mmu by calling
		 * mmu_notifier_invalidate_range()? See the comments below
		 * inside __split_huge_pmd().
		 *
		 * We are going from a zero huge page write protected to a zero
		 * small page also write protected, so it does not seem useful
		 * to invalidate the secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPUs don't like that.
	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
	 * 383 on page 93. Intel should be safe but it also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case here).
	 * But it is generally safer to never allow small and huge TLB entries
	 * for the same virtual address to be loaded simultaneously. So instead
	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
	 * current pmd notpresent (atomically because here the pmd_trans_huge
	 * must remain set at all times on the pmd until the split is complete
	 * for this pmd), then we flush the SMP TLB and finally we write the
	 * non-huge version of the pmd entry with pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (pmd_migration) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_to_page(swp_offset(entry));
	} else
#endif
		page = pmd_page(old_pmd);
	VM_BUG_ON_PAGE(!page_count(page), page);
	page_ref_add(page, HPAGE_PMD_NR - 1);
	if (pmd_dirty(old_pmd))
		SetPageDirty(page);
	write = pmd_write(old_pmd);
	young = pmd_young(old_pmd);
	soft_dirty = pmd_soft_dirty(old_pmd);

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;
			swp_entry = make_migration_entry(page + i, write);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
				entry = pte_mkold(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
		}
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		atomic_inc(&page[i]._mapcount);
		pte_unmap(pte);
	}

	/*
	 * Set PG_double_map before dropping compound_mapcount to avoid
	 * false-negative page_mapped().
	 */
	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			atomic_inc(&page[i]._mapcount);
	}

	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
		/* Last compound_mapcount is gone. */
		__dec_node_page_state(page, NR_ANON_THPS);
		if (TestClearPageDoubleMap(page)) {
			/* No need in mapcount reference anymore */
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_dec(&page[i]._mapcount);
		}
	}

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}
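
/*
 * __split_huge_pmd() is the locking wrapper around the helper above: it
 * takes the pmd spinlock, rechecks that the pmd still maps the expected
 * huge page, and brackets the operation with mmu_notifier range calls.
 * A caller that already knows which pmd it wants to split therefore only
 * needs something like (illustrative sketch, not a call site in this file):
 *
 *	__split_huge_pmd(vma, pmd, address, false, NULL);
 *
 * passing freeze=true and a page only when setting up migration entries
 * for a full page split.
 */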
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
	ptl = pmd_lock(mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page && page != pmd_page(*pmd))
		goto out;

	if (pmd_trans_huge(*pmd)) {
		page = pmd_page(*pmd);
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
	spin_unlock(ptl);
	/*
	 * No need to double call the mmu_notifier->invalidate_range()
	 * callback. There are 3 cases to consider inside
	 * __split_huge_pmd_locked():
	 *  1) pmdp_huge_clear_flush_notify() calls invalidate_range(), which
	 *     is the obvious case.
	 *  2) __split_huge_zero_page_pmd() replaces a read-only zero page,
	 *     and any write fault will trigger a flush_notify before pointing
	 *     to a new page (it is fine if the secondary mmu keeps pointing
	 *     to the old zero page in the meantime).
	 *  3) Splitting a huge pmd into ptes pointing to the same page: no
	 *     need to invalidate the secondary tlb entries, they are all
	 *     still valid, and any further change to an individual pte will
	 *     notify. So there is no need to call
	 *     mmu_notifier->invalidate_range().
	 */
	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
					       HPAGE_PMD_SIZE);
}
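
/*
 * split_huge_pmd_address() below is the convenience form used when only a
 * virtual address is known: it walks pgd -> p4d -> pud -> pmd, bailing out
 * as soon as a level is not present, and then hands the pmd to
 * __split_huge_pmd().
 */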
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}
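
/*
 * freeze_page()/unfreeze_page() bracket the page split: freeze_page()
 * unmaps the huge page, replacing its mappings with migration entries
 * (TTU_SPLIT_FREEZE for anonymous pages) so nothing can map it while the
 * compound structure is taken apart; unfreeze_page() removes those
 * migration entries again once the split is done or has failed.
 */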
static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}
static void unfreeze_page(struct page *page)
{
	int i;

	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}
static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);

	/*
	 * tail_page->_refcount is zero and not changing from under us. But
	 * get_page_unless_zero() may be running from under us on the
	 * tail_page. If we used atomic_set() below instead of atomic_inc() or
	 * atomic_add(), we would then run atomic_set() concurrently with
	 * get_page_unless_zero(), and atomic_set() is implemented in C not
	 * using locked ops. spin_unlock on x86 sometimes uses locked ops
	 * because of PPro errata 66, 92, so unless somebody can guarantee
	 * atomic_set() here would be safe on all archs (and not only on x86),
	 * it's safer to use atomic_inc()/atomic_add().
	 */
	if (PageAnon(head) && !PageSwapCache(head)) {
		page_ref_inc(page_tail);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(page_tail, 2);
	}

	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/*
	 * After clearing PageTail the gup refcount can be released.
	 * Page flags also must be visible before we make the page non-compound.
	 */
	smp_wmb();

	clear_compound_head(page_tail);

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;

	page_tail->index = head->index + tail;
	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
	lru_add_page_tail(head, page_tail, lruvec, list);
}
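
/*
 * __split_huge_page() finishes the split after the page has been frozen:
 * tail pages are processed from the last one down so that the head keeps
 * its compound state until everything else is ready, file pages beyond
 * i_size are dropped from the page cache, and only then is the head made
 * non-compound and every subpage unfrozen, unlocked and released.
 */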
static void __split_huge_page(struct page *page, struct list_head *list,
		unsigned long flags)
{
	struct page *head = compound_head(page);
	struct zone *zone = page_zone(head);
	struct lruvec *lruvec;
	pgoff_t end = -1;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);

	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(head);

	if (!PageAnon(page))
		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}

	ClearPageCompound(head);
	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to radix tree of swap cache */
		if (PageSwapCache(head))
			page_ref_add(head, 2);
		else
			page_ref_inc(head);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(head, 2);
		spin_unlock(&head->mapping->tree_lock);
	}

	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);

	unfreeze_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}
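
/*
 * total_mapcount() counts every mapping of a compound page: the pmd
 * (compound) mapping plus each pte mapping of each subpage, with the
 * double-counting for PageDoubleMap and file pages corrected below.
 * For a non-compound page it is simply _mapcount + 1.
 */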
int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}
/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying them. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}
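
/*
 * Worked example (illustrative; HPAGE_PMD_NR is 512 on x86-64 with 4K base
 * pages): if process A pte-maps subpages 0-255 and process B pte-maps
 * subpages 256-511 of the same THP, every subpage has exactly one mapping,
 * so page_trans_huge_mapcount() returns 1 (a COW fault may reuse the
 * faulting subpage) while the total mapcount reported back is 512.
 */
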
/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from radix tree */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
	else
		extra_pins = HPAGE_PMD_NR;
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}
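
/*
 * Example of the pin accounting above (illustrative): an anonymous THP that
 * is not in the swap cache and is mapped by a single pmd has
 * total_mapcount() == 1 and page_count() == 2 (one reference from the
 * mapping plus the caller's pin), with extra_pins == 0, so 1 == 2 - 0 - 1
 * and the split may proceed; any additional reference, e.g. a GUP pin,
 * makes the check fail.
 */
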
/*
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split. The split doesn't change the
 * position of @page.
 *
 * Only the caller must hold a pin on @page, otherwise the split fails with
 * -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise to
 * @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on from
 * the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The rest of the
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	bool mlocked;
	unsigned long flags;

	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageWriteback(page))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);
	}

	/*
	 * Racy check if we can split the page, before freeze_page() will
	 * split the PMDs.
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	freeze_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* Make sure the page is not on per-CPU pagevec as it takes pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

	if (mapping) {
		void **pslot;

		spin_lock(&mapping->tree_lock);
		pslot = radix_tree_lookup_slot(&mapping->page_tree,
				page_index(head));
		/*
		 * Check if the head page is present in radix tree.
		 * We assume all tail are present too, if head is there.
		 */
		if (radix_tree_deref_slot_protected(pslot,
					&mapping->tree_lock) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&pgdata->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			pgdata->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping)
			__dec_node_page_state(page, NR_SHMEM_THPS);
		spin_unlock(&pgdata->split_queue_lock);
		__split_huge_page(page, list, flags);
		if (PageSwapCache(head)) {
			swp_entry_t entry = { .val = page_private(head) };

			ret = split_swap_cluster(entry);
		} else
			ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %u, page_count(): %u\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&pgdata->split_queue_lock);
fail:		if (mapping)
			spin_unlock(&mapping->tree_lock);
		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
		unfreeze_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}
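
/*
 * Compound page destructor for THPs: make sure the page is no longer on
 * the per-node deferred split queue before it is handed to
 * free_compound_page().
 */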
void free_transhuge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		pgdata->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
	free_compound_page(page);
}
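
/*
 * Queue a THP for deferred splitting. Rather than splitting synchronously
 * (for example when only part of the huge page is still mapped), the page
 * is put on the per-node split_queue and the deferred_split shrinker below
 * splits it later under memory pressure.
 */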
void deferred_split_huge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
		pgdata->split_queue_len++;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
}
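
/*
 * The two callbacks below implement the deferred-split shrinker:
 * deferred_split_count() reports the per-node queue length, and
 * deferred_split_scan() pins the queued head pages, tries to lock and
 * split each one, and requeues whatever could not be split.
 */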
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	return READ_ONCE(pgdata->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &pgdata->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			pgdata->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	list_splice_tail(&list, &pgdata->split_queue);
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&pgdata->split_queue))
		return SHRINK_STOP;
	return split;
}
static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	void *ret;

	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
	if (!ret)
		pr_warn("Failed to create split_huge_pages in debugfs");

	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
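
/*
 * Usage sketch for the debugfs knob created above (assuming debugfs is
 * mounted at /sys/kernel/debug): writing to the file triggers a scan of
 * every populated zone and an attempt to split every THP found, e.g.
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * The number of pages split is reported via pr_info() in the kernel log.
 */
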
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	mmu_notifier_invalidate_range_start(mm, address,
			address + HPAGE_PMD_SIZE);

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = *pvmw->pmd;
	pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);

	mmu_notifier_invalidate_range_end(mm, address,
			address + HPAGE_PMD_SIZE);
}
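
/*
 * remove_migration_pmd() is the inverse of set_pmd_migration_entry():
 * once migration has finished, the pmd migration entry found by the
 * page_vma_mapped_walk is replaced with a huge pmd pointing at @new,
 * preserving the write and soft-dirty bits recorded in the entry.
 */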
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	page_add_anon_rmap(new, vma, mmun_start, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if (vma->vm_flags & VM_LOCKED)
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);