1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2009 Red Hat, Inc.
4 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/numa_balancing.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/backing-dev.h>
21 #include <linux/dax.h>
22 #include <linux/mm_types.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/pfn_t.h>
26 #include <linux/mman.h>
27 #include <linux/memremap.h>
28 #include <linux/pagemap.h>
29 #include <linux/debugfs.h>
30 #include <linux/migrate.h>
31 #include <linux/hashtable.h>
32 #include <linux/userfaultfd_k.h>
33 #include <linux/page_idle.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/oom.h>
36 #include <linux/numa.h>
37 #include <linux/page_owner.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/memory-tiers.h>
40 #include <linux/compat.h>
41 #include <linux/pgalloc_tag.h>
42 #include <linux/pagewalk.h>
44 #include <asm/tlb.h>
45 #include <asm/pgalloc.h>
46 #include "internal.h"
47 #include "swap.h"
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/thp.h>
52 /*
53 * By default, transparent hugepage support is disabled in order to avoid
54 * risking an increased memory footprint for applications that are not
55 * guaranteed to benefit from it. When transparent hugepage support is
56 * enabled, it is for all mappings, and khugepaged scans all mappings.
57 * Defrag is invoked by khugepaged hugepage allocations and by page faults
58 * for all hugepage allocations.
59 */
60 unsigned long transparent_hugepage_flags __read_mostly =
61 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
62 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
63 #endif
64 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
65 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
66 #endif
67 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
68 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
69 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
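/*
 * For illustration (assuming CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y): with the
 * default flags above, the global sysfs policy typically reads as below; the
 * bracketed entry marks the active mode, and enabled_show()/defrag_show()
 * further down produce these strings.
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *   always [madvise] never
 *   $ cat /sys/kernel/mm/transparent_hugepage/defrag
 *   always defer defer+madvise [madvise] never
 */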
71 static struct shrinker *deferred_split_shrinker;
72 static unsigned long deferred_split_count(struct shrinker *shrink,
73 struct shrink_control *sc);
74 static unsigned long deferred_split_scan(struct shrinker *shrink,
75 struct shrink_control *sc);
76 static bool split_underused_thp = true;
78 static atomic_t huge_zero_refcount;
79 struct folio *huge_zero_folio __read_mostly;
80 unsigned long huge_zero_pfn __read_mostly = ~0UL;
81 unsigned long huge_anon_orders_always __read_mostly;
82 unsigned long huge_anon_orders_madvise __read_mostly;
83 unsigned long huge_anon_orders_inherit __read_mostly;
84 static bool anon_orders_configured __initdata;
86 static inline bool file_thp_enabled(struct vm_area_struct *vma)
88 struct inode *inode;
90 if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
91 return false;
93 if (!vma->vm_file)
94 return false;
96 inode = file_inode(vma->vm_file);
98 return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
101 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
102 unsigned long vm_flags,
103 unsigned long tva_flags,
104 unsigned long orders)
106 bool smaps = tva_flags & TVA_SMAPS;
107 bool in_pf = tva_flags & TVA_IN_PF;
108 bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
109 unsigned long supported_orders;
111 /* Check the intersection of requested and supported orders. */
112 if (vma_is_anonymous(vma))
113 supported_orders = THP_ORDERS_ALL_ANON;
114 else if (vma_is_special_huge(vma))
115 supported_orders = THP_ORDERS_ALL_SPECIAL;
116 else
117 supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
119 orders &= supported_orders;
120 if (!orders)
121 return 0;
123 if (!vma->vm_mm) /* vdso */
124 return 0;
126 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags))
127 return 0;
129 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
130 if (vma_is_dax(vma))
131 return in_pf ? orders : 0;
133 /*
134 * khugepaged special VMA and hugetlb VMA.
135 * Must be checked after dax since some dax mappings may have
136 * VM_MIXEDMAP set.
137 */
138 if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
139 return 0;
141 /*
142 * Check alignment for file vma and size for both file and anon vma by
143 * filtering out the unsuitable orders.
144 *
145 * Skip the check for page fault. Huge fault does the check in fault
146 * handlers.
147 */
148 if (!in_pf) {
149 int order = highest_order(orders);
150 unsigned long addr;
152 while (orders) {
153 addr = vma->vm_end - (PAGE_SIZE << order);
154 if (thp_vma_suitable_order(vma, addr, order))
155 break;
156 order = next_order(&orders, order);
159 if (!orders)
160 return 0;
163 /*
164 * Enabled via shmem mount options or sysfs settings.
165 * Must be done before hugepage flags check since shmem has its
166 * own flags.
167 */
168 if (!in_pf && shmem_file(vma->vm_file))
169 return shmem_allowable_huge_orders(file_inode(vma->vm_file),
170 vma, vma->vm_pgoff, 0,
171 !enforce_sysfs);
173 if (!vma_is_anonymous(vma)) {
174 /*
175 * Enforce sysfs THP requirements as necessary. Anonymous vmas
176 * were already handled in thp_vma_allowable_orders().
177 */
178 if (enforce_sysfs &&
179 (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
180 !hugepage_global_always())))
181 return 0;
183 /*
184 * Trust that ->huge_fault() handlers know what they are doing
185 * in the fault path.
186 */
187 if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
188 return orders;
189 /* Only regular file is valid in collapse path */
190 if (((!in_pf || smaps)) && file_thp_enabled(vma))
191 return orders;
192 return 0;
195 if (vma_is_temporary_stack(vma))
196 return 0;
198 /*
199 * The THPeligible bit of smaps should show 1 for proper VMAs even
200 * though anon_vma is not initialized yet.
201 *
202 * Allow page fault since anon_vma may not be initialized until
203 * the first page fault.
204 */
205 if (!vma->anon_vma)
206 return (smaps || in_pf) ? orders : 0;
208 return orders;
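/*
 * Rough usage sketch (not taken from this file): callers pass a bitmask of
 * candidate orders and get back the subset this VMA may actually use,
 * normally via the thp_vma_allowable_orders() wrapper. A PMD-only query at
 * fault time could look roughly like:
 *
 *   orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *                                     TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *                                     BIT(PMD_ORDER));
 *   if (orders & BIT(PMD_ORDER))
 *           ...a PMD-sized THP is permitted at this address...
 */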
211 static bool get_huge_zero_page(void)
213 struct folio *zero_folio;
214 retry:
215 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
216 return true;
218 zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
219 HPAGE_PMD_ORDER);
220 if (!zero_folio) {
221 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
222 return false;
224 /* Ensure zero folio won't have large_rmappable flag set. */
225 folio_clear_large_rmappable(zero_folio);
226 preempt_disable();
227 if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
228 preempt_enable();
229 folio_put(zero_folio);
230 goto retry;
232 WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
234 /* We take additional reference here. It will be put back by shrinker */
235 atomic_set(&huge_zero_refcount, 2);
236 preempt_enable();
237 count_vm_event(THP_ZERO_PAGE_ALLOC);
238 return true;
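/*
 * Lifetime summary, derived from the code above and the shrinker below: the
 * first successful get sets the refcount to 2, one reference for the caller
 * and one that is only dropped by shrink_huge_zero_page_scan() once all
 * other users are gone. Later callers take the fast atomic_inc_not_zero()
 * path, and put_huge_zero_page() must never drop the count to zero itself.
 */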
241 static void put_huge_zero_page(void)
244 * Counter should never go to zero here. Only shrinker can put
245 * last reference.
247 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
250 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
252 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
253 return READ_ONCE(huge_zero_folio);
255 if (!get_huge_zero_page())
256 return NULL;
258 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
259 put_huge_zero_page();
261 return READ_ONCE(huge_zero_folio);
264 void mm_put_huge_zero_folio(struct mm_struct *mm)
266 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
267 put_huge_zero_page();
270 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
271 struct shrink_control *sc)
273 /* we can free zero page only if last reference remains */
274 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
277 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
278 struct shrink_control *sc)
280 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
281 struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
282 BUG_ON(zero_folio == NULL);
283 WRITE_ONCE(huge_zero_pfn, ~0UL);
284 folio_put(zero_folio);
285 return HPAGE_PMD_NR;
288 return 0;
291 static struct shrinker *huge_zero_page_shrinker;
293 #ifdef CONFIG_SYSFS
294 static ssize_t enabled_show(struct kobject *kobj,
295 struct kobj_attribute *attr, char *buf)
297 const char *output;
299 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
300 output = "[always] madvise never";
301 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
302 &transparent_hugepage_flags))
303 output = "always [madvise] never";
304 else
305 output = "always madvise [never]";
307 return sysfs_emit(buf, "%s\n", output);
310 static ssize_t enabled_store(struct kobject *kobj,
311 struct kobj_attribute *attr,
312 const char *buf, size_t count)
314 ssize_t ret = count;
316 if (sysfs_streq(buf, "always")) {
317 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
318 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
319 } else if (sysfs_streq(buf, "madvise")) {
320 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
321 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
322 } else if (sysfs_streq(buf, "never")) {
323 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
324 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
325 } else
326 ret = -EINVAL;
328 if (ret > 0) {
329 int err = start_stop_khugepaged();
330 if (err)
331 ret = err;
333 return ret;
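/*
 * Example of driving this knob from userspace (illustrative):
 *
 *   # echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * A successful write also calls start_stop_khugepaged(), so changing the
 * mode may start or stop the khugepaged thread as a side effect.
 */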
336 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
338 ssize_t single_hugepage_flag_show(struct kobject *kobj,
339 struct kobj_attribute *attr, char *buf,
340 enum transparent_hugepage_flag flag)
342 return sysfs_emit(buf, "%d\n",
343 !!test_bit(flag, &transparent_hugepage_flags));
346 ssize_t single_hugepage_flag_store(struct kobject *kobj,
347 struct kobj_attribute *attr,
348 const char *buf, size_t count,
349 enum transparent_hugepage_flag flag)
351 unsigned long value;
352 int ret;
354 ret = kstrtoul(buf, 10, &value);
355 if (ret < 0)
356 return ret;
357 if (value > 1)
358 return -EINVAL;
360 if (value)
361 set_bit(flag, &transparent_hugepage_flags);
362 else
363 clear_bit(flag, &transparent_hugepage_flags);
365 return count;
368 static ssize_t defrag_show(struct kobject *kobj,
369 struct kobj_attribute *attr, char *buf)
371 const char *output;
373 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
374 &transparent_hugepage_flags))
375 output = "[always] defer defer+madvise madvise never";
376 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
377 &transparent_hugepage_flags))
378 output = "always [defer] defer+madvise madvise never";
379 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
380 &transparent_hugepage_flags))
381 output = "always defer [defer+madvise] madvise never";
382 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
383 &transparent_hugepage_flags))
384 output = "always defer defer+madvise [madvise] never";
385 else
386 output = "always defer defer+madvise madvise [never]";
388 return sysfs_emit(buf, "%s\n", output);
391 static ssize_t defrag_store(struct kobject *kobj,
392 struct kobj_attribute *attr,
393 const char *buf, size_t count)
395 if (sysfs_streq(buf, "always")) {
396 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
397 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
398 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
399 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
400 } else if (sysfs_streq(buf, "defer+madvise")) {
401 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
402 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
403 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
404 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
405 } else if (sysfs_streq(buf, "defer")) {
406 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
407 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
408 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
409 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
410 } else if (sysfs_streq(buf, "madvise")) {
411 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
412 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
413 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
414 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
415 } else if (sysfs_streq(buf, "never")) {
416 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
417 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
418 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
419 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
420 } else
421 return -EINVAL;
423 return count;
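/*
 * Example (illustrative): allow direct compaction stalls only for madvised
 * VMAs while merely kicking kswapd/kcompactd for everything else:
 *
 *   # echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * The selected flag is translated into allocation gfp bits by
 * vma_thp_gfp_mask() further down in this file.
 */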
425 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
427 static ssize_t use_zero_page_show(struct kobject *kobj,
428 struct kobj_attribute *attr, char *buf)
430 return single_hugepage_flag_show(kobj, attr, buf,
431 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
433 static ssize_t use_zero_page_store(struct kobject *kobj,
434 struct kobj_attribute *attr, const char *buf, size_t count)
436 return single_hugepage_flag_store(kobj, attr, buf, count,
437 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
439 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
441 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
442 struct kobj_attribute *attr, char *buf)
444 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
446 static struct kobj_attribute hpage_pmd_size_attr =
447 __ATTR_RO(hpage_pmd_size);
449 static ssize_t split_underused_thp_show(struct kobject *kobj,
450 struct kobj_attribute *attr, char *buf)
452 return sysfs_emit(buf, "%d\n", split_underused_thp);
455 static ssize_t split_underused_thp_store(struct kobject *kobj,
456 struct kobj_attribute *attr,
457 const char *buf, size_t count)
459 int err = kstrtobool(buf, &split_underused_thp);
461 if (err < 0)
462 return err;
464 return count;
467 static struct kobj_attribute split_underused_thp_attr = __ATTR(
468 shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
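/*
 * Example (illustrative): disable the shrinker-driven splitting of underused
 * THPs that split_underused_thp gates:
 *
 *   # echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused
 */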
470 static struct attribute *hugepage_attr[] = {
471 &enabled_attr.attr,
472 &defrag_attr.attr,
473 &use_zero_page_attr.attr,
474 &hpage_pmd_size_attr.attr,
475 #ifdef CONFIG_SHMEM
476 &shmem_enabled_attr.attr,
477 #endif
478 &split_underused_thp_attr.attr,
479 NULL,
482 static const struct attribute_group hugepage_attr_group = {
483 .attrs = hugepage_attr,
486 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
487 static void thpsize_release(struct kobject *kobj);
488 static DEFINE_SPINLOCK(huge_anon_orders_lock);
489 static LIST_HEAD(thpsize_list);
491 static ssize_t anon_enabled_show(struct kobject *kobj,
492 struct kobj_attribute *attr, char *buf)
494 int order = to_thpsize(kobj)->order;
495 const char *output;
497 if (test_bit(order, &huge_anon_orders_always))
498 output = "[always] inherit madvise never";
499 else if (test_bit(order, &huge_anon_orders_inherit))
500 output = "always [inherit] madvise never";
501 else if (test_bit(order, &huge_anon_orders_madvise))
502 output = "always inherit [madvise] never";
503 else
504 output = "always inherit madvise [never]";
506 return sysfs_emit(buf, "%s\n", output);
509 static ssize_t anon_enabled_store(struct kobject *kobj,
510 struct kobj_attribute *attr,
511 const char *buf, size_t count)
513 int order = to_thpsize(kobj)->order;
514 ssize_t ret = count;
516 if (sysfs_streq(buf, "always")) {
517 spin_lock(&huge_anon_orders_lock);
518 clear_bit(order, &huge_anon_orders_inherit);
519 clear_bit(order, &huge_anon_orders_madvise);
520 set_bit(order, &huge_anon_orders_always);
521 spin_unlock(&huge_anon_orders_lock);
522 } else if (sysfs_streq(buf, "inherit")) {
523 spin_lock(&huge_anon_orders_lock);
524 clear_bit(order, &huge_anon_orders_always);
525 clear_bit(order, &huge_anon_orders_madvise);
526 set_bit(order, &huge_anon_orders_inherit);
527 spin_unlock(&huge_anon_orders_lock);
528 } else if (sysfs_streq(buf, "madvise")) {
529 spin_lock(&huge_anon_orders_lock);
530 clear_bit(order, &huge_anon_orders_always);
531 clear_bit(order, &huge_anon_orders_inherit);
532 set_bit(order, &huge_anon_orders_madvise);
533 spin_unlock(&huge_anon_orders_lock);
534 } else if (sysfs_streq(buf, "never")) {
535 spin_lock(&huge_anon_orders_lock);
536 clear_bit(order, &huge_anon_orders_always);
537 clear_bit(order, &huge_anon_orders_inherit);
538 clear_bit(order, &huge_anon_orders_madvise);
539 spin_unlock(&huge_anon_orders_lock);
540 } else
541 ret = -EINVAL;
543 if (ret > 0) {
544 int err;
546 err = start_stop_khugepaged();
547 if (err)
548 ret = err;
550 return ret;
553 static struct kobj_attribute anon_enabled_attr =
554 __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
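/*
 * Per-size (mTHP) control example (illustrative; the directory names depend
 * on PAGE_SIZE and the order, see thpsize_create() below):
 *
 *   # echo always > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled
 *   # cat /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
 *   always [inherit] madvise never
 *
 * "inherit" makes the size follow the top-level "enabled" policy; PMD-sized
 * THP defaults to inherit in hugepage_init_sysfs() unless thp_anon= was used.
 */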
556 static struct attribute *anon_ctrl_attrs[] = {
557 &anon_enabled_attr.attr,
558 NULL,
561 static const struct attribute_group anon_ctrl_attr_grp = {
562 .attrs = anon_ctrl_attrs,
565 static struct attribute *file_ctrl_attrs[] = {
566 #ifdef CONFIG_SHMEM
567 &thpsize_shmem_enabled_attr.attr,
568 #endif
569 NULL,
572 static const struct attribute_group file_ctrl_attr_grp = {
573 .attrs = file_ctrl_attrs,
576 static struct attribute *any_ctrl_attrs[] = {
577 NULL,
580 static const struct attribute_group any_ctrl_attr_grp = {
581 .attrs = any_ctrl_attrs,
584 static const struct kobj_type thpsize_ktype = {
585 .release = &thpsize_release,
586 .sysfs_ops = &kobj_sysfs_ops,
589 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
591 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
593 unsigned long sum = 0;
594 int cpu;
596 for_each_possible_cpu(cpu) {
597 struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
599 sum += this->stats[order][item];
602 return sum;
605 #define DEFINE_MTHP_STAT_ATTR(_name, _index) \
606 static ssize_t _name##_show(struct kobject *kobj, \
607 struct kobj_attribute *attr, char *buf) \
609 int order = to_thpsize(kobj)->order; \
611 return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \
613 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
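/*
 * Expansion sketch (illustrative): DEFINE_MTHP_STAT_ATTR(swpout,
 * MTHP_STAT_SWPOUT) generates a swpout_show() that prints the all-CPU sum of
 * mthp_stats[order][MTHP_STAT_SWPOUT] for the kobject's order, plus a
 * read-only swpout_attr. The counters end up under the per-size "stats"
 * groups, e.g.:
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout
 */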
615 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
616 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
617 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
618 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
619 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
620 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
621 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
622 #ifdef CONFIG_SHMEM
623 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
624 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
625 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
626 #endif
627 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
628 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
629 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
630 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
631 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
633 static struct attribute *anon_stats_attrs[] = {
634 &anon_fault_alloc_attr.attr,
635 &anon_fault_fallback_attr.attr,
636 &anon_fault_fallback_charge_attr.attr,
637 #ifndef CONFIG_SHMEM
638 &zswpout_attr.attr,
639 &swpin_attr.attr,
640 &swpout_attr.attr,
641 &swpout_fallback_attr.attr,
642 #endif
643 &split_deferred_attr.attr,
644 &nr_anon_attr.attr,
645 &nr_anon_partially_mapped_attr.attr,
646 NULL,
649 static struct attribute_group anon_stats_attr_grp = {
650 .name = "stats",
651 .attrs = anon_stats_attrs,
654 static struct attribute *file_stats_attrs[] = {
655 #ifdef CONFIG_SHMEM
656 &shmem_alloc_attr.attr,
657 &shmem_fallback_attr.attr,
658 &shmem_fallback_charge_attr.attr,
659 #endif
660 NULL,
663 static struct attribute_group file_stats_attr_grp = {
664 .name = "stats",
665 .attrs = file_stats_attrs,
668 static struct attribute *any_stats_attrs[] = {
669 #ifdef CONFIG_SHMEM
670 &zswpout_attr.attr,
671 &swpin_attr.attr,
672 &swpout_attr.attr,
673 &swpout_fallback_attr.attr,
674 #endif
675 &split_attr.attr,
676 &split_failed_attr.attr,
677 NULL,
680 static struct attribute_group any_stats_attr_grp = {
681 .name = "stats",
682 .attrs = any_stats_attrs,
685 static int sysfs_add_group(struct kobject *kobj,
686 const struct attribute_group *grp)
688 int ret = -ENOENT;
691 * If the group is named, try to merge first, assuming the subdirectory
692 * was already created. This avoids the warning emitted by
693 * sysfs_create_group() if the directory already exists.
695 if (grp->name)
696 ret = sysfs_merge_group(kobj, grp);
697 if (ret)
698 ret = sysfs_create_group(kobj, grp);
700 return ret;
703 static struct thpsize *thpsize_create(int order, struct kobject *parent)
705 unsigned long size = (PAGE_SIZE << order) / SZ_1K;
706 struct thpsize *thpsize;
707 int ret = -ENOMEM;
709 thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
710 if (!thpsize)
711 goto err;
713 thpsize->order = order;
715 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
716 "hugepages-%lukB", size);
717 if (ret) {
718 kfree(thpsize);
719 goto err;
723 ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
724 if (ret)
725 goto err_put;
727 ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
728 if (ret)
729 goto err_put;
731 if (BIT(order) & THP_ORDERS_ALL_ANON) {
732 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
733 if (ret)
734 goto err_put;
736 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
737 if (ret)
738 goto err_put;
741 if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
742 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
743 if (ret)
744 goto err_put;
746 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
747 if (ret)
748 goto err_put;
751 return thpsize;
752 err_put:
753 kobject_put(&thpsize->kobj);
754 err:
755 return ERR_PTR(ret);
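/*
 * Resulting sysfs layout per supported order (sketch):
 *
 *   /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/
 *       enabled          (anon control, only for THP_ORDERS_ALL_ANON orders)
 *       shmem_enabled    (file control, CONFIG_SHMEM only)
 *       stats/           (per-order counters defined above)
 */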
758 static void thpsize_release(struct kobject *kobj)
760 kfree(to_thpsize(kobj));
763 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
765 int err;
766 struct thpsize *thpsize;
767 unsigned long orders;
768 int order;
770 /*
771 * Default to setting PMD-sized THP to inherit the global setting and
772 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
773 * constant so we have to do this here.
774 */
775 if (!anon_orders_configured)
776 huge_anon_orders_inherit = BIT(PMD_ORDER);
778 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
779 if (unlikely(!*hugepage_kobj)) {
780 pr_err("failed to create transparent hugepage kobject\n");
781 return -ENOMEM;
784 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
785 if (err) {
786 pr_err("failed to register transparent hugepage group\n");
787 goto delete_obj;
790 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
791 if (err) {
792 pr_err("failed to register transparent hugepage group\n");
793 goto remove_hp_group;
796 orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
797 order = highest_order(orders);
798 while (orders) {
799 thpsize = thpsize_create(order, *hugepage_kobj);
800 if (IS_ERR(thpsize)) {
801 pr_err("failed to create thpsize for order %d\n", order);
802 err = PTR_ERR(thpsize);
803 goto remove_all;
805 list_add(&thpsize->node, &thpsize_list);
806 order = next_order(&orders, order);
809 return 0;
811 remove_all:
812 hugepage_exit_sysfs(*hugepage_kobj);
813 return err;
814 remove_hp_group:
815 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
816 delete_obj:
817 kobject_put(*hugepage_kobj);
818 return err;
821 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
823 struct thpsize *thpsize, *tmp;
825 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
826 list_del(&thpsize->node);
827 kobject_put(&thpsize->kobj);
830 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
831 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
832 kobject_put(hugepage_kobj);
834 #else
835 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
837 return 0;
840 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
843 #endif /* CONFIG_SYSFS */
845 static int __init thp_shrinker_init(void)
847 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
848 if (!huge_zero_page_shrinker)
849 return -ENOMEM;
851 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
852 SHRINKER_MEMCG_AWARE |
853 SHRINKER_NONSLAB,
854 "thp-deferred_split");
855 if (!deferred_split_shrinker) {
856 shrinker_free(huge_zero_page_shrinker);
857 return -ENOMEM;
860 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
861 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
862 shrinker_register(huge_zero_page_shrinker);
864 deferred_split_shrinker->count_objects = deferred_split_count;
865 deferred_split_shrinker->scan_objects = deferred_split_scan;
866 shrinker_register(deferred_split_shrinker);
868 return 0;
871 static void __init thp_shrinker_exit(void)
873 shrinker_free(huge_zero_page_shrinker);
874 shrinker_free(deferred_split_shrinker);
877 static int __init hugepage_init(void)
879 int err;
880 struct kobject *hugepage_kobj;
882 if (!has_transparent_hugepage()) {
883 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
884 return -EINVAL;
888 * hugepages can't be allocated by the buddy allocator
890 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
892 err = hugepage_init_sysfs(&hugepage_kobj);
893 if (err)
894 goto err_sysfs;
896 err = khugepaged_init();
897 if (err)
898 goto err_slab;
900 err = thp_shrinker_init();
901 if (err)
902 goto err_shrinker;
904 /*
905 * By default disable transparent hugepages on smaller systems,
906 * where the extra memory used could hurt more than TLB overhead
907 * is likely to save. The admin can still enable it through /sys.
908 */
909 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
910 transparent_hugepage_flags = 0;
911 return 0;
914 err = start_stop_khugepaged();
915 if (err)
916 goto err_khugepaged;
918 return 0;
919 err_khugepaged:
920 thp_shrinker_exit();
921 err_shrinker:
922 khugepaged_destroy();
923 err_slab:
924 hugepage_exit_sysfs(hugepage_kobj);
925 err_sysfs:
926 return err;
928 subsys_initcall(hugepage_init);
930 static int __init setup_transparent_hugepage(char *str)
932 int ret = 0;
933 if (!str)
934 goto out;
935 if (!strcmp(str, "always")) {
936 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
937 &transparent_hugepage_flags);
938 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
939 &transparent_hugepage_flags);
940 ret = 1;
941 } else if (!strcmp(str, "madvise")) {
942 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
943 &transparent_hugepage_flags);
944 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
945 &transparent_hugepage_flags);
946 ret = 1;
947 } else if (!strcmp(str, "never")) {
948 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
949 &transparent_hugepage_flags);
950 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
951 &transparent_hugepage_flags);
952 ret = 1;
954 out:
955 if (!ret)
956 pr_warn("transparent_hugepage= cannot parse, ignored\n");
957 return ret;
959 __setup("transparent_hugepage=", setup_transparent_hugepage);
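/*
 * Boot-time counterpart of the sysfs "enabled" file (illustrative):
 *
 *   transparent_hugepage=madvise
 *
 * It accepts the same three values as the sysfs knob: always, madvise, never.
 */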
961 static char str_dup[PAGE_SIZE] __initdata;
962 static int __init setup_thp_anon(char *str)
964 char *token, *range, *policy, *subtoken;
965 unsigned long always, inherit, madvise;
966 char *start_size, *end_size;
967 int start, end, nr;
968 char *p;
970 if (!str || strlen(str) + 1 > PAGE_SIZE)
971 goto err;
972 strscpy(str_dup, str);
974 always = huge_anon_orders_always;
975 madvise = huge_anon_orders_madvise;
976 inherit = huge_anon_orders_inherit;
977 p = str_dup;
978 while ((token = strsep(&p, ";")) != NULL) {
979 range = strsep(&token, ":");
980 policy = token;
982 if (!policy)
983 goto err;
985 while ((subtoken = strsep(&range, ",")) != NULL) {
986 if (strchr(subtoken, '-')) {
987 start_size = strsep(&subtoken, "-");
988 end_size = subtoken;
990 start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
991 end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
992 } else {
993 start_size = end_size = subtoken;
994 start = end = get_order_from_str(subtoken,
995 THP_ORDERS_ALL_ANON);
998 if (start == -EINVAL) {
999 pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
1000 goto err;
1003 if (end == -EINVAL) {
1004 pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
1005 goto err;
1008 if (start < 0 || end < 0 || start > end)
1009 goto err;
1011 nr = end - start + 1;
1012 if (!strcmp(policy, "always")) {
1013 bitmap_set(&always, start, nr);
1014 bitmap_clear(&inherit, start, nr);
1015 bitmap_clear(&madvise, start, nr);
1016 } else if (!strcmp(policy, "madvise")) {
1017 bitmap_set(&madvise, start, nr);
1018 bitmap_clear(&inherit, start, nr);
1019 bitmap_clear(&always, start, nr);
1020 } else if (!strcmp(policy, "inherit")) {
1021 bitmap_set(&inherit, start, nr);
1022 bitmap_clear(&madvise, start, nr);
1023 bitmap_clear(&always, start, nr);
1024 } else if (!strcmp(policy, "never")) {
1025 bitmap_clear(&inherit, start, nr);
1026 bitmap_clear(&madvise, start, nr);
1027 bitmap_clear(&always, start, nr);
1028 } else {
1029 pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1030 goto err;
1035 huge_anon_orders_always = always;
1036 huge_anon_orders_madvise = madvise;
1037 huge_anon_orders_inherit = inherit;
1038 anon_orders_configured = true;
1039 return 1;
1041 err:
1042 pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1043 return 0;
1045 __setup("thp_anon=", setup_thp_anon);
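/*
 * Example thp_anon= usage (illustrative; sizes must correspond to supported
 * anon orders, ranges are inclusive, and entries are separated by ';'):
 *
 *   thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never
 *
 * Orders not mentioned keep their previous setting, and setting
 * anon_orders_configured suppresses the PMD_ORDER "inherit" default applied
 * in hugepage_init_sysfs().
 */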
1047 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
1049 if (likely(vma->vm_flags & VM_WRITE))
1050 pmd = pmd_mkwrite(pmd, vma);
1051 return pmd;
1054 #ifdef CONFIG_MEMCG
1055 static inline
1056 struct deferred_split *get_deferred_split_queue(struct folio *folio)
1058 struct mem_cgroup *memcg = folio_memcg(folio);
1059 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
1061 if (memcg)
1062 return &memcg->deferred_split_queue;
1063 else
1064 return &pgdat->deferred_split_queue;
1066 #else
1067 static inline
1068 struct deferred_split *get_deferred_split_queue(struct folio *folio)
1070 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
1072 return &pgdat->deferred_split_queue;
1074 #endif
1076 static inline bool is_transparent_hugepage(const struct folio *folio)
1078 if (!folio_test_large(folio))
1079 return false;
1081 return is_huge_zero_folio(folio) ||
1082 folio_test_large_rmappable(folio);
1085 static unsigned long __thp_get_unmapped_area(struct file *filp,
1086 unsigned long addr, unsigned long len,
1087 loff_t off, unsigned long flags, unsigned long size,
1088 vm_flags_t vm_flags)
1090 loff_t off_end = off + len;
1091 loff_t off_align = round_up(off, size);
1092 unsigned long len_pad, ret, off_sub;
1094 if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
1095 return 0;
1097 if (off_end <= off_align || (off_end - off_align) < size)
1098 return 0;
1100 len_pad = len + size;
1101 if (len_pad < len || (off + len_pad) < off)
1102 return 0;
1104 ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
1105 off >> PAGE_SHIFT, flags, vm_flags);
1108 * The failure might be due to length padding. The caller will retry
1109 * without the padding.
1111 if (IS_ERR_VALUE(ret))
1112 return 0;
1115 * Do not try to align to THP boundary if allocation at the address
1116 * hint succeeds.
1118 if (ret == addr)
1119 return addr;
1121 off_sub = (off - ret) & (size - 1);
1123 if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
1124 return ret + size;
1126 ret += off_sub;
1127 return ret;
1130 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1131 unsigned long len, unsigned long pgoff, unsigned long flags,
1132 vm_flags_t vm_flags)
1134 unsigned long ret;
1135 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1137 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
1138 if (ret)
1139 return ret;
1141 return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
1142 vm_flags);
1145 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1146 unsigned long len, unsigned long pgoff, unsigned long flags)
1148 return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
1150 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
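/*
 * Worked example (sketch, 2M PMDs): the search above asks for len + PMD_SIZE
 * bytes, then shifts the result by off_sub = (off - ret) & (PMD_SIZE - 1).
 * After the shift, the returned address and the file offset are congruent
 * modulo PMD_SIZE, so PMD-aligned parts of the file can later be mapped with
 * single huge PMDs; the extra PMD_SIZE of padding is what guarantees the
 * shifted range still fits.
 */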
1152 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
1153 unsigned long addr)
1155 gfp_t gfp = vma_thp_gfp_mask(vma);
1156 const int order = HPAGE_PMD_ORDER;
1157 struct folio *folio;
1159 folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
1161 if (unlikely(!folio)) {
1162 count_vm_event(THP_FAULT_FALLBACK);
1163 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1164 return NULL;
1167 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1168 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1169 folio_put(folio);
1170 count_vm_event(THP_FAULT_FALLBACK);
1171 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
1172 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1173 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1174 return NULL;
1176 folio_throttle_swaprate(folio, gfp);
1179 * When a folio is not zeroed during allocation (__GFP_ZERO not used),
1180 * folio_zero_user() is used to make sure that the page corresponding
1181 * to the faulting address will be hot in the cache after zeroing.
1183 if (!alloc_zeroed())
1184 folio_zero_user(folio, addr);
1186 * The memory barrier inside __folio_mark_uptodate makes sure that
1187 * folio_zero_user writes become visible before the set_pmd_at()
1188 * write.
1190 __folio_mark_uptodate(folio);
1191 return folio;
1194 static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
1195 struct vm_area_struct *vma, unsigned long haddr)
1197 pmd_t entry;
1199 entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
1200 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1201 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1202 folio_add_lru_vma(folio, vma);
1203 set_pmd_at(vma->vm_mm, haddr, pmd, entry);
1204 update_mmu_cache_pmd(vma, haddr, pmd);
1205 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1206 count_vm_event(THP_FAULT_ALLOC);
1207 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1208 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1211 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1213 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1214 struct vm_area_struct *vma = vmf->vma;
1215 struct folio *folio;
1216 pgtable_t pgtable;
1217 vm_fault_t ret = 0;
1219 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1220 if (unlikely(!folio))
1221 return VM_FAULT_FALLBACK;
1223 pgtable = pte_alloc_one(vma->vm_mm);
1224 if (unlikely(!pgtable)) {
1225 ret = VM_FAULT_OOM;
1226 goto release;
1229 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1230 if (unlikely(!pmd_none(*vmf->pmd))) {
1231 goto unlock_release;
1232 } else {
1233 ret = check_stable_address_space(vma->vm_mm);
1234 if (ret)
1235 goto unlock_release;
1237 /* Deliver the page fault to userland */
1238 if (userfaultfd_missing(vma)) {
1239 spin_unlock(vmf->ptl);
1240 folio_put(folio);
1241 pte_free(vma->vm_mm, pgtable);
1242 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1243 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1244 return ret;
1246 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1247 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
1248 mm_inc_nr_ptes(vma->vm_mm);
1249 deferred_split_folio(folio, false);
1250 spin_unlock(vmf->ptl);
1253 return 0;
1254 unlock_release:
1255 spin_unlock(vmf->ptl);
1256 release:
1257 if (pgtable)
1258 pte_free(vma->vm_mm, pgtable);
1259 folio_put(folio);
1260 return ret;
1264 /*
1265 * always: directly stall for all thp allocations
1266 * defer: wake kswapd and fail if not immediately available
1267 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1268 * fail if not immediately available
1269 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1270 * available
1271 * never: never stall for any thp allocation
1272 */
1273 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1275 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1277 /* Always do synchronous compaction */
1278 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1279 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1281 /* Kick kcompactd and fail quickly */
1282 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1283 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1285 /* Synchronous compaction if madvised, otherwise kick kcompactd */
1286 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1287 return GFP_TRANSHUGE_LIGHT |
1288 (vma_madvised ? __GFP_DIRECT_RECLAIM :
1289 __GFP_KSWAPD_RECLAIM);
1291 /* Only do synchronous compaction if madvised */
1292 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1293 return GFP_TRANSHUGE_LIGHT |
1294 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1296 return GFP_TRANSHUGE_LIGHT;
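/*
 * Concrete mapping example, derived from the checks above: with
 * defrag=madvise, a VMA marked VM_HUGEPAGE gets
 * GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM (and may stall on compaction),
 * while an unmarked VMA gets plain GFP_TRANSHUGE_LIGHT and fails fast.
 */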
1299 /* Caller must hold page table lock. */
1300 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1301 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1302 struct folio *zero_folio)
1304 pmd_t entry;
1305 if (!pmd_none(*pmd))
1306 return;
1307 entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
1308 entry = pmd_mkhuge(entry);
1309 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1310 set_pmd_at(mm, haddr, pmd, entry);
1311 mm_inc_nr_ptes(mm);
1314 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1316 struct vm_area_struct *vma = vmf->vma;
1317 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1318 vm_fault_t ret;
1320 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1321 return VM_FAULT_FALLBACK;
1322 ret = vmf_anon_prepare(vmf);
1323 if (ret)
1324 return ret;
1325 khugepaged_enter_vma(vma, vma->vm_flags);
1327 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1328 !mm_forbids_zeropage(vma->vm_mm) &&
1329 transparent_hugepage_use_zero_page()) {
1330 pgtable_t pgtable;
1331 struct folio *zero_folio;
1332 vm_fault_t ret;
1334 pgtable = pte_alloc_one(vma->vm_mm);
1335 if (unlikely(!pgtable))
1336 return VM_FAULT_OOM;
1337 zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1338 if (unlikely(!zero_folio)) {
1339 pte_free(vma->vm_mm, pgtable);
1340 count_vm_event(THP_FAULT_FALLBACK);
1341 return VM_FAULT_FALLBACK;
1343 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1344 ret = 0;
1345 if (pmd_none(*vmf->pmd)) {
1346 ret = check_stable_address_space(vma->vm_mm);
1347 if (ret) {
1348 spin_unlock(vmf->ptl);
1349 pte_free(vma->vm_mm, pgtable);
1350 } else if (userfaultfd_missing(vma)) {
1351 spin_unlock(vmf->ptl);
1352 pte_free(vma->vm_mm, pgtable);
1353 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1354 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1355 } else {
1356 set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1357 haddr, vmf->pmd, zero_folio);
1358 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1359 spin_unlock(vmf->ptl);
1361 } else {
1362 spin_unlock(vmf->ptl);
1363 pte_free(vma->vm_mm, pgtable);
1365 return ret;
1368 return __do_huge_pmd_anonymous_page(vmf);
1371 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
1372 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1373 pgtable_t pgtable)
1375 struct mm_struct *mm = vma->vm_mm;
1376 pmd_t entry;
1377 spinlock_t *ptl;
1379 ptl = pmd_lock(mm, pmd);
1380 if (!pmd_none(*pmd)) {
1381 if (write) {
1382 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1383 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1384 goto out_unlock;
1386 entry = pmd_mkyoung(*pmd);
1387 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1388 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1389 update_mmu_cache_pmd(vma, addr, pmd);
1392 goto out_unlock;
1395 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1396 if (pfn_t_devmap(pfn))
1397 entry = pmd_mkdevmap(entry);
1398 else
1399 entry = pmd_mkspecial(entry);
1400 if (write) {
1401 entry = pmd_mkyoung(pmd_mkdirty(entry));
1402 entry = maybe_pmd_mkwrite(entry, vma);
1405 if (pgtable) {
1406 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1407 mm_inc_nr_ptes(mm);
1408 pgtable = NULL;
1411 set_pmd_at(mm, addr, pmd, entry);
1412 update_mmu_cache_pmd(vma, addr, pmd);
1414 out_unlock:
1415 spin_unlock(ptl);
1416 if (pgtable)
1417 pte_free(mm, pgtable);
1421 * vmf_insert_pfn_pmd - insert a pmd size pfn
1422 * @vmf: Structure describing the fault
1423 * @pfn: pfn to insert
1424 * @write: whether it's a write fault
1426 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1428 * Return: vm_fault_t value.
1430 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1432 unsigned long addr = vmf->address & PMD_MASK;
1433 struct vm_area_struct *vma = vmf->vma;
1434 pgprot_t pgprot = vma->vm_page_prot;
1435 pgtable_t pgtable = NULL;
1438 * If we had pmd_special, we could avoid all these restrictions,
1439 * but we need to be consistent with PTEs and architectures that
1440 * can't support a 'special' bit.
1442 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1443 !pfn_t_devmap(pfn));
1444 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1445 (VM_PFNMAP|VM_MIXEDMAP));
1446 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1448 if (addr < vma->vm_start || addr >= vma->vm_end)
1449 return VM_FAULT_SIGBUS;
1451 if (arch_needs_pgtable_deposit()) {
1452 pgtable = pte_alloc_one(vma->vm_mm);
1453 if (!pgtable)
1454 return VM_FAULT_OOM;
1457 track_pfn_insert(vma, &pgprot, pfn);
1459 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1460 return VM_FAULT_NOPAGE;
1462 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
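/*
 * Caller sketch (illustrative, not from this file): a driver's huge_fault
 * handler for a PMD-sized DAX-style mapping might end with something like
 *
 *   return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 *
 * where the pfn lookup is the driver's own business; fs/dax.c is the main
 * in-tree user of this interface.
 */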
1464 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1465 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1467 if (likely(vma->vm_flags & VM_WRITE))
1468 pud = pud_mkwrite(pud);
1469 return pud;
1472 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
1473 pud_t *pud, pfn_t pfn, bool write)
1475 struct mm_struct *mm = vma->vm_mm;
1476 pgprot_t prot = vma->vm_page_prot;
1477 pud_t entry;
1478 spinlock_t *ptl;
1480 ptl = pud_lock(mm, pud);
1481 if (!pud_none(*pud)) {
1482 if (write) {
1483 if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
1484 goto out_unlock;
1485 entry = pud_mkyoung(*pud);
1486 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1487 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1488 update_mmu_cache_pud(vma, addr, pud);
1490 goto out_unlock;
1493 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1494 if (pfn_t_devmap(pfn))
1495 entry = pud_mkdevmap(entry);
1496 else
1497 entry = pud_mkspecial(entry);
1498 if (write) {
1499 entry = pud_mkyoung(pud_mkdirty(entry));
1500 entry = maybe_pud_mkwrite(entry, vma);
1502 set_pud_at(mm, addr, pud, entry);
1503 update_mmu_cache_pud(vma, addr, pud);
1505 out_unlock:
1506 spin_unlock(ptl);
1510 * vmf_insert_pfn_pud - insert a pud size pfn
1511 * @vmf: Structure describing the fault
1512 * @pfn: pfn to insert
1513 * @write: whether it's a write fault
1515 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1517 * Return: vm_fault_t value.
1519 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1521 unsigned long addr = vmf->address & PUD_MASK;
1522 struct vm_area_struct *vma = vmf->vma;
1523 pgprot_t pgprot = vma->vm_page_prot;
1526 * If we had pud_special, we could avoid all these restrictions,
1527 * but we need to be consistent with PTEs and architectures that
1528 * can't support a 'special' bit.
1530 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1531 !pfn_t_devmap(pfn));
1532 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1533 (VM_PFNMAP|VM_MIXEDMAP));
1534 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1536 if (addr < vma->vm_start || addr >= vma->vm_end)
1537 return VM_FAULT_SIGBUS;
1539 track_pfn_insert(vma, &pgprot, pfn);
1541 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1542 return VM_FAULT_NOPAGE;
1544 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1545 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1547 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1548 pmd_t *pmd, bool write)
1550 pmd_t _pmd;
1552 _pmd = pmd_mkyoung(*pmd);
1553 if (write)
1554 _pmd = pmd_mkdirty(_pmd);
1555 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1556 pmd, _pmd, write))
1557 update_mmu_cache_pmd(vma, addr, pmd);
1560 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1561 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1563 unsigned long pfn = pmd_pfn(*pmd);
1564 struct mm_struct *mm = vma->vm_mm;
1565 struct page *page;
1566 int ret;
1568 assert_spin_locked(pmd_lockptr(mm, pmd));
1570 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1571 return NULL;
1573 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1574 /* pass */;
1575 else
1576 return NULL;
1578 if (flags & FOLL_TOUCH)
1579 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1582 * device mapped pages can only be returned if the
1583 * caller will manage the page reference count.
1585 if (!(flags & (FOLL_GET | FOLL_PIN)))
1586 return ERR_PTR(-EEXIST);
1588 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1589 *pgmap = get_dev_pagemap(pfn, *pgmap);
1590 if (!*pgmap)
1591 return ERR_PTR(-EFAULT);
1592 page = pfn_to_page(pfn);
1593 ret = try_grab_folio(page_folio(page), 1, flags);
1594 if (ret)
1595 page = ERR_PTR(ret);
1597 return page;
1600 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1601 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1602 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1604 spinlock_t *dst_ptl, *src_ptl;
1605 struct page *src_page;
1606 struct folio *src_folio;
1607 pmd_t pmd;
1608 pgtable_t pgtable = NULL;
1609 int ret = -ENOMEM;
1611 pmd = pmdp_get_lockless(src_pmd);
1612 if (unlikely(pmd_present(pmd) && pmd_special(pmd))) {
1613 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1614 src_ptl = pmd_lockptr(src_mm, src_pmd);
1615 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1616 /*
1617 * No need to recheck the pmd; it can't change with the write
1618 * mmap lock held here.
1619 *
1620 * Meanwhile, make sure this is not a CoW VMA with a writable
1621 * mapping; otherwise either the anon page wrongly had the
1622 * special bit applied, or we wrongly made the PRIVATE mapping
1623 * able to write to the backend MMIO.
1624 */
1625 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1626 goto set_pmd;
1629 /* Skip if can be re-fill on fault */
1630 if (!vma_is_anonymous(dst_vma))
1631 return 0;
1633 pgtable = pte_alloc_one(dst_mm);
1634 if (unlikely(!pgtable))
1635 goto out;
1637 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1638 src_ptl = pmd_lockptr(src_mm, src_pmd);
1639 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1641 ret = -EAGAIN;
1642 pmd = *src_pmd;
1644 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1645 if (unlikely(is_swap_pmd(pmd))) {
1646 swp_entry_t entry = pmd_to_swp_entry(pmd);
1648 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1649 if (!is_readable_migration_entry(entry)) {
1650 entry = make_readable_migration_entry(
1651 swp_offset(entry));
1652 pmd = swp_entry_to_pmd(entry);
1653 if (pmd_swp_soft_dirty(*src_pmd))
1654 pmd = pmd_swp_mksoft_dirty(pmd);
1655 if (pmd_swp_uffd_wp(*src_pmd))
1656 pmd = pmd_swp_mkuffd_wp(pmd);
1657 set_pmd_at(src_mm, addr, src_pmd, pmd);
1659 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1660 mm_inc_nr_ptes(dst_mm);
1661 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1662 if (!userfaultfd_wp(dst_vma))
1663 pmd = pmd_swp_clear_uffd_wp(pmd);
1664 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1665 ret = 0;
1666 goto out_unlock;
1668 #endif
1670 if (unlikely(!pmd_trans_huge(pmd))) {
1671 pte_free(dst_mm, pgtable);
1672 goto out_unlock;
1674 /*
1675 * While the page table lock is held, the huge zero pmd cannot be
1676 * under splitting, since we never split the zero page itself, only
1677 * the pmd into a page table.
1678 */
1679 if (is_huge_zero_pmd(pmd)) {
1680 /*
1681 * mm_get_huge_zero_folio() will never allocate a new
1682 * folio here, since we already have a zero page to
1683 * copy. It just takes a reference.
1684 */
1685 mm_get_huge_zero_folio(dst_mm);
1686 goto out_zero_page;
1689 src_page = pmd_page(pmd);
1690 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1691 src_folio = page_folio(src_page);
1693 folio_get(src_folio);
1694 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
1695 /* Page maybe pinned: split and retry the fault on PTEs. */
1696 folio_put(src_folio);
1697 pte_free(dst_mm, pgtable);
1698 spin_unlock(src_ptl);
1699 spin_unlock(dst_ptl);
1700 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1701 return -EAGAIN;
1703 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1704 out_zero_page:
1705 mm_inc_nr_ptes(dst_mm);
1706 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1707 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1708 if (!userfaultfd_wp(dst_vma))
1709 pmd = pmd_clear_uffd_wp(pmd);
1710 pmd = pmd_wrprotect(pmd);
1711 set_pmd:
1712 pmd = pmd_mkold(pmd);
1713 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1715 ret = 0;
1716 out_unlock:
1717 spin_unlock(src_ptl);
1718 spin_unlock(dst_ptl);
1719 out:
1720 return ret;
1723 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1724 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1725 pud_t *pud, bool write)
1727 pud_t _pud;
1729 _pud = pud_mkyoung(*pud);
1730 if (write)
1731 _pud = pud_mkdirty(_pud);
1732 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1733 pud, _pud, write))
1734 update_mmu_cache_pud(vma, addr, pud);
1737 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1738 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1739 struct vm_area_struct *vma)
1741 spinlock_t *dst_ptl, *src_ptl;
1742 pud_t pud;
1743 int ret;
1745 dst_ptl = pud_lock(dst_mm, dst_pud);
1746 src_ptl = pud_lockptr(src_mm, src_pud);
1747 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1749 ret = -EAGAIN;
1750 pud = *src_pud;
1751 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1752 goto out_unlock;
1755 * TODO: once we support anonymous pages, use
1756 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
1758 if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
1759 pudp_set_wrprotect(src_mm, addr, src_pud);
1760 pud = pud_wrprotect(pud);
1762 pud = pud_mkold(pud);
1763 set_pud_at(dst_mm, addr, dst_pud, pud);
1765 ret = 0;
1766 out_unlock:
1767 spin_unlock(src_ptl);
1768 spin_unlock(dst_ptl);
1769 return ret;
1772 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1774 bool write = vmf->flags & FAULT_FLAG_WRITE;
1776 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1777 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1778 goto unlock;
1780 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1781 unlock:
1782 spin_unlock(vmf->ptl);
1784 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1786 void huge_pmd_set_accessed(struct vm_fault *vmf)
1788 bool write = vmf->flags & FAULT_FLAG_WRITE;
1790 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1791 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1792 goto unlock;
1794 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1796 unlock:
1797 spin_unlock(vmf->ptl);
1800 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
1802 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1803 struct vm_area_struct *vma = vmf->vma;
1804 struct mmu_notifier_range range;
1805 struct folio *folio;
1806 vm_fault_t ret = 0;
1808 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1809 if (unlikely(!folio))
1810 return VM_FAULT_FALLBACK;
1812 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
1813 haddr + HPAGE_PMD_SIZE);
1814 mmu_notifier_invalidate_range_start(&range);
1815 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1816 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
1817 goto release;
1818 ret = check_stable_address_space(vma->vm_mm);
1819 if (ret)
1820 goto release;
1821 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
1822 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
1823 goto unlock;
1824 release:
1825 folio_put(folio);
1826 unlock:
1827 spin_unlock(vmf->ptl);
1828 mmu_notifier_invalidate_range_end(&range);
1829 return ret;
1832 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1834 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1835 struct vm_area_struct *vma = vmf->vma;
1836 struct folio *folio;
1837 struct page *page;
1838 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1839 pmd_t orig_pmd = vmf->orig_pmd;
1841 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1842 VM_BUG_ON_VMA(!vma->anon_vma, vma);
1844 if (is_huge_zero_pmd(orig_pmd)) {
1845 vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
1847 if (!(ret & VM_FAULT_FALLBACK))
1848 return ret;
1850 /* Fallback to splitting PMD if THP cannot be allocated */
1851 goto fallback;
1854 spin_lock(vmf->ptl);
1856 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1857 spin_unlock(vmf->ptl);
1858 return 0;
1861 page = pmd_page(orig_pmd);
1862 folio = page_folio(page);
1863 VM_BUG_ON_PAGE(!PageHead(page), page);
1865 /* Early check when only holding the PT lock. */
1866 if (PageAnonExclusive(page))
1867 goto reuse;
1869 if (!folio_trylock(folio)) {
1870 folio_get(folio);
1871 spin_unlock(vmf->ptl);
1872 folio_lock(folio);
1873 spin_lock(vmf->ptl);
1874 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1875 spin_unlock(vmf->ptl);
1876 folio_unlock(folio);
1877 folio_put(folio);
1878 return 0;
1880 folio_put(folio);
1883 /* Recheck after temporarily dropping the PT lock. */
1884 if (PageAnonExclusive(page)) {
1885 folio_unlock(folio);
1886 goto reuse;
1889 /*
1890 * See do_wp_page(): we can only reuse the folio exclusively if
1891 * there are no additional references. Note that we always drain
1892 * the LRU cache immediately after adding a THP.
1893 */
1894 if (folio_ref_count(folio) >
1895 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1896 goto unlock_fallback;
1897 if (folio_test_swapcache(folio))
1898 folio_free_swap(folio);
1899 if (folio_ref_count(folio) == 1) {
1900 pmd_t entry;
1902 folio_move_anon_rmap(folio, vma);
1903 SetPageAnonExclusive(page);
1904 folio_unlock(folio);
1905 reuse:
1906 if (unlikely(unshare)) {
1907 spin_unlock(vmf->ptl);
1908 return 0;
1910 entry = pmd_mkyoung(orig_pmd);
1911 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1912 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1913 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1914 spin_unlock(vmf->ptl);
1915 return 0;
1918 unlock_fallback:
1919 folio_unlock(folio);
1920 spin_unlock(vmf->ptl);
1921 fallback:
1922 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1923 return VM_FAULT_FALLBACK;
1926 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1927 unsigned long addr, pmd_t pmd)
1929 struct page *page;
1931 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1932 return false;
1934 /* Don't touch entries that are not even readable (NUMA hinting). */
1935 if (pmd_protnone(pmd))
1936 return false;
1938 /* Do we need write faults for softdirty tracking? */
1939 if (pmd_needs_soft_dirty_wp(vma, pmd))
1940 return false;
1942 /* Do we need write faults for uffd-wp tracking? */
1943 if (userfaultfd_huge_pmd_wp(vma, pmd))
1944 return false;
1946 if (!(vma->vm_flags & VM_SHARED)) {
1947 /* See can_change_pte_writable(). */
1948 page = vm_normal_page_pmd(vma, addr, pmd);
1949 return page && PageAnon(page) && PageAnonExclusive(page);
1952 /* See can_change_pte_writable(). */
1953 return pmd_dirty(pmd);
1956 /* NUMA hinting page fault entry point for trans huge pmds */
1957 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1959 struct vm_area_struct *vma = vmf->vma;
1960 struct folio *folio;
1961 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1962 int nid = NUMA_NO_NODE;
1963 int target_nid, last_cpupid;
1964 pmd_t pmd, old_pmd;
1965 bool writable = false;
1966 int flags = 0;
1968 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1969 old_pmd = pmdp_get(vmf->pmd);
1971 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
1972 spin_unlock(vmf->ptl);
1973 return 0;
1976 pmd = pmd_modify(old_pmd, vma->vm_page_prot);
1979 * Detect now whether the PMD could be writable; this information
1980 * is only valid while holding the PT lock.
1982 writable = pmd_write(pmd);
1983 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1984 can_change_pmd_writable(vma, vmf->address, pmd))
1985 writable = true;
1987 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1988 if (!folio)
1989 goto out_map;
1991 nid = folio_nid(folio);
1993 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
1994 &last_cpupid);
1995 if (target_nid == NUMA_NO_NODE)
1996 goto out_map;
1997 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
1998 flags |= TNF_MIGRATE_FAIL;
1999 goto out_map;
2001 /* The folio is isolated and isolation code holds a folio reference. */
2002 spin_unlock(vmf->ptl);
2003 writable = false;
2005 if (!migrate_misplaced_folio(folio, vma, target_nid)) {
2006 flags |= TNF_MIGRATED;
2007 nid = target_nid;
2008 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2009 return 0;
2012 flags |= TNF_MIGRATE_FAIL;
2013 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2014 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
2015 spin_unlock(vmf->ptl);
2016 return 0;
2018 out_map:
2019 /* Restore the PMD */
2020 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
2021 pmd = pmd_mkyoung(pmd);
2022 if (writable)
2023 pmd = pmd_mkwrite(pmd, vma);
2024 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2025 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2026 spin_unlock(vmf->ptl);
2028 if (nid != NUMA_NO_NODE)
2029 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2030 return 0;
2034 * Return true if we did MADV_FREE successfully on the entire pmd page.
2035 * Otherwise, return false.
2037 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2038 pmd_t *pmd, unsigned long addr, unsigned long next)
2040 spinlock_t *ptl;
2041 pmd_t orig_pmd;
2042 struct folio *folio;
2043 struct mm_struct *mm = tlb->mm;
2044 bool ret = false;
2046 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2048 ptl = pmd_trans_huge_lock(pmd, vma);
2049 if (!ptl)
2050 goto out_unlocked;
2052 orig_pmd = *pmd;
2053 if (is_huge_zero_pmd(orig_pmd))
2054 goto out;
2056 if (unlikely(!pmd_present(orig_pmd))) {
2057 VM_BUG_ON(thp_migration_supported() &&
2058 !is_pmd_migration_entry(orig_pmd));
2059 goto out;
2062 folio = pmd_folio(orig_pmd);
2064 * If other processes are mapping this folio, we can't discard
2065 * the folio unless they all do MADV_FREE, so let's skip the folio.
2067 if (folio_likely_mapped_shared(folio))
2068 goto out;
2070 if (!folio_trylock(folio))
2071 goto out;
2074 * If the user wants to discard part of the THP's pages, split it so
2075 * MADV_FREE will deactivate only those pages.
2077 if (next - addr != HPAGE_PMD_SIZE) {
2078 folio_get(folio);
2079 spin_unlock(ptl);
2080 split_folio(folio);
2081 folio_unlock(folio);
2082 folio_put(folio);
2083 goto out_unlocked;
2086 if (folio_test_dirty(folio))
2087 folio_clear_dirty(folio);
2088 folio_unlock(folio);
2090 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2091 pmdp_invalidate(vma, addr, pmd);
2092 orig_pmd = pmd_mkold(orig_pmd);
2093 orig_pmd = pmd_mkclean(orig_pmd);
2095 set_pmd_at(mm, addr, pmd, orig_pmd);
2096 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2099 folio_mark_lazyfree(folio);
2100 ret = true;
2101 out:
2102 spin_unlock(ptl);
2103 out_unlocked:
2104 return ret;
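
/*
 * Example (illustrative sketch; the madvise_free_huge_pmd_example() helper is
 * hypothetical and the caller context is simplified): the MADV_FREE walker in
 * mm/madvise.c is expected to consume the return value roughly like this --
 * true means the whole PMD range was handled here, false means fall back to
 * processing individual PTEs.
 */
static inline bool madvise_free_huge_pmd_example(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long next)
{
	/* Only worth trying when the PMD currently maps a THP. */
	if (!pmd_trans_huge(*pmd))
		return false;
	return madvise_free_huge_pmd(tlb, vma, pmd, addr, next);
}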
2107 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2109 pgtable_t pgtable;
2111 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2112 pte_free(mm, pgtable);
2113 mm_dec_nr_ptes(mm);
2116 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2117 pmd_t *pmd, unsigned long addr)
2119 pmd_t orig_pmd;
2120 spinlock_t *ptl;
2122 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2124 ptl = __pmd_trans_huge_lock(pmd, vma);
2125 if (!ptl)
2126 return 0;
2128 * For architectures like ppc64 we look at deposited pgtable
2129 * when calling pmdp_huge_get_and_clear. So do the
2130 * pgtable_trans_huge_withdraw after finishing pmdp related
2131 * operations.
2133 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2134 tlb->fullmm);
2135 arch_check_zapped_pmd(vma, orig_pmd);
2136 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2137 if (vma_is_special_huge(vma)) {
2138 if (arch_needs_pgtable_deposit())
2139 zap_deposited_table(tlb->mm, pmd);
2140 spin_unlock(ptl);
2141 } else if (is_huge_zero_pmd(orig_pmd)) {
2142 zap_deposited_table(tlb->mm, pmd);
2143 spin_unlock(ptl);
2144 } else {
2145 struct folio *folio = NULL;
2146 int flush_needed = 1;
2148 if (pmd_present(orig_pmd)) {
2149 struct page *page = pmd_page(orig_pmd);
2151 folio = page_folio(page);
2152 folio_remove_rmap_pmd(folio, page, vma);
2153 WARN_ON_ONCE(folio_mapcount(folio) < 0);
2154 VM_BUG_ON_PAGE(!PageHead(page), page);
2155 } else if (thp_migration_supported()) {
2156 swp_entry_t entry;
2158 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
2159 entry = pmd_to_swp_entry(orig_pmd);
2160 folio = pfn_swap_entry_folio(entry);
2161 flush_needed = 0;
2162 } else
2163 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2165 if (folio_test_anon(folio)) {
2166 zap_deposited_table(tlb->mm, pmd);
2167 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2168 } else {
2169 if (arch_needs_pgtable_deposit())
2170 zap_deposited_table(tlb->mm, pmd);
2171 add_mm_counter(tlb->mm, mm_counter_file(folio),
2172 -HPAGE_PMD_NR);
2175 spin_unlock(ptl);
2176 if (flush_needed)
2177 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2179 return 1;
2182 #ifndef pmd_move_must_withdraw
2183 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2184 spinlock_t *old_pmd_ptl,
2185 struct vm_area_struct *vma)
2188 * With the split pmd lock we also need to move the preallocated
2189 * PTE page table if new_pmd is on a different PMD page table.
2191 * We also don't deposit and withdraw tables for file pages.
2193 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2195 #endif
2197 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2199 #ifdef CONFIG_MEM_SOFT_DIRTY
2200 if (unlikely(is_pmd_migration_entry(pmd)))
2201 pmd = pmd_swp_mksoft_dirty(pmd);
2202 else if (pmd_present(pmd))
2203 pmd = pmd_mksoft_dirty(pmd);
2204 #endif
2205 return pmd;
2208 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2209 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2211 spinlock_t *old_ptl, *new_ptl;
2212 pmd_t pmd;
2213 struct mm_struct *mm = vma->vm_mm;
2214 bool force_flush = false;
2217 * The destination pmd shouldn't be established, free_pgtables()
2218 * should have released it; but move_page_tables() might have already
2219 * inserted a page table, if racing against shmem/file collapse.
2221 if (!pmd_none(*new_pmd)) {
2222 VM_BUG_ON(pmd_trans_huge(*new_pmd));
2223 return false;
2227 * We don't have to worry about the ordering of src and dst
2228 * ptlocks because exclusive mmap_lock prevents deadlock.
2230 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2231 if (old_ptl) {
2232 new_ptl = pmd_lockptr(mm, new_pmd);
2233 if (new_ptl != old_ptl)
2234 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2235 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2236 if (pmd_present(pmd))
2237 force_flush = true;
2238 VM_BUG_ON(!pmd_none(*new_pmd));
2240 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2241 pgtable_t pgtable;
2242 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2243 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2245 pmd = move_soft_dirty_pmd(pmd);
2246 set_pmd_at(mm, new_addr, new_pmd, pmd);
2247 if (force_flush)
2248 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2249 if (new_ptl != old_ptl)
2250 spin_unlock(new_ptl);
2251 spin_unlock(old_ptl);
2252 return true;
2254 return false;
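
/*
 * Example (illustrative sketch; the move_huge_pmd_example() helper is
 * hypothetical and the mremap context is simplified): a page-table mover is
 * expected to try the huge path first and, if it fails due to a race, fall
 * back to splitting the huge pmd and moving individual PTEs.
 */
static inline bool move_huge_pmd_example(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	if (!pmd_trans_huge(*old_pmd))
		return false;
	if (move_huge_pmd(vma, old_addr, new_addr, old_pmd, new_pmd))
		return true;
	/* Lost a race: split the huge pmd and let the PTE path handle it. */
	__split_huge_pmd(vma, old_pmd, old_addr, false, NULL);
	return false;
}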
2258 * Returns
2259 * - 0 if PMD could not be locked
2260 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2261 * or if prot_numa but THP migration is not supported
2262 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
2264 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2265 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2266 unsigned long cp_flags)
2268 struct mm_struct *mm = vma->vm_mm;
2269 spinlock_t *ptl;
2270 pmd_t oldpmd, entry;
2271 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2272 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2273 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2274 int ret = 1;
2276 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2278 if (prot_numa && !thp_migration_supported())
2279 return 1;
2281 ptl = __pmd_trans_huge_lock(pmd, vma);
2282 if (!ptl)
2283 return 0;
2285 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2286 if (is_swap_pmd(*pmd)) {
2287 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2288 struct folio *folio = pfn_swap_entry_folio(entry);
2289 pmd_t newpmd;
2291 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2292 if (is_writable_migration_entry(entry)) {
2294 * A protection check is difficult, so
2295 * just be safe and disable write access.
2297 if (folio_test_anon(folio))
2298 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2299 else
2300 entry = make_readable_migration_entry(swp_offset(entry));
2301 newpmd = swp_entry_to_pmd(entry);
2302 if (pmd_swp_soft_dirty(*pmd))
2303 newpmd = pmd_swp_mksoft_dirty(newpmd);
2304 } else {
2305 newpmd = *pmd;
2308 if (uffd_wp)
2309 newpmd = pmd_swp_mkuffd_wp(newpmd);
2310 else if (uffd_wp_resolve)
2311 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2312 if (!pmd_same(*pmd, newpmd))
2313 set_pmd_at(mm, addr, pmd, newpmd);
2314 goto unlock;
2316 #endif
2318 if (prot_numa) {
2319 struct folio *folio;
2320 bool toptier;
2322 * Avoid trapping faults against the zero page. The read-only
2323 * data is likely to be read-cached on the local CPU and
2324 * local/remote hits to the zero page are not interesting.
2326 if (is_huge_zero_pmd(*pmd))
2327 goto unlock;
2329 if (pmd_protnone(*pmd))
2330 goto unlock;
2332 folio = pmd_folio(*pmd);
2333 toptier = node_is_toptier(folio_nid(folio));
2335 * Skip scanning the top-tier node if normal NUMA
2336 * balancing is disabled.
2338 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2339 toptier)
2340 goto unlock;
2342 if (folio_use_access_time(folio))
2343 folio_xchg_access_time(folio,
2344 jiffies_to_msecs(jiffies));
2347 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2348 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2349 * which is also under mmap_read_lock(mm):
2351 * CPU0: CPU1:
2352 * change_huge_pmd(prot_numa=1)
2353 * pmdp_huge_get_and_clear_notify()
2354 * madvise_dontneed()
2355 * zap_pmd_range()
2356 * pmd_trans_huge(*pmd) == 0 (without ptl)
2357 * // skip the pmd
2358 * set_pmd_at();
2359 * // pmd is re-established
2361 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
2362 * which may break userspace.
2364 * pmdp_invalidate_ad() is required to make sure we don't miss
2365 * dirty/young flags set by hardware.
2367 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2369 entry = pmd_modify(oldpmd, newprot);
2370 if (uffd_wp)
2371 entry = pmd_mkuffd_wp(entry);
2372 else if (uffd_wp_resolve)
2374 * Leave the write bit to be handled by the page fault
2375 * handler, so that things like COW can be handled
2376 * properly.
2378 entry = pmd_clear_uffd_wp(entry);
2380 /* See change_pte_range(). */
2381 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2382 can_change_pmd_writable(vma, addr, entry))
2383 entry = pmd_mkwrite(entry, vma);
2385 ret = HPAGE_PMD_NR;
2386 set_pmd_at(mm, addr, pmd, entry);
2388 if (huge_pmd_needs_flush(oldpmd, entry))
2389 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2390 unlock:
2391 spin_unlock(ptl);
2392 return ret;
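
/*
 * Example (illustrative sketch; the change_huge_pmd_example() helper is
 * hypothetical and the mprotect walker state is simplified): how a caller
 * might act on the return values documented above.
 */
static inline int change_huge_pmd_example(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		pgprot_t newprot, unsigned long cp_flags)
{
	int ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags);

	if (ret == 0)
		return 0;		/* not (or no longer) a huge pmd: walk the PTEs instead */
	if (ret == HPAGE_PMD_NR)
		return HPAGE_PMD_NR;	/* whole THP updated, count the pages */
	return 0;			/* ret == 1: nothing to change, skip this pmd */
}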
2396 * Returns:
2398 * - 0: if pud leaf changed from under us
2399 * - 1: if pud can be skipped
2400 * - HPAGE_PUD_NR: if pud was successfully processed
2402 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2403 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2404 pud_t *pudp, unsigned long addr, pgprot_t newprot,
2405 unsigned long cp_flags)
2407 struct mm_struct *mm = vma->vm_mm;
2408 pud_t oldpud, entry;
2409 spinlock_t *ptl;
2411 tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2413 /* NUMA balancing doesn't apply to dax */
2414 if (cp_flags & MM_CP_PROT_NUMA)
2415 return 1;
2418 * Huge entries with userfault-wp only work with anonymous memory, and we
2419 * don't have anonymous PUDs yet.
2421 if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2422 return 1;
2424 ptl = __pud_trans_huge_lock(pudp, vma);
2425 if (!ptl)
2426 return 0;
2429 * Can't clear PUD or it can race with concurrent zapping. See
2430 * change_huge_pmd().
2432 oldpud = pudp_invalidate(vma, addr, pudp);
2433 entry = pud_modify(oldpud, newprot);
2434 set_pud_at(mm, addr, pudp, entry);
2435 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2437 spin_unlock(ptl);
2438 return HPAGE_PUD_NR;
2440 #endif
2442 #ifdef CONFIG_USERFAULTFD
2444 * The PT lock for src_pmd and the dst_vma/src_vma locks (for reading) are held
2445 * by the caller, but this function must return after releasing the page table lock.
2446 * Just move the page from src_pmd to dst_pmd if possible.
2447 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2448 * repeated by the caller, or other errors in case of failure.
2450 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2451 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2452 unsigned long dst_addr, unsigned long src_addr)
2454 pmd_t _dst_pmd, src_pmdval;
2455 struct page *src_page;
2456 struct folio *src_folio;
2457 struct anon_vma *src_anon_vma;
2458 spinlock_t *src_ptl, *dst_ptl;
2459 pgtable_t src_pgtable;
2460 struct mmu_notifier_range range;
2461 int err = 0;
2463 src_pmdval = *src_pmd;
2464 src_ptl = pmd_lockptr(mm, src_pmd);
2466 lockdep_assert_held(src_ptl);
2467 vma_assert_locked(src_vma);
2468 vma_assert_locked(dst_vma);
2470 /* Sanity checks before the operation */
2471 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2472 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2473 spin_unlock(src_ptl);
2474 return -EINVAL;
2477 if (!pmd_trans_huge(src_pmdval)) {
2478 spin_unlock(src_ptl);
2479 if (is_pmd_migration_entry(src_pmdval)) {
2480 pmd_migration_entry_wait(mm, &src_pmdval);
2481 return -EAGAIN;
2483 return -ENOENT;
2486 src_page = pmd_page(src_pmdval);
2488 if (!is_huge_zero_pmd(src_pmdval)) {
2489 if (unlikely(!PageAnonExclusive(src_page))) {
2490 spin_unlock(src_ptl);
2491 return -EBUSY;
2494 src_folio = page_folio(src_page);
2495 folio_get(src_folio);
2496 } else
2497 src_folio = NULL;
2499 spin_unlock(src_ptl);
2501 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2502 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2503 src_addr + HPAGE_PMD_SIZE);
2504 mmu_notifier_invalidate_range_start(&range);
2506 if (src_folio) {
2507 folio_lock(src_folio);
2510 * split_huge_page() walks the anon_vma chain without the page
2511 * lock. Serialize against it with the anon_vma lock; the page
2512 * lock is not enough.
2514 src_anon_vma = folio_get_anon_vma(src_folio);
2515 if (!src_anon_vma) {
2516 err = -EAGAIN;
2517 goto unlock_folio;
2519 anon_vma_lock_write(src_anon_vma);
2520 } else
2521 src_anon_vma = NULL;
2523 dst_ptl = pmd_lockptr(mm, dst_pmd);
2524 double_pt_lock(src_ptl, dst_ptl);
2525 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2526 !pmd_same(*dst_pmd, dst_pmdval))) {
2527 err = -EAGAIN;
2528 goto unlock_ptls;
2530 if (src_folio) {
2531 if (folio_maybe_dma_pinned(src_folio) ||
2532 !PageAnonExclusive(&src_folio->page)) {
2533 err = -EBUSY;
2534 goto unlock_ptls;
2537 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2538 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2539 err = -EBUSY;
2540 goto unlock_ptls;
2543 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2544 /* Folio got pinned from under us. Put it back and fail the move. */
2545 if (folio_maybe_dma_pinned(src_folio)) {
2546 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2547 err = -EBUSY;
2548 goto unlock_ptls;
2551 folio_move_anon_rmap(src_folio, dst_vma);
2552 src_folio->index = linear_page_index(dst_vma, dst_addr);
2554 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2555 /* Follow mremap() behavior and treat the entry as dirty after the move */
2556 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2557 } else {
2558 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2559 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
2561 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2563 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2564 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2565 unlock_ptls:
2566 double_pt_unlock(src_ptl, dst_ptl);
2567 if (src_anon_vma) {
2568 anon_vma_unlock_write(src_anon_vma);
2569 put_anon_vma(src_anon_vma);
2571 unlock_folio:
2572 /* unblock rmap walks */
2573 if (src_folio)
2574 folio_unlock(src_folio);
2575 mmu_notifier_invalidate_range_end(&range);
2576 if (src_folio)
2577 folio_put(src_folio);
2578 return err;
2580 #endif /* CONFIG_USERFAULTFD */
2583 * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2585 * Note that if it returns the page table lock pointer, this routine returns
2586 * without unlocking it, so callers must unlock it themselves.
2588 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2590 spinlock_t *ptl;
2591 ptl = pmd_lock(vma->vm_mm, pmd);
2592 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2593 pmd_devmap(*pmd)))
2594 return ptl;
2595 spin_unlock(ptl);
2596 return NULL;
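
/*
 * Example usage (a minimal sketch; the pmd_trans_huge_lock_example() helper is
 * hypothetical): callers in this file, such as madvise_free_huge_pmd() and
 * zap_huge_pmd(), follow this pattern -- take the lock, bail out if the pmd is
 * not huge, and unlock on every path.
 */
static inline bool pmd_trans_huge_lock_example(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return false;	/* not a huge pmd (nor a huge swap entry): fall back to PTEs */

	/* *pmd is stable here; inspect or modify it under ptl. */

	spin_unlock(ptl);
	return true;
}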
2600 * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
2602 * Note that if it returns the page table lock pointer, this routine returns
2603 * without unlocking it, so callers must unlock it themselves.
2605 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2607 spinlock_t *ptl;
2609 ptl = pud_lock(vma->vm_mm, pud);
2610 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2611 return ptl;
2612 spin_unlock(ptl);
2613 return NULL;
2616 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2617 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2618 pud_t *pud, unsigned long addr)
2620 spinlock_t *ptl;
2621 pud_t orig_pud;
2623 ptl = __pud_trans_huge_lock(pud, vma);
2624 if (!ptl)
2625 return 0;
2627 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2628 arch_check_zapped_pud(vma, orig_pud);
2629 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2630 if (vma_is_special_huge(vma)) {
2631 spin_unlock(ptl);
2632 /* No zero page support yet */
2633 } else {
2634 /* No support for anonymous PUD pages yet */
2635 BUG();
2637 return 1;
2640 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2641 unsigned long haddr)
2643 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2644 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2645 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2646 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2648 count_vm_event(THP_SPLIT_PUD);
2650 pudp_huge_clear_flush(vma, haddr, pud);
2653 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2654 unsigned long address)
2656 spinlock_t *ptl;
2657 struct mmu_notifier_range range;
2659 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2660 address & HPAGE_PUD_MASK,
2661 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2662 mmu_notifier_invalidate_range_start(&range);
2663 ptl = pud_lock(vma->vm_mm, pud);
2664 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2665 goto out;
2666 __split_huge_pud_locked(vma, pud, range.start);
2668 out:
2669 spin_unlock(ptl);
2670 mmu_notifier_invalidate_range_end(&range);
2672 #else
2673 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2674 unsigned long address)
2677 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2679 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2680 unsigned long haddr, pmd_t *pmd)
2682 struct mm_struct *mm = vma->vm_mm;
2683 pgtable_t pgtable;
2684 pmd_t _pmd, old_pmd;
2685 unsigned long addr;
2686 pte_t *pte;
2687 int i;
2690 * Leave the pmd empty until the ptes are filled. Note that it is fine to
2691 * delay the notification until mmu_notifier_invalidate_range_end(), as we
2692 * are replacing a write-protected zero huge page with write-protected zero
2693 * small pages.
2695 * See Documentation/mm/mmu_notifier.rst
2697 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2699 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2700 pmd_populate(mm, &_pmd, pgtable);
2702 pte = pte_offset_map(&_pmd, haddr);
2703 VM_BUG_ON(!pte);
2704 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2705 pte_t entry;
2707 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2708 entry = pte_mkspecial(entry);
2709 if (pmd_uffd_wp(old_pmd))
2710 entry = pte_mkuffd_wp(entry);
2711 VM_BUG_ON(!pte_none(ptep_get(pte)));
2712 set_pte_at(mm, addr, pte, entry);
2713 pte++;
2715 pte_unmap(pte - 1);
2716 smp_wmb(); /* make pte visible before pmd */
2717 pmd_populate(mm, pmd, pgtable);
2720 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2721 unsigned long haddr, bool freeze)
2723 struct mm_struct *mm = vma->vm_mm;
2724 struct folio *folio;
2725 struct page *page;
2726 pgtable_t pgtable;
2727 pmd_t old_pmd, _pmd;
2728 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2729 bool anon_exclusive = false, dirty = false;
2730 unsigned long addr;
2731 pte_t *pte;
2732 int i;
2734 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2735 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2736 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2737 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2738 && !pmd_devmap(*pmd));
2740 count_vm_event(THP_SPLIT_PMD);
2742 if (!vma_is_anonymous(vma)) {
2743 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2745 * We are going to unmap this huge page. So
2746 * just go ahead and zap it
2748 if (arch_needs_pgtable_deposit())
2749 zap_deposited_table(mm, pmd);
2750 if (vma_is_special_huge(vma))
2751 return;
2752 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2753 swp_entry_t entry;
2755 entry = pmd_to_swp_entry(old_pmd);
2756 folio = pfn_swap_entry_folio(entry);
2757 } else {
2758 page = pmd_page(old_pmd);
2759 folio = page_folio(page);
2760 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2761 folio_mark_dirty(folio);
2762 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2763 folio_set_referenced(folio);
2764 folio_remove_rmap_pmd(folio, page, vma);
2765 folio_put(folio);
2767 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
2768 return;
2771 if (is_huge_zero_pmd(*pmd)) {
2773 * FIXME: Do we want to invalidate the secondary mmu by calling
2774 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments below
2775 * inside __split_huge_pmd().
2777 * We are going from a write-protected huge zero page to write-protected
2778 * small zero pages, so it does not seem useful to invalidate the
2779 * secondary mmu at this time.
2781 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2784 pmd_migration = is_pmd_migration_entry(*pmd);
2785 if (unlikely(pmd_migration)) {
2786 swp_entry_t entry;
2788 old_pmd = *pmd;
2789 entry = pmd_to_swp_entry(old_pmd);
2790 page = pfn_swap_entry_to_page(entry);
2791 write = is_writable_migration_entry(entry);
2792 if (PageAnon(page))
2793 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2794 young = is_migration_entry_young(entry);
2795 dirty = is_migration_entry_dirty(entry);
2796 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2797 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2798 } else {
2800 * Up to this point the pmd is present and huge and userland has
2801 * full access to the hugepage during the split (which
2802 * happens in place). If we overwrite the pmd with the not-huge
2803 * version pointing to the pte here (which of course we could if
2804 * all CPUs were bug free), userland could trigger a small page
2805 * size TLB miss on the small sized TLB while the hugepage TLB
2806 * entry is still established in the huge TLB. Some CPUs don't
2807 * like that. See
2808 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2809 * 383 on page 105. Intel should be safe, but it also warns that
2810 * it's only safe if the permission and cache attributes of the
2811 * two entries loaded into the two TLBs are identical (which should
2812 * be the case here). But it is generally safer to never allow
2813 * small and huge TLB entries for the same virtual address to be
2814 * loaded simultaneously. So instead of doing "pmd_populate();
2815 * flush_pmd_tlb_range();" we first mark the current pmd
2816 * notpresent (atomically because here the pmd_trans_huge must
2817 * remain set at all times on the pmd until the split is
2818 * complete for this pmd), then we flush the SMP TLB and finally
2819 * we write the non-huge version of the pmd entry with
2820 * pmd_populate.
2822 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2823 page = pmd_page(old_pmd);
2824 folio = page_folio(page);
2825 if (pmd_dirty(old_pmd)) {
2826 dirty = true;
2827 folio_set_dirty(folio);
2829 write = pmd_write(old_pmd);
2830 young = pmd_young(old_pmd);
2831 soft_dirty = pmd_soft_dirty(old_pmd);
2832 uffd_wp = pmd_uffd_wp(old_pmd);
2834 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2835 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2838 * Without "freeze", we'll simply split the PMD, propagating the
2839 * PageAnonExclusive() flag for each PTE by setting it for
2840 * each subpage -- no need to (temporarily) clear.
2842 * With "freeze" we want to replace mapped pages by
2843 * migration entries right away. This is only possible if we
2844 * managed to clear PageAnonExclusive() -- see
2845 * set_pmd_migration_entry().
2847 * In case we cannot clear PageAnonExclusive(), split the PMD
2848 * only and let try_to_migrate_one() fail later.
2850 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2852 anon_exclusive = PageAnonExclusive(page);
2853 if (freeze && anon_exclusive &&
2854 folio_try_share_anon_rmap_pmd(folio, page))
2855 freeze = false;
2856 if (!freeze) {
2857 rmap_t rmap_flags = RMAP_NONE;
2859 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2860 if (anon_exclusive)
2861 rmap_flags |= RMAP_EXCLUSIVE;
2862 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2863 vma, haddr, rmap_flags);
2868 * Withdraw the table only after we mark the pmd entry invalid.
2869 * This is critical for some architectures (Power).
2871 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2872 pmd_populate(mm, &_pmd, pgtable);
2874 pte = pte_offset_map(&_pmd, haddr);
2875 VM_BUG_ON(!pte);
2878 * Note that NUMA hinting access restrictions are not transferred to
2879 * avoid any possibility of altering permissions across VMAs.
2881 if (freeze || pmd_migration) {
2882 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2883 pte_t entry;
2884 swp_entry_t swp_entry;
2886 if (write)
2887 swp_entry = make_writable_migration_entry(
2888 page_to_pfn(page + i));
2889 else if (anon_exclusive)
2890 swp_entry = make_readable_exclusive_migration_entry(
2891 page_to_pfn(page + i));
2892 else
2893 swp_entry = make_readable_migration_entry(
2894 page_to_pfn(page + i));
2895 if (young)
2896 swp_entry = make_migration_entry_young(swp_entry);
2897 if (dirty)
2898 swp_entry = make_migration_entry_dirty(swp_entry);
2899 entry = swp_entry_to_pte(swp_entry);
2900 if (soft_dirty)
2901 entry = pte_swp_mksoft_dirty(entry);
2902 if (uffd_wp)
2903 entry = pte_swp_mkuffd_wp(entry);
2905 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2906 set_pte_at(mm, addr, pte + i, entry);
2908 } else {
2909 pte_t entry;
2911 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
2912 if (write)
2913 entry = pte_mkwrite(entry, vma);
2914 if (!young)
2915 entry = pte_mkold(entry);
2916 /* NOTE: this may set soft-dirty too on some archs */
2917 if (dirty)
2918 entry = pte_mkdirty(entry);
2919 if (soft_dirty)
2920 entry = pte_mksoft_dirty(entry);
2921 if (uffd_wp)
2922 entry = pte_mkuffd_wp(entry);
2924 for (i = 0; i < HPAGE_PMD_NR; i++)
2925 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2927 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
2929 pte_unmap(pte);
2931 if (!pmd_migration)
2932 folio_remove_rmap_pmd(folio, page, vma);
2933 if (freeze)
2934 put_page(page);
2936 smp_wmb(); /* make pte visible before pmd */
2937 pmd_populate(mm, pmd, pgtable);
2940 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
2941 pmd_t *pmd, bool freeze, struct folio *folio)
2943 VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
2944 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
2945 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2946 VM_BUG_ON(freeze && !folio);
2949 * When the caller requests to set up a migration entry, we
2950 * require a folio to check the PMD against. Otherwise, there
2951 * is a risk of replacing the wrong folio.
2953 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2954 is_pmd_migration_entry(*pmd)) {
2955 if (folio && folio != pmd_folio(*pmd))
2956 return;
2957 __split_huge_pmd_locked(vma, pmd, address, freeze);
2961 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2962 unsigned long address, bool freeze, struct folio *folio)
2964 spinlock_t *ptl;
2965 struct mmu_notifier_range range;
2967 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2968 address & HPAGE_PMD_MASK,
2969 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2970 mmu_notifier_invalidate_range_start(&range);
2971 ptl = pmd_lock(vma->vm_mm, pmd);
2972 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio);
2973 spin_unlock(ptl);
2974 mmu_notifier_invalidate_range_end(&range);
2977 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2978 bool freeze, struct folio *folio)
2980 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2982 if (!pmd)
2983 return;
2985 __split_huge_pmd(vma, pmd, address, freeze, folio);
2988 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2991 * If the new address isn't hpage aligned and it could previously
2992 * contain a hugepage: check if we need to split a huge pmd.
2994 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2995 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2996 ALIGN(address, HPAGE_PMD_SIZE)))
2997 split_huge_pmd_address(vma, address, false, NULL);
3000 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3001 unsigned long start,
3002 unsigned long end,
3003 long adjust_next)
3005 /* Check if we need to split start first. */
3006 split_huge_pmd_if_needed(vma, start);
3008 /* Check if we need to split end next. */
3009 split_huge_pmd_if_needed(vma, end);
3012 * If we're also updating the next vma vm_start,
3013 * check if we need to split it.
3015 if (adjust_next > 0) {
3016 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
3017 unsigned long nstart = next->vm_start;
3018 nstart += adjust_next;
3019 split_huge_pmd_if_needed(next, nstart);
3023 static void unmap_folio(struct folio *folio)
3025 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
3026 TTU_BATCH_FLUSH;
3028 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3030 if (folio_test_pmd_mappable(folio))
3031 ttu_flags |= TTU_SPLIT_HUGE_PMD;
3034 * Anon pages need migration entries to preserve them, but file
3035 * pages can simply be left unmapped, then faulted back on demand.
3036 * If that is ever changed (perhaps for mlock), update remap_page().
3038 if (folio_test_anon(folio))
3039 try_to_migrate(folio, ttu_flags);
3040 else
3041 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3043 try_to_unmap_flush();
3046 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
3047 unsigned long addr, pmd_t *pmdp,
3048 struct folio *folio)
3050 struct mm_struct *mm = vma->vm_mm;
3051 int ref_count, map_count;
3052 pmd_t orig_pmd = *pmdp;
3054 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
3055 return false;
3057 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
3060 * Syncing against concurrent GUP-fast:
3061 * - clear PMD; barrier; read refcount
3062 * - inc refcount; barrier; read PMD
3064 smp_mb();
3066 ref_count = folio_ref_count(folio);
3067 map_count = folio_mapcount(folio);
3070 * Order reads for folio refcount and dirty flag
3071 * (see comments in __remove_mapping()).
3073 smp_rmb();
3076 * If the folio or its PMD is redirtied at this point, or if there
3077 * are unexpected references, we will give up on discarding this folio
3078 * and remap it.
3080 * The only folio refs must be one from isolation plus the rmap(s).
3082 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
3083 ref_count != map_count + 1) {
3084 set_pmd_at(mm, addr, pmdp, orig_pmd);
3085 return false;
3088 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3089 zap_deposited_table(mm, pmdp);
3090 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3091 if (vma->vm_flags & VM_LOCKED)
3092 mlock_drain_local();
3093 folio_put(folio);
3095 return true;
3098 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3099 pmd_t *pmdp, struct folio *folio)
3101 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3102 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3103 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3105 if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
3106 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
3108 return false;
3111 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3113 int i = 0;
3115 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
3116 if (!folio_test_anon(folio))
3117 return;
3118 for (;;) {
3119 remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
3120 i += folio_nr_pages(folio);
3121 if (i >= nr)
3122 break;
3123 folio = folio_next(folio);
3127 static void lru_add_page_tail(struct folio *folio, struct page *tail,
3128 struct lruvec *lruvec, struct list_head *list)
3130 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3131 VM_BUG_ON_FOLIO(PageLRU(tail), folio);
3132 lockdep_assert_held(&lruvec->lru_lock);
3134 if (list) {
3135 /* page reclaim is reclaiming a huge page */
3136 VM_WARN_ON(folio_test_lru(folio));
3137 get_page(tail);
3138 list_add_tail(&tail->lru, list);
3139 } else {
3140 /* head is still on lru (and we have it frozen) */
3141 VM_WARN_ON(!folio_test_lru(folio));
3142 if (folio_test_unevictable(folio))
3143 tail->mlock_count = 0;
3144 else
3145 list_add_tail(&tail->lru, &folio->lru);
3146 SetPageLRU(tail);
3150 static void __split_huge_page_tail(struct folio *folio, int tail,
3151 struct lruvec *lruvec, struct list_head *list,
3152 unsigned int new_order)
3154 struct page *head = &folio->page;
3155 struct page *page_tail = head + tail;
3157 * Careful: new_folio is not a "real" folio before we cleared PageTail.
3158 * Don't pass it around before clear_compound_head().
3160 struct folio *new_folio = (struct folio *)page_tail;
3162 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
3165 * Clone page flags before unfreezing refcount.
3167 * A flags change might follow a successful get_page_unless_zero(),
3168 * for example lock_page(), which sets PG_waiters.
3170 * Note that for mapped sub-pages of an anonymous THP,
3171 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3172 * the migration entry instead from where remap_page() will restore it.
3173 * We can still have PG_anon_exclusive set on effectively unmapped and
3174 * unreferenced sub-pages of an anonymous THP: we can simply drop
3175 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3177 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3178 page_tail->flags |= (head->flags &
3179 ((1L << PG_referenced) |
3180 (1L << PG_swapbacked) |
3181 (1L << PG_swapcache) |
3182 (1L << PG_mlocked) |
3183 (1L << PG_uptodate) |
3184 (1L << PG_active) |
3185 (1L << PG_workingset) |
3186 (1L << PG_locked) |
3187 (1L << PG_unevictable) |
3188 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3189 (1L << PG_arch_2) |
3190 #endif
3191 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3192 (1L << PG_arch_3) |
3193 #endif
3194 (1L << PG_dirty) |
3195 LRU_GEN_MASK | LRU_REFS_MASK));
3197 /* ->mapping in first and second tail page is replaced by other uses */
3198 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3199 page_tail);
3200 new_folio->mapping = folio->mapping;
3201 new_folio->index = folio->index + tail;
3204 * page->private should not be set in tail pages. Fix up and warn once
3205 * if private is unexpectedly set.
3207 if (unlikely(page_tail->private)) {
3208 VM_WARN_ON_ONCE_PAGE(true, page_tail);
3209 page_tail->private = 0;
3211 if (folio_test_swapcache(folio))
3212 new_folio->swap.val = folio->swap.val + tail;
3214 /* Page flags must be visible before we make the page non-compound. */
3215 smp_wmb();
3218 * Clear PageTail before unfreezing page refcount.
3220 * A put_page() might follow a successful get_page_unless_zero(),
3221 * and it needs a correct compound_head().
3223 clear_compound_head(page_tail);
3224 if (new_order) {
3225 prep_compound_page(page_tail, new_order);
3226 folio_set_large_rmappable(new_folio);
3229 /* Finally unfreeze refcount. Additional reference from page cache. */
3230 page_ref_unfreeze(page_tail,
3231 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
3232 folio_nr_pages(new_folio) : 0));
3234 if (folio_test_young(folio))
3235 folio_set_young(new_folio);
3236 if (folio_test_idle(folio))
3237 folio_set_idle(new_folio);
3239 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3242 * Always add to the tail, because some iterators expect new
3243 * pages to show up after the currently processed elements, e.g.
3244 * migrate_pages().
3246 lru_add_page_tail(folio, page_tail, lruvec, list);
3249 static void __split_huge_page(struct page *page, struct list_head *list,
3250 pgoff_t end, unsigned int new_order)
3252 struct folio *folio = page_folio(page);
3253 struct page *head = &folio->page;
3254 struct lruvec *lruvec;
3255 struct address_space *swap_cache = NULL;
3256 unsigned long offset = 0;
3257 int i, nr_dropped = 0;
3258 unsigned int new_nr = 1 << new_order;
3259 int order = folio_order(folio);
3260 unsigned int nr = 1 << order;
3262 /* complete the memcg work before adding pages to the LRU */
3263 split_page_memcg(head, order, new_order);
3265 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
3266 offset = swap_cache_index(folio->swap);
3267 swap_cache = swap_address_space(folio->swap);
3268 xa_lock(&swap_cache->i_pages);
3271 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3272 lruvec = folio_lruvec_lock(folio);
3274 ClearPageHasHWPoisoned(head);
3276 for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
3277 struct folio *tail;
3278 __split_huge_page_tail(folio, i, lruvec, list, new_order);
3279 tail = page_folio(head + i);
3280 /* Some pages can be beyond EOF: drop them from page cache */
3281 if (tail->index >= end) {
3282 if (shmem_mapping(folio->mapping))
3283 nr_dropped++;
3284 else if (folio_test_clear_dirty(tail))
3285 folio_account_cleaned(tail,
3286 inode_to_wb(folio->mapping->host));
3287 __filemap_remove_folio(tail, NULL);
3288 folio_put(tail);
3289 } else if (!folio_test_anon(folio)) {
3290 __xa_store(&folio->mapping->i_pages, tail->index,
3291 tail, 0);
3292 } else if (swap_cache) {
3293 __xa_store(&swap_cache->i_pages, offset + i,
3294 tail, 0);
3298 if (!new_order)
3299 ClearPageCompound(head);
3300 else {
3301 struct folio *new_folio = (struct folio *)head;
3303 folio_set_order(new_folio, new_order);
3305 unlock_page_lruvec(lruvec);
3306 /* Caller disabled irqs, so they are still disabled here */
3308 split_page_owner(head, order, new_order);
3309 pgalloc_tag_split(folio, order, new_order);
3311 /* See comment in __split_huge_page_tail() */
3312 if (folio_test_anon(folio)) {
3313 /* Additional pin to swap cache */
3314 if (folio_test_swapcache(folio)) {
3315 folio_ref_add(folio, 1 + new_nr);
3316 xa_unlock(&swap_cache->i_pages);
3317 } else {
3318 folio_ref_inc(folio);
3320 } else {
3321 /* Additional pin to page cache */
3322 folio_ref_add(folio, 1 + new_nr);
3323 xa_unlock(&folio->mapping->i_pages);
3325 local_irq_enable();
3327 if (nr_dropped)
3328 shmem_uncharge(folio->mapping->host, nr_dropped);
3329 remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
3332 * Set page to its compound_head when splitting to non-order-0 pages, so
3333 * we can skip unlocking it below, since PG_locked is transferred to
3334 * the compound_head of the page and the caller will unlock it.
3336 if (new_order)
3337 page = compound_head(page);
3339 for (i = 0; i < nr; i += new_nr) {
3340 struct page *subpage = head + i;
3341 struct folio *new_folio = page_folio(subpage);
3342 if (subpage == page)
3343 continue;
3344 folio_unlock(new_folio);
3347 * Subpages may be freed if there wasn't any mapping,
3348 * e.g. if add_to_swap() is running on an LRU page that
3349 * had its mapping zapped. Freeing these pages
3350 * requires taking the lru_lock, so we do the put_page
3351 * of the tail pages after the split is complete.
3353 free_page_and_swap_cache(subpage);
3357 /* Racy check whether the huge page can be split */
3358 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
3360 int extra_pins;
3362 /* Additional pins from page cache */
3363 if (folio_test_anon(folio))
3364 extra_pins = folio_test_swapcache(folio) ?
3365 folio_nr_pages(folio) : 0;
3366 else
3367 extra_pins = folio_nr_pages(folio);
3368 if (pextra_pins)
3369 *pextra_pins = extra_pins;
3370 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
3371 caller_pins;
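
/*
 * Example usage (a minimal sketch; the can_split_folio_example() helper is
 * hypothetical): split_huge_page_to_list_to_order() below probes with
 * caller_pins == 1, since the caller is required to hold exactly one extra
 * folio reference across the split attempt.
 */
static inline bool can_split_folio_example(struct folio *folio)
{
	int extra_pins;

	return can_split_folio(folio, 1, &extra_pins);
}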
3375 * This function splits a large folio into smaller folios of order @new_order.
3376 * @page can point to any page of the large folio to split. The split operation
3377 * does not change the position of @page.
3379 * Prerequisites:
3381 * 1) The caller must hold a reference on the @page's owning folio, also known
3382 * as the large folio.
3384 * 2) The large folio must be locked.
3386 * 3) The folio must not be pinned. Any unexpected folio references, including
3387 * GUP pins, will result in the folio not getting split; instead, the caller
3388 * will receive an -EAGAIN.
3390 * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
3391 * supported, because folio->_deferred_list, which
3392 * is used by partially mapped folios, is stored in subpage 2, but an order-1
3393 * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
3394 * since they do not use _deferred_list.
3396 * After splitting, the caller's folio reference will be transferred to @page,
3397 * resulting in a raised refcount of @page after this call. The other pages may
3398 * be freed if they are not mapped.
3400 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3402 * Pages in @new_order will inherit the mapping, flags, and so on from the
3403 * huge page.
3405 * Returns 0 if the huge page was split successfully.
3407 * Returns -EAGAIN if the folio has an unexpected reference (e.g., GUP) or if
3408 * the folio was concurrently removed from the page cache.
3410 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3411 * under writeback, if fs-specific folio metadata cannot currently be
3412 * released, or if some unexpected race happened (e.g., anon VMA disappeared,
3413 * truncation).
3415 * Callers should ensure that the order respects the address space mapping
3416 * min-order if one is set for non-anonymous folios.
3418 * Returns -EINVAL when trying to split to an order that is incompatible
3419 * with the folio. Splitting to order 0 is compatible with all folios.
3421 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3422 unsigned int new_order)
3424 struct folio *folio = page_folio(page);
3425 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3426 /* reset xarray order to new order after split */
3427 XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
3428 bool is_anon = folio_test_anon(folio);
3429 struct address_space *mapping = NULL;
3430 struct anon_vma *anon_vma = NULL;
3431 int order = folio_order(folio);
3432 int extra_pins, ret;
3433 pgoff_t end;
3434 bool is_hzp;
3436 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3437 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3439 if (new_order >= folio_order(folio))
3440 return -EINVAL;
3442 if (is_anon) {
3443 /* order-1 is not supported for anonymous THP. */
3444 if (new_order == 1) {
3445 VM_WARN_ONCE(1, "Cannot split to order-1 folio");
3446 return -EINVAL;
3448 } else if (new_order) {
3449 /* Split shmem folio to non-zero order not supported */
3450 if (shmem_mapping(folio->mapping)) {
3451 VM_WARN_ONCE(1,
3452 "Cannot split shmem folio to non-0 order");
3453 return -EINVAL;
3456 * No split if the file system does not support large folios.
3457 * Note that we might still have THPs in such mappings due to
3458 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
3459 * does not actually support large folios properly.
3461 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3462 !mapping_large_folio_support(folio->mapping)) {
3463 VM_WARN_ONCE(1,
3464 "Cannot split file folio to non-0 order");
3465 return -EINVAL;
3469 /* Only swapping a whole PMD-mapped folio is supported */
3470 if (folio_test_swapcache(folio) && new_order)
3471 return -EINVAL;
3473 is_hzp = is_huge_zero_folio(folio);
3474 if (is_hzp) {
3475 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
3476 return -EBUSY;
3479 if (folio_test_writeback(folio))
3480 return -EBUSY;
3482 if (is_anon) {
3484 * The caller does not necessarily hold an mmap_lock that would
3485 * prevent the anon_vma from disappearing, so we first take a
3486 * reference to it and then lock the anon_vma for write. This
3487 * is similar to folio_lock_anon_vma_read except the write lock
3488 * is taken to serialise against parallel split or collapse
3489 * operations.
3491 anon_vma = folio_get_anon_vma(folio);
3492 if (!anon_vma) {
3493 ret = -EBUSY;
3494 goto out;
3496 end = -1;
3497 mapping = NULL;
3498 anon_vma_lock_write(anon_vma);
3499 } else {
3500 unsigned int min_order;
3501 gfp_t gfp;
3503 mapping = folio->mapping;
3505 /* Truncated ? */
3506 if (!mapping) {
3507 ret = -EBUSY;
3508 goto out;
3511 min_order = mapping_min_folio_order(folio->mapping);
3512 if (new_order < min_order) {
3513 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
3514 min_order);
3515 ret = -EINVAL;
3516 goto out;
3519 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3520 GFP_RECLAIM_MASK);
3522 if (!filemap_release_folio(folio, gfp)) {
3523 ret = -EBUSY;
3524 goto out;
3527 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3528 if (xas_error(&xas)) {
3529 ret = xas_error(&xas);
3530 goto out;
3533 anon_vma = NULL;
3534 i_mmap_lock_read(mapping);
3537 * __split_huge_page() may need to trim off pages beyond EOF:
3538 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3539 * which cannot be nested inside the page tree lock. So note
3540 * end now: i_size itself may be changed at any moment, but
3541 * folio lock is good enough to serialize the trimming.
3543 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3544 if (shmem_mapping(mapping))
3545 end = shmem_fallocend(mapping->host, end);
3549 * Racy check if we can split the page, before unmap_folio()
3550 * splits the PMDs.
3552 if (!can_split_folio(folio, 1, &extra_pins)) {
3553 ret = -EAGAIN;
3554 goto out_unlock;
3557 unmap_folio(folio);
3559 /* block interrupt reentry in xa_lock and spinlock */
3560 local_irq_disable();
3561 if (mapping) {
3563 * Check if the folio is present in the page cache.
3564 * We assume all tail pages are present too, if the folio is there.
3566 xas_lock(&xas);
3567 xas_reset(&xas);
3568 if (xas_load(&xas) != folio)
3569 goto fail;
3572 /* Prevent deferred_split_scan() touching ->_refcount */
3573 spin_lock(&ds_queue->split_queue_lock);
3574 if (folio_ref_freeze(folio, 1 + extra_pins)) {
3575 if (folio_order(folio) > 1 &&
3576 !list_empty(&folio->_deferred_list)) {
3577 ds_queue->split_queue_len--;
3578 if (folio_test_partially_mapped(folio)) {
3579 __folio_clear_partially_mapped(folio);
3580 mod_mthp_stat(folio_order(folio),
3581 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3584 * Reinitialize page_deferred_list after removing the
3585 * page from the split_queue, otherwise a subsequent
3586 * split will see list corruption when checking the
3587 * page_deferred_list.
3589 list_del_init(&folio->_deferred_list);
3591 spin_unlock(&ds_queue->split_queue_lock);
3592 if (mapping) {
3593 int nr = folio_nr_pages(folio);
3595 xas_split(&xas, folio, folio_order(folio));
3596 if (folio_test_pmd_mappable(folio) &&
3597 new_order < HPAGE_PMD_ORDER) {
3598 if (folio_test_swapbacked(folio)) {
3599 __lruvec_stat_mod_folio(folio,
3600 NR_SHMEM_THPS, -nr);
3601 } else {
3602 __lruvec_stat_mod_folio(folio,
3603 NR_FILE_THPS, -nr);
3604 filemap_nr_thps_dec(mapping);
3609 if (is_anon) {
3610 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
3611 mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
3613 __split_huge_page(page, list, end, new_order);
3614 ret = 0;
3615 } else {
3616 spin_unlock(&ds_queue->split_queue_lock);
3617 fail:
3618 if (mapping)
3619 xas_unlock(&xas);
3620 local_irq_enable();
3621 remap_page(folio, folio_nr_pages(folio), 0);
3622 ret = -EAGAIN;
3625 out_unlock:
3626 if (anon_vma) {
3627 anon_vma_unlock_write(anon_vma);
3628 put_anon_vma(anon_vma);
3630 if (mapping)
3631 i_mmap_unlock_read(mapping);
3632 out:
3633 xas_destroy(&xas);
3634 if (order == HPAGE_PMD_ORDER)
3635 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3636 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
3637 return ret;
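
/*
 * Example usage (a minimal sketch; the split_locked_folio_example() helper is
 * hypothetical): a caller of the interface documented above must hold the
 * folio lock and a folio reference; a zero return means the split succeeded
 * and, with list == NULL, the tail folios went to the LRU.
 */
static inline int split_locked_folio_example(struct folio *folio)
{
	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	return split_huge_page_to_list_to_order(&folio->page, NULL, 0);
}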
3640 int min_order_for_split(struct folio *folio)
3642 if (folio_test_anon(folio))
3643 return 0;
3645 if (!folio->mapping) {
3646 if (folio_test_pmd_mappable(folio))
3647 count_vm_event(THP_SPLIT_PAGE_FAILED);
3648 return -EBUSY;
3651 return mapping_min_folio_order(folio->mapping);
3654 int split_folio_to_list(struct folio *folio, struct list_head *list)
3656 int ret = min_order_for_split(folio);
3658 if (ret < 0)
3659 return ret;
3661 return split_huge_page_to_list_to_order(&folio->page, list, ret);
3665 * __folio_unqueue_deferred_split() is not to be called directly:
3666 * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
3667 * limits its calls to those folios which may have a _deferred_list for
3668 * queueing THP splits, and that list is (racily observed to be) non-empty.
3670 * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
3671 * zero: because even when split_queue_lock is held, a non-empty _deferred_list
3672 * might be in use on deferred_split_scan()'s unlocked on-stack list.
3674 * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
3675 * therefore important to unqueue deferred split before changing folio memcg.
3677 bool __folio_unqueue_deferred_split(struct folio *folio)
3679 struct deferred_split *ds_queue;
3680 unsigned long flags;
3681 bool unqueued = false;
3683 WARN_ON_ONCE(folio_ref_count(folio));
3684 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio));
3686 ds_queue = get_deferred_split_queue(folio);
3687 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3688 if (!list_empty(&folio->_deferred_list)) {
3689 ds_queue->split_queue_len--;
3690 if (folio_test_partially_mapped(folio)) {
3691 __folio_clear_partially_mapped(folio);
3692 mod_mthp_stat(folio_order(folio),
3693 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3695 list_del_init(&folio->_deferred_list);
3696 unqueued = true;
3698 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3700 return unqueued; /* useful for debug warnings */
3703 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
3704 void deferred_split_folio(struct folio *folio, bool partially_mapped)
3706 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3707 #ifdef CONFIG_MEMCG
3708 struct mem_cgroup *memcg = folio_memcg(folio);
3709 #endif
3710 unsigned long flags;
3713 * Order 1 folios have no space for a deferred list, but we also
3714 * won't waste much memory by not adding them to the deferred list.
3716 if (folio_order(folio) <= 1)
3717 return;
3719 if (!partially_mapped && !split_underused_thp)
3720 return;
3723 * Exclude swapcache: originally to avoid a corrupt deferred split
3724 * queue. Nowadays that is fully prevented by mem_cgroup_swapout();
3725 * but if page reclaim is already handling the same folio, it is
3726 * unnecessary to handle it again in the shrinker, so excluding
3727 * swapcache here may still be a useful optimization.
3729 if (folio_test_swapcache(folio))
3730 return;
3732 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3733 if (partially_mapped) {
3734 if (!folio_test_partially_mapped(folio)) {
3735 __folio_set_partially_mapped(folio);
3736 if (folio_test_pmd_mappable(folio))
3737 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3738 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
3739 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
3742 } else {
3743 /* partially mapped folios cannot become non-partially mapped */
3744 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
3746 if (list_empty(&folio->_deferred_list)) {
3747 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3748 ds_queue->split_queue_len++;
3749 #ifdef CONFIG_MEMCG
3750 if (memcg)
3751 set_shrinker_bit(memcg, folio_nid(folio),
3752 deferred_split_shrinker->id);
3753 #endif
3755 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
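
/*
 * Example (illustrative sketch; the deferred_split_folio_example() helper is
 * hypothetical and the rmap-side caller context is assumed): once an anonymous
 * large folio is noticed to be only partially mapped, e.g. after one of its
 * PMD mappings has been zapped, it is queued here so the shrinker can split it
 * under memory pressure.
 */
static inline void deferred_split_folio_example(struct folio *folio)
{
	if (folio_test_anon(folio) && folio_test_large(folio))
		deferred_split_folio(folio, true);
}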
3758 static unsigned long deferred_split_count(struct shrinker *shrink,
3759 struct shrink_control *sc)
3761 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3762 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3764 #ifdef CONFIG_MEMCG
3765 if (sc->memcg)
3766 ds_queue = &sc->memcg->deferred_split_queue;
3767 #endif
3768 return READ_ONCE(ds_queue->split_queue_len);
3771 static bool thp_underused(struct folio *folio)
3773 int num_zero_pages = 0, num_filled_pages = 0;
3774 void *kaddr;
3775 int i;
3777 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
3778 return false;
3780 for (i = 0; i < folio_nr_pages(folio); i++) {
3781 kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
3782 if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
3783 num_zero_pages++;
3784 if (num_zero_pages > khugepaged_max_ptes_none) {
3785 kunmap_local(kaddr);
3786 return true;
3788 } else {
3790 * Another path for early exit, once the number
3791 * of non-zero-filled pages exceeds the threshold.
3793 num_filled_pages++;
3794 if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
3795 kunmap_local(kaddr);
3796 return false;
3799 kunmap_local(kaddr);
3801 return false;
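
/*
 * Example (a minimal sketch; the subpage_is_zero_filled_example() helper is
 * hypothetical): memchr_inv() is the primitive used above. It returns NULL
 * only when the whole range equals the given byte, so a NULL result for a
 * PAGE_SIZE scan means the subpage is entirely zero-filled.
 */
static inline bool subpage_is_zero_filled_example(struct folio *folio, long idx)
{
	void *kaddr = kmap_local_folio(folio, idx * PAGE_SIZE);
	bool zero = !memchr_inv(kaddr, 0, PAGE_SIZE);

	kunmap_local(kaddr);
	return zero;
}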
3804 static unsigned long deferred_split_scan(struct shrinker *shrink,
3805 struct shrink_control *sc)
3807 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3808 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3809 unsigned long flags;
3810 LIST_HEAD(list);
3811 struct folio *folio, *next, *prev = NULL;
3812 int split = 0, removed = 0;
3814 #ifdef CONFIG_MEMCG
3815 if (sc->memcg)
3816 ds_queue = &sc->memcg->deferred_split_queue;
3817 #endif
3819 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3820 /* Take pin on all head pages to avoid freeing them under us */
3821 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3822 _deferred_list) {
3823 if (folio_try_get(folio)) {
3824 list_move(&folio->_deferred_list, &list);
3825 } else {
3826 /* We lost race with folio_put() */
3827 if (folio_test_partially_mapped(folio)) {
3828 __folio_clear_partially_mapped(folio);
3829 mod_mthp_stat(folio_order(folio),
3830 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3832 list_del_init(&folio->_deferred_list);
3833 ds_queue->split_queue_len--;
3835 if (!--sc->nr_to_scan)
3836 break;
3838 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3840 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3841 bool did_split = false;
3842 bool underused = false;
3844 if (!folio_test_partially_mapped(folio)) {
3845 underused = thp_underused(folio);
3846 if (!underused)
3847 goto next;
3849 if (!folio_trylock(folio))
3850 goto next;
3851 if (!split_folio(folio)) {
3852 did_split = true;
3853 if (underused)
3854 count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
3855 split++;
3857 folio_unlock(folio);
3858 next:
3859 /*
3860 * split_folio() removes folio from list on success.
3861 * Only add back to the queue if folio is partially mapped.
3862 * If thp_underused returns false, or if split_folio fails
3863 * in the case it was underused, then consider it used and
3864 * don't add it back to split_queue.
3865 */
3866 if (did_split) {
3867 ; /* folio already removed from list */
3868 } else if (!folio_test_partially_mapped(folio)) {
3869 list_del_init(&folio->_deferred_list);
3870 removed++;
3871 } else {
3872 /*
3873 * That unlocked list_del_init() above would be unsafe,
3874 * unless its folio is separated from any earlier folios
3875 * left on the list (which may be concurrently unqueued)
3876 * by one safe folio with refcount still raised.
3877 */
3878 swap(folio, prev);
3879 }
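/*
 * Drop the reference taken under the lock: either on the folio just
 * handled or, after the swap above, on the previous sentinel. The
 * current sentinel stays pinned via 'prev' until after the loop.
 */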
3880 if (folio)
3881 folio_put(folio);
3882 }
3884 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3885 list_splice_tail(&list, &ds_queue->split_queue);
3886 ds_queue->split_queue_len -= removed;
3887 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3889 if (prev)
3890 folio_put(prev);
3892 /*
3893 * Stop the shrinker if we didn't split any page and the queue is now empty.
3894 * This can happen if the pages were freed under us.
3895 */
3896 if (!split && list_empty(&ds_queue->split_queue))
3897 return SHRINK_STOP;
3898 return split;
3899 }
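/*
 * Illustrative userspace sketch (not part of this file): the events counted
 * in the deferred-split path show up in /proc/vmstat; the counter names are
 * assumed to follow the usual lowercase naming, e.g. thp_deferred_split_page
 * and thp_underused_split_page.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "thp_", 4))		/* print every THP-related counter */
			fputs(line, stdout);
	fclose(f);
	return 0;
}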
3901 #ifdef CONFIG_DEBUG_FS
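/*
 * Debugfs helper: walk every online pfn of each managed zone and try to
 * split every large, non-hugetlb folio that sits on the LRU.
 */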
3902 static void split_huge_pages_all(void)
3903 {
3904 struct zone *zone;
3905 struct page *page;
3906 struct folio *folio;
3907 unsigned long pfn, max_zone_pfn;
3908 unsigned long total = 0, split = 0;
3910 pr_debug("Split all THPs\n");
3911 for_each_zone(zone) {
3912 if (!managed_zone(zone))
3913 continue;
3914 max_zone_pfn = zone_end_pfn(zone);
3915 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3916 int nr_pages;
3918 page = pfn_to_online_page(pfn);
3919 if (!page || PageTail(page))
3920 continue;
3921 folio = page_folio(page);
3922 if (!folio_try_get(folio))
3923 continue;
3925 if (unlikely(page_folio(page) != folio))
3926 goto next;
3928 if (zone != folio_zone(folio))
3929 goto next;
3931 if (!folio_test_large(folio)
3932 || folio_test_hugetlb(folio)
3933 || !folio_test_lru(folio))
3934 goto next;
3936 total++;
3937 folio_lock(folio);
3938 nr_pages = folio_nr_pages(folio);
3939 if (!split_folio(folio))
3940 split++;
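/* together with the loop's pfn++, this skips the remaining pages of the folio */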
3941 pfn += nr_pages - 1;
3942 folio_unlock(folio);
3943 next:
3944 folio_put(folio);
3945 cond_resched();
3946 }
3947 }
3949 pr_debug("%lu of %lu THP split\n", split, total);
3950 }
3952 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3953 {
3954 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3955 is_vm_hugetlb_page(vma);
3956 }
3958 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3959 unsigned long vaddr_end, unsigned int new_order)
3960 {
3961 int ret = 0;
3962 struct task_struct *task;
3963 struct mm_struct *mm;
3964 unsigned long total = 0, split = 0;
3965 unsigned long addr;
3967 vaddr_start &= PAGE_MASK;
3968 vaddr_end &= PAGE_MASK;
3970 task = find_get_task_by_vpid(pid);
3971 if (!task) {
3972 ret = -ESRCH;
3973 goto out;
3974 }
3976 /* Find the mm_struct */
3977 mm = get_task_mm(task);
3978 put_task_struct(task);
3980 if (!mm) {
3981 ret = -EINVAL;
3982 goto out;
3983 }
3985 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3986 pid, vaddr_start, vaddr_end);
3988 mmap_read_lock(mm);
3989 /*
3990 * always increase addr by PAGE_SIZE, since we could have a PTE page
3991 * table filled with PTE-mapped THPs, each of which is distinct.
3992 */
3993 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3994 struct vm_area_struct *vma = vma_lookup(mm, addr);
3995 struct folio_walk fw;
3996 struct folio *folio;
3997 struct address_space *mapping;
3998 unsigned int target_order = new_order;
4000 if (!vma)
4001 break;
4003 /* skip special VMA and hugetlb VMA */
4004 if (vma_not_suitable_for_thp_split(vma)) {
4005 addr = vma->vm_end;
4006 continue;
4007 }
4009 folio = folio_walk_start(&fw, vma, addr, 0);
4010 if (!folio)
4011 continue;
4013 if (!is_transparent_hugepage(folio))
4014 goto next;
4016 if (!folio_test_anon(folio)) {
4017 mapping = folio->mapping;
4018 target_order = max(new_order,
4019 mapping_min_folio_order(mapping));
4020 }
4022 if (target_order >= folio_order(folio))
4023 goto next;
4025 total++;
4026 /*
4027 * For folios with private data, split_huge_page_to_list_to_order()
4028 * will try to drop it before the split and then check whether the folio
4029 * can be split. So skip the check here.
4030 */
4031 if (!folio_test_private(folio) &&
4032 !can_split_folio(folio, 0, NULL))
4033 goto next;
4035 if (!folio_trylock(folio))
4036 goto next;
4037 folio_get(folio);
4038 folio_walk_end(&fw, vma);
4040 if (!folio_test_anon(folio) && folio->mapping != mapping)
4041 goto unlock;
4043 if (!split_folio_to_order(folio, target_order))
4044 split++;
4046 unlock:
4048 folio_unlock(folio);
4049 folio_put(folio);
4051 cond_resched();
4052 continue;
4053 next:
4054 folio_walk_end(&fw, vma);
4055 cond_resched();
4056 }
4057 mmap_read_unlock(mm);
4058 mmput(mm);
4060 pr_debug("%lu of %lu THP split\n", split, total);
4062 out:
4063 return ret;
4064 }
4066 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
4067 pgoff_t off_end, unsigned int new_order)
4068 {
4069 struct filename *file;
4070 struct file *candidate;
4071 struct address_space *mapping;
4072 int ret = -EINVAL;
4073 pgoff_t index;
4074 int nr_pages = 1;
4075 unsigned long total = 0, split = 0;
4076 unsigned int min_order;
4077 unsigned int target_order;
4079 file = getname_kernel(file_path);
4080 if (IS_ERR(file))
4081 return ret;
4083 candidate = file_open_name(file, O_RDONLY, 0);
4084 if (IS_ERR(candidate))
4085 goto out;
4087 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
4088 file_path, off_start, off_end);
4090 mapping = candidate->f_mapping;
4091 min_order = mapping_min_folio_order(mapping);
4092 target_order = max(new_order, min_order);
4094 for (index = off_start; index < off_end; index += nr_pages) {
4095 struct folio *folio = filemap_get_folio(mapping, index);
4097 nr_pages = 1;
4098 if (IS_ERR(folio))
4099 continue;
4101 if (!folio_test_large(folio))
4102 goto next;
4104 total++;
4105 nr_pages = folio_nr_pages(folio);
4107 if (target_order >= folio_order(folio))
4108 goto next;
4110 if (!folio_trylock(folio))
4111 goto next;
4113 if (folio->mapping != mapping)
4114 goto unlock;
4116 if (!split_folio_to_order(folio, target_order))
4117 split++;
4119 unlock:
4120 folio_unlock(folio);
4121 next:
4122 folio_put(folio);
4123 cond_resched();
4124 }
4126 filp_close(candidate, NULL);
4127 ret = 0;
4129 pr_debug("%lu of %lu file-backed THP split\n", split, total);
4130 out:
4131 putname(file);
4132 return ret;
4133 }
4135 #define MAX_INPUT_BUF_SZ 255
4137 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4138 size_t count, loff_t *ppops)
4139 {
4140 static DEFINE_MUTEX(split_debug_mutex);
4141 ssize_t ret;
4142 /*
4143 * hold pid, start_vaddr, end_vaddr, new_order or
4144 * file_path, off_start, off_end, new_order
4145 */
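/*
 * Example inputs (illustrative values only):
 *   "1234,0x7f3800000000,0x7f3800200000"    - split THPs in that range of pid 1234
 *   "1234,0x7f3800000000,0x7f3800200000,4"  - same, but split down to order 4
 *   "/tmp/testfile,0x0,0x200"               - split file-backed THPs by page offset
 *   "1"                                     - split all THPs system-wide
 */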
4146 char input_buf[MAX_INPUT_BUF_SZ];
4147 int pid;
4148 unsigned long vaddr_start, vaddr_end;
4149 unsigned int new_order = 0;
4151 ret = mutex_lock_interruptible(&split_debug_mutex);
4152 if (ret)
4153 return ret;
4155 ret = -EFAULT;
4157 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4158 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4159 goto out;
4161 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4163 if (input_buf[0] == '/') {
4164 char *tok;
4165 char *buf = input_buf;
4166 char file_path[MAX_INPUT_BUF_SZ];
4167 pgoff_t off_start = 0, off_end = 0;
4168 size_t input_len = strlen(input_buf);
4170 tok = strsep(&buf, ",");
4171 if (tok) {
4172 strscpy(file_path, tok);
4173 } else {
4174 ret = -EINVAL;
4175 goto out;
4176 }
4178 ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
4179 if (ret != 2 && ret != 3) {
4180 ret = -EINVAL;
4181 goto out;
4182 }
4183 ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
4184 if (!ret)
4185 ret = input_len;
4187 goto out;
4188 }
4190 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
4191 if (ret == 1 && pid == 1) {
4192 split_huge_pages_all();
4193 ret = strlen(input_buf);
4194 goto out;
4195 } else if (ret != 3 && ret != 4) {
4196 ret = -EINVAL;
4197 goto out;
4198 }
4200 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
4201 if (!ret)
4202 ret = strlen(input_buf);
4203 out:
4204 mutex_unlock(&split_debug_mutex);
4205 return ret;
4206 }
4209 static const struct file_operations split_huge_pages_fops = {
4210 .owner = THIS_MODULE,
4211 .write = split_huge_pages_write,
4212 };
4214 static int __init split_huge_pages_debugfs(void)
4215 {
4216 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4217 &split_huge_pages_fops);
4218 return 0;
4219 }
4220 late_initcall(split_huge_pages_debugfs);
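/*
 * Illustrative userspace sketch (not part of this file): exercise the debugfs
 * file registered above. Assumes debugfs is mounted at /sys/kernel/debug and
 * that the caller has the privileges to write to it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* e.g. "1" (split all) or "1234,0x7f3800000000,0x7f3800200000,0" */
	const char *cmd = argc > 1 ? argv[1] : "1";
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");		/* the kernel parses the string as shown above */
	close(fd);
	return 0;
}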
4221 #endif
4223 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
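/*
 * Replace a present PMD mapping of @page with a PMD migration entry,
 * preserving the writable/anon-exclusive, young, dirty, soft-dirty and
 * uffd-wp state. Returns -EBUSY if an anon-exclusive page cannot be
 * converted to a shared mapping (e.g. because it may be pinned).
 */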
4224 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4225 struct page *page)
4226 {
4227 struct folio *folio = page_folio(page);
4228 struct vm_area_struct *vma = pvmw->vma;
4229 struct mm_struct *mm = vma->vm_mm;
4230 unsigned long address = pvmw->address;
4231 bool anon_exclusive;
4232 pmd_t pmdval;
4233 swp_entry_t entry;
4234 pmd_t pmdswp;
4236 if (!(pvmw->pmd && !pvmw->pte))
4237 return 0;
4239 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4240 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4242 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4243 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
4244 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
4245 set_pmd_at(mm, address, pvmw->pmd, pmdval);
4246 return -EBUSY;
4247 }
4249 if (pmd_dirty(pmdval))
4250 folio_mark_dirty(folio);
4251 if (pmd_write(pmdval))
4252 entry = make_writable_migration_entry(page_to_pfn(page));
4253 else if (anon_exclusive)
4254 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4255 else
4256 entry = make_readable_migration_entry(page_to_pfn(page));
4257 if (pmd_young(pmdval))
4258 entry = make_migration_entry_young(entry);
4259 if (pmd_dirty(pmdval))
4260 entry = make_migration_entry_dirty(entry);
4261 pmdswp = swp_entry_to_pmd(entry);
4262 if (pmd_soft_dirty(pmdval))
4263 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
4264 if (pmd_uffd_wp(pmdval))
4265 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
4266 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
4267 folio_remove_rmap_pmd(folio, page, vma);
4268 folio_put(folio);
4269 trace_set_migration_pmd(address, pmd_val(pmdswp));
4271 return 0;
4272 }
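/*
 * Counterpart of set_pmd_migration_entry(): turn the PMD migration entry
 * back into a present PMD pointing at @new, restoring the saved writable,
 * young, dirty, soft-dirty and uffd-wp bits.
 */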
4274 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
4275 {
4276 struct folio *folio = page_folio(new);
4277 struct vm_area_struct *vma = pvmw->vma;
4278 struct mm_struct *mm = vma->vm_mm;
4279 unsigned long address = pvmw->address;
4280 unsigned long haddr = address & HPAGE_PMD_MASK;
4281 pmd_t pmde;
4282 swp_entry_t entry;
4284 if (!(pvmw->pmd && !pvmw->pte))
4285 return;
4287 entry = pmd_to_swp_entry(*pvmw->pmd);
4288 folio_get(folio);
4289 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
4290 if (pmd_swp_soft_dirty(*pvmw->pmd))
4291 pmde = pmd_mksoft_dirty(pmde);
4292 if (is_writable_migration_entry(entry))
4293 pmde = pmd_mkwrite(pmde, vma);
4294 if (pmd_swp_uffd_wp(*pvmw->pmd))
4295 pmde = pmd_mkuffd_wp(pmde);
4296 if (!is_migration_entry_young(entry))
4297 pmde = pmd_mkold(pmde);
4298 /* NOTE: making the PMD dirty may also set soft-dirty on some archs */
4299 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
4300 pmde = pmd_mkdirty(pmde);
4302 if (folio_test_anon(folio)) {
4303 rmap_t rmap_flags = RMAP_NONE;
4305 if (!is_readable_migration_entry(entry))
4306 rmap_flags |= RMAP_EXCLUSIVE;
4308 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
4309 } else {
4310 folio_add_file_rmap_pmd(folio, new, vma);
4311 }
4312 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
4313 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
4315 /* No need to invalidate - it was non-present before */
4316 update_mmu_cache_pmd(vma, address, pvmw->pmd);
4317 trace_remove_migration_pmd(address, pmd_val(pmde));
4318 }
4319 #endif