/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34
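
/*
 * These are simply the log2 values of the supported huge page sizes:
 * 1 << 16 = 64K, 1 << 19 = 512K, 1 << 23 = 8M, 1 << 24 = 16M and
 * 1 << 34 = 16G.
 */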

bool hugetlb_disabled = false;

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)
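
/*
 * A hugepd entry lives at the PGD/PUD/PMD level but, instead of pointing
 * to a normal lower-level table, points to a directory of huge page PTEs;
 * see the "4 cases for pgds and pmds" comment above __find_linux_pte()
 * below.  An all-zero value means no hugepd has been installed yet.
 */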

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /* Only called for hugetlbfs pages, hence THP can be ignored here. */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = hugepte_cache;
                num_hugepd = 1 << (pshift - pdshift);
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;
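
        /*
         * The alignment BUG_ON above matters because the low
         * HUGEPD_SHIFT_MASK bits of a hugepd value are used to encode
         * size/format information (see the per-subarch __hugepd()
         * construction below), so they must be clear in the table's
         * address itself.
         */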

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look at the comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
#ifdef CONFIG_PPC_BOOK3S_64
                *hpdp = __hugepd(__pa(new) |
                                 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
                *hpdp = __hugepd(__pa(new) | _PMD_USER |
                                 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
                                  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
                /* We use the old format for PPC_FSL_BOOK3E */
                *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
        }

        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(ptl);
        return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
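
/*
 * huge_pte_alloc() below compares the huge page shift against these
 * thresholds to decide whether the hugepd hangs off a PGD, PUD or PMD
 * entry on the non-Book3S-64 layouts.
 */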

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        pg = pgd_offset(mm, addr);
#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *)pg;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use a hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#else
        if (pshift >= HUGEPD_PGD_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;
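
/*
 * gpage_freearray[] holds the physical addresses of the gigantic pages
 * handed over via the device tree; pseries_alloc_bootmem_huge_page()
 * below pops entries off it to seed the generic huge_boot_pages list.
 */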

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                addr += page_size;
                number_of_pages--;
        }
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}

int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
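
/*
 * Rough sizing example (illustrative only): with a 4K PAGE_SIZE and an
 * 8-byte pte_t, HUGEPD_FREELIST_SIZE works out to roughly
 * (4096 - sizeof(struct hugepd_freelist)) / 8, i.e. around 500 hugepte
 * table pointers per batch page.
 */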

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(hugepte_cache, hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
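
/*
 * hugepd_free() above batches and defers the actual freeing through
 * call_rcu_sched() because lockless walkers such as __find_linux_pte()
 * below (which runs with interrupts disabled) may still be traversing
 * a hugepte table; hugepd_free_rcu_callback() only releases the memory
 * after a grace period.
 */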
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * If it is not a hugepd pointer, we should already
                         * find it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't use here the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * hugepage directory entries are protected by mm->page_table_lock.
         * Use this instead of huge_pte_lockptr.
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz - 1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}
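
/*
 * hugepte_addr_end() above rounds addr up to the next sz-aligned boundary
 * and clamps it to end; the "- 1" comparison keeps the test safe when the
 * boundary wraps to 0 at the top of the address space.  For example, with
 * sz = 16M, addr = 0x01001000 and end = 0x04000000, the boundary is
 * 0x02000000 and that is what gets returned.
 */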

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                        pgoff, flags);
#endif
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        /* With radix we don't use slices, so derive it from the VMA. */
        if (!radix_enabled()) {
                unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

                return 1UL << mmu_psize_to_shift(psize);
        }
#endif
        return vma_kernel_pagesize(vma);
}

static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}
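
/*
 * A power of 4 is a power of 2 with an even log2: 4M (1 << 22) qualifies,
 * 8M (1 << 23) does not.  add_huge_page_size() below relies on this for
 * FSL_BOOK3E, where the hardware TLB page sizes are powers of 4.
 */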

static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE)
                return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
        if (!is_power_of_4(size))
                return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
        if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We need to make sure that for different page sizes reported by
         * firmware we only add hugetlb support for page sizes that can be
         * supported by linux page table layout.
         */
        if (radix_enabled()) {
                if (mmu_psize != MMU_PAGE_2M) {
                        if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
                            (mmu_psize != MMU_PAGE_1G))
                                return -EINVAL;
                }
        } else {
                if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
                        return -EINVAL;
        }
#endif

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified (%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
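
/*
 * Example usage on the kernel command line (illustrative):
 *
 *	hugepagesz=16M hugepages=128
 *
 * The size is parsed and validated here; the generic hugetlb code then
 * reserves the requested pool.
 */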

struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
        if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;
#endif
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < HUGEPD_PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < HUGEPD_PGD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * If pdshift and shift are the same, we don't use the
                 * pgtable cache for the hugepd.
                 */
                if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
                         * Create a kmem cache for hugeptes.  The bottom bits
                         * in the pte have size information encoded in them,
                         * so align them to allow this.
                         */
                        hugepte_cache = kmem_cache_create("hugepte-cache",
                                                          sizeof(pte_t),
                                                          HUGEPD_SHIFT_MASK + 1,
                                                          0, NULL);
                        if (hugepte_cache == NULL)
                                panic("%s: Unable to create kmem cache "
                                      "for hugeptes\n", __func__);
                }
#endif
        }

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
        /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else if (mmu_psize_defs[MMU_PAGE_512K].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif

        return 0;
}

arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page + i));
                } else {
                        start = kmap_atomic(page + i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.  This function needs to be called with interrupts disabled.  We
 * use this variant when we have MSR[EE] = 0 but the
 * paca->irq_soft_mask = IRQS_ENABLED.
 */
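
/*
 * A typical (illustrative) calling pattern, with interrupts disabled by
 * the caller for the lifetime of the returned pointer:
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		... use *ptep while interrupts stay disabled ...
 *	local_irq_restore(flags);
 */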
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                        bool *is_thp, unsigned *hpage_shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (hpage_shift)
                *hpage_shift = 0;

        if (is_thp)
                *is_thp = false;

        pgdp = pgdir + pgd_index(ea);
        pgd  = READ_ONCE(*pgdp);
        /*
         * Always operate on the local stack value.  This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or page unmap.  The returned pte_t * is still not
         * stable, so the caller must re-check it for those conditions.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *) pgdp;
                goto out;
        } else if (is_hugepd(__hugepd(pgd_val(pgd))))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled.
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud  = READ_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *) pudp;
                        goto out;
                } else if (is_hugepd(__hugepd(pud_val(pud))))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd  = READ_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none,
                         * because it marks the pmd none and does a hpte
                         * invalidate.
                         */
                        if (pmd_none(pmd))
                                return NULL;

                        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                                if (is_thp)
                                        *is_thp = true;
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        }

                        if (pmd_huge(pmd) || pmd_large(pmd)) {
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        } else if (is_hugepd(__hugepd(pmd_val(pmd))))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }

        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(*hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (hpage_shift)
                *hpage_shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz - 1);
        if (pte_end < end)
                end = pte_end;

        pte = READ_ONCE(*ptep);

        if (!pte_access_permitted(pte, write))
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz - 1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */