// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        struct hstate *h = hstate_file(filp);
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        /* 32-bit tasks are confined to the compat address space. */
        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        /* Force the returned address to be huge page aligned. */
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                /* Retry above the VA hole for 64-bit tasks. */
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
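
/* Top-down variant: search below mm->mmap_base with the same huge page
 * alignment mask.  As the BUG_ON below documents, sparc64 only uses the
 * top-down layout for 32-bit (compat) tasks.
 */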
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct hstate *h = hstate_file(filp);
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
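
/* Main entry point for hugepage placement: validate the length, honor
 * MAP_FIXED and any address hint, then dispatch to the bottom-up or
 * top-down search depending on which layout this mm uses.
 */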
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        /* No additional size encoding is needed for sun4u here. */
        return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        unsigned long hugepage_size = _PAGE_SZ4MB_4V;

        pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

        switch (shift) {
        case HPAGE_16GB_SHIFT:
                hugepage_size = _PAGE_SZ16GB_4V;
                pte_val(entry) |= _PAGE_PUD_HUGE;
                break;
        case HPAGE_2GB_SHIFT:
                hugepage_size = _PAGE_SZ2GB_4V;
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_256MB_SHIFT:
                hugepage_size = _PAGE_SZ256MB_4V;
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_SHIFT:
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_64K_SHIFT:
                hugepage_size = _PAGE_SZ64K_4V;
                break;
        default:
                WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
        }

        pte_val(entry) = pte_val(entry) | hugepage_size;
        return entry;
}
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        if (tlb_type == hypervisor)
                return sun4v_hugepage_shift_to_tte(entry, shift);
        else
                return sun4u_hugepage_shift_to_tte(entry, shift);
}
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                         struct page *page, int writeable)
{
        unsigned int shift = huge_page_shift(hstate_vma(vma));
        pte_t pte;

        pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
        /* If this vma has ADI enabled on it, turn on TTE.mcd
         */
        if (vma->vm_flags & VM_SPARC_ADI)
                return pte_mkmcd(pte);
        else
                return pte_mknotmcd(pte);
#else
        return pte;
#endif
}
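
/* The reverse mapping: recover the page size shift from a huge TTE's
 * size-field bits.  Falling back to PAGE_SHIFT means the bits did not
 * match any supported hugepage size, which huge_tte_to_shift() below
 * warns about.
 */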
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
        unsigned int shift;

        switch (tte_szbits) {
        case _PAGE_SZ16GB_4V:
                shift = HPAGE_16GB_SHIFT;
                break;
        case _PAGE_SZ2GB_4V:
                shift = HPAGE_2GB_SHIFT;
                break;
        case _PAGE_SZ256MB_4V:
                shift = HPAGE_256MB_SHIFT;
                break;
        case _PAGE_SZ4MB_4V:
                shift = REAL_HPAGE_SHIFT;
                break;
        case _PAGE_SZ64K_4V:
                shift = HPAGE_64K_SHIFT;
                break;
        default:
                shift = PAGE_SHIFT;
                break;
        }
        return shift;
}
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
        unsigned int shift;

        switch (tte_szbits) {
        case _PAGE_SZ256MB_4U:
                shift = HPAGE_256MB_SHIFT;
                break;
        case _PAGE_SZ4MB_4U:
                shift = REAL_HPAGE_SHIFT;
                break;
        case _PAGE_SZ64K_4U:
                shift = HPAGE_64K_SHIFT;
                break;
        default:
                shift = PAGE_SHIFT;
                break;
        }
        return shift;
}
static unsigned int huge_tte_to_shift(pte_t entry)
{
        unsigned long shift;

        if (tlb_type == hypervisor)
                shift = sun4v_huge_tte_to_shift(entry);
        else
                shift = sun4u_huge_tte_to_shift(entry);

        if (shift == PAGE_SHIFT)
                WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
                          pte_val(entry));

        return shift;
}
static unsigned long huge_tte_to_size(pte_t pte)
{
        unsigned long size = 1UL << huge_tte_to_shift(pte);

        /* A REAL_HPAGE_SIZE'ed TTE always backs half of an HPAGE_SIZE'ed page. */
        if (size == REAL_HPAGE_SIZE)
                size = HPAGE_SIZE;
        return size;
}
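
/* Huge mappings live at different page table levels depending on their
 * size: 16GB entries sit directly in the PUD, 2GB/256MB/8MB entries in
 * the PMD, and 64K entries in an ordinary PTE page.  huge_pte_alloc()
 * and huge_pte_offset() return a pointer to whichever level applies.
 */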
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;
        if (sz >= PUD_SIZE)
                return (pte_t *)pud;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;
        if (sz >= PMD_SIZE)
                return (pte_t *)pmd;
        return pte_alloc_map(mm, pmd, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return NULL;
        if (is_hugetlb_pud(*pud))
                return (pte_t *)pud;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        if (is_hugetlb_pmd(*pmd))
                return (pte_t *)pmd;
        return pte_offset_map(pmd, addr);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        unsigned int nptes, orig_shift, shift;
        unsigned long i, size;
        pte_t orig;

        size = huge_tte_to_size(entry);

        if (size >= PUD_SIZE)
                shift = PUD_SHIFT;
        else if (size >= PMD_SIZE)
                shift = PMD_SHIFT;
        else
                shift = PAGE_SHIFT;

        nptes = size >> shift;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.hugetlb_pte_count += nptes;

        addr &= ~(size - 1);
        orig = *ptep;
        orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

        for (i = 0; i < nptes; i++)
                ptep[i] = __pte(pte_val(entry) + (i << shift));

        maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
        if (size == HPAGE_SIZE)
                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
                                    orig_shift);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned int i, nptes, orig_shift, shift;
        unsigned long size;
        pte_t entry;

        entry = *ptep;
        size = huge_tte_to_size(entry);

        if (size >= PUD_SIZE)
                shift = PUD_SHIFT;
        else if (size >= PMD_SIZE)
                shift = PMD_SHIFT;
        else
                shift = PAGE_SHIFT;

        nptes = size >> shift;
        orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

        if (pte_present(entry))
                mm->context.hugetlb_pte_count -= nptes;

        addr &= ~(size - 1);
        for (i = 0; i < nptes; i++)
                ptep[i] = __pte(0UL);

        maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
        if (size == HPAGE_SIZE)
                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
                                    orig_shift);

        return entry;
}
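
/* pmd_huge()/pud_huge(): a non-none entry is hugetlb-related unless it
 * is a present entry without the corresponding *_HUGE bit; this also
 * covers non-present (e.g. migration) huge entries.
 */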
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}
int pud_huge(pud_t pud)
{
        return !pud_none(pud) &&
                (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}
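
/* The hugetlb_free_*_range() helpers mirror the generic free_pgd_range()
 * walk in mm/memory.c: descend the page table levels, simply clear
 * levels that hold a huge mapping, and free intermediate tables only
 * when the span between floor and ceiling allows it.
 */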
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                if (is_hugetlb_pmd(*pmd))
                        pmd_clear(pmd);
                else
                        hugetlb_free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (is_hugetlb_pud(*pud))
                        pud_clear(pud);
                else
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;

        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}