arch/sparc/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        struct hstate *h = hstate_file(filp);
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

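/* Top-down variant, used for 32-bit tasks: search down from mmap_base,
 * falling back to a bottom-up search below STACK_TOP32 if that fails.
 */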
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct hstate *h = hstate_file(filp);
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

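/* The hugetlb counterpart of arch_get_unmapped_area(): validate length
 * and alignment, honour MAP_FIXED and any address hint, then fall back
 * to whichever search direction the mm uses for ordinary mappings.
 */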
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

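/* Encode a hugepage size (given as a page shift) into the TTE.  On
 * sun4v the size lives in the _PAGE_SZALL_4V field, with _PAGE_PMD_HUGE
 * or _PAGE_PUD_HUGE marking the page-table level of the entry; the
 * sun4u variant returns the entry unchanged, its size bits presumably
 * being set up elsewhere.
 */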
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        unsigned long hugepage_size = _PAGE_SZ4MB_4V;

        pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

        switch (shift) {
        case HPAGE_16GB_SHIFT:
                hugepage_size = _PAGE_SZ16GB_4V;
                pte_val(entry) |= _PAGE_PUD_HUGE;
                break;
        case HPAGE_2GB_SHIFT:
                hugepage_size = _PAGE_SZ2GB_4V;
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_256MB_SHIFT:
                hugepage_size = _PAGE_SZ256MB_4V;
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_SHIFT:
                pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        case HPAGE_64K_SHIFT:
                hugepage_size = _PAGE_SZ64K_4V;
                break;
        default:
                WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
        }

        pte_val(entry) = pte_val(entry) | hugepage_size;
        return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
        if (tlb_type == hypervisor)
                return sun4v_hugepage_shift_to_tte(entry, shift);
        else
                return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                         struct page *page, int writeable)
{
        unsigned int shift = huge_page_shift(hstate_vma(vma));
        pte_t pte;

        pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
        /* If this vma has ADI enabled on it, turn on TTE.mcd
         */
        if (vma->vm_flags & VM_SPARC_ADI)
                return pte_mkmcd(pte);
        else
                return pte_mknotmcd(pte);
#else
        return pte;
#endif
}

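/* The reverse mapping: decode the size field of a TTE back into a page
 * shift.  A PAGE_SHIFT result means the entry carried no valid
 * hugepage size.
 */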
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
        unsigned int shift;

        switch (tte_szbits) {
        case _PAGE_SZ16GB_4V:
                shift = HPAGE_16GB_SHIFT;
                break;
        case _PAGE_SZ2GB_4V:
                shift = HPAGE_2GB_SHIFT;
                break;
        case _PAGE_SZ256MB_4V:
                shift = HPAGE_256MB_SHIFT;
                break;
        case _PAGE_SZ4MB_4V:
                shift = REAL_HPAGE_SHIFT;
                break;
        case _PAGE_SZ64K_4V:
                shift = HPAGE_64K_SHIFT;
                break;
        default:
                shift = PAGE_SHIFT;
                break;
        }
        return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
        unsigned int shift;

        switch (tte_szbits) {
        case _PAGE_SZ256MB_4U:
                shift = HPAGE_256MB_SHIFT;
                break;
        case _PAGE_SZ4MB_4U:
                shift = REAL_HPAGE_SHIFT;
                break;
        case _PAGE_SZ64K_4U:
                shift = HPAGE_64K_SHIFT;
                break;
        default:
                shift = PAGE_SHIFT;
                break;
        }
        return shift;
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
        unsigned long shift;

        if (tlb_type == hypervisor)
                shift = sun4v_huge_tte_to_shift(entry);
        else
                shift = sun4u_huge_tte_to_shift(entry);

        if (shift == PAGE_SHIFT)
                WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
                          pte_val(entry));

        return shift;
}

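/* An HPAGE_SIZE'ed page is backed by two REAL_HPAGE_SIZE'ed hardware
 * TTEs, so a REAL_HPAGE_SIZE result is reported as HPAGE_SIZE here.
 */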
static unsigned long huge_tte_to_size(pte_t pte)
{
        unsigned long size = 1UL << huge_tte_to_shift(pte);

        if (size == REAL_HPAGE_SIZE)
                size = HPAGE_SIZE;
        return size;
}

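/* Walk (and allocate) the page tables for a hugepage mapping, stopping
 * at the level the hugepage size implies: a PUD-sized or larger request
 * returns the pud slot, a PMD-sized or larger one the pmd slot, and
 * anything smaller a real pte.
 */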
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_offset(pgd, addr);
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        if (sz >= PUD_SIZE)
                return (pte_t *)pud;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;
        if (sz >= PMD_SIZE)
                return (pte_t *)pmd;
        return pte_alloc_map(mm, pmd, addr);
}

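/* Lookup-only twin of huge_pte_alloc(): returns NULL at the first
 * missing level instead of allocating, and stops early when the pud or
 * pmd itself is a hugepage entry.
 */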
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return NULL;
        if (is_hugetlb_pud(*pud))
                return (pte_t *)pud;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        if (is_hugetlb_pmd(*pmd))
                return (pte_t *)pmd;
        return pte_offset_map(pmd, addr);
}

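/* Install a hugepage TTE.  When the hugepage spans several page-table
 * entries (e.g. a 64KB page made of 8KB ptes), the entry is replicated
 * into all nptes slots, with the physical address stepped by the
 * underlying entry size each time.
 */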
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        unsigned int nptes, orig_shift, shift;
        unsigned long i, size;
        pte_t orig;

        size = huge_tte_to_size(entry);

        shift = PAGE_SHIFT;
        if (size >= PUD_SIZE)
                shift = PUD_SHIFT;
        else if (size >= PMD_SIZE)
                shift = PMD_SHIFT;
        else
                shift = PAGE_SHIFT;

        nptes = size >> shift;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.hugetlb_pte_count += nptes;

        addr &= ~(size - 1);
        orig = *ptep;
        orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

        for (i = 0; i < nptes; i++)
                ptep[i] = __pte(pte_val(entry) + (i << shift));

        maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
        if (size == HPAGE_SIZE)
                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
                                    orig_shift);
}

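/* Tear-down counterpart of set_huge_pte_at(): clear every slot the
 * hugepage occupied, queue the TLB flush(es) and return the old entry.
 */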
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned int i, nptes, orig_shift, shift;
        unsigned long size;
        pte_t entry;

        entry = *ptep;
        size = huge_tte_to_size(entry);

        shift = PAGE_SHIFT;
        if (size >= PUD_SIZE)
                shift = PUD_SHIFT;
        else if (size >= PMD_SIZE)
                shift = PMD_SHIFT;
        else
                shift = PAGE_SHIFT;

        nptes = size >> shift;
        orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

        if (pte_present(entry))
                mm->context.hugetlb_pte_count -= nptes;

        addr &= ~(size - 1);
        for (i = 0; i < nptes; i++)
                ptep[i] = __pte(0UL);

        maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
        if (size == HPAGE_SIZE)
                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
                                    orig_shift);

        return entry;
}

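/* A pmd/pud counts as huge when it is non-none but not a plain valid
 * table pointer: this covers real hugepage entries (_PAGE_PMD_HUGE /
 * _PAGE_PUD_HUGE set) and also non-present entries such as migration
 * entries.
 */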
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
        return !pud_none(pud) &&
                (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

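/* The hugetlb_free_*_range() family mirrors free_pgd_range(): clear
 * hugepage entries in place while walking, then free the intermediate
 * tables once a whole span is empty, respecting the floor/ceiling
 * bounds of the neighbouring vmas.
 */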
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                if (is_hugetlb_pmd(*pmd))
                        pmd_clear(pmd);
                else
                        hugetlb_free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (is_hugetlb_pud(*pud))
                        pud_clear(pud);
                else
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(p4d, start);
        p4d_clear(p4d);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        p4d_t *p4d;
        unsigned long next;

        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;

        pgd = pgd_offset(tlb->mm, addr);
        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
        } while (p4d++, addr = next, addr != end);
}