/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

#define NUM_LOW_AREAS   (0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS  (PGTABLE_RANGE >> HTLB_AREA_SHIFT)

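/* Hugepage-capable address space is managed as "areas": the 4GB of
 * 32-bit-addressable space is split into NUM_LOW_AREAS 256MB segments
 * (SID_SHIFT = 28), and the space above 4GB into NUM_HIGH_AREAS regions
 * of 1UL << HTLB_AREA_SHIFT bytes each.  An area that has been opened
 * for hugepages gets one bit in a 16-bit mask kept in the mm context
 * (low_htlb_areas/high_htlb_areas). */
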
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        pte_t *pt;

        BUG_ON(! in_hugepage_area(mm->context, addr));

        addr &= HPAGE_MASK;

        pg = pgd_offset(mm, addr);
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, addr);
                if (!pud_none(*pu)) {
                        pm = pmd_offset(pu, addr);
                        pt = (pte_t *)pm;
                        BUG_ON(!pmd_none(*pm)
                               && !(pte_present(*pt) && pte_huge(*pt)));
                        return pt;
                }
        }

        return NULL;
}

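/* Like huge_pte_offset(), but allocates the intermediate page table
 * levels if they are not yet present */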
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        pte_t *pt;

        BUG_ON(! in_hugepage_area(mm->context, addr));

        addr &= HPAGE_MASK;

        pg = pgd_offset(mm, addr);
        pu = pud_alloc(mm, pg, addr);

        if (pu) {
                pm = pmd_alloc(mm, pu, addr);
                if (pm) {
                        pt = (pte_t *)pm;
                        BUG_ON(!pmd_none(*pm)
                               && !(pte_present(*pt) && pte_huge(*pt)));
                        return pt;
                }
        }

        return NULL;
}

#define HUGEPTE_BATCH_SIZE      (HPAGE_SIZE / PMD_SIZE)

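/* A huge pte lives at the pmd level (see huge_pte_offset() above), so
 * one HPAGE_SIZE page covers HPAGE_SIZE/PMD_SIZE consecutive pmd
 * entries; the set/clear operations below replicate the pte across all
 * of them. */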
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        int i;

        if (pte_present(*ptep)) {
                pte_clear(mm, addr, ptep);
                flush_tlb_pending();
        }

        for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
                *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
                ptep++;
        }
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned long old = pte_update(ptep, ~0UL);
        int i;

        if (old & _PAGE_HASHPTE)
                hpte_update(mm, addr, old, 0);

        for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
                ptep[i] = __pte(0);

        return __pte(old);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (! (within_hugepage_low_range(addr, len)
               || within_hugepage_high_range(addr, len)))
                return -EINVAL;

        return 0;
}

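/* Opening an area for hugepages changes how its addresses translate,
 * so any stale SLB entries covering it must be flushed on every CPU.
 * The helpers below run via on_each_cpu() and slbie each 256MB segment
 * selected by the area bitmap. */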
static void flush_low_segments(void *parm)
{
        u16 areas = (unsigned long) parm;
        unsigned long i;

        asm volatile("isync" : : : "memory");

        BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);

        for (i = 0; i < NUM_LOW_AREAS; i++) {
                if (! (areas & (1U << i)))
                        continue;
                asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
        }

        asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
        u16 areas = (unsigned long) parm;
        unsigned long i, j;

        asm volatile("isync" : : : "memory");

        BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);

        for (i = 0; i < NUM_HIGH_AREAS; i++) {
                if (! (areas & (1U << i)))
                        continue;
                for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
                        asm volatile("slbie %0"
                                     :: "r" ((i << HTLB_AREA_SHIFT)
                                             + (j << SID_SHIFT)));
        }

        asm volatile("isync" : : : "memory");
}

static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
        unsigned long start = area << SID_SHIFT;
        unsigned long end = (area+1) << SID_SHIFT;
        struct vm_area_struct *vma;

        BUG_ON(area >= NUM_LOW_AREAS);

        /* Check no VMAs are in the region */
        vma = find_vma(mm, start);
        if (vma && (vma->vm_start < end))
                return -EBUSY;

        return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
        unsigned long start = area << HTLB_AREA_SHIFT;
        unsigned long end = (area+1) << HTLB_AREA_SHIFT;
        struct vm_area_struct *vma;

        BUG_ON(area >= NUM_HIGH_AREAS);

        /* Check no VMAs are in the region */
        vma = find_vma(mm, start);
        if (vma && (vma->vm_start < end))
                return -EBUSY;

        return 0;
}

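/* Opening areas: check they are empty, set their bits in the context
 * bitmap, mirror the context into the PACA so SLB miss handlers see
 * the change, then flush stale SLB entries on all CPUs. */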
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
        unsigned long i;

        BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
        BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

        newareas &= ~(mm->context.low_htlb_areas);
        if (! newareas)
                return 0; /* The segments we want are already open */

        for (i = 0; i < NUM_LOW_AREAS; i++)
                if ((1 << i) & newareas)
                        if (prepare_low_area_for_htlb(mm, i) != 0)
                                return -EBUSY;

        mm->context.low_htlb_areas |= newareas;

        /* update the paca copy of the context struct */
        get_paca()->context = mm->context;

        /* the context change must make it to memory before the flush,
         * so that further SLB misses do the right thing. */
        mb();
        on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);

        return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
        unsigned long i;

        BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
        BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
                     != NUM_HIGH_AREAS);

        newareas &= ~(mm->context.high_htlb_areas);
        if (! newareas)
                return 0; /* The areas we want are already open */

        for (i = 0; i < NUM_HIGH_AREAS; i++)
                if ((1 << i) & newareas)
                        if (prepare_high_area_for_htlb(mm, i) != 0)
                                return -EBUSY;

        mm->context.high_htlb_areas |= newareas;

        /* update the paca copy of the context struct */
        get_paca()->context = mm->context;

        /* the context change must make it to memory before the flush,
         * so that further SLB misses do the right thing. */
        mb();
        on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);

        return 0;
}

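/* Called on the hugetlbfs mmap() path: ensure every area touched by
 * [addr, addr+len) is open for hugepages, opening new ones if needed. */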
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
        int err;

        if ( (addr+len) < addr )
                return -EINVAL;

        if ((addr + len) < 0x100000000UL)
                err = open_low_hpage_areas(current->mm,
                                           LOW_ESID_MASK(addr, len));
        else
                err = open_high_hpage_areas(current->mm,
                                            HTLB_AREA_MASK(addr, len));

        if (err) {
                printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
                       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
                       addr, len,
                       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
                return err;
        }

        return 0;
}

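/* Resolve the struct page backing a hugepage address, for the generic
 * follow_page() path */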
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;

        if (! in_hugepage_area(mm->context, address))
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, address);
        page = pte_page(*ptep);
        if (page)
                page += (address % HPAGE_SIZE) / PAGE_SIZE;

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        /* Should never be called, since pmd_huge() is always false here */
        BUG();
        return NULL;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (((TASK_SIZE - len) >= addr)
                    && (!vma || (addr+len) <= vma->vm_start)
                    && !is_hugepage_only_range(mm, addr, len))
                        return addr;
        }
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        vma = find_vma(mm, addr);
        while (TASK_SIZE - len >= addr) {
                BUG_ON(vma && (addr >= vma->vm_end));

                if (touches_hugepage_low_range(mm, addr, len)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (touches_hugepage_high_range(mm, addr, len)) {
                        addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                vma = vma->vm_next;
        }

        /* Make sure we didn't miss any holes */
        if (start_addr != TASK_UNMAPPED_BASE) {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
                goto full_search;
        }
        return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* dont allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr
                    && (!vma || addr + len <= vma->vm_start)
                    && !is_hugepage_only_range(mm, addr, len))
                        return addr;
        }

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or cant fit in requested address hole */
        addr = (mm->free_area_cache - len) & PAGE_MASK;
        do {
hugepage_recheck:
                if (touches_hugepage_low_range(mm, addr, len)) {
                        addr = (addr & ((~0) << SID_SHIFT)) - len;
                        goto hugepage_recheck;
                } else if (touches_hugepage_high_range(mm, addr, len)) {
                        addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
                        goto hugepage_recheck;
                }

                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr+len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

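/* The two helpers below scan upwards for the lowest address at which a
 * hugepage mapping of the given length would fit entirely within the
 * areas permitted by the mask. */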
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
        unsigned long addr = 0;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        while (addr + len <= 0x100000000UL) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

                if (! __within_hugepage_low_range(addr, len, segmask)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(current->mm, addr);
                        continue;
                }

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Depending on segmask this might not be a confirmed
                 * hugepage region, so the ALIGN could have skipped
                 * some VMAs */
                vma = find_vma(current->mm, addr);
        }

        return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
        unsigned long addr = 0x100000000UL;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        while (addr + len <= TASK_SIZE_USER64) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

                if (! __within_hugepage_high_range(addr, len, areamask)) {
                        addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
                        vma = find_vma(current->mm, addr);
                        continue;
                }

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Depending on segmask this might not be a confirmed
                 * hugepage region, so the ALIGN could have skipped
                 * some VMAs */
                vma = find_vma(current->mm, addr);
        }

        return -ENOMEM;
}

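/* Place a hugepage mapping: first try the areas already open for this
 * mm; failing that, walk a candidate area mask down from the top of
 * the usable range, opening areas once a placement fits. */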
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        int lastshift;
        u16 areamask, curareas;

        if (len & ~HPAGE_MASK)
                return -EINVAL;

        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -EINVAL;

        if (test_thread_flag(TIF_32BIT)) {
                curareas = current->mm->context.low_htlb_areas;

                /* First see if we can do the mapping in the existing
                 * low areas */
                addr = htlb_get_low_area(len, curareas);
                if (addr != -ENOMEM)
                        return addr;

                lastshift = 0;
                for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
                     ! lastshift; areamask >>=1) {
                        if (areamask & 1)
                                lastshift = 1;

                        addr = htlb_get_low_area(len, curareas | areamask);
                        if ((addr != -ENOMEM)
                            && open_low_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }
        } else {
                curareas = current->mm->context.high_htlb_areas;

                /* First see if we can do the mapping in the existing
                 * high areas */
                addr = htlb_get_high_area(len, curareas);
                if (addr != -ENOMEM)
                        return addr;

                lastshift = 0;
                for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
                     ! lastshift; areamask >>=1) {
                        if (areamask & 1)
                                lastshift = 1;

                        addr = htlb_get_high_area(len, curareas | areamask);
                        if ((addr != -ENOMEM)
                            && open_high_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }
        }
        printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
               " enough areas\n");
        return -ENOMEM;
}

int hash_huge_page(struct mm_struct *mm, unsigned long access,
                   unsigned long ea, unsigned long vsid, int local)
{
        pte_t *ptep;
        unsigned long va, vpn;
        pte_t old_pte, new_pte;
        unsigned long rflags, prpn;
        long slot;
        int err = 1;

        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, ea);

        /* Search the Linux page table for a match with va */
        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> HPAGE_SHIFT;

        /*
         * If no pte found or not present, send the problem up to
         * do_page_fault
         */
        if (unlikely(!ptep || pte_none(*ptep)))
                goto out;

/*      BUG_ON(pte_bad(*ptep)); */

        /*
         * Check the user's access rights to the page.  If access should be
         * prevented then send the problem up to do_page_fault.
         */
        if (unlikely(access & ~pte_val(*ptep)))
                goto out;
        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

        old_pte = *ptep;
        new_pte = old_pte;

        rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(vpn, 1);
                if (pte_val(old_pte) & _PAGE_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;

                if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
                        pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
        }

        if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, 1);
                unsigned long hpte_group;

                prpn = pte_pfn(old_pte);

repeat:
                hpte_group = ((hash & htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;

                /* Update the linux pte with the HPTE slot */
                pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
                pte_val(new_pte) |= _PAGE_HASHPTE;

                /* Add in WIMG bits */
                /* XXX We should store these in the pte */
                rflags |= _PAGE_COHERENT;

                slot = ppc_md.hpte_insert(hpte_group, va, prpn,
                                          HPTE_V_LARGE, rflags);

                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
                        pte_val(new_pte) |= _PAGE_SECONDARY;
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, va, prpn,
                                                  HPTE_V_LARGE, rflags);
                        if (slot == -1) {
                                /* Both groups full: evict a random entry
                                 * from one of them and retry */
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP) & ~0x7UL;

                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }

                if (unlikely(slot == -2))
                        panic("hash_huge_page: pte_insert failed\n");

                pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;

                /*
                 * No need to use ldarx/stdcx here because all who
                 * might be updating the pte will hold the
                 * page_table_lock
                 */
                *ptep = new_pte;
        }

        err = 0;

 out:
        spin_unlock(&mm->page_table_lock);

        return err;
}