/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define HPAGE_SHIFT_64K		16
#define HPAGE_SHIFT_16M		24

#define NUM_LOW_AREAS		(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS		(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
unsigned int hugepte_shift;
#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE - 1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)
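/* Recover the hugepte table address from a hugepd entry by stripping the
 * HUGEPD_OK marker bit. */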
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
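/* Index into the hugepte table for @addr; each entry covers one huge page. */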
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE - 1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
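/* Allocate a fresh hugepte table and install it in *hpdp under
 * mm->page_table_lock.  If another thread installed one while we were
 * allocating, free ours and keep theirs. */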
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
#else
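/* With a 4K base page size the level that carries the hugepd depends on the
 * huge page size: 64K huge pages hang the hugepte table off a real PMD,
 * while larger huge pages treat the PUD entry itself as the hugepd, so
 * these helpers pick the level at run time. */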
static inline
pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static inline
pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
#endif
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
		}
	}

	return NULL;
}
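/* Like huge_pte_offset(), but allocate any missing levels (including the
 * hugepte table itself) on the way down.  Returns NULL on allocation
 * failure. */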
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (!hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
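/* Huge page PMD sharing is not implemented here, so there is never
 * anything to unshare. */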
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
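/* The hugetlb_free_*_range() functions below mirror free_pmd_range() and
 * friends in mm/memory.c: walk each level, hand fully-unmapped hugepte
 * tables to free_hugepte_range(), then use the floor/ceiling tests to
 * decide whether the enclosing table can itself be freed. */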
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next,
					       floor, ceiling);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud);
		}
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * of the address space.  Comparisons need to use "end - 1"
	 * and "ceiling - 1" (though that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  No, end can't go down
	 * to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
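	/*
	 * Illustrative example of the "- 1" trick: if ceiling == 0 (i.e.
	 * "top of the address space"), then ceiling - 1 == ~0UL, so the
	 * "end - 1 > ceiling - 1" test below can never fire and end is
	 * never trimmed - exactly what we want for an unbounded ceiling.
	 */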
	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
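/* Install a huge PTE.  Any existing, present PTE is torn down first with a
 * pte_update() call that tells the hash flush code this is a huge mapping. */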
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices. */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);

	return __pte(old);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
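/* Finding a free range for a huge page mapping is delegated entirely to the
 * slice layer, which hands back an area in a slice of mmu_huge_psize pages. */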
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
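/* Called from the low-level hash fault path to build or update the HPTE for
 * a huge page.  Returns 0 on success; a non-zero return sends the fault up
 * to do_page_fault(). */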
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);
	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault.
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */
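	/* Lock the PTE by atomically setting _PAGE_BUSY (bailing out if some
	 * other CPU already holds it busy), and set ACCESSED/HASHPTE while
	 * we are at it. */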
	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}
	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
void set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
		mmu_huge_psize = psize;
#ifdef CONFIG_PPC_64K_PAGES
		hugepte_shift = (PMD_SHIFT - HPAGE_SHIFT);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
			hugepte_shift = (PMD_SHIFT - HPAGE_SHIFT);
		else
			hugepte_shift = (PUD_SHIFT - HPAGE_SHIFT);
#endif
	} else
		HPAGE_SHIFT = 0;
}
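/* Handle the hugepagesz= command line option: parse the requested size,
 * map it to an MMU page size index and, if it is supported, make it the
 * huge page size via set_huge_psize(). */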
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize = -1;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case HPAGE_SHIFT_64K:
		mmu_psize = MMU_PAGE_64K;
		break;
#endif
	case HPAGE_SHIFT_16M:
		mmu_psize = MMU_PAGE_16M;
		break;
	}

	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor);
	if (!huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);