/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
				  unsigned long pa, unsigned long rflags,
				  unsigned long vflags, int psize, int ssize);

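/*
 * Handle a hash fault on a huge page. Return values follow the
 * __hash_page_* convention: 0 when the fault has been handled (or the
 * PTE was busy and the access should simply be retried), 1 when the
 * access is not permitted by the Linux PTE, and -1 on hypervisor
 * insertion failure.
 */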
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa, sz;
	long slot;

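	/*
	 * The caller decoded "shift" from the page tables; it must agree
	 * with the MMU page size table entry for mmu_psize.
	 */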
	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	vpn = hpt_vpn(ea, vsid, ssize);
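	/*
	 * vpn combines the VSID with the offset of ea within the segment;
	 * it is what gets hashed to locate the HPTE group below.
	 */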

	/* At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & _PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(access & ~old_pte))
			return 1;
		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pte |= _PAGE_DIRTY;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
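
	/*
	 * We now "own" the PTE: _PAGE_BUSY is set, so any concurrent hash
	 * fault on this page bails out above and retries the access until
	 * the busy bit is cleared at the end of this function.
	 */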
	rflags = htab_convert_pte_flags(new_pte);
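	/*
	 * rflags now holds the HPTE "r" side flags (pp protection bits,
	 * no-execute, etc.) derived from the Linux PTE protection bits.
	 */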

	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
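	/*
	 * On CPUs without a coherent icache, hash_page_do_lazy_icache()
	 * may flush the icache for this page and/or mark the mapping
	 * no-execute, depending on the access that faulted.
	 */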

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;

		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
					 mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
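
	/*
	 * If the update above failed the old HPTE is gone (e.g. it was
	 * evicted), so _PAGE_HASHPTE was cleared and we fall through to
	 * insert a fresh entry.
	 */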
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);
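
		/*
		 * slot now holds the index within the HPTE group plus the
		 * secondary-hash bit, or a negative error code; stashing
		 * it in the PTE lets a later fault update the HPTE
		 * without searching the group.
		 */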

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

		new_pte |= (slot << _PAGE_F_GIX_SHIFT) &
			   (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here: we still hold _PAGE_BUSY, so
	 * nothing else can be updating this PTE; the plain store both
	 * publishes the new PTE and releases the busy bit.
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	return 0;
}

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
/*
 * This enables us to catch the wrong page directory format
 * Moved here so that we can use WARN() in the call.
 */
int hugepd_ok(hugepd_t hpd)
{
	bool is_hugepd;

	/*
	 * We should not find this format in page directory, warn otherwise.
	 */
	is_hugepd = (((hpd.pd & 0x3) == 0x0) &&
		     ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
	WARN(is_hugepd, "Found wrong page directory format\n");

	return 0;
}
#endif