arch/powerpc/mm/hugepage-hash64.c
/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * PPC64 THP Support for hash based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>
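
/*
 * Hash-fault handling for a PMD-mapped transparent hugepage (16M, per
 * MMU_PAGE_16M below): lock the PMD, build the HPTE protection bits,
 * then update or insert the hash PTE for the faulting subpage.
 * Returns 0 when the access should simply be retried or has been
 * handled, 1 when a full page fault is required, and -1 on hypervisor
 * failure, similar to the other __hash_page_* helpers.
 */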
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                    pmd_t *pmdp, unsigned long trap, int local, int ssize,
                    unsigned int psize)
{
        unsigned int index, valid;
        unsigned char *hpte_slot_array;
        unsigned long rflags, pa, hidx;
        unsigned long old_pmd, new_pmd;
        int ret, lpsize = MMU_PAGE_16M;
        unsigned long vpn, hash, shift, slot;
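
        /*
         * Note: psize is the base page size of the mapping and is used
         * below to pick the subpage index and hash; lpsize is the actual
         * hugepage size (16M) passed to the HPTE update/insert hooks.
         */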
        /*
         * atomically mark the linux large page PMD busy and dirty
         */
        do {
                old_pmd = pmd_val(*pmdp);
                /* If PMD busy, retry the access */
                if (unlikely(old_pmd & _PAGE_BUSY))
                        return 0;
                /* If PMD is trans splitting retry the access */
                if (unlikely(old_pmd & _PAGE_SPLITTING))
                        return 0;
                /* If PMD permissions don't match, take page fault */
                if (unlikely(access & ~old_pmd))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access
                 */
                new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pmd |= _PAGE_DIRTY;
        } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
                                          old_pmd, new_pmd));
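        /*
         * The PMD is now marked _PAGE_BUSY, so concurrent hash faults on
         * this mapping back off above and retry until the busy bit is
         * cleared at the end of this function.
         */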
        /*
         * PP bits. _PAGE_USER is already PP bit 0x2, so we only
         * need to add in 0x1 if it's a read-only user page
         */
        rflags = new_pmd & _PAGE_USER;
        if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
                                        (new_pmd & _PAGE_DIRTY)))
                rflags |= 0x1;
        /*
         * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
         */
        rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);

#if 0
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
                /*
                 * No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case
                 */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
        }
#endif
        /*
         * Find the slot index details for this ea, using base page size.
         */
        shift = mmu_psize_defs[psize].shift;
        index = (ea & ~HPAGE_PMD_MASK) >> shift;
        BUG_ON(index >= 4096);

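        /*
         * Compute the virtual page number and hash for the faulting
         * subpage. hpte_slot_array is the per-subpage bookkeeping attached
         * to this PMD: it records whether an HPTE already exists for each
         * subpage index and, if so, which hash slot it occupies.
         */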
        vpn = hpt_vpn(ea, vsid, ssize);
        hash = hpt_hash(vpn, shift, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);

        valid = hpte_valid(hpte_slot_array, index);
        if (valid) {
                /* update the hpte bits */
                hidx = hpte_hash_index(hpte_slot_array, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
                                           psize, lpsize, ssize, local);
                /*
                 * We failed to update, try to insert a new entry.
                 */
                if (ret == -1) {
                        /*
                         * large pte is marked busy, so we can be sure
                         * nobody is looking at hpte_slot_array. hence we can
                         * safely update this here.
                         */
                        valid = 0;
                        new_pmd &= ~_PAGE_HPTEFLAGS;
                        hpte_slot_array[index] = 0;
                } else
                        /* clear the busy bits and set the hash pte bits */
                        new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
        }
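
        /*
         * No usable HPTE for this subpage: either none existed, or the
         * update above failed and the slot was cleared. Insert a fresh
         * entry, starting with the primary hash group.
         */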
        if (!valid) {
                unsigned long hpte_group;

                /* insert new entry */
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

                /* clear the busy bits and set the hash pte bits */
                new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

                /* Add in WIMG bits */
                rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_GUARDED));
                /*
                 * enable the memory coherence always
                 */
                rflags |= HPTE_R_M;

                /* Insert into the hash table, primary slot */
                slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
                                          psize, lpsize, ssize);
                /*
                 * Primary is full, try the secondary
                 */
                if (unlikely(slot == -1)) {
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
                                                  rflags, HPTE_V_SECONDARY,
                                                  psize, lpsize, ssize);
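                        /*
                         * If the secondary group is also full, pick one of
                         * the two groups based on the timebase low bit,
                         * evict an entry from it and retry the insert.
                         */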
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP) & ~0x7UL;

                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }
                /*
                 * Hypervisor failure. Restore old pmd and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *pmdp = __pmd(old_pmd);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           psize, lpsize, old_pmd);
                        return -1;
                }
                /*
                 * large pte is marked busy, so we can be sure
                 * nobody is looking at hpte_slot_array. hence we can
                 * safely update this here.
                 */
                mark_hpte_slot_valid(hpte_slot_array, index, slot);
        }
        /*
         * No need to use ldarx/stdcx here
         */
        *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
        return 0;
}