arch/ppc64/mm/hash_native.c

/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>

#define HPTE_LOCK_BIT 3

static DEFINE_SPINLOCK(native_tlbie_lock);

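/*
 * Each HPTE is locked via one of the software-use bits in its first
 * dword, so updates to a single entry serialise without any global
 * lock. HPTE_LOCK_BIT is the bit number as seen by test_and_set_bit()
 * on that 64-bit word.
 */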
static inline void native_lock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		/* Spin with plain loads until the bit clears */
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

	/* Order prior updates to the HPTE before dropping the lock bit */
	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}

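/*
 * Insert a HPTE into the given group: scan the slots for an invalid
 * entry, write the second dword first, then set the first dword
 * (with the valid bit) so the entry never becomes visible
 * half-formed. Returns the slot index within the group, with the
 * secondary-hash flag in bit 3, or -1 if the group is full.
 */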
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long prpn, int secondary,
			unsigned long hpteflags, int bolted, int large)
{
	unsigned long arpn = physRpn_to_absRpn(prpn);
	HPTE *hptep = htab_address + hpte_group;
	Hpte_dword0 dw0;
	HPTE lhpte;
	int i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		dw0 = hptep->dw0.dw0;

		if (!dw0.v) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (!dw0.v)
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	lhpte.dw1.dword1 = 0;
	lhpte.dw1.dw1.rpn = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0 = 0;
	lhpte.dw0.dw0.avpn = va >> 23;
	lhpte.dw0.dw0.h = secondary;
	lhpte.dw0.dw0.bolted = bolted;
	lhpte.dw0.dw0.v = 1;

	if (large) {
		lhpte.dw0.dw0.l = 1;
		lhpte.dw0.dw0.avpn &= ~0x1UL;
	}

	hptep->dw1.dword1 = lhpte.dw1.dword1;

	/* Guarantee the second dword is visible before the valid bit */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->dw0.dword0 = lhpte.dw0.dword0;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (secondary << 3);
}

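/*
 * Make room in a full group by invalidating one non-bolted entry,
 * starting the search at a pseudo-random slot (low bits of the
 * timebase). Returns the slot that was freed, or -1 if every entry
 * in the group is bolted. No tlbie is issued here; that is left to
 * the caller.
 */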
static long native_hpte_remove(unsigned long hpte_group)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	int i;
	int slot_offset;

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		dw0 = hptep->dw0.dw0;

		if (dw0.v && !dw0.bolted) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (dw0.v && !dw0.bolted)
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->dw0.dword0 = 0;

	return i;
}

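/*
 * Atomically fold the low three bits of pp into the low-order
 * protection bits of the HPTE's second dword using a ldarx/stdcx.
 * loop; the rldimi inserts bits 61-63 without disturbing the rest
 * of the dword.
 */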
static inline void set_pp_bit(unsigned long pp, HPTE *addr)
{
	unsigned long old;
	unsigned long *p = &addr->dw1.dword1;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		rldimi	%0,%2,0,61\n\
		stdcx.	%0,0,%3\n\
		bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Only works on small pages. Yes, it's ugly to have to check each
 * slot in the group, but we only use this during bootup.
 */
static long native_hpte_find(unsigned long vpn)
{
	HPTE *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	Hpte_dword0 dw0;

	hash = hpt_hash(vpn, 0);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_address + slot;
			dw0 = hptep->dw0.dw0;

			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
			    (dw0.h == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

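/*
 * Change the protection bits of the HPTE at slot, but only if it is
 * still valid and still maps va (the entry may have been evicted
 * and reused in the meantime, in which case -1 is returned). The
 * TLB entry for va is flushed in either case.
 */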
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int large, int local)
{
	HPTE *hptep = htab_address + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	int ret = 0;

	if (large)
		avpn &= ~0x1UL;

	native_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		native_unlock_hpte(hptep);
		ret = -1;
	} else {
		set_pp_bit(newpp, hptep);
		native_unlock_hpte(hptep);
	}

	/* Ensure it is out of the tlb too */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	return ret;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn, flags = 0;
	long slot;
	HPTE *hptep;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = native_hpte_find(vpn);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	if (lock_tlbie)
		spin_lock_irqsave(&native_tlbie_lock, flags);
	tlbie(va, 0);
	if (lock_tlbie)
		spin_unlock_irqrestore(&native_tlbie_lock, flags);
}

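/*
 * Invalidate the HPTE at slot if it still maps va, then flush the
 * TLB entry: tlbiel suffices for a purely local small-page mapping,
 * otherwise a (possibly lock-serialised) global tlbie is used. Runs
 * with interrupts off, presumably so the HPTE lock bit is never
 * held across an interrupt.
 */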
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int large, int local)
{
	HPTE *hptep = htab_address + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	unsigned long flags;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (large)
		avpn &= ~0x1UL;

	local_irq_save(flags);
	native_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		native_unlock_hpte(hptep);
	} else {
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->dw0.dword0 = 0;
	}

	/* Invalidate the tlb */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}
	local_irq_restore(flags);
}

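/*
 * Flush a whole batch of hash entries: walk the per-cpu batch,
 * invalidate each matching HPTE while collecting the virtual
 * addresses, then issue a single tlbiel/tlbie sequence for the lot
 * under one ptesync, which is much cheaper than flushing entries
 * one at a time.
 */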
static void native_flush_hash_range(unsigned long context,
				    unsigned long number, int local)
{
	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
	int i, j;
	HPTE *hptep;
	Hpte_dword0 dw0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	/* XXX fix for large ptes */
	unsigned long large = 0;

	local_irq_save(flags);

	j = 0;
	for (i = 0; i < number; i++) {
		if (batch->addr[i] < KERNELBASE)
			vsid = get_vsid(context, batch->addr[i]);
		else
			vsid = get_kernel_vsid(batch->addr[i]);

		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
		batch->vaddr[j] = va;
		if (large)
			vpn = va >> HPAGE_SHIFT;
		else
			vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, large);
		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
		if (secondary)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;

		hptep = htab_address + slot;

		avpn = va >> 23;
		if (large)
			avpn &= ~0x1UL;

		native_lock_hpte(hptep);

		dw0 = hptep->dw0.dw0;

		/* Even if we miss, we need to invalidate the TLB */
		if ((dw0.avpn != avpn) || !dw0.v) {
			native_unlock_hpte(hptep);
		} else {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->dw0.dword0 = 0;
		}

		j++;
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbiel(batch->vaddr[i]);

		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbie(batch->vaddr[i], 0);

		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

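/*
 * Hook the native hash-table operations into the machine descriptor.
 * flush_hash_range is only installed where TLB batching is enabled.
 */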
void hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
	htab_finish_init();
}