/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>

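/*
 * Bit 3 of an HPTE's first doubleword sits below the AVPN field and is
 * ignored by the hardware, so it is free for software use: we use it as
 * a per-entry lock, taken and released by native_lock_hpte() and
 * native_unlock_hpte() below.
 */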
#define HPTE_LOCK_BIT 3

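/*
 * Serializes tlbie execution: some hardware will deadlock if two
 * processors tlbie at the same time, so unless the CPU advertises
 * CPU_FTR_LOCKLESS_TLBIE, every tlbie in this file is issued under
 * this lock.
 */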
static DEFINE_SPINLOCK(native_tlbie_lock);

static inline void native_lock_hpte(hpte_t *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(hpte_t *hptep)
{
	unsigned long *word = &hptep->v;

	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}

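/*
 * Insert an HPTE into the given hash group: scan the eight slots for a
 * free (invalid) entry, fill in the second doubleword first, then set
 * the first doubleword with the valid bit, so the hardware never sees a
 * half-formed entry.  Returns the slot number, with bit 3 set if the
 * entry was placed via the secondary hash, or -1 if the group is full.
 */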
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long prpn, unsigned long vflags,
			unsigned long rflags)
{
	hpte_t *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
	if (vflags & HPTE_V_LARGE)
		va &= ~(1UL << HPTE_V_AVPN_SHIFT);
	hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	__asm__ __volatile__ ("eieio" : : : "memory");
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

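/*
 * Evict one entry to make room in a full hash group: start at a
 * pseudo-random slot (the low timebase bits) and take the first valid,
 * non-bolted HPTE found.  Returns the slot offset within the group, or
 * -1 if every entry is bolted.
 */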
static long native_hpte_remove(unsigned long hpte_group)
{
	hpte_t *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

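/*
 * Rewrite the pp (page protection) bits in an HPTE's second doubleword
 * using an ldarx/stdcx. loop: the hardware can update the R and C bits
 * in that doubleword at any time, so a plain read-modify-write could
 * lose those updates.
 */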
static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
{
	unsigned long old;
	unsigned long *p = &addr->r;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		rldimi	%0,%2,0,61\n\
		stdcx.	%0,0,%3\n\
		bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Only works on small pages.  Yes, it's ugly to have to check each slot
 * in the group, but we only use this during bootup.
 */
static long native_hpte_find(unsigned long vpn)
{
	hpte_t *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	unsigned long hpte_v;

	hash = hpt_hash(vpn, 0);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_address + slot;
			hpte_v = hptep->v;

			if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
			    && (hpte_v & HPTE_V_VALID)
			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

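/*
 * Change the protection bits of an existing mapping.  Returns -1 if the
 * HPTE no longer matches (e.g. it has been evicted); either way the old
 * translation is flushed from the TLB, which may still hold it even
 * when the hash entry is gone.
 */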
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int large, int local)
{
	hpte_t *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long avpn = va >> 23;
	int ret = 0;

	if (large)
		avpn &= ~0x1UL;

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
	    || !(hpte_v & HPTE_V_VALID)) {
		native_unlock_hpte(hptep);
		ret = -1;
	} else {
		set_pp_bit(newpp, hptep);
		native_unlock_hpte(hptep);
	}

	/* Ensure it is out of the tlb too */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	return ret;
}

/*
 * Update the page protection bits.  Intended to be used to create guard
 * pages for kernel data structures on pages which are bolted in the
 * HPT.  Assumes pages being operated on will not be stolen.  Does not
 * work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn, flags = 0;
	long slot;
	hpte_t *hptep;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = native_hpte_find(vpn);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	if (lock_tlbie)
		spin_lock_irqsave(&native_tlbie_lock, flags);
	tlbie(va, 0);
	if (lock_tlbie)
		spin_unlock_irqrestore(&native_tlbie_lock, flags);
}

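/*
 * Invalidate a single HPTE and flush the stale translation from the
 * TLB.  Interrupts are disabled for the whole sequence so we cannot be
 * re-entered while holding the HPTE lock.
 */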
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int large, int local)
{
	hpte_t *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long avpn = va >> 23;
	unsigned long flags;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (large)
		avpn &= ~0x1UL;

	local_irq_save(flags);
	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
	    || !(hpte_v & HPTE_V_VALID)) {
		native_unlock_hpte(hptep);
	} else {
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;
	}

	/* Invalidate the tlb */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}
	local_irq_restore(flags);
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	hpte_t *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		if (hpte_v & HPTE_V_VALID) {
			hptep->v = 0;
			tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
		}
	}

	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

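/*
 * Flush a batch of page table updates in one pass: for each address
 * collected in the per-cpu ppc64_tlb_batch, recompute the hash slot
 * from the Linux PTE, invalidate the matching HPTE, then issue a single
 * tlbiel/tlbie sequence for all the collected virtual addresses instead
 * of flushing page by page.
 */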
static void native_flush_hash_range(unsigned long context,
				    unsigned long number, int local)
{
	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
	int i, j;
	hpte_t *hptep;
	unsigned long hpte_v;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	/* XXX fix for large ptes */
	unsigned long large = 0;

	local_irq_save(flags);

	j = 0;
	for (i = 0; i < number; i++) {
		if (batch->addr[i] < KERNELBASE)
			vsid = get_vsid(context, batch->addr[i]);
		else
			vsid = get_kernel_vsid(batch->addr[i]);

		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
		batch->vaddr[j] = va;
		if (large)
			vpn = va >> HPAGE_SHIFT;
		else
			vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, large);
		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
		if (secondary)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;

		hptep = htab_address + slot;

		avpn = va >> 23;
		if (large)
			avpn &= ~0x1UL;

		native_lock_hpte(hptep);

		hpte_v = hptep->v;

		/* Even if we miss, we need to invalidate the TLB */
		if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
		    || !(hpte_v & HPTE_V_VALID)) {
			native_unlock_hpte(hptep);
		} else {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		}

		j++;
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbiel(batch->vaddr[i]);

		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbie(batch->vaddr[i], 0);

		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

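/* Wire the native (no-hypervisor) hash table operations into ppc_md. */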
void hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}