/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#define HPTE_LOCK_BIT 3

DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)

        /*
         * We need 14 to 65 bits of va for a tlbie of a 4K page.
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And the top two bits are already ignored because we can
         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
         * of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        va &= ~(0xffffULL << 48);

        /* clear out bits after (52) [0....52.....63] */
        va &= ~((1ul << (64 - 52)) - 1);
        va |= mmu_psize_defs[apsize].sllp << 6;
        asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                     : "memory");

        /* We need 14 to 14 + i bits of va */
        penc = mmu_psize_defs[psize].penc[apsize];
        va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
        if (psize != apsize) {
                /*
                 * MPSS, 64K base page size and 16MB large page size
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
        asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                     : "memory");
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        va &= ~(0xffffULL << 48);

        /* clear out bits after (52) [0....52.....63] */
        va &= ~((1ul << (64 - 52)) - 1);
        va |= mmu_psize_defs[apsize].sllp << 6;
        asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
                     : : "r"(va) : "memory");

        /* We need 14 to 14 + i bits of va */
        penc = mmu_psize_defs[psize].penc[apsize];
        va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
        if (psize != apsize) {
                /*
                 * MPSS, 64K base page size and 16MB large page size
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
        asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                     : : "r"(va) : "memory");
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)

        unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);

        asm volatile("ptesync": : :"memory");
        __tlbiel(vpn, psize, apsize, ssize);
        asm volatile("ptesync": : :"memory");

        __tlbie(vpn, psize, apsize, ssize);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");

        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
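
/*
 * Usage sketch (restating how the callers below use this wrapper, not new
 * behaviour): the HPTE update and invalidate paths issue
 *
 *      tlbie(vpn, psize, actual_psize, ssize, local);
 *
 * while bolted updates pass local = 0 so the invalidation is always
 * broadcast. A local request only takes effect when the CPU has
 * MMU_FTR_TLBIEL and mmu_psize_defs[psize].tlbiel allows it.
 */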
static inline void native_lock_hpte(struct hash_pte *hptep)

        unsigned long *word = &hptep->v;

        if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
        while (test_bit(HPTE_LOCK_BIT, word))

static inline void native_unlock_hpte(struct hash_pte *hptep)

        unsigned long *word = &hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
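
/*
 * A minimal usage sketch (illustration only): callers take the per-HPTE
 * lock, re-check the entry under the lock, modify it, and drop the lock:
 *
 *      native_lock_hpte(hptep);
 *      if (HPTE_V_COMPARE(hptep->v, want_v) && (hptep->v & HPTE_V_VALID))
 *              hptep->r = new_r;       (new_r is a placeholder value)
 *      native_unlock_hpte(hptep);
 *
 * native_hpte_insert() and native_hpte_updatepp() below follow this
 * pattern; writing the first dword of a valid entry also releases the
 * lock, because the lock bit lives in hptep->v.
 */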
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                               unsigned long pa, unsigned long rflags,
                               unsigned long vflags, int psize, int apsize, int ssize)

        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (! (hptep->v & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (! (hptep->v & HPTE_V_VALID))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);

        /* Guarantee the second dword is visible before the valid bit */

        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
static long native_hpte_remove(unsigned long hpte_group)

        struct hash_pte *hptep;
        unsigned long hpte_v;

        DBG_LOW(" remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        /* Invalidate the hpte. NOTE: this also unlocks it */
static inline int __hpte_actual_psize(unsigned int lp, int psize)

        /* start from 1 ignoring MMU_PAGE_4K */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {
                if (mmu_psize_defs[psize].penc[i] == -1)

                /*
                 * encoding bits per actual page size
                 *        PTE LP     actual page size
                 */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;

                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[psize].penc[i])
static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)

        /* Look at the 8 bit LP value */
        unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (!(hptep->v & HPTE_V_VALID))

        /* First check if it is large page */
        if (!(hptep->v & HPTE_V_LARGE))

        return __hpte_actual_psize(lp, psize);
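
/*
 * Worked example (a sketch of the matching rule above, not extra code):
 * to test whether an entry's LP field encodes a 64K actual page size,
 * __hpte_actual_psize() effectively computes
 *
 *      shift = mmu_psize_defs[MMU_PAGE_64K].shift - LP_SHIFT;
 *      mask  = (1 << shift) - 1;
 *
 * and reports MMU_PAGE_64K when (lp & mask) matches
 * mmu_psize_defs[psize].penc[MMU_PAGE_64K], i.e. only the low "shift"
 * bits of LP carry the encoding for that size.
 */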
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int psize, int ssize,
                                 int local)

        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;

        want_v = hpte_encode_avpn(vpn, psize, ssize);

        DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        native_lock_hpte(hptep);

        actual_psize = hpte_actual_psize(hptep, psize);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (actual_psize < 0) {
                actual_psize = psize;

        if (!HPTE_V_COMPARE(hpte_v, want_v)) {
                DBG_LOW(" -> miss\n");

        DBG_LOW(" -> hit\n");
        /* Update the HPTE */
        hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));

        native_unlock_hpte(hptep);

        /* Ensure it is out of the tlb too. */
        tlbie(vpn, psize, actual_psize, ssize, local);
static long native_hpte_find(unsigned long vpn, int psize, int ssize)

        struct hash_pte *hptep;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)

        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;
        actual_psize = hpte_actual_psize(hptep, psize);
        if (actual_psize < 0)
                actual_psize = psize;

        /* Update the HPTE */
        hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                (newpp & (HPTE_R_PP | HPTE_R_N));

        /* Ensure it is out of the tlb too. */
        tlbie(vpn, psize, actual_psize, ssize, 0);
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int psize, int ssize, int local)

        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;

        local_irq_save(flags);

        DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, psize, ssize);
        native_lock_hpte(hptep);

        actual_psize = hpte_actual_psize(hptep, psize);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (actual_psize < 0) {
                actual_psize = psize;
                native_unlock_hpte(hptep);

        if (!HPTE_V_COMPARE(hpte_v, want_v))
                native_unlock_hpte(hptep);

        /* Invalidate the hpte. NOTE: this also unlocks it */

        /* Invalidate the TLB */
        tlbie(vpn, psize, actual_psize, ssize, local);

        local_irq_restore(flags);
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)

        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = hpte->v;
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (!(hpte_v & HPTE_V_LARGE)) {
                a_size = MMU_PAGE_4K;

        for (size = 0; size < MMU_PAGE_COUNT; size++) {
                /* valid entries have a shift value */
                if (!mmu_psize_defs[size].shift)

                a_size = __hpte_actual_psize(lp, size);

        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)

        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;

                /* We can find more bits from the pteg value */
                vpi = (vsid ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;

                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;

                vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;

                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
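
/*
 * Worked equation (restating the 256M case above): the AVPN supplies
 * seg_off from bit 23 upwards, the hash relation recovers the missing
 * low bits as vpi = (vsid ^ pteg) & htab_hash_mask, and the virtual
 * page number is rebuilt as
 *
 *      vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT)
 *
 * The 1T case differs only in the hash mixing and in using SID_SHIFT_1T.
 */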
/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)

        unsigned long vpn = 0;
        unsigned long slot, slots, flags;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        local_irq_save(flags);

        /*
         * we take the tlbie lock and hold it. Some hardware will
         * deadlock if we try to tlbie from two processors at once.
         */
        raw_spin_lock(&native_tlbie_lock);

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */

                /*
                 * Call __tlbie() here rather than tlbie() since we
                 * already hold the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        __tlbie(vpn, psize, apsize, ssize);

        asm volatile("eieio; tlbsync; ptesync":::"memory");
        raw_spin_unlock(&native_tlbie_lock);
        local_irq_restore(flags);
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)

        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                } pte_iterate_hashed_end();

        if (mmu_has_feature(MMU_FTR_TLBIEL) &&
            mmu_psize_defs[psize].tlbiel && local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();

                asm volatile("ptesync":::"memory");

                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();

                asm volatile("eieio; tlbsync; ptesync":::"memory");

                raw_spin_unlock(&native_tlbie_lock);

        local_irq_restore(flags);
void __init hpte_init_native(void)

        ppc_md.hpte_invalidate = native_hpte_invalidate;
        ppc_md.hpte_updatepp = native_hpte_updatepp;
        ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
        ppc_md.hpte_insert = native_hpte_insert;
        ppc_md.hpte_remove = native_hpte_remove;
        ppc_md.hpte_clear_all = native_hpte_clear;
        ppc_md.flush_hash_range = native_flush_hash_range;
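
/*
 * Note (a hedged sketch of usage outside this file): the generic hash MMU
 * code calls these hooks through ppc_md, e.g. the hash fault path installs
 * translations via ppc_md.hpte_insert() and tears them down via
 * ppc_md.hpte_invalidate(); hpte_init_native() merely selects the
 * bare-metal implementations defined above.
 */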