/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
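/*
 * Both values name the same bit in memory. The HPTE is stored
 * big-endian, so the bit a big-endian kernel sees as bit 3 of the
 * native long (memory byte 7) becomes bit 56+3 when a little-endian
 * kernel loads the same dword.
 */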
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
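/*
 * Issue a global (broadcast) tlbie for one page, building the RB
 * operand from the virtual page number and the base/actual page size
 * encodings. Callers provide the surrounding ptesync/eieio/tlbsync
 * and take native_tlbie_lock where the hardware requires it.
 */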
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we need
			 * 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
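/*
 * In the 4K case above, the low 12 bits freed by clearing the page
 * offset are reused as operand fields: bits 5-7 carry the SLLP
 * encoding of the actual page size and bits 8-9 the segment size (B)
 * field. __tlbiel below builds the same operand but issues the
 * processor-local form of the instruction.
 */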
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		/* tlbiel encoded by hand; %0 goes in RB, bit 21 is the L field */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we need
			 * 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}
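/*
 * Flush one page from the TLB, choosing between a processor-local
 * tlbiel and a broadcast tlbie. A local flush is only possible when
 * the caller asked for one, the CPU has MMU_FTR_TLBIEL and the page
 * size is flushable with tlbiel; a broadcast flush must additionally
 * take native_tlbie_lock on hardware that cannot handle concurrent
 * tlbies.
 */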
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
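/*
 * Per-HPTE locking uses a software bit in the first dword of the
 * entry itself. The spin loop below retries the atomic test_and_set
 * only after a plain read sees the bit clear, so waiters keep the
 * cacheline shared instead of bouncing it while the lock is held.
 */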
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
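/*
 * Insert an HPTE into the first free slot of the given group. The
 * returned cookie encodes the slot within the PTEG in its low 3 bits
 * and, in bit 3, whether the secondary hash was used: an insert into
 * entry 5 of a secondary group returns 5 | (1 << 3) = 13. A full
 * group yields -1 and the caller is expected to evict an entry via
 * hpte_remove and retry.
 */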
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
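/*
 * Evict one non-bolted entry from a full group so the caller can
 * retry its insert. The scan starts at a pseudo-random offset taken
 * from the timebase, spreading evictions across the group rather than
 * always sacrificing slot 0.
 */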
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
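/*
 * Update the protection bits of an existing mapping, returning -1
 * (without touching the entry) if the slot no longer holds the
 * expected HPTE. The TLB is flushed either way; see the comment below
 * about evicted-but-still-cached translations.
 */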
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}
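/*
 * Look up the slot of a bolted mapping by scanning its primary group
 * for a valid entry whose AVPN matches. For example, with
 * htab_hash_mask = 0xfffff a hash of 0x12345 selects PTEG 0x12345 and
 * the scan covers slots 0x91a28..0x91a2f (the PTEG index times
 * HPTES_PER_GROUP, which is 8). Returns -1 if no entry matches.
 */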
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
			~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries base and
	 * actual page size will be same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
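/*
 * Tear down one mapping: clear the valid bit of the matching HPTE (if
 * the slot still holds the translation we expect) and flush it from
 * the TLB. The whole sequence runs with interrupts disabled.
 */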
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}
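/*
 * Invalidate all HPTEs backing one hugepage. hpte_slot_array records,
 * for each base-page-sized sub-page, whether an HPTE exists and which
 * hash group it went into; each valid entry is looked up and cleared,
 * and a single tlbie for the 16M actual page size then flushes the
 * whole range.
 */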
static void native_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i;
	int lock_tlbie;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
	}
	/*
	 * Since this is a hugepage, we just need a single tlbie.
	 * use the last vpn.
	 */
	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	__tlbie(vpn, psize, actual_psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
}
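/*
 * Decode the actual page size from the LP bits of an HPTE, given the
 * base page size of its segment. For example, when probing whether lp
 * encodes a 64K actual page (shift 16), shift = 16 - LP_SHIFT = 4 and
 * mask = 0xf, so only the low four LP bits are compared against
 * mmu_psize_defs[psize].penc[MMU_PAGE_64K].
 */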
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	     >= 8KB
		 *    rrrr rrzz	     >= 16KB
		 *    rrrr rzzz	     >= 32KB
		 *    rrrr zzzz	     >= 64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
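/*
 * Reconstruct page size, segment size and vpn from a raw HPTE and its
 * slot. The AVPN only stores the upper virtual-address bits, so the
 * missing low bits of the segment offset are recovered by inverting
 * the hash: for a 256M segment the primary hash is
 * (vsid ^ (seg_off >> shift)) & htab_hash_mask, hence
 * vpi = (vsid ^ pteg) & htab_hash_mask yields those bits.
 */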
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}
/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
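/*
 * Install the native (bare-metal) HPT operations. Hypervisor-backed
 * platforms such as pseries install their own hcall-mediated
 * implementations of these ppc_md hooks instead.
 */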
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}