/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
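/*
 * HPTE_LOCK_BIT is a software lock bit carved out of the HPTE's first
 * ("valid") doubleword.  The bit number differs between the two definitions
 * above because the HPTE is stored big-endian in memory while the generic
 * bitops work on a native-endian unsigned long, so the same in-memory bit
 * lands at a different index on a little-endian kernel.
 */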
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
        unsigned long rb;

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

        asm volatile("tlbiel %0" : : "r" (rb));
}
/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
                                          unsigned int pid,
                                          unsigned int ric, unsigned int prs)
{
        unsigned long rb;
        unsigned long rs;
        unsigned int r = 0; /* hash format */

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
                     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
                     : "memory");
}
static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        for (set = 0; set < num_sets; set++)
                tlbiel_hash_set_isa206(set, is);

        asm volatile("ptesync": : :"memory");
}
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and any caching of partition table
         * entries. Then flush the remaining sets of the TLB. Hash mode uses
         * partition scoped TLB translations.
         */
        tlbiel_hash_set_isa300(0, is, 0, 2, 0);
        for (set = 1; set < num_sets; set++)
                tlbiel_hash_set_isa300(set, is, 0, 0, 0);

        /*
         * Now invalidate the process table cache.
         *
         * From ISA v3.0B p. 1078:
         *     The following forms are invalid.
         *     * PRS=1, R=0, and RIC!=2 (The only process-scoped
         *       HPT caching is of the Process Table.)
         */
        tlbiel_hash_set_isa300(0, is, 0, 2, 1);

        asm volatile("ptesync": : :"memory");
}
void hash__tlbiel_all(unsigned int action)
{
        unsigned int is;

        switch (action) {
        case TLB_INVAL_SCOPE_GLOBAL:
                is = 3;
                break;
        case TLB_INVAL_SCOPE_LPID:
                is = 2;
                break;
        default:
                BUG();
        }

        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
                tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
        else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
                tlbiel_all_isa206(POWER8_TLB_SETS, is);
        else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
                tlbiel_all_isa206(POWER7_TLB_SETS, is);
        else
                WARN(1, "%s called on pre-POWER7 CPU\n", __func__);

        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
                                     int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /*
         * We need 14 to 65 bits of va for a tlbie of 4K page
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And top two bits are already ignored because we can
         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
         * of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
                va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        return va;
}
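/*
 * CPU_FTR_P9_TLBIE_BUG flags a POWER9 tlbie ordering erratum.  fixup_tlbie()
 * below works around it by issuing an extra ptesync and repeating the last
 * tlbie before the callers' closing eieio/tlbsync/ptesync sequence; see the
 * callers in tlbie() and native_flush_hash_range().
 */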
static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie */
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
        }
}
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long rb;

        rb = ___tlbie(vpn, psize, apsize, ssize);
        trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
                va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        trace_tlbie(0, 1, va, 0, 0, 0, 0);
}
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
{
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

        if (use_local)
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
        if (use_local) {
                __tlbiel(vpn, psize, apsize, ssize);
                asm volatile("ptesync": : :"memory");
        } else {
                __tlbie(vpn, psize, apsize, ssize);
                fixup_tlbie(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
}
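/*
 * Per-HPTE locking: native_lock_hpte() atomically sets HPTE_LOCK_BIT in the
 * HPTE's first doubleword and spins until it wins; native_unlock_hpte()
 * clears the bit with release semantics.  Several paths below release the
 * lock implicitly instead, by rewriting hptep->v (setting it to 0 to
 * invalidate, or storing the new valid bits on insert), which overwrites
 * the lock bit.
 */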
static inline void native_lock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
                spin_begin();
                while (test_bit(HPTE_LOCK_BIT, word))
                        spin_cpu_relax();
                spin_end();
        }
}
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
}
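/*
 * Insert an HPTE into the given hash group: scan the group's slots for an
 * invalid (free) entry, write the second doubleword first, then publish the
 * entry by storing the first doubleword with HPTE_V_VALID set (which also
 * drops the per-HPTE lock).  Returns the slot number within the group, with
 * bit 3 set if the caller passed HPTE_V_SECONDARY (i.e. the entry went into
 * the secondary hash group), or -1 if the group is full.
 */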
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                               unsigned long pa, unsigned long rflags,
                               unsigned long vflags, int psize, int apsize, int ssize)
{
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
        int i;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);
        }

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }

                hptep++;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
                hpte_v = hpte_old_to_new_v(hpte_v);
        }

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static long native_hpte_remove(unsigned long hpte_group)
{
        struct hash_pte *hptep;
        int i;
        int slot_offset;
        unsigned long hpte_v;

        DBG_LOW("    remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
                        native_unlock_hpte(hptep);
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        /* Invalidate the hpte. NOTE: this also unlocks it */
        hptep->v = 0;

        return i;
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = hpte_get_old_v(hptep);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
        } else {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        ret = -1;
                } else {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
        }

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);

        return ret;
}
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
        struct hash_pte *hptep;
        unsigned long hash;
        unsigned long i;
        long slot;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = hpte_get_old_v(hptep);
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
                        return slot;
                ++slot;
        }

        return -1;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries base and
         * actual page size will be same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
}
/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                return -ENOENT;

        hptep = htab_address + slot;

        VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

        /* Invalidate the hpte */
        hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, psize, psize, ssize, 0);
        return 0;
}
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;

        local_irq_save(flags);

        DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        hpte_v = hpte_get_old_v(hptep);

        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        hptep->v = 0;
                else
                        native_unlock_hpte(hptep);
        }
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
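/*
 * Invalidate all the HPTEs backing a hugepage PMD.  hpte_slot_array holds
 * one entry per base-page-sized chunk of the huge page: hpte_valid() says
 * whether that chunk currently has an HPTE, and hpte_hash_index() returns
 * which hash slot it was inserted into, so each entry can be located,
 * cleared and have its translation flushed.
 */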
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        int i;
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                hpte_v = hpte_get_old_v(hptep);

                /* Even if we miss, we need to invalidate the TLB */
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                        /* recheck with locks held */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                                /*
                                 * Invalidate the hpte. NOTE: this also unlocks it
                                 */
                                hptep->v = 0;
                        } else
                                native_unlock_hpte(hptep);
                }
                /*
                 * We need to do a tlb invalidate for each address; the tlbie
                 * instruction compares entry_VA in the TLB with the VA
                 * specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        }
        local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        WARN(1, "%s called without THP support\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
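/*
 * Decode a raw HPTE back into the (vpn, psize, apsize, ssize) it maps.
 * Used by native_hpte_clear() at kexec/crash time, when there is no Linux
 * PTE to consult, so everything has to be reconstructed from the AVPN, the
 * LP bits and the slot (PTEG) the entry was found in.
 */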
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
                hpte_r = hpte_new_to_old_r(hpte_r);
        }
        if (!(hpte_v & HPTE_V_LARGE)) {
                size   = MMU_PAGE_4K;
                a_size = MMU_PAGE_4K;
        } else {
                size   = hpte_page_sizes[lp] & 0xf;
                a_size = hpte_page_sizes[lp] >> 4;
        }
        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid    = avpn >> 5;
                /* We can find more bits from the pteg value */
                if (shift < 23) {
                        vpi = (vsid ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid    = avpn >> 17;
                if (shift < 23) {
                        vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        default:
                *vpn = size = 0;
        }
        *psize  = size;
        *apsize = a_size;
}
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. POWER5 and newer not taking the lock is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call ___tlbie() here rather than tlbie() since we can't
                 * take the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        ___tlbie(vpn, psize, apsize, ssize);
                }
        }

        asm volatile("eieio; tlbsync; ptesync":::"memory");
}
/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
        unsigned long vpn = 0;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
        unsigned int use_local;

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
                mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                vpn = batch->vpn[i];
                pte = batch->pte[i];

                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                continue;
                        /* lock and try again */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else
                                /* Invalidate the hpte. NOTE: this also unlocks it */
                                hptep->v = 0;

                } pte_iterate_hashed_end();
        }

        if (use_local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                asm volatile("ptesync":::"memory");
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                /*
                 * Just do one more with the last used values.
                 */
                fixup_tlbie(vpn, psize, psize, ssize);
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        }

        local_irq_restore(flags);
}
void __init hpte_init_native(void)
{
        mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
        mmu_hash_ops.hpte_insert = native_hpte_insert;
        mmu_hash_ops.hpte_remove = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all = native_hpte_clear;
        mmu_hash_ops.flush_hash_range = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}