arch/powerpc/mm/hash_native_64.c

/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

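/*
 * A software lock bit in the first doubleword of each HPTE. The HPT is
 * stored big-endian while test_and_set_bit_lock() operates on a
 * native-endian long, so little-endian builds address the same physical
 * bit as 56+3.
 */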
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

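/*
 * tlbiel instruction for hash, set invalidation: the pre-ISA-3.0 form
 * takes only RB, which encodes the set number and the invalidation
 * scope (IS).
 */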
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
	unsigned long rb;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

	asm volatile("tlbiel %0" : : "r" (rb));
}

/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 0; /* hash format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}

static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa206(set, is);

	asm volatile("ptesync": : :"memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	/*
	 * Now invalidate the process table cache.
	 *
	 * From ISA v3.0B p. 1078:
	 *     The following forms are invalid.
	 *        * PRS=1, R=0, and RIC!=2 (The only process-scoped
	 *          HPT caching is of the Process Table.)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	asm volatile("ptesync": : :"memory");
}

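/*
 * Flush all entries from the processor's local TLB. IS=3 invalidates
 * everything; IS=2 invalidates only entries matching the current LPID.
 */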
void hash__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		tlbiel_all_isa206(POWER8_TLB_SETS, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
		tlbiel_all_isa206(POWER7_TLB_SETS, is);
	else
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

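/*
 * Local (non-broadcast) counterpart of __tlbie: builds the same VA
 * encoding but uses tlbiel, which only affects the issuing processor.
 */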
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

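/*
 * Invalidate the TLB entry for @vpn, using the local form when the
 * caller allows it and the CPU and page size support it (an active CXL
 * context forces the broadcast form). Broadcast tlbie is serialized by
 * native_tlbie_lock on processors without MMU_FTR_LOCKLESS_TLBIE.
 */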
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

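/*
 * Lock an HPTE via its software lock bit, spinning with plain reads
 * until the bit looks clear before retrying the atomic test-and-set.
 */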
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

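/*
 * Insert an HPTE into the given group. A free slot is claimed under the
 * HPTE lock, the second doubleword is written first, and setting the
 * valid bit in the first doubleword publishes the entry (and drops the
 * lock). Returns the slot number, with bit 3 set for the secondary
 * hash, or -1 if the group is full.
 */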
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

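/*
 * Evict a non-bolted HPTE from a full group so a new entry can go in.
 * The search starts at a pseudo-random slot taken from the timebase.
 * Returns the freed slot, or -1 if every entry is bolted. Does not
 * flush the TLB; callers rely on the old translation staying valid.
 */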
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

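/*
 * Update the protection bits of the HPTE at @slot, provided it still
 * maps @vpn. Returns 0 on success or -1 if no matching valid entry was
 * found (e.g. it was evicted in the meantime).
 */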
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. The base and actual page sizes
	 * of bolted entries will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}

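/*
 * Invalidate one HPTE and its TLB entry. The HPTE is only cleared if it
 * still matches @vpn, but the tlbie is issued regardless; see the
 * comment below about evicted entries.
 */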
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
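/*
 * Invalidate all the HPTEs backing a hugepage PMD. hpte_slot_array has
 * one entry per base-page-size subpage, recording whether that subpage
 * has an HPTE and which slot it hashed to.
 */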
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a TLB invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

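/*
 * Recover the page sizes, segment size and virtual page number from a
 * valid HPTE. The low VA bits not stored in the AVPN are reconstructed
 * from the slot number, since a PTEG index is a hash of the VPN.
 */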
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size   = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				hpte_v = hpte_new_to_old_v(hpte_v,
						be64_to_cpu(hptep->r));
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

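/*
 * ISA 3.0: fill in the second doubleword of the partition table entry
 * with the process table location and size so the hardware can find it.
 */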
static int native_register_proc_table(unsigned long base, unsigned long page_size,
				      unsigned long table_size)
{
	unsigned long patb1 = base << 25; /* VSID */

	patb1 |= (page_size << 5);  /* sllp */
	patb1 |= table_size;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

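/*
 * Install the bare-metal HPT operations, used when the kernel owns the
 * hash table directly rather than going through a hypervisor.
 */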
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert = native_hpte_insert;
	mmu_hash_ops.hpte_remove = native_hpte_remove;
	mmu_hash_ops.hpte_clear_all = native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		register_process_table = native_register_proc_table;
}