/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
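
/*
 * Serialises tlbie on CPUs that lack MMU_FTR_LOCKLESS_TLBIE; as noted in
 * native_hpte_clear() below, some hardware will deadlock if two processors
 * try to tlbie at the same time.
 */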
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
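
/*
 * Issue a global tlbie for one page, building the VA/AVA operand from the
 * vpn, the base and actual page sizes and the segment size.  The ISA 2.06
 * form of tlbie (PPC_TLBIE) is used on CPUs with CPU_FTR_ARCH_206.
 */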
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
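
/*
 * Local form of the above: same operand encoding, but issues tlbiel
 * (hand-encoded as a .long) so only the current CPU's TLB is invalidated.
 */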
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}
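
/*
 * Invalidate one translation, using tlbiel when a local-only flush was
 * requested and the page size supports it, otherwise a global tlbie taken
 * under native_tlbie_lock on CPUs that need the lock.
 */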
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
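
/*
 * Lock an HPTE by atomically setting HPTE_LOCK_BIT in its first doubleword,
 * spinning until the bit is free.  Rewriting the first doubleword (e.g.
 * clearing it or storing the final valid value) releases the lock.
 */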
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
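
/*
 * Try to insert a new HPTE into the given hash group: scan the group for an
 * invalid slot, take the per-HPTE lock on it, write the second doubleword,
 * then the first (which sets the valid bit and drops the lock).  Returns the
 * slot index within the group, with bit 3 set for a secondary-hash insert,
 * or -1 if the group is full.
 */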
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
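
/*
 * Evict one entry from a full hash group so a new HPTE can be inserted:
 * starting at a pseudo-random slot, find a valid, non-bolted entry, lock it
 * and clear its first doubleword.  Returns the slot index, or -1 if every
 * entry in the group is bolted.  No TLB invalidation is done here (see the
 * comment in native_hpte_updatepp()).
 */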
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
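
/*
 * Update the protection bits of an existing HPTE.  Returns 0 if the entry at
 * @slot still matches @vpn and was updated, -1 if it has been evicted.  The
 * TLB entry is flushed in either case.
 */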
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}
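
/*
 * Find the hash table slot of a bolted kernel mapping by searching the
 * primary group for @vpn.  Returns the slot number, or -1 if not found.
 */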
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page size are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
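
/*
 * Invalidate the HPTE at @slot if it still matches @vpn, then flush the
 * translation from the TLB.  The tlbie is issued even if the HPTE no longer
 * matches (see the comment below).
 */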
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}
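
/*
 * Invalidate all the HPTEs backing one hugepage: walk the per-hugepage slot
 * array, and for each valid entry recompute its vpn and hash slot, clear the
 * HPTE if it still matches, and flush the translation with tlbie.
 */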
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need a TLB invalidate for each address: the tlbie
		 * instruction compares the VA in each TLB entry with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, 0);
	}
	local_irq_restore(flags);
}
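
/*
 * Given the LP field of an HPTE and a candidate base page size, return the
 * actual page size encoded by the low LP bits, or -1 if no penc value of the
 * base page size matches.
 */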
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz      >=8KB
		 *    rrrr rrzz      >=16KB
		 *    rrrr rzzz      >=32KB
		 *    rrrr zzzz      >=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
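
/*
 * Decode one HPTE back into (vpn, base psize, actual psize, segment size).
 * Used by native_hpte_clear() at kexec time, when only the hash table
 * contents and the slot number are available.
 */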
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
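
/*
 * Hook up the hash MMU callbacks in ppc_md to the native implementations
 * in this file.
 */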
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	ppc_md.hpte_clear_all = native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}