/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT	3
#else
#define HPTE_LOCK_BIT	(56+3)
#endif
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
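
/*
 * Assemble the variable-coded operand for a tlbie instruction from the
 * virtual page number and page/segment size encodings, then issue a
 * global TLB invalidation.
 */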
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
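
/*
 * Local variant of the above: same operand encoding, but issues tlbiel,
 * which invalidates the translation on the executing CPU only.
 */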
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after(52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}
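
/*
 * Invalidate one page translation, choosing tlbiel when a local-only
 * flush is possible and falling back to a (possibly lock-protected)
 * global tlbie otherwise.
 */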
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
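
/*
 * Lock an HPTE by atomically setting the software lock bit in its
 * first doubleword, spinning until the bit is free.
 */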
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
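
/*
 * Insert an HPTE into the given group: claim a free slot, encode the
 * entry and set the valid bit last so the entry is never observed
 * half-written.
 */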
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
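
/*
 * Evict a random non-bolted entry from a full group. Note that this
 * does not flush the stale translation from the TLB; callers rely on
 * that behaviour (see the comments in the update/invalidate paths).
 */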
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
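
/*
 * Update the protection bits of an existing HPTE, then flush the old
 * translation from the TLB unless the caller indicated a nohpte fault.
 */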
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}
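
/*
 * Find the slot of the valid HPTE matching vpn. Only the primary
 * group is searched, which is sufficient for bolted mappings.
 */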
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries base and
	 * actual page size will be same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
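
/*
 * Invalidate a single HPTE and flush the translation from the TLB,
 * with interrupts disabled around the lock/clear sequence.
 */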
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
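/*
 * Invalidate all HPTEs backing one hugepage mapping by walking the
 * per-PMD hpte_slot_array and clearing each valid entry.
 */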
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do tlb invalidate for all the address, tlbie
		 * instruction compares entry_VA in tlb with the VA specified
		 * here
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
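
/*
 * Decode an HPTE back into its VPN and page/segment sizes. Used by
 * native_hpte_clear(), which must recover the VA to flush from the
 * entry itself.
 */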
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size   = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize	= size;
	*apsize = a_size;
}
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. POWER5 and newer not taking the lock is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				hpte_v = hpte_new_to_old_v(hpte_v,
						be64_to_cpu(hptep->r));
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
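
/*
 * Program the second doubleword of the partition table with the
 * process table base, page size encoding and size (ISA 3.00 hash).
 */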
static int native_register_proc_table(unsigned long base, unsigned long page_size,
				      unsigned long table_size)
{
	unsigned long patb1 = base << 25; /* VSID */

	patb1 |= (page_size << 5);  /* sllp */
	patb1 |= table_size;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}
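
/*
 * Register the native (bare-metal) implementations of the hash MMU
 * operations.
 */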
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range	= native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		register_process_table = native_register_proc_table;
}