2 * ultra.S: Don't expand these all over the place...
4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
8 #include <asm/pgtable.h>
10 #include <asm/spitfire.h>
11 #include <asm/mmu_context.h>
15 #include <asm/thread_info.h>
16 #include <asm/cacheflush.h>
17 #include <asm/hypervisor.h>
18 #include <asm/cpudata.h>
20 /* Basically, most of the Spitfire vs. Cheetah madness
21 * has to do with the fact that Cheetah does not support
22 * IMMU flushes out of the secondary context. Someone needs
23 * to throw a south lake birthday party for the folks
24 * in Microelectronics who refused to fix this shit.
27 /* This file is meant to be read efficiently by the CPU, not humans.
28 * Take care not to break this for anyone...
/* __flush_tlb_mm -- flush all user TLB entries for one mmu context
 * (Spitfire version; boot-time patching overwrites this body with the
 * Cheetah or hypervisor variant, so the "19 insns" size is a contract).
 * NOTE(review): listing is partially elided here; several instructions
 * between the visible ones are missing -- verify against full source.
 */
33 __flush_tlb_mm: /* 19 insns */
34 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
35 ldxa [%o1] ASI_DMMU, %g2	! %g2 = current secondary context
37 bne,pn %icc, __spitfire_flush_tlb_mm_slow	! ctx mismatch -> slow path (compare elided)
39 stxa %g0, [%g3] ASI_DMMU_DEMAP	! demap whole context from D-TLB
40 stxa %g0, [%g3] ASI_IMMU_DEMAP	! demap whole context from I-TLB
41 sethi %hi(KERNBASE), %g3	! kernel vaddr, presumably for a trailing "flush" (elided)
/* __flush_tlb_page -- flush one (context, vaddr) translation
 * (Spitfire version; patched at boot, must stay 22 insns).
 * NOTE(review): partially elided listing -- verify against full source.
 */
56 .globl __flush_tlb_page
57 __flush_tlb_page: /* 22 insns */
58 /* %o0 = context, %o1 = vaddr */
60 andn %g7, PSTATE_IE, %g2	! build interrupts-off pstate (%g7 = saved pstate, read elided)
62 mov SECONDARY_CONTEXT, %o4
63 ldxa [%o4] ASI_DMMU, %g2	! save current secondary ctx (%g2 reuse: IE value consumed by elided wrpr)
64 stxa %o0, [%o4] ASI_DMMU	! temporarily install target ctx
69 stxa %g0, [%o3] ASI_IMMU_DEMAP	! demap page from I-TLB (%o3 = demap vaddr, setup elided)
70 1: stxa %g0, [%o3] ASI_DMMU_DEMAP	! demap page from D-TLB
72 stxa %g2, [%o4] ASI_DMMU	! restore original secondary ctx
73 sethi %hi(KERNBASE), %o4
76 wrpr %g7, 0x0, %pstate	! restore saved pstate (re-enable interrupts)
/* __flush_tlb_pending -- flush a batch of vaddrs for one context
 * (Spitfire version; patched at boot, must stay 27 insns).
 * NOTE(review): partially elided listing -- loop branch back to 1:
 * and per-entry load are not visible here.
 */
83 .globl __flush_tlb_pending
84 __flush_tlb_pending: /* 27 insns */
85 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
88 andn %g7, PSTATE_IE, %g2	! build interrupts-off pstate (%g7 = saved pstate)
90 mov SECONDARY_CONTEXT, %o4
91 ldxa [%o4] ASI_DMMU, %g2	! save current secondary ctx
92 stxa %o0, [%o4] ASI_DMMU	! install target ctx
93 1: sub %o1, (1 << 3), %o1	! step back one 8-byte vaddr slot
99 stxa %g0, [%o3] ASI_IMMU_DEMAP	! demap page from I-TLB
100 2: stxa %g0, [%o3] ASI_DMMU_DEMAP	! demap page from D-TLB
104 stxa %g2, [%o4] ASI_DMMU	! restore secondary ctx
105 sethi %hi(KERNBASE), %o4
108 wrpr %g7, 0x0, %pstate	! restore saved pstate
/* __flush_tlb_kernel_range -- flush kernel (nucleus) translations for
 * [start, end) (Spitfire version; patched at boot, must stay 31 insns).
 * NOTE(review): partially elided; the range-size test feeding %o4 and
 * the loop control around 1: are not visible.
 */
115 .globl __flush_tlb_kernel_range
116 __flush_tlb_kernel_range: /* 31 insns */
117 /* %o0=start, %o1=end */
122 brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow	! large range -> walk the TLB instead
123 sethi %hi(PAGE_SIZE), %o4
125 or %o0, 0x20, %o0 ! Nucleus
126 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP	! demap-page in nucleus context
127 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
131 2: sethi %hi(KERNBASE), %o3	! presumably for trailing "flush" (elided)
/* Spitfire slow paths.
 * __spitfire_flush_tlb_kernel_range_slow walks every I-TLB and D-TLB
 * entry by index (%o4), invalidating all except locked (_PAGE_L_4U)
 * entries.  __spitfire_flush_tlb_mm_slow does a full context demap
 * with interrupts disabled.
 * NOTE(review): loop-control and compare instructions are elided.
 */
150 __spitfire_flush_tlb_kernel_range_slow:
152 1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3	! read I-TLB entry %o4
153 andcc %o3, 0x40, %g0 /* _PAGE_L_4U */	! locked entry? (skip branch elided)
155 mov TLB_TAG_ACCESS, %o3
156 stxa %g0, [%o3] ASI_IMMU	! clear I-MMU tag access
157 stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS	! invalidate the entry
159 2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3	! same dance for the D-TLB
162 mov TLB_TAG_ACCESS, %o3
163 stxa %g0, [%o3] ASI_DMMU
164 stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
172 __spitfire_flush_tlb_mm_slow:
174 wrpr %g1, PSTATE_IE, %pstate	! disable interrupts (%g1 = saved pstate, read elided)
175 stxa %o0, [%o1] ASI_DMMU	! install target ctx in SECONDARY_CONTEXT
176 stxa %g0, [%g3] ASI_DMMU_DEMAP	! demap whole context
177 stxa %g0, [%g3] ASI_IMMU_DEMAP
179 stxa %g2, [%o1] ASI_DMMU	! restore previous ctx
180 sethi %hi(KERNBASE), %o1
186 * The following code flushes one page_size worth.
188 .section .kprobes.text, "ax"
190 .globl __flush_icache_page
/* __flush_icache_page -- flush the I-cache for one physical page by
 * stepping over its kernel linear mapping in 32-byte strides.
 * NOTE(review): the actual "flush" instruction and loop branch are
 * elided from this listing.
 */
191 __flush_icache_page: /* %o0 = phys_page */
192 srlx %o0, PAGE_SHIFT, %o0	! page-align the physical address
193 sethi %hi(PAGE_OFFSET), %g1
194 sllx %o0, PAGE_SHIFT, %o0
195 sethi %hi(PAGE_SIZE), %g2	! %g2 = bytes remaining
196 ldx [%g1 + %lo(PAGE_OFFSET)], %g1	! %g1 = PAGE_OFFSET (linear-map base)
198 1: subcc %g2, 32, %g2	! one 32-byte step per iteration
204 #ifdef DCACHE_ALIASING_POSSIBLE
206 #if (PAGE_SHIFT != 13)
207 #error only page shift of 13 is supported by dcache flush
210 #define DTAG_MASK 0x3
212 /* This routine is Spitfire specific so the hardcoded
213 * D-cache size and line-size are OK.
/* __flush_dcache_page -- walk the 16K direct-mapped, 32-byte-line
 * Spitfire D-cache from the top down, invalidating every valid line
 * whose tag matches this page; optionally chain to the I-cache flush.
 * NOTE(review): loop back-branch and membar/retl are elided.
 */
216 .globl __flush_dcache_page
217 __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
218 sethi %hi(PAGE_OFFSET), %g1
219 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
220 sub %o0, %g1, %o0 ! physical address
221 srlx %o0, 11, %o0 ! make D-cache TAG
222 sethi %hi(1 << 14), %o2 ! D-cache size
223 sub %o2, (1 << 5), %o2 ! D-cache line size
224 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
225 andcc %o3, DTAG_MASK, %g0 ! Valid?
226 be,pn %xcc, 2f ! Nope, branch
227 andn %o3, DTAG_MASK, %o3 ! Clear valid bits
228 cmp %o3, %o0 ! TAG match?
229 bne,pt %xcc, 2f ! Nope, branch
231 stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
234 sub %o2, (1 << 5), %o2 ! D-cache line size
236 /* The I-cache does not snoop local stores so we
237 * better flush that too when necessary.
239 brnz,pt %o1, __flush_icache_page	! tail-branch to I-cache flush when requested
244 #endif /* DCACHE_ALIASING_POSSIBLE */
248 /* Cheetah specific versions, patched at boot time. */
/* __cheetah_flush_tlb_mm -- context flush via PRIMARY_CONTEXT (Cheetah
 * cannot do IMMU demap from secondary context, see top-of-file note).
 * The nucleus page-size bits of the context register are preserved.
 * NOTE(review): partially elided listing; must stay 19 insns.
 */
249 __cheetah_flush_tlb_mm: /* 19 insns */
251 andn %g7, PSTATE_IE, %g2	! %g7 = saved pstate (read elided)
252 wrpr %g2, 0x0, %pstate	! interrupts off
254 mov PRIMARY_CONTEXT, %o2
256 ldxa [%o2] ASI_DMMU, %g2	! save current primary ctx
257 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
258 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1	! isolate nucleus page-size fields
259 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
260 stxa %o0, [%o2] ASI_DMMU	! install target ctx
261 stxa %g0, [%g3] ASI_DMMU_DEMAP	! demap whole context
262 stxa %g0, [%g3] ASI_IMMU_DEMAP
263 stxa %g2, [%o2] ASI_DMMU	! restore primary ctx
264 sethi %hi(KERNBASE), %o2
268 wrpr %g7, 0x0, %pstate	! restore interrupts
/* __cheetah_flush_tlb_page -- single-page flush via PRIMARY_CONTEXT,
 * preserving nucleus page-size fields.  Must stay 22 insns.
 * NOTE(review): demap-vaddr setup for %o3 is elided.
 */
270 __cheetah_flush_tlb_page: /* 22 insns */
271 /* %o0 = context, %o1 = vaddr */
273 andn %g7, PSTATE_IE, %g2	! %g7 = saved pstate (read elided)
274 wrpr %g2, 0x0, %pstate	! interrupts off
276 mov PRIMARY_CONTEXT, %o4
277 ldxa [%o4] ASI_DMMU, %g2	! save current primary ctx
278 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
279 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3	! isolate nucleus page-size fields
280 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
281 stxa %o0, [%o4] ASI_DMMU	! install target ctx
285 stxa %g0, [%o3] ASI_IMMU_DEMAP	! demap page from I-TLB
286 1: stxa %g0, [%o3] ASI_DMMU_DEMAP	! demap page from D-TLB
288 stxa %g2, [%o4] ASI_DMMU	! restore primary ctx
289 sethi %hi(KERNBASE), %o4
293 wrpr %g7, 0x0, %pstate	! restore interrupts
/* __cheetah_flush_tlb_pending -- batch vaddr flush via PRIMARY_CONTEXT,
 * preserving nucleus page-size fields.  Must stay 27 insns.
 * NOTE(review): per-entry load and loop back-branch are elided.
 */
295 __cheetah_flush_tlb_pending: /* 27 insns */
296 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
299 andn %g7, PSTATE_IE, %g2	! %g7 = saved pstate (read elided)
300 wrpr %g2, 0x0, %pstate	! interrupts off
302 mov PRIMARY_CONTEXT, %o4
303 ldxa [%o4] ASI_DMMU, %g2	! save current primary ctx
304 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
305 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3	! isolate nucleus page-size fields
306 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
307 stxa %o0, [%o4] ASI_DMMU	! install target ctx
308 1: sub %o1, (1 << 3), %o1	! step back one 8-byte vaddr slot
313 stxa %g0, [%o3] ASI_IMMU_DEMAP	! demap page from I-TLB
314 2: stxa %g0, [%o3] ASI_DMMU_DEMAP	! demap page from D-TLB
318 stxa %g2, [%o4] ASI_DMMU	! restore primary ctx
319 sethi %hi(KERNBASE), %o4
323 wrpr %g7, 0x0, %pstate	! restore interrupts
/* __cheetah_flush_tlb_kernel_range -- nucleus-context range flush for
 * [start, end); for large ranges (path at the end) it falls back to a
 * demap-all.  Must stay 31 insns.
 * NOTE(review): range-size test and loop control are elided.
 */
325 __cheetah_flush_tlb_kernel_range: /* 31 insns */
326 /* %o0=start, %o1=end */
332 sethi %hi(PAGE_SIZE), %o4
334 or %o0, 0x20, %o0 ! Nucleus
335 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP	! per-page demap in nucleus ctx
336 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
340 2: sethi %hi(KERNBASE), %o3
345 stxa %g0, [%o4] ASI_DMMU_DEMAP	! large-range fallback: demap-all (setup of %o4 elided)
347 stxa %g0, [%o4] ASI_IMMU_DEMAP
359 #ifdef DCACHE_ALIASING_POSSIBLE
/* __cheetah_flush_dcache_page -- invalidate one page's worth of
 * D-cache lines with displacement stores through
 * ASI_DCACHE_INVALIDATE, 32 bytes per iteration.  Must stay 11 insns.
 * NOTE(review): loop back-branch and %o0 setup are elided.
 */
360 __cheetah_flush_dcache_page: /* 11 insns */
361 sethi %hi(PAGE_OFFSET), %g1
362 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
364 sethi %hi(PAGE_SIZE), %o4	! %o4 = bytes remaining
365 1: subcc %o4, (1 << 5), %o4	! one 32-byte D-cache line per step
366 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
370 retl /* I-cache flush never needed on Cheetah, see callers. */
372 #endif /* DCACHE_ALIASING_POSSIBLE */
374 /* Hypervisor specific versions, patched at boot time. */
/* __hypervisor_tlb_tl0_error -- common fatal-error reporter for TL0
 * hypervisor TLB calls (argument setup elided from this listing). */
375 __hypervisor_tlb_tl0_error:
378 call hypervisor_tlbop_error
/* __hypervisor_flush_tlb_mm -- sun4v: demap an entire mmu context via
 * the HV_FAST_MMU_DEMAP_CTX fast trap.  Must stay 19 insns.
 * NOTE(review): the "ta" trap and the error check feeding 1: are elided.
 */
383 __hypervisor_flush_tlb_mm: /* 19 insns */
384 mov %o0, %o2 /* ARG2: mmu context */
385 mov 0, %o0 /* ARG0: CPU lists unimplemented */
386 mov 0, %o1 /* ARG1: CPU lists unimplemented */
387 mov HV_MMU_ALL, %o3 /* ARG3: flags */
388 mov HV_FAST_MMU_DEMAP_CTX, %o5	! fast-trap function number
391 mov HV_FAST_MMU_DEMAP_CTX, %o1	! error path: report which hv call failed
394 1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
395 jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0	! tail-jump to error reporter
/* __hypervisor_flush_tlb_page -- sun4v: unmap one (ctx, vaddr) pair
 * via the MMU_UNMAP_ADDR trap.  The srlx/sllx pair page-aligns the
 * vaddr (strips the low bits).  Must stay 22 insns.
 * NOTE(review): the trap-status check feeding 1: is elided.
 */
404 __hypervisor_flush_tlb_page: /* 22 insns */
405 /* %o0 = context, %o1 = vaddr */
407 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
408 mov %g2, %o1 /* ARG1: mmu context */
409 mov HV_MMU_ALL, %o2 /* ARG2: flags */
410 srlx %o0, PAGE_SHIFT, %o0
411 sllx %o0, PAGE_SHIFT, %o0	! page-align ARG0
412 ta HV_MMU_UNMAP_ADDR_TRAP
414 mov HV_MMU_UNMAP_ADDR_TRAP, %o1	! error path: report failing trap number
417 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
418 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0	! tail-jump to error reporter
/* __hypervisor_flush_tlb_pending -- sun4v: loop over vaddrs[] (8-byte
 * slots, counted down in %g1) issuing one MMU_UNMAP_ADDR trap each.
 * Must stay 27 insns.
 * NOTE(review): register shuffling into %g1/%g2/%g3 and the loop
 * back-branch are elided.
 */
429 __hypervisor_flush_tlb_pending: /* 27 insns */
430 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
434 1: sub %g1, (1 << 3), %g1	! next 8-byte vaddr slot (countdown)
435 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
436 mov %g3, %o1 /* ARG1: mmu context */
437 mov HV_MMU_ALL, %o2 /* ARG2: flags */
438 srlx %o0, PAGE_SHIFT, %o0
439 sllx %o0, PAGE_SHIFT, %o0	! page-align ARG0
440 ta HV_MMU_UNMAP_ADDR_TRAP
442 mov HV_MMU_UNMAP_ADDR_TRAP, %o1	! error path: report failing trap number
447 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
448 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0	! tail-jump to error reporter
/* __hypervisor_flush_tlb_kernel_range -- sun4v: per-page unmap loop
 * for [start, end) in the nucleus context; large ranges take the 4:
 * path which demaps the whole nucleus context instead.
 * Must stay 31 insns.
 * NOTE(review): range-size test, loop control and error checks are
 * elided from this listing.
 */
459 __hypervisor_flush_tlb_kernel_range: /* 31 insns */
460 /* %o0=start, %o1=end */
467 sethi %hi(PAGE_SIZE), %g3
469 1: add %g1, %g2, %o0 /* ARG0: virtual address */
470 mov 0, %o1 /* ARG1: mmu context */
471 mov HV_MMU_ALL, %o2 /* ARG2: flags */
472 ta HV_MMU_UNMAP_ADDR_TRAP
474 mov HV_MMU_UNMAP_ADDR_TRAP, %o1	! error path: report failing trap number
479 3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
480 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0	! tail-jump to error reporter
482 4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
483 mov 0, %o1 /* ARG1: CPU lists unimplemented */
484 mov 0, %o2 /* ARG2: mmu context == nucleus */
485 mov HV_MMU_ALL, %o3 /* ARG3: flags */
486 mov HV_FAST_MMU_DEMAP_CTX, %o5	! large-range fallback: demap whole nucleus ctx
489 mov HV_FAST_MMU_DEMAP_CTX, %o1	! error path: report failing hv call
493 #ifdef DCACHE_ALIASING_POSSIBLE
494 /* XXX Niagara and friends have an 8K cache, so no aliasing is
495 * XXX possible, but nothing explicit in the Hypervisor API
496 * XXX guarantees this.
/* __hypervisor_flush_dcache_page -- effectively a no-op on sun4v
 * (body elided; "2 insns" implies just retl + delay slot). */
498 __hypervisor_flush_dcache_page: /* 2 insns */
515 /* These are all called by the slaves of a cross call, at
516 * trap level 1, with interrupts fully disabled.
519 * %g5 mm->context (all tlb flushes)
520 * %g1 address arg 1 (tlb page and range flushes)
521 * %g7 address arg 2 (tlb range flush only)
/* xcall_flush_tlb_mm -- cross-call slave: demap an entire context via
 * PRIMARY_CONTEXT, preserving nucleus page-size fields.  Runs at TL1;
 * only %g registers are usable.  Must stay 24 insns (hv-patched).
 * NOTE(review): %g4 is reloaded as the demap address between lines
 * 536 and 538 (setup elided), and the retry is not visible.
 */
529 .globl xcall_flush_tlb_mm
530 xcall_flush_tlb_mm: /* 24 insns */
531 mov PRIMARY_CONTEXT, %g2
532 ldxa [%g2] ASI_DMMU, %g3	! save current primary ctx
533 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
534 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4	! isolate nucleus page-size fields
535 or %g5, %g4, %g5 /* Preserve nucleus page size fields */
536 stxa %g5, [%g2] ASI_DMMU	! install target ctx
538 stxa %g0, [%g4] ASI_DMMU_DEMAP	! demap whole context
539 stxa %g0, [%g4] ASI_IMMU_DEMAP
540 stxa %g3, [%g2] ASI_DMMU	! restore primary ctx
/* xcall_flush_tlb_page -- cross-call slave: flush one (ctx, vaddr)
 * via PRIMARY_CONTEXT.  Runs at TL1.  Must stay 20 insns (hv-patched).
 * NOTE(review): the demap-vaddr construction in %g5 between lines 565
 * and 569 is elided, as is the ctx page-size merge.
 */
556 .globl xcall_flush_tlb_page
557 xcall_flush_tlb_page: /* 20 insns */
558 /* %g5=context, %g1=vaddr */
559 mov PRIMARY_CONTEXT, %g4
560 ldxa [%g4] ASI_DMMU, %g2	! save current primary ctx
561 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
562 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4	! isolate nucleus page-size fields
564 mov PRIMARY_CONTEXT, %g4
565 stxa %g5, [%g4] ASI_DMMU	! install target ctx
569 stxa %g0, [%g5] ASI_IMMU_DEMAP	! demap page from I-TLB
570 2: stxa %g0, [%g5] ASI_DMMU_DEMAP	! demap page from D-TLB
572 stxa %g2, [%g4] ASI_DMMU	! restore primary ctx
/* xcall_flush_tlb_kernel_range -- cross-call slave: nucleus-context
 * range flush; large ranges fall through to a full TLB walk that
 * invalidates every unlocked I-TLB/D-TLB entry (Spitfire style).
 * Must stay 44 insns (patched for cheetah/hv).
 * NOTE(review): range setup, loop control and locked-entry branches
 * are elided from this listing.
 */
580 .globl xcall_flush_tlb_kernel_range
581 xcall_flush_tlb_kernel_range: /* 44 insns */
582 sethi %hi(PAGE_SIZE - 1), %g2
583 or %g2, %lo(PAGE_SIZE - 1), %g2	! %g2 = page-offset mask
591 or %g1, 0x20, %g1 ! Nucleus
592 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP	! per-page demap in nucleus ctx
593 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
599 1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2	! slow path: walk I-TLB by index
600 andcc %g2, 0x40, %g0 /* _PAGE_L_4U */	! locked entry? skip
602 mov TLB_TAG_ACCESS, %g2
603 stxa %g0, [%g2] ASI_IMMU	! clear tag access
604 stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS	! invalidate entry
606 2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2	! same for the D-TLB
609 mov TLB_TAG_ACCESS, %g2
610 stxa %g0, [%g2] ASI_DMMU
611 stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
627 /* This runs in a very controlled environment, so we do
628 * not need to worry about BH races etc.
/* xcall_sync_tick -- cross-call slave that builds a pt_regs frame and
 * calls smp_synchronize_tick_client().  The 661:/.sun4v_2insn_patch
 * pair swaps in the sun4v equivalent of the pstate write at boot.
 * NOTE(review): the label line and much of the frame setup are elided.
 */
630 .globl xcall_sync_tick
633 661: rdpr %pstate, %g2
634 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate	! switch global register set (sun4u)
635 .section .sun4v_2insn_patch, "ax"	! boot-time replacement for the 2 insns above
642 wrpr %g0, PIL_NORMAL_MAX, %pil	! mask normal interrupts
645 109: or %g7, %lo(109b), %g7	! %g7 = return address for etrap-style entry (sethi elided)
646 #ifdef CONFIG_TRACE_IRQFLAGS
647 call trace_hardirqs_off
650 call smp_synchronize_tick_client
653 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1	! begin rtrap-style return
/* xcall_fetch_glob_regs -- cross-call slave: snapshot this CPU's
 * %tstate/%tpc/%tnpc/%o7/%i7, caller's pc and thread pointer into its
 * slot of global_cpu_snapshot[] (slot indexing by cpu id elided).
 * NOTE(review): the rdpr/register reads feeding %g7 are elided.
 */
655 .globl xcall_fetch_glob_regs
656 xcall_fetch_glob_regs:
657 sethi %hi(global_cpu_snapshot), %g1
658 or %g1, %lo(global_cpu_snapshot), %g1
663 stx %g7, [%g1 + GR_SNAP_TSTATE]	! snapshot %tstate
665 stx %g7, [%g1 + GR_SNAP_TPC]	! snapshot %tpc
667 stx %g7, [%g1 + GR_SNAP_TNPC]	! snapshot %tnpc
668 stx %o7, [%g1 + GR_SNAP_O7]
669 stx %i7, [%g1 + GR_SNAP_I7]
670 /* Don't try this at home kids... */
676 stx %g7, [%g1 + GR_SNAP_RPC]	! caller's return pc, dug out of the register window
677 sethi %hi(trap_block), %g7
678 or %g7, %lo(trap_block), %g7
679 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2	! %g2 = this cpu's trap_block offset
681 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3
682 stx %g3, [%g1 + GR_SNAP_THREAD]	! snapshot thread pointer
/* xcall_fetch_glob_pmu -- cross-call slave: snapshot PMU state into
 * this CPU's global_cpu_snapshot[] slot (pre-Niagara-4: one PCR read,
 * one PIC read; register reads feeding %g7 are elided).
 */
685 .globl xcall_fetch_glob_pmu
686 xcall_fetch_glob_pmu:
687 sethi %hi(global_cpu_snapshot), %g1
688 or %g1, %lo(global_cpu_snapshot), %g1
693 stx %g7, [%g1 + (4 * 8)]	! snapshot PIC
695 stx %g7, [%g1 + (0 * 8)]	! snapshot PCR
/* xcall_fetch_glob_pmu_n4 -- Niagara-4 variant: four PIC counters read
 * through ASI_PIC (%g3 stepped between reads, setup elided) and four
 * PCRs fetched via the HV_FAST_VT_GET_PERFREG hypervisor call (trap
 * and per-call register setup elided).
 */
698 .globl xcall_fetch_glob_pmu_n4
699 xcall_fetch_glob_pmu_n4:
700 sethi %hi(global_cpu_snapshot), %g1
701 or %g1, %lo(global_cpu_snapshot), %g1
706 ldxa [%g0] ASI_PIC, %g7
707 stx %g7, [%g1 + (4 * 8)]	! PIC0
709 ldxa [%g3] ASI_PIC, %g7
710 stx %g7, [%g1 + (5 * 8)]	! PIC1
712 ldxa [%g3] ASI_PIC, %g7
713 stx %g7, [%g1 + (6 * 8)]	! PIC2
715 ldxa [%g3] ASI_PIC, %g7
716 stx %g7, [%g1 + (7 * 8)]	! PIC3
722 mov HV_FAST_VT_GET_PERFREG, %o5
725 stx %o1, [%g1 + (3 * 8)]	! PCR3 (hv call result)
726 mov HV_FAST_VT_GET_PERFREG, %o5
729 stx %o1, [%g1 + (2 * 8)]	! PCR2
730 mov HV_FAST_VT_GET_PERFREG, %o5
733 stx %o1, [%g1 + (1 * 8)]	! PCR1
734 mov HV_FAST_VT_GET_PERFREG, %o5
737 stx %o1, [%g1 + (0 * 8)]	! PCR0
/* __cheetah_xcall_flush_tlb_kernel_range -- Cheetah replacement for
 * xcall_flush_tlb_kernel_range (patched in at boot; must match its
 * 44-insn size).  Large ranges use demap-all instead of a TLB walk.
 * NOTE(review): range-size test and loop control are elided.
 */
745 __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
746 sethi %hi(PAGE_SIZE - 1), %g2
747 or %g2, %lo(PAGE_SIZE - 1), %g2	! %g2 = page-offset mask
755 or %g1, 0x20, %g1 ! Nucleus
756 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP	! per-page demap in nucleus ctx
757 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
763 stxa %g0, [%g2] ASI_DMMU_DEMAP	! large-range fallback: demap-all (setup of %g2 elided)
765 stxa %g0, [%g2] ASI_IMMU_DEMAP
791 #ifdef DCACHE_ALIASING_POSSIBLE
/* xcall_flush_dcache_page_cheetah -- cross-call slave: invalidate one
 * page of D-cache on Cheetah via displacement stores, 32 bytes per
 * iteration (loop back-branch and retry elided).
 */
793 .globl xcall_flush_dcache_page_cheetah
794 xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
795 sethi %hi(PAGE_SIZE), %g3	! %g3 = bytes remaining
796 1: subcc %g3, (1 << 5), %g3	! one 32-byte line per step
797 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
803 #endif /* DCACHE_ALIASING_POSSIBLE */
/* xcall_flush_dcache_page_spitfire -- cross-call slave: D-cache flush
 * by tag comparison (when aliasing is possible), then an I-cache flush
 * pass over the kernel mapping when the page has a mapping (%g5).
 * NOTE(review): tag-compare branches and the "flush" insn are elided.
 */
805 .globl xcall_flush_dcache_page_spitfire
806 xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
807 %g7 == kernel page virtual address
808 %g5 == (page->mapping != NULL) */
809 #ifdef DCACHE_ALIASING_POSSIBLE
810 srlx %g1, (13 - 2), %g1 ! Form tag comparitor
811 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
812 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
813 1: ldxa [%g3] ASI_DCACHE_TAG, %g2	! load tag for this line
821 stxa %g0, [%g3] ASI_DCACHE_TAG	! invalidate matching line
825 sub %g3, (1 << 5), %g3	! next line down
828 #endif /* DCACHE_ALIASING_POSSIBLE */
829 sethi %hi(PAGE_SIZE), %g3	! I-cache pass: bytes remaining
832 subcc %g3, (1 << 5), %g3
834 add %g7, (1 << 5), %g7	! advance kernel vaddr
/* __hypervisor_tlb_xcall_error -- fatal-error reporter for hv TLB ops
 * issued from cross-call (TL1) context (argument setup elided). */
843 __hypervisor_tlb_xcall_error:
849 call hypervisor_tlbop_error_xcall
/* __hypervisor_xcall_flush_tlb_mm -- sun4v cross-call slave: demap a
 * whole context via HV_FAST_MMU_DEMAP_CTX.  %o registers are saved to
 * %g scratch around the trap (save/restore elided).  Must stay 24 insns.
 */
853 .globl __hypervisor_xcall_flush_tlb_mm
854 __hypervisor_xcall_flush_tlb_mm: /* 24 insns */
855 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
861 clr %o0 /* ARG0: CPU lists unimplemented */
862 clr %o1 /* ARG1: CPU lists unimplemented */
863 mov %g5, %o2 /* ARG2: mmu context */
864 mov HV_MMU_ALL, %o3 /* ARG3: flags */
865 mov HV_FAST_MMU_DEMAP_CTX, %o5	! fast-trap function number
867 mov HV_FAST_MMU_DEMAP_CTX, %g6	! error path: failing hv call number
877 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
878 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0	! tail-jump to error reporter
/* __hypervisor_xcall_flush_tlb_page -- sun4v cross-call slave: unmap
 * one (ctx, vaddr) via MMU_UNMAP_ADDR; vaddr page-aligned in place.
 * Must stay 20 insns.  NOTE(review): %o-register save/restore and the
 * trap-status check are elided.
 */
881 .globl __hypervisor_xcall_flush_tlb_page
882 __hypervisor_xcall_flush_tlb_page: /* 20 insns */
883 /* %g5=ctx, %g1=vaddr */
887 mov %g1, %o0 /* ARG0: virtual address */
888 mov %g5, %o1 /* ARG1: mmu context */
889 mov HV_MMU_ALL, %o2 /* ARG2: flags */
890 srlx %o0, PAGE_SHIFT, %o0
891 sllx %o0, PAGE_SHIFT, %o0	! page-align ARG0
892 ta HV_MMU_UNMAP_ADDR_TRAP
893 mov HV_MMU_UNMAP_ADDR_TRAP, %g6	! error path: failing trap number
901 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
902 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0	! tail-jump to error reporter
/* __hypervisor_xcall_flush_tlb_kernel_range -- sun4v cross-call slave:
 * per-page MMU_UNMAP_ADDR loop over [start, end) in the nucleus
 * context; large ranges take the demap-ctx fallback at the bottom.
 * Must stay 44 insns.  NOTE(review): range-size test, loop control and
 * %o-register save/restore are elided from this listing.
 */
905 .globl __hypervisor_xcall_flush_tlb_kernel_range
906 __hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
907 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
908 sethi %hi(PAGE_SIZE - 1), %g2
909 or %g2, %lo(PAGE_SIZE - 1), %g2	! %g2 = page-offset mask
920 1: add %g1, %g3, %o0 /* ARG0: virtual address */
921 mov 0, %o1 /* ARG1: mmu context */
922 mov HV_MMU_ALL, %o2 /* ARG2: flags */
923 ta HV_MMU_UNMAP_ADDR_TRAP
924 mov HV_MMU_UNMAP_ADDR_TRAP, %g6	! error path: failing trap number
927 sethi %hi(PAGE_SIZE), %o2	! loop stride
935 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
936 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0	! tail-jump to error reporter
940 mov 0, %o0 /* ARG0: CPU lists unimplemented */
941 mov 0, %o1 /* ARG1: CPU lists unimplemented */
942 mov 0, %o2 /* ARG2: mmu context == nucleus */
943 mov HV_MMU_ALL, %o3 /* ARG3: flags */
944 mov HV_FAST_MMU_DEMAP_CTX, %o5	! large-range fallback: demap whole nucleus ctx
949 mov HV_FAST_MMU_DEMAP_CTX, %g6	! error path: failing hv call number
953 /* These just get rescheduled to PIL vectors. */
/* Each stub below raises the matching software-interrupt PIL on this
 * CPU via %set_softint; the actual handler then runs at that PIL
 * (each stub's trailing "retry" is elided from this listing).
 */
954 .globl xcall_call_function
956 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
959 .globl xcall_call_function_single
960 xcall_call_function_single:
961 wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
964 .globl xcall_receive_signal
965 xcall_receive_signal:
966 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
975 .globl xcall_kgdb_capture
977 wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
981 #endif /* CONFIG_SMP */
/* cheetah_patch_cachetlbops -- at boot on Cheetah-class CPUs, overwrite
 * each generic (Spitfire) flush routine with its __cheetah_* variant.
 * For each pair: %o0 = destination (generic), %o1 = source (cheetah);
 * the actual patch call (presumably a tlb_patch_one-style helper with
 * the insn count in %o2) is elided between pairs.
 */
983 .globl cheetah_patch_cachetlbops
984 cheetah_patch_cachetlbops:
987 sethi %hi(__flush_tlb_mm), %o0
988 or %o0, %lo(__flush_tlb_mm), %o0
989 sethi %hi(__cheetah_flush_tlb_mm), %o1
990 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
994 sethi %hi(__flush_tlb_page), %o0
995 or %o0, %lo(__flush_tlb_page), %o0
996 sethi %hi(__cheetah_flush_tlb_page), %o1
997 or %o1, %lo(__cheetah_flush_tlb_page), %o1
1001 sethi %hi(__flush_tlb_pending), %o0
1002 or %o0, %lo(__flush_tlb_pending), %o0
1003 sethi %hi(__cheetah_flush_tlb_pending), %o1
1004 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
1008 sethi %hi(__flush_tlb_kernel_range), %o0
1009 or %o0, %lo(__flush_tlb_kernel_range), %o0
1010 sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
1011 or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
1015 #ifdef DCACHE_ALIASING_POSSIBLE
1016 sethi %hi(__flush_dcache_page), %o0
1017 or %o0, %lo(__flush_dcache_page), %o0
1018 sethi %hi(__cheetah_flush_dcache_page), %o1
1019 or %o1, %lo(__cheetah_flush_dcache_page), %o1
1022 #endif /* DCACHE_ALIASING_POSSIBLE */
1025 sethi %hi(xcall_flush_tlb_kernel_range), %o0
1026 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1027 sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
1028 or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
1031 #endif /* CONFIG_SMP */
/* hypervisor_patch_cachetlbops -- at boot on sun4v, overwrite each
 * generic flush routine (and each xcall slave) with its
 * __hypervisor_* variant.  For each pair: %o0 = destination,
 * %o1 = source; the patch call between pairs is elided, as in
 * cheetah_patch_cachetlbops.
 */
1036 .globl hypervisor_patch_cachetlbops
1037 hypervisor_patch_cachetlbops:
1040 sethi %hi(__flush_tlb_mm), %o0
1041 or %o0, %lo(__flush_tlb_mm), %o0
1042 sethi %hi(__hypervisor_flush_tlb_mm), %o1
1043 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
1047 sethi %hi(__flush_tlb_page), %o0
1048 or %o0, %lo(__flush_tlb_page), %o0
1049 sethi %hi(__hypervisor_flush_tlb_page), %o1
1050 or %o1, %lo(__hypervisor_flush_tlb_page), %o1
1054 sethi %hi(__flush_tlb_pending), %o0
1055 or %o0, %lo(__flush_tlb_pending), %o0
1056 sethi %hi(__hypervisor_flush_tlb_pending), %o1
1057 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
1061 sethi %hi(__flush_tlb_kernel_range), %o0
1062 or %o0, %lo(__flush_tlb_kernel_range), %o0
1063 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
1064 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
1068 #ifdef DCACHE_ALIASING_POSSIBLE
1069 sethi %hi(__flush_dcache_page), %o0
1070 or %o0, %lo(__flush_dcache_page), %o0
1071 sethi %hi(__hypervisor_flush_dcache_page), %o1
1072 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
1075 #endif /* DCACHE_ALIASING_POSSIBLE */
1078 sethi %hi(xcall_flush_tlb_mm), %o0
1079 or %o0, %lo(xcall_flush_tlb_mm), %o0
1080 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
1081 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
1085 sethi %hi(xcall_flush_tlb_page), %o0
1086 or %o0, %lo(xcall_flush_tlb_page), %o0
1087 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
1088 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
1092 sethi %hi(xcall_flush_tlb_kernel_range), %o0
1093 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1094 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
1095 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
1098 #endif /* CONFIG_SMP */