1 /* SPDX-License-Identifier: GPL-2.0 */
3 * ultra.S: Don't expand these all over the place...
5 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
9 #include <asm/pgtable.h>
11 #include <asm/spitfire.h>
12 #include <asm/mmu_context.h>
16 #include <asm/thread_info.h>
17 #include <asm/cacheflush.h>
18 #include <asm/hypervisor.h>
19 #include <asm/cpudata.h>
21 /* Basically, most of the Spitfire vs. Cheetah madness
22 * has to do with the fact that Cheetah does not support
23 * IMMU flushes out of the secondary context. Someone needs
24 * to throw a south lake birthday party for the folks
25 * in Microelectronics who refused to fix this shit.
28 /* This file is meant to be read efficiently by the CPU, not humans.
29 * Staraj sie tego nikomu nie pierdolnac... (Polish; roughly: "Try hard not to screw this up for anyone...")
/* __flush_tlb_mm — flush TLB entries for a user context (Spitfire path).
 * Reads the live SECONDARY_CONTEXT register and, on mismatch with the
 * requested context, branches to __spitfire_flush_tlb_mm_slow; otherwise
 * issues D-MMU and I-MMU demap stores.
 * NOTE(review): the "19 insns" annotation matters — the boot-time patchers
 * below overwrite this routine with fixed-size replacements, so the
 * instruction count is part of the contract.  Several lines (the compare,
 * delay slots, demap-target setup in %g3) are elided in this excerpt.
 */
34 __flush_tlb_mm: /* 19 insns */
35 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
36 ldxa [%o1] ASI_DMMU, %g2 ! %g2 = live secondary context
38 bne,pn %icc, __spitfire_flush_tlb_mm_slow ! ctx mismatch -> slow path (compare elided)
40 stxa %g0, [%g3] ASI_DMMU_DEMAP ! demap from D-TLB
41 stxa %g0, [%g3] ASI_IMMU_DEMAP ! demap from I-TLB
42 sethi %hi(KERNBASE), %g3 ! kernel text VA — presumably for a flush insn; confirm
/* __flush_tlb_page — flush one page mapping for a given context (Spitfire).
 * Pattern: mask PSTATE_IE (interrupts off), temporarily install the target
 * context in SECONDARY_CONTEXT, demap the page in I- and D-MMU, then restore
 * the saved context and PSTATE.
 * NOTE(review): intermediate lines (the wrpr that applies %g2, demap-VA
 * setup in %o3, flush/retl tail) are elided in this excerpt.
 */
57 .globl __flush_tlb_page
58 __flush_tlb_page: /* 22 insns */
59 /* %o0 = context, %o1 = vaddr */
61 andn %g7, PSTATE_IE, %g2 ! %g7 = saved pstate; %g2 = pstate w/ IE clear
63 mov SECONDARY_CONTEXT, %o4
64 ldxa [%o4] ASI_DMMU, %g2 ! save current secondary ctx (%g2 reused)
65 stxa %o0, [%o4] ASI_DMMU ! install target ctx
70 stxa %g0, [%o3] ASI_IMMU_DEMAP ! demap page from I-TLB
71 1: stxa %g0, [%o3] ASI_DMMU_DEMAP ! demap page from D-TLB
73 stxa %g2, [%o4] ASI_DMMU ! restore saved ctx
74 sethi %hi(KERNBASE), %o4
77 wrpr %g7, 0x0, %pstate ! restore interrupt state
/* __flush_tlb_pending — flush a batch of pending page mappings (Spitfire).
 * Same ctx-swap pattern as __flush_tlb_page, but loops backwards over an
 * array of vaddrs: %o1 is a byte count decremented by 8 (one 64-bit vaddr)
 * per iteration.
 * NOTE(review): the load of each vaddr, the IMMU/exec test, and the loop
 * branch are elided in this excerpt.
 */
84 .globl __flush_tlb_pending
85 __flush_tlb_pending: /* 27 insns */
86 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
89 andn %g7, PSTATE_IE, %g2 ! mask interrupts (apply elided)
91 mov SECONDARY_CONTEXT, %o4
92 ldxa [%o4] ASI_DMMU, %g2 ! save current secondary ctx
93 stxa %o0, [%o4] ASI_DMMU ! install target ctx
94 1: sub %o1, (1 << 3), %o1 ! step to previous vaddr (8 bytes)
100 stxa %g0, [%o3] ASI_IMMU_DEMAP ! demap from I-TLB
101 2: stxa %g0, [%o3] ASI_DMMU_DEMAP ! demap from D-TLB
105 stxa %g2, [%o4] ASI_DMMU ! restore saved ctx
106 sethi %hi(KERNBASE), %o4
109 wrpr %g7, 0x0, %pstate ! restore interrupt state
/* __flush_tlb_kernel_range — demap a kernel VA range [start, end) in the
 * nucleus context, one page per loop iteration, in both D- and I-MMU.
 * Falls back to __spitfire_flush_tlb_kernel_range_slow when %o4 is nonzero
 * (condition setup elided — presumably a range-size/locked-entry check;
 * confirm against full source).
 */
116 .globl __flush_tlb_kernel_range
117 __flush_tlb_kernel_range: /* 31 insns */
118 /* %o0=start, %o1=end */
123 brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow ! slow path if %o4 != 0
124 sethi %hi(PAGE_SIZE), %o4 ! (delay slot)
126 or %o0, 0x20, %o0 ! Nucleus
127 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP ! demap page, D-side
128 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP ! demap page, I-side
132 2: sethi %hi(KERNBASE), %o3
/* Slow path: walk the I-TLB and D-TLB entries by index via the
 * *_TLB_DATA_ACCESS ASIs, skipping locked entries (_PAGE_L_4U, bit 0x40),
 * and invalidate each by clearing its tag then its data word.
 * NOTE(review): index setup, the locked-entry branches, and the loop
 * decrement/branch are elided in this excerpt.
 */
151 __spitfire_flush_tlb_kernel_range_slow:
153 1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 ! read I-TLB entry %o4
154 andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ ! locked? (branch elided)
156 mov TLB_TAG_ACCESS, %o3
157 stxa %g0, [%o3] ASI_IMMU ! clear I-TLB tag-access
158 stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS ! invalidate I-TLB entry
160 2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 ! same walk for the D-TLB
163 mov TLB_TAG_ACCESS, %o3
164 stxa %g0, [%o3] ASI_DMMU
165 stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
/* Slow path for __flush_tlb_mm: with interrupts disabled via a direct
 * PSTATE write, install the target context (%o0) in the MMU context
 * register (%o1), demap I/D, then restore the saved context (%g2).
 * NOTE(review): the pstate save into %g1 and the tail (flush/retl,
 * pstate restore) are elided in this excerpt.
 */
173 __spitfire_flush_tlb_mm_slow:
175 wrpr %g1, PSTATE_IE, %pstate ! interrupts off (%g1 = saved pstate, setup elided)
176 stxa %o0, [%o1] ASI_DMMU ! install target ctx
177 stxa %g0, [%g3] ASI_DMMU_DEMAP
178 stxa %g0, [%g3] ASI_IMMU_DEMAP
180 stxa %g2, [%o1] ASI_DMMU ! restore saved ctx
181 sethi %hi(KERNBASE), %o1
187 * The following code flushes one page_size worth.
189 .section .kprobes.text, "ax"
/* __flush_icache_page — flush the I-cache for one physical page.
 * Rounds the physical address down to a page boundary, adds PAGE_OFFSET
 * (loaded into %g1) to form a kernel VA, then loops PAGE_SIZE bytes in
 * 32-byte steps.  The flush instruction inside the loop is elided in
 * this excerpt.
 */
191 .globl __flush_icache_page
192 __flush_icache_page: /* %o0 = phys_page */
193 srlx %o0, PAGE_SHIFT, %o0 ! round phys addr down...
194 sethi %hi(PAGE_OFFSET), %g1
195 sllx %o0, PAGE_SHIFT, %o0 ! ...to a page boundary
196 sethi %hi(PAGE_SIZE), %g2 ! %g2 = bytes remaining
197 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 ! %g1 = PAGE_OFFSET
199 1: subcc %g2, 32, %g2 ! one 32-byte line per iteration
205 #ifdef DCACHE_ALIASING_POSSIBLE
207 #if (PAGE_SHIFT != 13)
208 #error only page shift of 13 is supported by dcache flush
211 #define DTAG_MASK 0x3
213 /* This routine is Spitfire specific so the hardcoded
214 * D-cache size and line-size are OK.
 *
 * __flush_dcache_page — walk the 16K direct-mapped D-cache by tag index
 * (32-byte lines), compare each valid tag against the page's tag, and
 * invalidate matches.  Optionally chains to __flush_icache_page when
 * %o1 (flush_icache) is nonzero.
 * NOTE(review): the loop-bottom branch, tail, and a few lines in between
 * are elided in this excerpt.
 */
217 .globl __flush_dcache_page
218 __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
219 sethi %hi(PAGE_OFFSET), %g1
220 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
221 sub %o0, %g1, %o0 ! physical address
222 srlx %o0, 11, %o0 ! make D-cache TAG
223 sethi %hi(1 << 14), %o2 ! D-cache size
224 sub %o2, (1 << 5), %o2 ! D-cache line size
225 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
226 andcc %o3, DTAG_MASK, %g0 ! Valid?
227 be,pn %xcc, 2f ! Nope, branch
228 andn %o3, DTAG_MASK, %o3 ! Clear valid bits
229 cmp %o3, %o0 ! TAG match?
230 bne,pt %xcc, 2f ! Nope, branch
232 stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
235 sub %o2, (1 << 5), %o2 ! D-cache line size
237 /* The I-cache does not snoop local stores so we
238 * better flush that too when necessary.
240 brnz,pt %o1, __flush_icache_page ! tail-chain to I-cache flush if requested
245 #endif /* DCACHE_ALIASING_POSSIBLE */
249 /* Cheetah specific versions, patched at boot time. */
/* __cheetah_flush_tlb_mm — Cheetah cannot demap via SECONDARY_CONTEXT
 * (see header comment), so this variant disables interrupts and swaps
 * the context into PRIMARY_CONTEXT instead, preserving the nucleus
 * page-size fields packed above CTX_PGSZ1_NUC_SHIFT.
 * Must remain exactly 19 insns: it is copied over __flush_tlb_mm by
 * cheetah_patch_cachetlbops.  Some lines are elided in this excerpt.
 */
250 __cheetah_flush_tlb_mm: /* 19 insns */
252 andn %g7, PSTATE_IE, %g2 ! %g7 = saved pstate (read elided)
253 wrpr %g2, 0x0, %pstate ! interrupts off
255 mov PRIMARY_CONTEXT, %o2
257 ldxa [%o2] ASI_DMMU, %g2 ! save current primary ctx
258 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
259 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 ! isolate nucleus pgsz field
260 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
261 stxa %o0, [%o2] ASI_DMMU ! install target ctx
262 stxa %g0, [%g3] ASI_DMMU_DEMAP
263 stxa %g0, [%g3] ASI_IMMU_DEMAP
264 stxa %g2, [%o2] ASI_DMMU ! restore saved ctx
265 sethi %hi(KERNBASE), %o2
269 wrpr %g7, 0x0, %pstate ! restore interrupt state
/* __cheetah_flush_tlb_page — single-page flush via PRIMARY_CONTEXT swap,
 * preserving nucleus page-size fields.  Copied over __flush_tlb_page at
 * boot; must remain 22 insns.  Demap-VA setup and the tail are elided
 * in this excerpt.
 */
271 __cheetah_flush_tlb_page: /* 22 insns */
272 /* %o0 = context, %o1 = vaddr */
274 andn %g7, PSTATE_IE, %g2
275 wrpr %g2, 0x0, %pstate ! interrupts off
277 mov PRIMARY_CONTEXT, %o4
278 ldxa [%o4] ASI_DMMU, %g2 ! save current primary ctx
279 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
280 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
281 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
282 stxa %o0, [%o4] ASI_DMMU ! install target ctx
286 stxa %g0, [%o3] ASI_IMMU_DEMAP ! demap page, I-side
287 1: stxa %g0, [%o3] ASI_DMMU_DEMAP ! demap page, D-side
289 stxa %g2, [%o4] ASI_DMMU ! restore saved ctx
290 sethi %hi(KERNBASE), %o4
294 wrpr %g7, 0x0, %pstate ! restore interrupt state
/* __cheetah_flush_tlb_pending — batch flush via PRIMARY_CONTEXT swap;
 * loops backwards over the vaddrs[] array 8 bytes at a time.  Copied over
 * __flush_tlb_pending at boot; must remain 27 insns.  Per-vaddr load and
 * loop branch are elided in this excerpt.
 */
296 __cheetah_flush_tlb_pending: /* 27 insns */
297 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
300 andn %g7, PSTATE_IE, %g2
301 wrpr %g2, 0x0, %pstate ! interrupts off
303 mov PRIMARY_CONTEXT, %o4
304 ldxa [%o4] ASI_DMMU, %g2 ! save current primary ctx
305 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
306 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
307 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
308 stxa %o0, [%o4] ASI_DMMU ! install target ctx
309 1: sub %o1, (1 << 3), %o1 ! step to previous vaddr (8 bytes)
314 stxa %g0, [%o3] ASI_IMMU_DEMAP
315 2: stxa %g0, [%o3] ASI_DMMU_DEMAP
319 stxa %g2, [%o4] ASI_DMMU ! restore saved ctx
320 sethi %hi(KERNBASE), %o4
324 wrpr %g7, 0x0, %pstate ! restore interrupt state
/* __cheetah_flush_tlb_kernel_range — per-page nucleus demap loop for small
 * ranges, with a second path (labels elided) that demaps via [%o4] directly —
 * presumably a whole-context demap-all for large ranges; confirm against the
 * full source.  Copied over __flush_tlb_kernel_range at boot; must remain
 * 31 insns.
 */
326 __cheetah_flush_tlb_kernel_range: /* 31 insns */
327 /* %o0=start, %o1=end */
333 sethi %hi(PAGE_SIZE), %o4
335 or %o0, 0x20, %o0 ! Nucleus
336 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP ! demap page, D-side
337 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP ! demap page, I-side
341 2: sethi %hi(KERNBASE), %o3
346 stxa %g0, [%o4] ASI_DMMU_DEMAP
348 stxa %g0, [%o4] ASI_IMMU_DEMAP
360 #ifdef DCACHE_ALIASING_POSSIBLE
/* __cheetah_flush_dcache_page — invalidate one page's worth of D-cache
 * lines by physical address via ASI_DCACHE_INVALIDATE, 32 bytes per
 * iteration.  Copied over __flush_dcache_page at boot; must remain
 * 11 insns.  The kaddr->paddr conversion and loop branch are elided.
 */
361 __cheetah_flush_dcache_page: /* 11 insns */
362 sethi %hi(PAGE_OFFSET), %g1
363 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
365 sethi %hi(PAGE_SIZE), %o4 ! %o4 = byte offset countdown
366 1: subcc %o4, (1 << 5), %o4 ! one 32-byte line per iteration
367 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
371 retl /* I-cache flush never needed on Cheetah, see callers. */
373 #endif /* DCACHE_ALIASING_POSSIBLE */
375 /* Hypervisor specific versions, patched at boot time. */
/* Error trampoline for failed sun4v MMU hypercalls at trap level 0:
 * reports the failure via hypervisor_tlbop_error.  Register/stack setup
 * and the call's delay slot are elided in this excerpt.
 */
376 __hypervisor_tlb_tl0_error:
379 call hypervisor_tlbop_error
/* __hypervisor_flush_tlb_mm — sun4v variant: demap an entire mmu context
 * with the HV_FAST_MMU_DEMAP_CTX fast trap.  On error, jumps to
 * __hypervisor_tlb_tl0_error with the hypercall number in %o1.
 * Copied over __flush_tlb_mm at boot; must remain 19 insns.
 * The `ta` instruction and the error-check branch are elided here.
 */
384 __hypervisor_flush_tlb_mm: /* 19 insns */
385 mov %o0, %o2 /* ARG2: mmu context */
386 mov 0, %o0 /* ARG0: CPU lists unimplemented */
387 mov 0, %o1 /* ARG1: CPU lists unimplemented */
388 mov HV_MMU_ALL, %o3 /* ARG3: flags */
389 mov HV_FAST_MMU_DEMAP_CTX, %o5 ! fast-trap function number
392 mov HV_FAST_MMU_DEMAP_CTX, %o1 ! error path: which call failed
395 1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
396 jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 ! report failure
/* __hypervisor_flush_tlb_page — sun4v variant: unmap one page with the
 * HV_MMU_UNMAP_ADDR_TRAP trap; vaddr is rounded to a page boundary by the
 * srlx/sllx pair.  Copied over __flush_tlb_page at boot; must remain
 * 22 insns.  The ctx save into %g2 and the error-check branch are elided.
 */
405 __hypervisor_flush_tlb_page: /* 22 insns */
406 /* %o0 = context, %o1 = vaddr */
408 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
409 mov %g2, %o1 /* ARG1: mmu context */
410 mov HV_MMU_ALL, %o2 /* ARG2: flags */
411 srlx %o0, PAGE_SHIFT, %o0 ! clear sub-page bits...
412 sllx %o0, PAGE_SHIFT, %o0 ! ...page-align the vaddr
413 ta HV_MMU_UNMAP_ADDR_TRAP ! hypercall
415 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 ! error path: which call failed
418 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
419 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 ! report failure
/* __hypervisor_flush_tlb_pending — sun4v batch flush: walks the vaddrs[]
 * array backwards (%g1 = byte offset, %g2 = array base, %g3 = context —
 * setup elided) issuing one unmap trap per page.  Copied over
 * __flush_tlb_pending at boot; must remain 27 insns.
 */
430 __hypervisor_flush_tlb_pending: /* 27 insns */
431 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
435 1: sub %g1, (1 << 3), %g1 ! step to previous vaddr (8 bytes)
436 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
437 mov %g3, %o1 /* ARG1: mmu context */
438 mov HV_MMU_ALL, %o2 /* ARG2: flags */
439 srlx %o0, PAGE_SHIFT, %o0 ! page-align the vaddr
440 sllx %o0, PAGE_SHIFT, %o0
441 ta HV_MMU_UNMAP_ADDR_TRAP ! hypercall
443 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 ! error path: which call failed
448 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
449 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 ! report failure
/* __hypervisor_flush_tlb_kernel_range — sun4v kernel-range flush: per-page
 * unmap traps in the nucleus context (ctx 0), with a separate path at 4:
 * that demaps the whole nucleus context via HV_FAST_MMU_DEMAP_CTX —
 * presumably taken for large ranges; confirm against the full source.
 * Copied over __flush_tlb_kernel_range at boot; must remain 31 insns.
 */
460 __hypervisor_flush_tlb_kernel_range: /* 31 insns */
461 /* %o0=start, %o1=end */
468 sethi %hi(PAGE_SIZE), %g3
470 1: add %g1, %g2, %o0 /* ARG0: virtual address */
471 mov 0, %o1 /* ARG1: mmu context */
472 mov HV_MMU_ALL, %o2 /* ARG2: flags */
473 ta HV_MMU_UNMAP_ADDR_TRAP ! hypercall, one page
475 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 ! error path: which call failed
480 3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
481 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 ! report failure
483 4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
484 mov 0, %o1 /* ARG1: CPU lists unimplemented */
485 mov 0, %o2 /* ARG2: mmu context == nucleus */
486 mov HV_MMU_ALL, %o3 /* ARG3: flags */
487 mov HV_FAST_MMU_DEMAP_CTX, %o5 ! demap entire nucleus ctx
490 mov HV_FAST_MMU_DEMAP_CTX, %o1 ! error path: which call failed
494 #ifdef DCACHE_ALIASING_POSSIBLE
495 /* XXX Niagara and friends have an 8K cache, so no aliasing is
496 * XXX possible, but nothing explicit in the Hypervisor API
497 * XXX guarantees this.
 *
 * Two-insn no-op replacement for __flush_dcache_page on sun4v (body —
 * presumably just retl + nop — is elided in this excerpt).
 */
499 __hypervisor_flush_dcache_page: /* 2 insns */
516 /* These are all called by the slaves of a cross call, at
517 * trap level 1, with interrupts fully disabled.
520 * %g5 mm->context (all tlb flushes)
521 * %g1 address arg 1 (tlb page and range flushes)
522 * %g7 address arg 2 (tlb range flush only)
/* xcall_flush_tlb_mm — cross-call slave handler (TL=1, interrupts off,
 * see the convention comment above): demap the context in %g5 via a
 * PRIMARY_CONTEXT swap, preserving nucleus page-size fields.  Must remain
 * 24 insns (patched over by the hypervisor variant at boot).  Demap-target
 * setup in %g4 and the retry tail are elided in this excerpt.
 */
530 .globl xcall_flush_tlb_mm
531 xcall_flush_tlb_mm: /* 24 insns */
532 mov PRIMARY_CONTEXT, %g2
533 ldxa [%g2] ASI_DMMU, %g3 ! save current primary ctx
534 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
535 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 ! isolate nucleus pgsz field
536 or %g5, %g4, %g5 /* Preserve nucleus page size fields */
537 stxa %g5, [%g2] ASI_DMMU ! install target ctx
539 stxa %g0, [%g4] ASI_DMMU_DEMAP
540 stxa %g0, [%g4] ASI_IMMU_DEMAP
541 stxa %g3, [%g2] ASI_DMMU ! restore saved ctx
/* xcall_flush_tlb_page — cross-call slave: flush one page (%g1) for
 * context %g5 via a PRIMARY_CONTEXT swap.  Note %g4 is clobbered by the
 * pgsz-field extraction and deliberately reloaded with PRIMARY_CONTEXT.
 * Must remain 20 insns.  Demap-VA setup in %g5 and the tail are elided.
 */
557 .globl xcall_flush_tlb_page
558 xcall_flush_tlb_page: /* 20 insns */
559 /* %g5=context, %g1=vaddr */
560 mov PRIMARY_CONTEXT, %g4
561 ldxa [%g4] ASI_DMMU, %g2 ! save current primary ctx
562 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 ! (clobbers %g4; reloaded below)
563 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 ! isolate nucleus pgsz field
565 mov PRIMARY_CONTEXT, %g4
566 stxa %g5, [%g4] ASI_DMMU ! install target ctx
570 stxa %g0, [%g5] ASI_IMMU_DEMAP ! demap page, I-side
571 2: stxa %g0, [%g5] ASI_DMMU_DEMAP ! demap page, D-side
573 stxa %g2, [%g4] ASI_DMMU ! restore saved ctx
/* xcall_flush_tlb_kernel_range — cross-call slave: demap a kernel VA
 * range in the nucleus context (fast per-page loop), with a Spitfire-style
 * slow path that walks and invalidates all unlocked I/D-TLB entries by
 * index.  Must remain 44 insns.  Range setup, branches, and loop tails
 * are elided in this excerpt.
 */
581 .globl xcall_flush_tlb_kernel_range
582 xcall_flush_tlb_kernel_range: /* 44 insns */
583 sethi %hi(PAGE_SIZE - 1), %g2
584 or %g2, %lo(PAGE_SIZE - 1), %g2 ! %g2 = page mask
592 or %g1, 0x20, %g1 ! Nucleus
593 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP ! demap page, D-side
594 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP ! demap page, I-side
600 1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 ! slow path: read I-TLB entry
601 andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ ! skip locked entries
603 mov TLB_TAG_ACCESS, %g2
604 stxa %g0, [%g2] ASI_IMMU ! clear tag
605 stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS ! invalidate entry
607 2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 ! same walk for D-TLB
610 mov TLB_TAG_ACCESS, %g2
611 stxa %g0, [%g2] ASI_DMMU
612 stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
628 /* This runs in a very controlled environment, so we do
629 * not need to worry about BH races etc.
 *
 * xcall_sync_tick — cross-call target: switches out of the interrupt
 * globals (PSTATE_IG|PSTATE_AG; patched differently on sun4v via the
 * .sun4v_2insn_patch section), raises PIL, and calls
 * smp_synchronize_tick_client.  NOTE(review): the xcall_sync_tick label
 * itself, the save/%g7 return-address setup, and the trap-return tail
 * are elided in this excerpt.
 */
631 .globl xcall_sync_tick
634 661: rdpr %pstate, %g2
635 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate ! leave interrupt globals
636 .section .sun4v_2insn_patch, "ax" ! sun4v replacement for the 2 insns above
643 wrpr %g0, PIL_NORMAL_MAX, %pil ! block normal interrupts
646 109: or %g7, %lo(109b), %g7 ! return address for etrap-style entry — confirm
647 #ifdef CONFIG_TRACE_IRQFLAGS
648 call trace_hardirqs_off
651 call smp_synchronize_tick_client
654 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 ! begin trap-return sequence
/* xcall_fetch_glob_regs — cross-call target: snapshot this CPU's trap
 * state (tstate/tpc/tnpc), %o7/%i7, caller PC, and thread pointer into
 * this CPU's slot of global_cpu_snapshot.  Per-CPU slot offset
 * computation and the rdpr reads into %g7 are partially elided.
 */
656 .globl xcall_fetch_glob_regs
657 xcall_fetch_glob_regs:
658 sethi %hi(global_cpu_snapshot), %g1
659 or %g1, %lo(global_cpu_snapshot), %g1 ! %g1 -> snapshot array (per-CPU offset elided)
664 stx %g7, [%g1 + GR_SNAP_TSTATE]
666 stx %g7, [%g1 + GR_SNAP_TPC]
668 stx %g7, [%g1 + GR_SNAP_TNPC]
669 stx %o7, [%g1 + GR_SNAP_O7]
670 stx %i7, [%g1 + GR_SNAP_I7]
671 /* Don't try this at home kids... */
677 stx %g7, [%g1 + GR_SNAP_RPC]
678 sethi %hi(trap_block), %g7
679 or %g7, %lo(trap_block), %g7
680 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2 ! %g2 = this CPU's trap_block offset
682 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3
683 stx %g3, [%g1 + GR_SNAP_THREAD] ! record thread pointer
/* xcall_fetch_glob_pmu — cross-call target: snapshot this CPU's PMU
 * registers into its global_cpu_snapshot slot (rd instructions feeding
 * %g7 are elided in this excerpt).
 */
686 .globl xcall_fetch_glob_pmu
687 xcall_fetch_glob_pmu:
688 sethi %hi(global_cpu_snapshot), %g1
689 or %g1, %lo(global_cpu_snapshot), %g1 ! per-CPU offset computation elided
694 stx %g7, [%g1 + (4 * 8)] ! slot 4: first PMU value
696 stx %g7, [%g1 + (0 * 8)] ! slot 0: second PMU value
/* xcall_fetch_glob_pmu_n4 — Niagara-4 variant: reads four PIC values via
 * ASI_PIC into snapshot slots 4-7, then four performance-control registers
 * via the HV_FAST_VT_GET_PERFREG hypercall into slots 3..0 (the ta
 * instructions and %o-register argument setup are elided).
 */
699 .globl xcall_fetch_glob_pmu_n4
700 xcall_fetch_glob_pmu_n4:
701 sethi %hi(global_cpu_snapshot), %g1
702 or %g1, %lo(global_cpu_snapshot), %g1 ! per-CPU offset computation elided
707 ldxa [%g0] ASI_PIC, %g7 ! PIC 0
708 stx %g7, [%g1 + (4 * 8)]
710 ldxa [%g3] ASI_PIC, %g7 ! PIC 1 (%g3 index setup elided)
711 stx %g7, [%g1 + (5 * 8)]
713 ldxa [%g3] ASI_PIC, %g7 ! PIC 2
714 stx %g7, [%g1 + (6 * 8)]
716 ldxa [%g3] ASI_PIC, %g7 ! PIC 3
717 stx %g7, [%g1 + (7 * 8)]
723 mov HV_FAST_VT_GET_PERFREG, %o5 ! hypercall per perf register
726 stx %o1, [%g1 + (3 * 8)]
727 mov HV_FAST_VT_GET_PERFREG, %o5
730 stx %o1, [%g1 + (2 * 8)]
731 mov HV_FAST_VT_GET_PERFREG, %o5
734 stx %o1, [%g1 + (1 * 8)]
735 mov HV_FAST_VT_GET_PERFREG, %o5
738 stx %o1, [%g1 + (0 * 8)]
/* __cheetah_xcall_flush_tlb_kernel_range — Cheetah replacement for
 * xcall_flush_tlb_kernel_range (patched in at boot; must remain 44 insns):
 * per-page nucleus demap loop, plus a direct demap via [%g2] on another
 * path — presumably a demap-all for large ranges; confirm against full
 * source.  Range setup and branches are elided.
 */
746 __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
747 sethi %hi(PAGE_SIZE - 1), %g2
748 or %g2, %lo(PAGE_SIZE - 1), %g2 ! %g2 = page mask
756 or %g1, 0x20, %g1 ! Nucleus
757 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP ! demap page, D-side
758 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP ! demap page, I-side
764 stxa %g0, [%g2] ASI_DMMU_DEMAP
766 stxa %g0, [%g2] ASI_IMMU_DEMAP
792 #ifdef DCACHE_ALIASING_POSSIBLE
/* Cross-call slave: invalidate one physical page's worth of D-cache
 * lines (32 bytes per iteration) via ASI_DCACHE_INVALIDATE.  Loop
 * branch and the cross-call return tail are elided.
 */
794 .globl xcall_flush_dcache_page_cheetah
795 xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
796 sethi %hi(PAGE_SIZE), %g3 ! %g3 = byte offset countdown
797 1: subcc %g3, (1 << 5), %g3 ! one 32-byte line per iteration
798 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
804 #endif /* DCACHE_ALIASING_POSSIBLE */
/* Cross-call slave, Spitfire: when aliasing is possible, walk the 16K
 * D-cache tags and invalidate lines matching the page's tag; then (body
 * partially elided) loop over the kernel VA in 32-byte steps — presumably
 * an I-cache flush gated on %g5 (page->mapping != NULL); confirm against
 * the full source.
 */
806 .globl xcall_flush_dcache_page_spitfire
807 xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
808 %g7 == kernel page virtual address
809 %g5 == (page->mapping != NULL) */
810 #ifdef DCACHE_ALIASING_POSSIBLE
811 srlx %g1, (13 - 2), %g1 ! Form tag comparitor
812 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
813 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
814 1: ldxa [%g3] ASI_DCACHE_TAG, %g2 ! read tag at index %g3
822 stxa %g0, [%g3] ASI_DCACHE_TAG ! invalidate matching line
826 sub %g3, (1 << 5), %g3 ! previous line
829 #endif /* DCACHE_ALIASING_POSSIBLE */
830 sethi %hi(PAGE_SIZE), %g3
833 subcc %g3, (1 << 5), %g3 ! walk page in 32-byte steps
835 add %g7, (1 << 5), %g7 ! advance kernel VA
/* Error trampoline for failed sun4v MMU hypercalls inside a cross-call
 * handler: reports via hypervisor_tlbop_error_xcall.  Setup and the
 * call's delay slot are elided in this excerpt.
 */
844 __hypervisor_tlb_xcall_error:
850 call hypervisor_tlbop_error_xcall
/* __hypervisor_xcall_flush_tlb_mm — sun4v replacement for
 * xcall_flush_tlb_mm (patched in at boot; must remain 24 insns).
 * Saves/restores the %o registers it clobbers around the
 * HV_FAST_MMU_DEMAP_CTX fast trap (save/restore and the ta itself are
 * elided in this excerpt); on error jumps to __hypervisor_tlb_xcall_error
 * with the call number in %g6.
 */
854 .globl __hypervisor_xcall_flush_tlb_mm
855 __hypervisor_xcall_flush_tlb_mm: /* 24 insns */
856 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
862 clr %o0 /* ARG0: CPU lists unimplemented */
863 clr %o1 /* ARG1: CPU lists unimplemented */
864 mov %g5, %o2 /* ARG2: mmu context */
865 mov HV_MMU_ALL, %o3 /* ARG3: flags */
866 mov HV_FAST_MMU_DEMAP_CTX, %o5 ! fast-trap function number
868 mov HV_FAST_MMU_DEMAP_CTX, %g6 ! error path: which call failed
878 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
879 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 ! report failure
/* __hypervisor_xcall_flush_tlb_page — sun4v replacement for
 * xcall_flush_tlb_page (patched in at boot; must remain 20 insns).
 * Page-aligns the vaddr and issues one HV_MMU_UNMAP_ADDR_TRAP; %o-register
 * save/restore and the error-check branch are elided in this excerpt.
 */
882 .globl __hypervisor_xcall_flush_tlb_page
883 __hypervisor_xcall_flush_tlb_page: /* 20 insns */
884 /* %g5=ctx, %g1=vaddr */
888 mov %g1, %o0 /* ARG0: virtual address */
889 mov %g5, %o1 /* ARG1: mmu context */
890 mov HV_MMU_ALL, %o2 /* ARG2: flags */
891 srlx %o0, PAGE_SHIFT, %o0 ! page-align the vaddr
892 sllx %o0, PAGE_SHIFT, %o0
893 ta HV_MMU_UNMAP_ADDR_TRAP ! hypercall
894 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 ! error path: which call failed
902 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
903 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 ! report failure
/* __hypervisor_xcall_flush_tlb_kernel_range — sun4v replacement for
 * xcall_flush_tlb_kernel_range (patched in at boot; must remain 44 insns).
 * Per-page unmap traps in the nucleus context, plus a whole-nucleus
 * HV_FAST_MMU_DEMAP_CTX path — presumably for large ranges; confirm
 * against the full source.  Range setup, %o save/restore, and several
 * branches are elided in this excerpt.
 */
906 .globl __hypervisor_xcall_flush_tlb_kernel_range
907 __hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
908 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
909 sethi %hi(PAGE_SIZE - 1), %g2
910 or %g2, %lo(PAGE_SIZE - 1), %g2 ! %g2 = page mask
921 1: add %g1, %g3, %o0 /* ARG0: virtual address */
922 mov 0, %o1 /* ARG1: mmu context */
923 mov HV_MMU_ALL, %o2 /* ARG2: flags */
924 ta HV_MMU_UNMAP_ADDR_TRAP ! hypercall, one page
925 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 ! error path: which call failed
928 sethi %hi(PAGE_SIZE), %o2
936 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
937 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 ! report failure
941 mov 0, %o0 /* ARG0: CPU lists unimplemented */
942 mov 0, %o1 /* ARG1: CPU lists unimplemented */
943 mov 0, %o2 /* ARG2: mmu context == nucleus */
944 mov HV_MMU_ALL, %o3 /* ARG3: flags */
945 mov HV_FAST_MMU_DEMAP_CTX, %o5 ! demap entire nucleus ctx
950 mov HV_FAST_MMU_DEMAP_CTX, %g6 ! error path: which call failed
954 /* These just get rescheduled to PIL vectors. */
/* Each stub raises the corresponding soft interrupt bit so the real work
 * runs later at its PIL vector.  The xcall_call_function, xcall_capture,
 * and xcall_kgdb_capture labels, and each stub's retry tail, are elided
 * in this excerpt.
 */
955 .globl xcall_call_function
957 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
960 .globl xcall_call_function_single
961 xcall_call_function_single:
962 wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
965 .globl xcall_receive_signal
966 xcall_receive_signal:
967 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
972 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
976 .globl xcall_kgdb_capture
978 wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
982 #endif /* CONFIG_SMP */
/* cheetah_patch_cachetlbops — boot-time patcher: for each (target,
 * replacement) pair, %o0 = generic routine, %o1 = Cheetah routine; each
 * pair is presumably handed to a fixed-size code-copy helper (the call
 * and insn-count argument are elided in this excerpt).  This is why the
 * "NN insns" annotations above must be kept exact.
 */
984 .globl cheetah_patch_cachetlbops
985 cheetah_patch_cachetlbops:
988 sethi %hi(__flush_tlb_mm), %o0
989 or %o0, %lo(__flush_tlb_mm), %o0 ! target: generic mm flush
990 sethi %hi(__cheetah_flush_tlb_mm), %o1
991 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 ! replacement (19 insns)
995 sethi %hi(__flush_tlb_page), %o0
996 or %o0, %lo(__flush_tlb_page), %o0
997 sethi %hi(__cheetah_flush_tlb_page), %o1
998 or %o1, %lo(__cheetah_flush_tlb_page), %o1 ! replacement (22 insns)
1002 sethi %hi(__flush_tlb_pending), %o0
1003 or %o0, %lo(__flush_tlb_pending), %o0
1004 sethi %hi(__cheetah_flush_tlb_pending), %o1
1005 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 ! replacement (27 insns)
1009 sethi %hi(__flush_tlb_kernel_range), %o0
1010 or %o0, %lo(__flush_tlb_kernel_range), %o0
1011 sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
1012 or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 ! replacement (31 insns)
1016 #ifdef DCACHE_ALIASING_POSSIBLE
1017 sethi %hi(__flush_dcache_page), %o0
1018 or %o0, %lo(__flush_dcache_page), %o0
1019 sethi %hi(__cheetah_flush_dcache_page), %o1
1020 or %o1, %lo(__cheetah_flush_dcache_page), %o1 ! replacement (11 insns)
1023 #endif /* DCACHE_ALIASING_POSSIBLE */
1026 sethi %hi(xcall_flush_tlb_kernel_range), %o0
1027 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1028 sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
1029 or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 ! replacement (44 insns)
1032 #endif /* CONFIG_SMP */
/* hypervisor_patch_cachetlbops — boot-time patcher for sun4v: same
 * scheme as cheetah_patch_cachetlbops, replacing both the direct flush
 * routines and the SMP cross-call handlers with hypervisor-call variants.
 * The per-pair copy-helper calls are elided in this excerpt.
 */
1037 .globl hypervisor_patch_cachetlbops
1038 hypervisor_patch_cachetlbops:
1041 sethi %hi(__flush_tlb_mm), %o0
1042 or %o0, %lo(__flush_tlb_mm), %o0 ! target: generic mm flush
1043 sethi %hi(__hypervisor_flush_tlb_mm), %o1
1044 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 ! replacement (19 insns)
1048 sethi %hi(__flush_tlb_page), %o0
1049 or %o0, %lo(__flush_tlb_page), %o0
1050 sethi %hi(__hypervisor_flush_tlb_page), %o1
1051 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 ! replacement (22 insns)
1055 sethi %hi(__flush_tlb_pending), %o0
1056 or %o0, %lo(__flush_tlb_pending), %o0
1057 sethi %hi(__hypervisor_flush_tlb_pending), %o1
1058 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 ! replacement (27 insns)
1062 sethi %hi(__flush_tlb_kernel_range), %o0
1063 or %o0, %lo(__flush_tlb_kernel_range), %o0
1064 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
1065 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 ! replacement (31 insns)
1069 #ifdef DCACHE_ALIASING_POSSIBLE
1070 sethi %hi(__flush_dcache_page), %o0
1071 or %o0, %lo(__flush_dcache_page), %o0
1072 sethi %hi(__hypervisor_flush_dcache_page), %o1
1073 or %o1, %lo(__hypervisor_flush_dcache_page), %o1 ! replacement (2 insns)
1076 #endif /* DCACHE_ALIASING_POSSIBLE */
1079 sethi %hi(xcall_flush_tlb_mm), %o0
1080 or %o0, %lo(xcall_flush_tlb_mm), %o0
1081 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
1082 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 ! replacement (24 insns)
1086 sethi %hi(xcall_flush_tlb_page), %o0
1087 or %o0, %lo(xcall_flush_tlb_page), %o0
1088 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
1089 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 ! replacement (20 insns)
1093 sethi %hi(xcall_flush_tlb_kernel_range), %o0
1094 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1095 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
1096 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 ! replacement (44 insns)
1099 #endif /* CONFIG_SMP */