/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Assembly code support for the Olympus-C module
 */
#include <sys/asm_linkage.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>
#ifdef	TRAPTRACE
#include <sys/traptrace.h>
#endif	/* TRAPTRACE */
/*
 * Macro that flushes the entire Ecache.
 *
 * arg2 = ecache linesize
 * arg3 = ecache flush address - Not used for olympus-C
 */
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)		\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;		\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;		\
	stxa	arg1, [arg2]ASI_L2_CTRL
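/*
 * Usage sketch (illustrative only): flush_ecache() below expands this
 * macro as ECACHE_FLUSHALL(%o1, %o2, %o0, %o4); since Olympus-C ignores
 * the flush address, any scratch registers may be handed in.
 */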
/*
 * SPARC64-VI MMU and Cache operations.
 */

vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 */
	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
	andn	%o5, PSTATE_IE, %o4
	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For Kernel demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 */
	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU
	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%g1, %o4, %g1			! %g1 = primary pgsz | cnum
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
	srlx	%o2, CTXREG_NEXT_SHIFT, %o1	! need to preserve nucleus pgsz
	sllx	%o1, CTXREG_NEXT_SHIFT, %o1	! %o1 = nucleus pgsz
	or	%g1, %o1, %g1		! %g1 = nucleus pgsz | primary pgsz | cnum
	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum
	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)
	ENTRY_NP2(vtag_flushall, demap_all)
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	SET_SIZE(vtag_flushall)
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)

	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1
	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP

	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	SFMMU_CPU_CNUM(%g2, %g6, %g3)		! %g6 = sfmmu cnum on this CPU

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g6, %g4, %g6			! %g6 = primary pgsz | cnum

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		! %g5 = save old ctxnum
	srlx	%g5, CTXREG_NEXT_SHIFT, %g2	! %g2 = nucleus pgsz
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	! preserve nucleus pgsz
	or	%g6, %g2, %g6		! %g6 = nucleus pgsz | primary pgsz | cnum
	stxa	%g6, [%g4]ASI_DMMU		! wr new ctxnum
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		! restore old ctxnum
	SET_SIZE(vtag_flushpage_tl1)
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)

	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <sfmmup58|pgcnt6>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	SFMMU_PGCNT_MASK, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
	add	%g3, 1, %g3			/* g3 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g4
	ldx	[%g4 + %lo(ksfmmup)], %g4

	bne,pn	%xcc, 1f			/* if not kernel as, go to 1 */
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5

	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	add	%g1, %g2, %g1			/* next page */

	/*
	 * We need to demap in a user context
	 */
	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU

	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
	srlx	%g6, CTXREG_NEXT_SHIFT, %g2	/* %g2 = nucleus pgsz */
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	/* preserve nucleus pgsz */
	or	%g5, %g2, %g5	/* %g5 = nucleus pgsz | primary pgsz | cnum */
	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5

	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	add	%g1, %g2, %g1			/* next page */

	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
	SET_SIZE(vtag_flush_pgcnt_tl1)
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)

	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	SET_SIZE(vtag_flushall_tl1)
/*
 * VAC (virtual address conflict) does not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

vac_flushpage(pfn_t pfnum, int vcolor)

	SET_SIZE(vac_flushpage)

vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)

	ENTRY_NP(vac_flushpage_tl1)
	SET_SIZE(vac_flushpage_tl1)

vac_flushcolor(int vcolor, pfn_t pfnum)

	ENTRY(vac_flushcolor)
	SET_SIZE(vac_flushcolor)

vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)

	ENTRY(vac_flushcolor_tl1)
	SET_SIZE(vac_flushcolor_tl1)
/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1

init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)

init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)

	.global	_dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
/*
 * Setup interrupt dispatch data registers
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	! IDSR should not be busy at the moment
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	sethi	%hi(_dispatch_status_busy), %o0
	or	%o0, %lo(_dispatch_status_busy), %o0

	ALTENTRY(init_mondo_nocheck)

	! interrupt vector dispatch data reg 0
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	! interrupt vector dispatch data reg 1
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	! interrupt vector dispatch data reg 2
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	SET_SIZE(init_mondo_nocheck)
shipit(int upaid, int bn)

	/*
	 * Ship mondo to aid using busy/nack pair bn
	 */
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
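	! Net effect (sketch, derived from the field comments above): the
	! dispatch address is composed as (upaid << IDCR_PID_SHIFT) |
	! (bn << IDCR_BN_SHIFT) | IDCR_OFFSET, and the store to
	! ASI_INTR_DISPATCH at that address launches the mondo.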
flush_instr_mem(caddr_t vaddr, size_t len)

/*
 * Flush 1 page of the I-$ starting at vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors so that a FLUSH instruction is only needed
 * to ensure pipeline is consistent. This means a single flush is sufficient at
 * the end of a sequence of stores that updates the instruction stream to
 * ensure correct operation.
 */
	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	SET_SIZE(flush_instr_mem)
/*
 *	%o0 - 64 bit physical address
 *	%o2 - ecache linesize
 */

flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	SET_SIZE(flush_ecache)
kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
    int icache_lsize)

/*
 * I/D cache flushing is not needed for OPL processors
 */
	ENTRY(kdi_flush_idcache)
	SET_SIZE(kdi_flush_idcache)
#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)			\
	CPU_INDEX(scr1, ptr);					\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;			\
	set	trap_trace_ctl, ptr;				\
	add	ptr, scr1, scr1;				\
	ld	[scr1 + TRAPTR_LIMIT], ptr;			\
	be,pn	%icc, label/**/1;				\
	ldx	[scr1 + TRAPTR_PBASE], ptr;			\
	ld	[scr1 + TRAPTR_OFFSET], scr1;			\
	add	ptr, scr1, ptr;					\
	wr	%g0, TRAPTR_ASI, %asi;				\
	stxa	scr1, [ptr + TRAP_ENT_TICK]%asi;		\
	stha	scr1, [ptr + TRAP_ENT_TL]%asi;			\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;			\
	stna	scr1, [ptr + TRAP_ENT_TPC]%asi;			\
	rdpr	%tstate, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;		\
	stna	%sp, [ptr + TRAP_ENT_SP]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_TR]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F1]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F2]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F3]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F4]%asi;			\
	wr	%g0, scr2, %asi;				\
	CPU_INDEX(ptr, scr1);					\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;			\
	set	trap_trace_ctl, scr1;				\
	add	scr1, ptr, ptr;					\
	ld	[ptr + TRAPTR_OFFSET], scr1;			\
	ld	[ptr + TRAPTR_LIMIT], scr2;			\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];		\
	add	scr1, TRAP_ENT_SIZE, scr1;			\
	sub	scr2, TRAP_ENT_SIZE, scr2;			\
	movge	%icc, 0, scr1;					\
	st	scr1, [ptr + TRAPTR_OFFSET];
#endif	/* TRAPTRACE */
/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD
/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)	\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2	;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1
/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)		\
	add	pa, offset, pa			;\
	stxa	val, [pa]ASI_MEM

#define	FLUSH_ALL_TLB(tmp1)			\
	set	DEMAP_ALL_TYPE, tmp1		;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP	;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP	;\
	sethi	%hi(FLUSH_ADDR), tmp1
/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)					\
	mov	OPL_SCRATCHPAD_ERRLOG, pa		;\
	ldxa	[pa]ASI_SCRATCHPAD, pa			;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa	;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
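/*
 * Explanatory sketch of the shift sequence above: the left shift by
 * (64 - ERRLOG_REG_EIDR_SHIFT) discards the EIDR bits at the top of the
 * register, the following right shift also strips the low
 * ERRLOG_REG_ERR_SHIFT bits, and the final left shift realigns what is
 * left - the physaddr field - to its original position, with every other
 * field now zero.
 */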
/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, making sure that it will modulo (wraparound)
 * ERRLOG_BUFSIZ boundary. The args logpa, bufmask, tmp are
 * unused input registers for this macro.
 *
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSIZ - 1
 * 3. tmp = logpa & ~(bufmask) (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)	\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa	;\
	set	(ERRLOG_BUFSZ-1), bufmask	;\
	andn	logpa, bufmask, tmp		;\
	add	logpa, ERRLOG_SZ, logpa		;\
	and	logpa, bufmask, logpa		;\
	or	tmp, logpa, logpa		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD
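/*
 * Worked example (hypothetical sizes, for illustration only): with
 * ERRLOG_BUFSZ = 0x1000 and ERRLOG_SZ = 0x40, a logpa of base+0xfc0
 * advances to base+0x000, i.e.
 * (logpa & ~0xfff) | ((logpa + 0x40) & 0xfff).
 */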
/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)		\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)	;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)	;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)	;\
	sllx	tmp, 32, sfar			;\
	or	sfar, tmp, sfar			;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar	;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)	;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)		;\
	UPDATE_LOGADD(sfsr, sfar, tmp)
#define	LOG_UGER_REG(uger, tmp, tmp2)		\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)	;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)	;\
	sllx	tmp, 32, tmp2			;\
	or	tmp2, tmp, tmp2			;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2	;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2	;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)	;\
	rdpr	%tstate, tmp2			;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)	;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)		;\
	UPDATE_LOGADD(uger, tmp, tmp2)
/*
 * Scrub the STICK_COMPARE register to clear error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we
 * don't accidentally enable TICK interrupt in STICK_COMPARE
 * i.e. no clock interrupt will be generated if CPU_ENABLE flag
 * is not set.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)	\
	CPU_ADDR(tmp1, tmp2)			;\
	lduh	[tmp1 + CPU_FLAGS], tmp2	;\
	andcc	tmp2, CPU_ENABLE, %g0		;\
	set	OPL_UGER_STICK_DIFF, tmp2	;\
	add	tmp1, tmp2, tmp1		;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2	;\
	or	tmp1, tmp2, tmp2		;\
	movnz	%xcc, tmp1, tmp2		;\
	wr	tmp2, %g0, STICK_COMPARE
/*
 * Reset registers that may be corrupted by IAUG_CRE error.
 * To update interrupt handling related registers force the
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)				\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1		;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1		;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1	;\
	set	ERRLOG_REG_EIDR_MASK, tmp2		;\
	and	tmp1, tmp2, tmp1			;\
	stxa	tmp1, [%g0]ASI_EIDR			;\
	wr	%g0, 0, SOFTINT				;\
	sethi	%hi(hres_last_tick), tmp1		;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1	;\
	set	OPL_UGER_STICK_DIFF, tmp2		;\
	add	tmp1, tmp2, tmp1			;\
	wr	tmp1, %g0, STICK			;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)
#define	CLEAR_FPREGS(tmp)			\
	wr	%g0, FPRS_FEF, %fprs		;\
	sethi	%hi(opl_clr_freg), tmp		;\
	or	tmp, %lo(opl_clr_freg), tmp
#define	CLEAR_GLOBALS()

/*
 * We do not clear the alternative globals here because they
 * are scratch registers, i.e. no code reads from them without
 * writing to them first. In other words, every read is always
 * preceded by a write, which makes an extra write to the
 * alternative globals unnecessary.
 */

#define	CLEAR_GEN_REGS(tmp1, label)			\
	set	TSTATE_KERN, tmp1			;\
	wrpr	%g0, tmp1, %tstate			;\
	be,pt	%xcc, label/**/1			;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate	;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate	;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate	;\
	ba,pt	%xcc, label/**/2			;\
	wrpr	tmp1, PSTATE_AG, %pstate		;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_AG, %pstate
/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)			\
	sethi	%hi(nwin_minus_one), tmp	;\
	ld	[tmp + %lo(nwin_minus_one)], tmp	;\
	wrpr	%g0, tmp, %cwp			;\
	wrpr	%g0, tmp, %cleanwin		;\
	wrpr	%g0, tmp, %cansave		;\
	wrpr	%g0, %g0, %canrestore		;\
	wrpr	%g0, %g0, %otherwin		;\
	wrpr	%g0, PIL_MAX, %pil		;\
	wrpr	%g0, WSTATE_KERN, %wstate
#define	RESET_PREV_TSTATE(tmp1, tmp2, label)	\
	subcc	tmp1, 1, tmp1			;\
	bz,pt	%xcc, label/**/1		;\
	wrpr	tmp1, %g0, %tl			;\
	set	TSTATE_KERN, tmp2		;\
	wrpr	tmp2, %g0, %tstate		;\
	wrpr	%g0, %g0, %tpc			;\
	wrpr	%g0, %g0, %tnpc			;\
	wrpr	tmp1, %g0, %tl
/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)			\
	set	TSTATE_KERN, tmp		;\
	wrpr	%g0, tmp, %tstate		;\
	wrpr	%g0, 0, %tnpc
/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set here some reasonable values for
 * them. Note that resetting MMU registers also resets the context
 * info, so we will need to reset the window registers to prevent
 * spill/fill that depends on context info for correct behaviour.
 * Note that the TLBs must be flushed before programming the context
 * registers.
 */
#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)	\
	FLUSH_ALL_TLB(tmp1)			;\
	set	MMU_PCONTEXT, tmp1		;\
	sethi	%hi(kcontextreg), tmp2		;\
	ldx	[tmp2 + %lo(kcontextreg)], tmp2	;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	set	MMU_SCONTEXT, tmp1		;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	sethi	%hi(ktsb_base), tmp1		;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2	;\
	mov	MMU_TSB, tmp3			;\
	stxa	tmp2, [tmp3]ASI_IMMU		;\
	stxa	tmp2, [tmp3]ASI_DMMU
#define	RESET_TSB_TAGPTR(tmp)		\
	set	MMU_TAG_ACCESS, tmp	;\
	stxa	%g0, [tmp]ASI_IMMU	;\
	stxa	%g0, [tmp]ASI_DMMU
/*
 * In case of errors in the MMU_TSB_PREFETCH registers we have to
 * reset them. We can use "0" as the reset value, this way we set
 * the "V" bit of the registers to 0, which will disable the prefetch
 * so the values of the other fields are irrelevant.
 */
#define	RESET_TSB_PREFETCH(tmp)			\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH
/*
 * In case of errors in the MMU_SHARED_CONTEXT register we have to
 * reset its value. We can use "0" as the reset value, it will put
 * 0 in the IV field disabling the shared context support, and
 * making values of all the other fields of the register irrelevant.
 */
#define	RESET_SHARED_CTXT(tmp)			\
	set	MMU_SHARED_CONTEXT, tmp		;\
	stxa	%g0, [tmp]ASI_DMMU
/*
 * In many cases, we need to force the thread into privilege mode because
 * privilege mode is the only mode in which the system can continue to work,
 * due to undeterminable user mode information that comes from register
 * corruption.
 *
 * If the error is secondary TSB related register parity, we have no idea
 * what value is supposed to be for it.
 *
 * For the below three cases %tstate is not accessible until it is overwritten
 * with some value, so we have no clue if the thread was running on user mode
 * or not:
 *
 * If the error is %pstate parity, it propagates to %tstate.
 *
 * No need to say the reason
 *
 * If the error is %ccr or %asi parity, it propagates to %tstate
 *
 * For the above four cases, user mode info may not be available for
 * sys_trap() and user_trap() to work consistently. So we have to force
 * the thread into privilege mode.
 *
 * Forcing the thread to privilege mode requires forcing
 * regular %g7 to be CPU_THREAD. Because if it was running on user mode,
 * %g7 will be set in user_trap(). Also since the %sp may be in
 * an inconsistent state, we need to do a stack reset and switch to
 * something we know i.e. current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set regular %g7, we need to clear the PSTATE_AG bit and need to
 * use one local register. Note that we are panicking and will never
 * unwind back so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might be
 * within the range of OBP addresses. %tpc must be forced to be zero to prevent
 * sys_trap() from going to prom_trap()
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)	\
	RESET_MMU_REGS(tmp, tmp1, tmp2)		;\
	CPU_ADDR(tmp, tmp1)			;\
	ldx	[tmp + CPU_THREAD], local	;\
	ldx	[local + T_STACK], tmp		;\
	sub	tmp, STACK_BIAS, %sp		;\
	rdpr	%pstate, tmp			;\
	wrpr	tmp, PSTATE_AG, %pstate		;\
	rdpr	%pstate, local			;\
	wrpr	local, PSTATE_AG, %pstate	;\
	set	TSTATE_KERN, tmp		;\
	or	tmp, tmp1, tmp			;\
	wrpr	tmp, %g0, %tstate
	/*
	 * We normally don't expect CE traps since we disable the
	 * 0x63 trap reporting at the start of day. There is a
	 * small window before we disable them, so let's check for
	 * it. Otherwise, panic.
	 */
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0

	/*
	 * We did disable the 0x63 trap reporting.
	 * This shouldn't happen - panic.
	 */
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	/*
	 * We don't use trap for CE detection.
	 */
	ENTRY_NP(ce_err_tl1)
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	SET_SIZE(ce_err_tl1)
	/*
	 * async_err is the default handler for IAE/DAE traps.
	 * For OPL, we patch in the right handler at start of day.
	 * But if an IAE/DAE trap gets generated before the handler
	 * is patched, panic.
	 */
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)

	.global	opl_clr_freg
	.global	opl_cpu0_err_log
/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    the SFSR.FV bit being set. If the SFSR.FV is not set, the
 *    error cases cannot be decoded/determined and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also the SFPAR is only valid for UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE)
 *      Flush all the tlbs.
 *      Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport. Else
 *      restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>1 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *      in the handler and the system is assumed to eventually
 *      enter Red-mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *      don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in user stack and
 *        the spill trap handler taken from sys_trap() so happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 */
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn	%xcc, 2f		! Branch if SFSR is invalid and
	rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt	%xcc, 1f		! Branch if not UE/BERR/TO and
	rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	ldxa	[%g3]ASI_IMMU, %g2

	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt	%xcc, 7f		! branch if SFSR.FV is valid
	mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2	! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2	! faultaddr

	sethi	%hi(SFSR_TLB_PRT), %g3
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3	! Extract the error count
	subcc	%g3, 1, %g0			! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)

	sethi	%hi(SFSR_TLB_MUL), %g3
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors

	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	/*
	 * Special case for UE on user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * Conditions for handling this case are:
	 * - SFSR_FV is valid and SFSR_UE is set
	 * - we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent spill trap in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0

	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE

	RESET_WINREG(%g1)		! reset windows to prevent spills

	RESET_USER_RTT_REGS(%g2, %g3, opl_sync_trap_resetskip)
opl_sync_trap_resetskip:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1

	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1

	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	SET_SIZE(opl_sync_trap)
/*
 * Common Urgent error trap handler (tt=0x40)
 * All TL=0 and TL>0 0x40 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 1. Read the Urgent error status register (UGERSR)
 *    Faultaddress is N/A here and it is not collected.
 * 2. Check to see if we have a multiple errors case
 *    If so, we enable WEAK_ED (weak error detection) bit
 *    to prevent any potential error storms and branch directly
 *    to generate ereport. (we don't decode/handle individual
 *    error cases when we get a multiple error situation)
 * 3. Now look for the recoverable error cases which include
 *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
 *    recoverable errors are detected, do the following:
 *    - Verify that we came from TL=0, if not, generate
 *      ereport. Note that the reason we don't recover
 *      at TL>0 is because the AGs might be corrupted or
 *      inconsistent. We can't save/restore them into
 *      the scratchpad regs like we did for opl_sync_trap().
 *    - Check the INSTEND[5:4] bits in the UGERSR. If the
 *      value is 0x3 (11b), this error is not recoverable.
 *    - Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport.
 *    - If the count is within the limit, update the count
 *      in the error log register (subtract one). Log the error
 *      info in the log buffer. Capture traptrace if enabled.
 *      Retry (no ereport generated)
 * 4. The rest of the error cases are unrecoverable and will
 *    be handled accordingly (flushing regs, etc. as required).
 *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
 *    consult the OPL cpu/mem philosophy doc.
 *    Ereport will be generated for these errors.
 * 5. Ereport generation.
 *    - Ereport generation for urgent error trap always
 *      results in a panic when we unwind to the TL=0 handling
 *      code via sys_trap(). on_trap()/lofault protection do
 *      not apply there.
 */
	ENTRY_NP(opl_uger_trap)
	ldxa	[%g2]ASI_AFSR, %g1		! Read the UGERSR reg

	set	UGESR_MULTI, %g2
	andcc	%g1, %g2, %g0			! Check for Multi-errs
	bz,pt	%xcc, opl_uger_is_recover	! branch if not Multi-errs
	ldxa	[%g2]ASI_AFSR, %g3		! Enable Weak error
	or	%g3, ASI_ECR_WEAK_ED, %g3	! detect mode to prevent
	stxa	%g3, [%g2]ASI_AFSR		! potential error storms
	ba	%xcc, opl_uger_panic1

opl_uger_is_recover:
	set	UGESR_CAN_RECOVER, %g2		! Check for recoverable
	andcc	%g1, %g2, %g0			! errors i.e. IUG_DTLB,
	bz,pt	%xcc, opl_uger_cre		! IUG_ITLB or COREERR

	/*
	 * Fall thru to handle recoverable case
	 * Need to do the following additional checks to determine
	 * if this is indeed recoverable.
	 * 1. Error trap came from TL=0 and
	 * 2. INSTEND[5:4] bits in UGERSR is not 0x3
	 * 3. Recoverable error count limit not reached
	 */
	rdpr	%tl, %g3			! Read TL
	cmp	%g3, 1				! Check if we came from TL=0
	bne,pt	%xcc, opl_uger_panic		! branch if came from TL>0

	srlx	%g1, 4, %g2			! shift INSTEND [5:4] -> [1:0]
	and	%g2, 3, %g2			! extract the shifted [1:0] bits
	cmp	%g2, 3				! check if INSTEND is recoverable
	be,pt	%xcc, opl_uger_panic		! panic if ([1:0] = 11b)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2	! Read errlog scratch reg
	and	%g2, ERRLOG_REG_NUMERR_MASK, %g3	! Extract error count and
	subcc	%g3, 1, %g3			! subtract one from it
	bz,pt	%xcc, opl_uger_panic	! If count reached zero, too many
	nop				! errors, branch to generate ereport
	sub	%g2, 1, %g2			! Subtract one from the count
	set	OPL_SCRATCHPAD_ERRLOG, %g3	! and write back the updated
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! count into the errlog reg
	LOG_UGER_REG(%g1, %g2, %g3)		! Log the error info
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
#endif	/* TRAPTRACE */
	retry					! retry - no ereport
	/*
	 * Process the rest of the unrecoverable error cases
	 * All error cases below ultimately branch to either
	 * opl_uger_panic or opl_uger_panic1.
	 * opl_uger_panic1 is the same as opl_uger_panic except
	 * for the additional execution of the RESET_TO_PRIV()
	 * macro that does a heavy handed reset. Read the
	 * comments for RESET_TO_PRIV() macro for more info.
	 */
	set	UGESR_IAUG_CRE, %g2
	bz,pt	%xcc, opl_uger_ctxt
	ldxa	[%g2]ASI_AFSR, %g3
	or	%g3, ASI_ECR_WEAK_ED, %g3
	stxa	%g3, [%g2]ASI_AFSR
	ba	%xcc, opl_uger_panic

	set	UGESR_IAUG_TSBCTXT, %g2
	bz,pt	%xcc, opl_uger_tsbp
	cmp	%g2, JUPITER_IMPL
	RESET_SHARED_CTXT(%g2)
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic

	set	UGESR_IUG_TSBP, %g2
	bz,pt	%xcc, opl_uger_pstate
	cmp	%g2, JUPITER_IMPL
	RESET_TSB_PREFETCH(%g2)
	RESET_TSB_TAGPTR(%g2)
	/*
	 * IUG_TSBP error may corrupt MMU registers
	 */
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic

	set	UGESR_IUG_PSTATE, %g2
	bz,pt	%xcc, opl_uger_tstate
	RESET_CUR_TSTATE(%g2)
	ba	%xcc, opl_uger_panic1

	set	UGESR_IUG_TSTATE, %g2
	bz,pt	%xcc, opl_uger_f
	RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
	ba	%xcc, opl_uger_panic1

	set	UGESR_IUG_F, %g2
	bz,pt	%xcc, opl_uger_r
	ba	%xcc, opl_uger_panic

	set	UGESR_IUG_R, %g2
	bz,pt	%xcc, opl_uger_panic1
	CLEAR_GEN_REGS(%g2, opl_uger_r_1)
	ba	%xcc, opl_uger_panic1
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	ba	%xcc, opl_uger_panic_cmn

	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	RESET_TO_PRIV(%g1, %g3, %g4, %l0)

	/*
	 * Set up the argument for sys_trap.
	 * %g2 = arg #1 already set above
	 */
	RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
opl_uger_panic_resetskip:
	rdpr	%tl, %g3			! arg #2
	set	opl_cpu_urgent_error, %g1	! pc
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	SET_SIZE(opl_uger_trap)
opl_cleanw_subr(void)

/*
 * OPL ta3 support (note please, that win_reg
 * area size for each cpu is 2^7 bytes)
 */
#define	RESTORE_WREGS(tmp1, tmp2)		\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2	;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	ldx	[tmp2 + 0], %l0			;\
	ldx	[tmp2 + 8], %l1			;\
	ldx	[tmp2 + 16], %l2		;\
	ldx	[tmp2 + 24], %l3		;\
	ldx	[tmp2 + 32], %l4		;\
	ldx	[tmp2 + 40], %l5		;\
	ldx	[tmp2 + 48], %l6		;\
	ldx	[tmp2 + 56], %l7		;\
	ldx	[tmp2 + 64], %i0		;\
	ldx	[tmp2 + 72], %i1		;\
	ldx	[tmp2 + 80], %i2		;\
	ldx	[tmp2 + 88], %i3		;\
	ldx	[tmp2 + 96], %i4		;\
	ldx	[tmp2 + 104], %i5		;\
	ldx	[tmp2 + 112], %i6		;\
	ldx	[tmp2 + 120], %i7
#define	SAVE_WREGS(tmp1, tmp2)			\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2	;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	stx	%l0, [tmp2 + 0]			;\
	stx	%l1, [tmp2 + 8]			;\
	stx	%l2, [tmp2 + 16]		;\
	stx	%l3, [tmp2 + 24]		;\
	stx	%l4, [tmp2 + 32]		;\
	stx	%l5, [tmp2 + 40]		;\
	stx	%l6, [tmp2 + 48]		;\
	stx	%l7, [tmp2 + 56]		;\
	stx	%i0, [tmp2 + 64]		;\
	stx	%i1, [tmp2 + 72]		;\
	stx	%i2, [tmp2 + 80]		;\
	stx	%i3, [tmp2 + 88]		;\
	stx	%i4, [tmp2 + 96]		;\
	stx	%i5, [tmp2 + 104]		;\
	stx	%i6, [tmp2 + 112]		;\
	stx	%i7, [tmp2 + 120]
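/*
 * Layout note (derived from the offsets above): each per-cpu save slot
 * holds the 8 %l and 8 %i registers, 16 x 8 = 128 = 2^7 bytes, which is
 * why CPU_INDEX is scaled by "sllx tmp1, 7, tmp1" in both macros.
 */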
/*
 * The purpose of this function is to make sure that the restore
 * instruction after the flushw does not cause a fill trap. The sun4u
 * fill trap handler can not handle a tlb fault of an unmapped stack
 * except at the restore instruction at user_rtt. On OPL systems the
 * stack can get unmapped between the flushw and restore instructions
 * since multiple strands share the tlb.
 */
	ENTRY_NP(opl_ta3_trap)
	SAVE_WREGS(%g2, %g6)
	RESTORE_WREGS(%g2, %g5)
	SET_SIZE(opl_ta3_trap)

	ENTRY_NP(opl_cleanw_subr)
	SAVE_WREGS(%g2, %g6)
	RESTORE_WREGS(%g2, %g5)
	SET_SIZE(opl_cleanw_subr)
opl_serr_instr(void)

/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 */
	ENTRY_NP(opl_serr_instr)
	OPL_SAVE_GLOBAL(%g1, %g2, %g3)
	sethi	%hi(opl_sync_trap), %g3
	jmp	%g3 + %lo(opl_sync_trap)
	SET_SIZE(opl_serr_instr)
opl_ugerr_instr(void)

/*
 * The actual trap handler for tt=0x40
 */
	ENTRY_NP(opl_ugerr_instr)
	sethi	%hi(opl_uger_trap), %g3
	jmp	%g3 + %lo(opl_uger_trap)
	SET_SIZE(opl_ugerr_instr)
/*
 * The actual trap handler for tt=0x103 (flushw)
 */
	ENTRY_NP(opl_ta3_instr)
	sethi	%hi(opl_ta3_trap), %g3
	jmp	%g3 + %lo(opl_ta3_trap)
	SET_SIZE(opl_ta3_instr)

/*
 * The patch for the .clean_windows code
 */
	ENTRY_NP(opl_ta4_instr)
	sethi	%hi(opl_cleanw_subr), %g3
	add	%g3, %lo(opl_cleanw_subr), %g3
	SET_SIZE(opl_ta4_instr)
/*
 * Get timestamp (stick).
 */

stick_timestamp(int64_t *ts)

	ENTRY_NP(stick_timestamp)
	rd	STICK, %g1		! read stick reg
	srlx	%g1, 1, %g1		! clear npt bit
	stx	%g1, [%o0]		! store the timestamp
	SET_SIZE(stick_timestamp)
/*
 * Set STICK adjusted by skew.
 */

stick_adj(int64_t skew)

	rdpr	%pstate, %g1		! save processor state
	andn	%g1, PSTATE_IE, %g3
	ba	1f			! cache align stick adj
	wrpr	%g0, %g3, %pstate	! turn off interrupts

	rd	STICK, %g4		! read stick reg
	add	%g4, %o0, %o1		! adjust stick with skew
	wr	%o1, %g0, STICK		! write stick reg

	wrpr	%g1, %pstate		! restore processor state
/*
 * Debugger-specific stick retrieval
 */

kdi_get_stick(uint64_t *stickp)

	ENTRY_NP(kdi_get_stick)
	SET_SIZE(kdi_get_stick)
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack

	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	flush	%i0				! flush instruction pipeline
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	wr	%g0, %l0, %fprs			! restore %fprs
	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0

	wrpr	%g0, %l1, %pstate		! restore interrupts

	restore	%g0, %g0, %o0

	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	wr	%g0, %l0, %fprs			! restore %fprs
	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	restore	%g0, -1, %o0

	call	dtrace_blksuword32_err
	SET_SIZE(dtrace_blksuword32)
ras_cntr_reset(void *arg)

	ENTRY_NP(ras_cntr_reset)
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	ldxa	[%o1]ASI_SCRATCHPAD, %o0
	or	%o0, ERRLOG_REG_NUMERR_MASK, %o0
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	SET_SIZE(ras_cntr_reset)
opl_error_setup(uint64_t cpu_err_log_pa)

	ENTRY_NP(opl_error_setup)
	/*
	 * Initialize the error log scratchpad register
	 */
	ldxa	[%g0]ASI_EIDR, %o2
	sethi	%hi(ERRLOG_REG_EIDR_MASK), %o1
	or	%o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
	sllx	%o3, ERRLOG_REG_EIDR_SHIFT, %o2
	or	%o3, ERRLOG_REG_NUMERR_MASK, %o0
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	/*
	 * Disable all restrainable error traps
	 */
	ldxa	[%o1]ASI_AFSR, %o0
	andn	%o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
	stxa	%o0, [%o1]ASI_AFSR
	SET_SIZE(opl_error_setup)
cpu_early_feature_init(void)

	ENTRY_NP(cpu_early_feature_init)
	/*
	 * Enable MMU translating multiple page sizes for
	 * the sITLB and sDTLB.
	 */
	ldxa	[%o0] ASI_MCNTL, %o1
	or	%o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
	stxa	%o1, [%o0] ASI_MCNTL
	/*
	 * Demap all previous entries.
	 */
	sethi	%hi(FLUSH_ADDR), %o1
	set	DEMAP_ALL_TYPE, %o0
	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	SET_SIZE(cpu_early_feature_init)
/*
 * This function is called for each (enabled) CPU. We use it to
 * initialize error handling related registers.
 */

cpu_feature_init(void)

	ENTRY(cpu_feature_init)
	!
	! get the device_id and store the device_id
	! in the appropriate cpunodes structure
	! given the cpus index
	!
	mulx	%o0, CPU_NODE_SIZE, %o0
	set	cpunodes + DEVICE_ID, %o1
	ldxa	[%g0] ASI_DEVICE_SERIAL_ID, %o2
	stx	%o2, [%o0 + %o1]
	!
	! initialize CPU registers
	!
	SET_SIZE(cpu_feature_init)
cpu_cleartickpnt(void)

	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
	 * registers. In an effort to make the change in the
	 * tick/stick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/*   interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */

	.align	8			/* Ensure rd/wr in same i$ line */
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/*   clearing NPT bit   */

	rd	STICK, %g2		/* get stick register */
	brgez,pn %g2, 3f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */

	.align	8			/* Ensure rd/wr in same i$ line */
	rd	STICK, %g2		/* get stick register */
	wr	%g3, %g2, STICK		/* write stick register, */
					/*   clearing NPT bit   */

	wrpr	%g0, %g1, %pstate	/* restore processor state */
	SET_SIZE(cpu_clearticknpt)
/*
 * Halt the current strand with the suspend instruction.
 * The compiler/asm currently does not support this suspend
 * instruction mnemonic, use byte code for now.
 */
	ENTRY_NP(cpu_halt_cpu)
	SET_SIZE(cpu_halt_cpu)

/*
 * Pause the current strand with the sleep instruction.
 * The compiler/asm currently does not support this sleep
 * instruction mnemonic, use byte code for now.
 */
	ENTRY_NP(cpu_smt_pause)
	SET_SIZE(cpu_smt_pause)