/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Assembly code support for the Olympus-C module
 */

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */
/*
 * Macro that flushes the entire Ecache.
 *
 * arg1 = ecache size
 * arg2 = ecache linesize
 * arg3 = ecache flush address - Not used for olympus-C
 */
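/*
 * On Olympus-C a single stxa of the U2_FLUSH command to the L2 control
 * register flushes the whole cache, so arg1/arg2 merely serve as scratch
 * registers here and arg3/tmp1 are unused; the argument list mirrors the
 * ECACHE_FLUSHALL macros of the other sun4u cpu modules.
 */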
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)	\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;	\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;	\
	stxa	arg1, [arg2]ASI_L2_CTRL
/*
 * SPARC64-VI MMU and Cache operations.
 */

#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */
	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o1
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For Kernel demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	flush	%o3
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1:
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 * %o3 = FLUSH_ADDR
	 */
	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU

	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%g1, %o4, %g1			! %g1 = primary pgsz | cnum

	wrpr	%g0, 1, %tl
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
	srlx	%o2, CTXREG_NEXT_SHIFT, %o1	! need to preserve nucleus pgsz
	sllx	%o1, CTXREG_NEXT_SHIFT, %o1	! %o1 = nucleus pgsz
	or	%g1, %o1, %g1		! %g1 = nucleus pgsz | primary pgsz | cnum
	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum

	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl

	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)

#endif	/* lint */
#if defined(lint)

void
vtag_flushall(void)
{}

#else	/* lint */

	ENTRY_NP2(vtag_flushall, demap_all)
	/*
	 * flush the tlb
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%o3
	retl
	nop
	SET_SIZE(demap_all)
	SET_SIZE(vtag_flushall)

#endif	/* lint */
#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = sfmmup
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g3, %g2
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	retry
1:
	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	SFMMU_CPU_CNUM(%g2, %g6, %g3)		! %g6 = sfmmu cnum on this CPU

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g6, %g4, %g6			! %g6 = primary pgsz | cnum

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		! %g5 = save old ctxnum
	srlx	%g5, CTXREG_NEXT_SHIFT, %g2	! %g2 = nucleus pgsz
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	! preserve nucleus pgsz
	or	%g6, %g2, %g6		! %g6 = nucleus pgsz | primary pgsz | cnum
	stxa	%g6, [%g4]ASI_DMMU		! wr new ctxnum
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		! restore old ctxnum
	retry
	SET_SIZE(vtag_flushpage_tl1)

#endif	/* lint */
#if defined(lint)

/* ARGSUSED */
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

#else	/* lint */

	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <sfmmup58|pgcnt6>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	SFMMU_PGCNT_MASK, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
	add	%g3, 1, %g3			/* g3 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g4
	ldx	[%g4 + %lo(ksfmmup)], %g4
	cmp	%g4, %g2
	bne,pn	%xcc, 1f			/* if not kernel as, go to 1 */
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc, 4b
	  add	%g1, %g2, %g1			/* next page */
	retry
1:
	/*
	 * We need to demap in a user context
	 *
	 * g2 = sfmmup
	 * g3 = pgcnt
	 */
	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU

	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g5, %g4, %g5

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
	srlx	%g6, CTXREG_NEXT_SHIFT, %g2	/* %g2 = nucleus pgsz */
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	/* preserve nucleus pgsz */
	or	%g5, %g2, %g5	/* %g5 = nucleus pgsz | primary pgsz | cnum */
	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
3:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc, 3b
	  add	%g1, %g2, %g1			/* next page */

	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)

#endif	/* lint */
#if defined(lint)

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	retry
	SET_SIZE(vtag_flushall_tl1)

#endif	/* lint */
/*
 * VAC (virtual address conflict) does not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

#else	/* lint */

	ENTRY(vac_flushpage)
	retl
	nop
	SET_SIZE(vac_flushpage)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

#else	/* lint */

	ENTRY_NP(vac_flushpage_tl1)
	retry
	SET_SIZE(vac_flushpage_tl1)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor(int vcolor, pfn_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor)
	retl
	nop
	SET_SIZE(vac_flushcolor)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor_tl1)
	retry
	SET_SIZE(vac_flushcolor_tl1)

#endif	/* lint */
#if defined(lint)

uint64_t
idsr_busy(void)
{
	return (0);
}

#else	/* lint */

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bz,a,pt	%xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

#endif	/* lint */
#if defined(lint)

/* ARGSUSED */
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/* ARGSUSED */
void
init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

#else	/* lint */

	.global _dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
	.align	4

/*
 * Setup interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop
	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	membar	#Sync
	retl
	nop
	SET_SIZE(init_mondo_nocheck)
	SET_SIZE(init_mondo)

#endif /* lint */
#if defined(lint)

/* ARGSUSED */
void
shipit(int upaid, int bn)
{ return; }

#else	/* lint */

/*
 * Ship mondo to aid using busy/nack pair bn
 */
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	or	%g1, %g2, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
	membar	#Sync
	retl
	nop
	SET_SIZE(shipit)

#endif	/* lint */
#if defined(lint)

/* ARGSUSED */
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else	/* lint */

/*
 * flush_instr_mem:
 *	Flush 1 page of the I-$ starting at vaddr
 *	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors, so a FLUSH instruction is only needed
 * to ensure the pipeline is consistent. This means a single flush is
 * sufficient at the end of a sequence of stores that updates the
 * instruction stream to ensure correct operation.
 */

	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	retl
	nop
	SET_SIZE(flush_instr_mem)

#endif	/* lint */
/*
 * flush_ecache:
 *	%o0 - 64 bit physical address
 *	%o1 - ecache size
 *	%o2 - ecache linesize
 */
#if defined(lint)

/*ARGSUSED*/
void
flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
{}

#else /* !lint */

	ENTRY(flush_ecache)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	retl
	nop
	SET_SIZE(flush_ecache)

#endif /* lint */
#if defined(lint)

/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
    int icache_lsize)
{}

#else	/* lint */

/*
 * I/D cache flushing is not needed for OPL processors
 */
	ENTRY(kdi_flush_idcache)
	retl
	nop
	SET_SIZE(kdi_flush_idcache)

#endif	/* lint */
#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)		\
	CPU_INDEX(scr1, ptr);				\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;		\
	set	trap_trace_ctl, ptr;			\
	add	ptr, scr1, scr1;			\
	ld	[scr1 + TRAPTR_LIMIT], ptr;		\
	tst	ptr;					\
	be,pn	%icc, label/**/1;			\
	ldx	[scr1 + TRAPTR_PBASE], ptr;		\
	ld	[scr1 + TRAPTR_OFFSET], scr1;		\
	add	ptr, scr1, ptr;				\
	rd	%asi, scr2;				\
	wr	%g0, TRAPTR_ASI, %asi;			\
	rd	STICK, scr1;				\
	stxa	scr1, [ptr + TRAP_ENT_TICK]%asi;	\
	rdpr	%tl, scr1;				\
	stha	scr1, [ptr + TRAP_ENT_TL]%asi;		\
	rdpr	%tt, scr1;				\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;		\
	rdpr	%tpc, scr1;				\
	stna	scr1, [ptr + TRAP_ENT_TPC]%asi;		\
	rdpr	%tstate, scr1;				\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;	\
	stna	%sp, [ptr + TRAP_ENT_SP]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_TR]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F1]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F2]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F3]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F4]%asi;		\
	wr	%g0, scr2, %asi;			\
	CPU_INDEX(ptr, scr1);				\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;		\
	set	trap_trace_ctl, scr1;			\
	add	scr1, ptr, ptr;				\
	ld	[ptr + TRAPTR_OFFSET], scr1;		\
	ld	[ptr + TRAPTR_LIMIT], scr2;		\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];	\
	add	scr1, TRAP_ENT_SIZE, scr1;		\
	sub	scr2, TRAP_ENT_SIZE, scr2;		\
	cmp	scr1, scr2;				\
	movge	%icc, 0, scr1;				\
	st	scr1, [ptr + TRAPTR_OFFSET];		\
label/**/1:
#endif /* TRAPTRACE */
/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD

/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)	\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2	;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1
/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)		\
	add	pa, offset, pa			;\
	stxa	val, [pa]ASI_MEM

#define	FLUSH_ALL_TLB(tmp1)			\
	set	DEMAP_ALL_TYPE, tmp1		;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP	;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP	;\
	sethi	%hi(FLUSH_ADDR), tmp1		;\
	flush	tmp1
/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)							\
	mov	OPL_SCRATCHPAD_ERRLOG, pa				;\
	ldxa	[pa]ASI_SCRATCHPAD, pa					;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa			;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
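/*
 * That is, the three shifts isolate bits
 * [ERRLOG_REG_EIDR_SHIFT-1 : ERRLOG_REG_ERR_SHIFT] of the errlog
 * register: the sllx/srlx pair clears the EIDR field above the physaddr
 * and the low bits below it, and the final sllx puts the physaddr back
 * in place with the low bits zeroed.
 */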
/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, wrapping around at the ERRLOG_BUFSZ boundary.
 * The args logpa, bufmask and tmp are scratch registers whose
 * incoming values are unused by this macro.
 *
 * Algorithm:
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSZ - 1
 * 3. tmp = logpa & ~(bufmask) (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)	\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa	;\
	set	(ERRLOG_BUFSZ-1), bufmask	;\
	andn	logpa, bufmask, tmp		;\
	add	logpa, ERRLOG_SZ, logpa		;\
	and	logpa, bufmask, logpa		;\
	or	tmp, logpa, logpa		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD
/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)		\
	LOG_ADDR(tmp)				;\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)	;\
	LOG_ADDR(tmp)				;\
	mov	tmp, sfsr			;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)	;\
	rd	STICK, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)	;\
	rdpr	%tl, tmp			;\
	sllx	tmp, 32, sfar			;\
	rdpr	%tt, tmp			;\
	or	sfar, tmp, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar	;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)	;\
	rdpr	%tpc, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)		;\
	UPDATE_LOGADD(sfsr, sfar, tmp)
#define	LOG_UGER_REG(uger, tmp, tmp2)		\
	LOG_ADDR(tmp)				;\
	mov	tmp, tmp2			;\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)	;\
	mov	tmp, uger			;\
	rd	STICK, tmp2			;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)	;\
	rdpr	%tl, tmp			;\
	sllx	tmp, 32, tmp2			;\
	rdpr	%tt, tmp			;\
	or	tmp2, tmp, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2	;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2	;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)	;\
	rdpr	%tstate, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)	;\
	rdpr	%tpc, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)		;\
	UPDATE_LOGADD(uger, tmp, tmp2)
/*
 * Scrub the STICK_COMPARE register to clear error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we
 * don't accidentally enable the TICK interrupt in STICK_COMPARE,
 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
 * is off.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)	\
	CPU_ADDR(tmp1, tmp2)			;\
	lduh	[tmp1 + CPU_FLAGS], tmp2	;\
	andcc	tmp2, CPU_ENABLE, %g0		;\
	set	OPL_UGER_STICK_DIFF, tmp2	;\
	rd	STICK, tmp1			;\
	add	tmp1, tmp2, tmp1		;\
	mov	1, tmp2				;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2	;\
	or	tmp1, tmp2, tmp2		;\
	movnz	%xcc, tmp1, tmp2		;\
	wr	tmp2, %g0, STICK_COMPARE
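/*
 * Net effect: STICK_COMPARE = STICK + OPL_UGER_STICK_DIFF, with the
 * TICKINT_DIS bit also set when CPU_ENABLE is clear (the movnz picks
 * the interrupt-enabled value only if the andcc above was non-zero).
 */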
/*
 * Reset registers that may be corrupted by IAUG_CRE error.
 * To update interrupt handling related registers force the
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)				\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1		;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1		;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1	;\
	set	ERRLOG_REG_EIDR_MASK, tmp2		;\
	and	tmp1, tmp2, tmp1			;\
	stxa	tmp1, [%g0]ASI_EIDR			;\
	wr	%g0, 0, SOFTINT				;\
	sethi	%hi(hres_last_tick), tmp1		;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1	;\
	set	OPL_UGER_STICK_DIFF, tmp2		;\
	add	tmp1, tmp2, tmp1			;\
	wr	tmp1, %g0, STICK			;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)
#define	CLEAR_FPREGS(tmp)			\
	wr	%g0, FPRS_FEF, %fprs		;\
	wr	%g0, %g0, %gsr			;\
	sethi	%hi(opl_clr_freg), tmp		;\
	or	tmp, %lo(opl_clr_freg), tmp	;\
	ldx	[tmp], %fsr			;\
	fzero	%d0				;\
	fzero	%d2				;\
	fzero	%d4				;\
	fzero	%d6				;\
	fzero	%d8				;\
	fzero	%d10				;\
	fzero	%d12				;\
	fzero	%d14				;\
	fzero	%d16				;\
	fzero	%d18				;\
	fzero	%d20				;\
	fzero	%d22				;\
	fzero	%d24				;\
	fzero	%d26				;\
	fzero	%d28				;\
	fzero	%d30				;\
	fzero	%d32				;\
	fzero	%d34				;\
	fzero	%d36				;\
	fzero	%d38				;\
	fzero	%d40				;\
	fzero	%d42				;\
	fzero	%d44				;\
	fzero	%d46				;\
	fzero	%d48				;\
	fzero	%d50				;\
	fzero	%d52				;\
	fzero	%d54				;\
	fzero	%d56				;\
	fzero	%d58				;\
	fzero	%d60				;\
	fzero	%d62				;\
	wr	%g0, %g0, %fprs
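/*
 * opl_clr_freg (defined in the data section further below) is a
 * zero-filled, 16-byte-aligned location; the ldx above uses it to clear
 * %fsr, and the fzeros then clear all 32 double-precision registers.
 */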
#define	CLEAR_GLOBALS()				\
	mov	%g0, %g1			;\
	mov	%g0, %g2			;\
	mov	%g0, %g3			;\
	mov	%g0, %g4			;\
	mov	%g0, %g5			;\
	mov	%g0, %g6			;\
	mov	%g0, %g7
/*
 * We do not clear the alternative globals here because they
 * are scratch registers, i.e. there is no code that reads from
 * them without writing to them first. In other words, every
 * read always follows a write, which makes an extra write to
 * the alternative globals unnecessary.
 */
#define	CLEAR_GEN_REGS(tmp1, label)		\
	set	TSTATE_KERN, tmp1		;\
	wrpr	%g0, tmp1, %tstate		;\
	mov	%g0, %y				;\
	mov	%g0, %asi			;\
	mov	%g0, %ccr			;\
	mov	%g0, %l0			;\
	mov	%g0, %l1			;\
	mov	%g0, %l2			;\
	mov	%g0, %l3			;\
	mov	%g0, %l4			;\
	mov	%g0, %l5			;\
	mov	%g0, %l6			;\
	mov	%g0, %l7			;\
	mov	%g0, %i0			;\
	mov	%g0, %i1			;\
	mov	%g0, %i2			;\
	mov	%g0, %i3			;\
	mov	%g0, %i4			;\
	mov	%g0, %i5			;\
	mov	%g0, %i6			;\
	mov	%g0, %i7			;\
	mov	%g0, %o1			;\
	mov	%g0, %o2			;\
	mov	%g0, %o3			;\
	mov	%g0, %o4			;\
	mov	%g0, %o5			;\
	mov	%g0, %o6			;\
	mov	%g0, %o7			;\
	mov	%g0, %o0			;\
	mov	%g0, %g4			;\
	mov	%g0, %g5			;\
	mov	%g0, %g6			;\
	mov	%g0, %g7			;\
	rdpr	%tl, tmp1			;\
	cmp	tmp1, 1				;\
	be,pt	%xcc, label/**/1		;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate	;\
	ba,pt	%xcc, label/**/2		;\
	nop					;\
label/**/1:					;\
	wrpr	tmp1, PSTATE_AG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_AG, %pstate	;\
label/**/2:
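/*
 * Note that wrpr with two source operands XORs them into %pstate, so
 * each wrpr above toggles between global register sets before
 * CLEAR_GLOBALS() runs on that set.
 */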
/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)				\
	sethi	%hi(nwin_minus_one), tmp		;\
	ld	[tmp + %lo(nwin_minus_one)], tmp	;\
	wrpr	%g0, tmp, %cwp				;\
	wrpr	%g0, tmp, %cleanwin			;\
	sub	tmp, 1, tmp				;\
	wrpr	%g0, tmp, %cansave			;\
	wrpr	%g0, %g0, %canrestore			;\
	wrpr	%g0, %g0, %otherwin			;\
	wrpr	%g0, PIL_MAX, %pil			;\
	wrpr	%g0, WSTATE_KERN, %wstate
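/*
 * With NWINDOWS windows this leaves %cwp = %cleanwin = NWINDOWS-1 and
 * %cansave = NWINDOWS-2 with nothing to restore, i.e. all windows clean
 * and available, and PIL raised to PIL_MAX.
 */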
#define	RESET_PREV_TSTATE(tmp1, tmp2, label)	\
	rdpr	%tl, tmp1			;\
	subcc	tmp1, 1, tmp1			;\
	bz,pt	%xcc, label/**/1		;\
	nop					;\
	wrpr	tmp1, %g0, %tl			;\
	set	TSTATE_KERN, tmp2		;\
	wrpr	tmp2, %g0, %tstate		;\
	wrpr	%g0, %g0, %tpc			;\
	wrpr	%g0, %g0, %tnpc			;\
	add	tmp1, 1, tmp1			;\
	wrpr	tmp1, %g0, %tl			;\
label/**/1:
/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)			\
	set	TSTATE_KERN, tmp		;\
	wrpr	%g0, tmp, %tstate		;\
	wrpr	%g0, 0, %tpc			;\
	wrpr	%g0, 0, %tnpc			;\
	RESET_WINREG(tmp)
/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set some reasonable values for them here.
 * Note that resetting the MMU registers also resets the context
 * info; we need to reset the window registers to prevent a
 * spill/fill that depends on context info for correct behaviour.
 * Note that the TLBs must be flushed before programming the context
 * registers.
 */

#if !defined(lint)
#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)	\
	FLUSH_ALL_TLB(tmp1)			;\
	set	MMU_PCONTEXT, tmp1		;\
	sethi	%hi(kcontextreg), tmp2		;\
	ldx	[tmp2 + %lo(kcontextreg)], tmp2	;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	set	MMU_SCONTEXT, tmp1		;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	sethi	%hi(ktsb_base), tmp1		;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2	;\
	mov	MMU_TSB, tmp3			;\
	stxa	tmp2, [tmp3]ASI_IMMU		;\
	stxa	tmp2, [tmp3]ASI_DMMU		;\
	membar	#Sync				;\
	RESET_WINREG(tmp1)

#define	RESET_TSB_TAGPTR(tmp)			\
	set	MMU_TAG_ACCESS, tmp		;\
	stxa	%g0, [tmp]ASI_IMMU		;\
	stxa	%g0, [tmp]ASI_DMMU		;\
	membar	#Sync
#endif /* lint */
/*
 * In case of errors in the MMU_TSB_PREFETCH registers we have to
 * reset them. We can use "0" as the reset value; this way we set
 * the "V" bit of the registers to 0, which will disable the prefetch,
 * so the values of the other fields are irrelevant.
 */
#if !defined(lint)
#define	RESET_TSB_PREFETCH(tmp)			\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH
#endif /* lint */
/*
 * In case of errors in the MMU_SHARED_CONTEXT register we have to
 * reset its value. We can use "0" as the reset value; it will put
 * 0 in the IV field, disabling the shared context support and
 * making the values of all the other fields of the register irrelevant.
 */
#if !defined(lint)
#define	RESET_SHARED_CTXT(tmp)			\
	set	MMU_SHARED_CONTEXT, tmp		;\
	stxa	%g0, [tmp]ASI_DMMU
#endif /* lint */
/*
 * RESET_TO_PRIV()
 *
 * In many cases, we need to force the thread into privileged mode because
 * privileged mode is the only mode in which the system can continue to
 * work, given the indeterminable user mode information that results from
 * register corruption.
 *
 *  - opl_uger_ctxt
 *    If the error is secondary TSB related register parity, we have no idea
 *    what value is supposed to be for it.
 *
 *  In the three cases below, %tstate is not accessible until it is
 *  overwritten with some value, so we have no clue if the thread was
 *  running in user mode or not:
 *   - opl_uger_pstate
 *     If the error is %pstate parity, it propagates to %tstate.
 *   - opl_uger_tstate
 *     No need to say the reason
 *   - opl_uger_r
 *     If the error is %ccr or %asi parity, it propagates to %tstate
 *
 * For the above four cases, user mode info may not be available for
 * sys_trap() and user_trap() to work consistently. So we have to force
 * the thread into privileged mode.
 *
 * Forcing the thread into privileged mode requires forcing
 * regular %g7 to be CPU_THREAD, because if it was running in user mode,
 * %g7 will be set in user_trap(). Also, since the %sp may be in
 * an inconsistent state, we need to do a stack reset and switch to
 * something we know, i.e. the current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set regular %g7, we need to clear the PSTATE_AG bit and need to
 * use one local register. Note that we are panicking and will never
 * unwind back, so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might be
 * within the range of OBP addresses. %tpc must be forced to be zero to
 * prevent sys_trap() from going to prom_trap().
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)	\
	RESET_MMU_REGS(tmp, tmp1, tmp2)		;\
	CPU_ADDR(tmp, tmp1)			;\
	ldx	[tmp + CPU_THREAD], local	;\
	ldx	[local + T_STACK], tmp		;\
	sub	tmp, STACK_BIAS, %sp		;\
	rdpr	%pstate, tmp			;\
	wrpr	tmp, PSTATE_AG, %pstate		;\
	mov	local, %g7			;\
	rdpr	%pstate, local			;\
	wrpr	local, PSTATE_AG, %pstate	;\
	wrpr	%g0, 1, %tl			;\
	set	TSTATE_KERN, tmp		;\
	rdpr	%cwp, tmp1			;\
	or	tmp, tmp1, tmp			;\
	wrpr	tmp, %g0, %tstate		;\
	wrpr	%g0, %tpc
#if defined(lint)

void
ce_err(void)
{}

#else	/* lint */

/*
 * We normally don't expect CE traps since we disable the
 * 0x63 trap reporting at the start of day. There is a
 * small window before we disable them, so let's check for
 * it. Otherwise, panic.
 */

	.align	128
	ENTRY_NP(ce_err)
	mov	AFSR_ECR, %g1
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
	bz,pn	%xcc, 1f
	nop
	retry
1:
	/*
	 * We did disable the 0x63 trap reporting.
	 * This shouldn't happen - panic.
	 */
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err)

#endif	/* lint */
#if defined(lint)

void
ce_err_tl1(void)
{}

#else	/* lint */

/*
 * We don't use trap for CE detection.
 */
	ENTRY_NP(ce_err_tl1)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err_tl1)

#endif	/* lint */
#if defined(lint)

void
async_err(void)
{}

#else	/* lint */

/*
 * async_err is the default handler for IAE/DAE traps.
 * For OPL, we patch in the right handler at start of day.
 * But if an IAE/DAE trap gets generated before the handler
 * is patched, panic.
 */
	ENTRY_NP(async_err)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(async_err)

#endif	/* lint */
#if defined(lint)
void
opl_sync_trap(void)
{}
#else	/* lint */

	.seg	".data"
	.global	opl_clr_freg
	.global	opl_cpu0_err_log

	.align	16
opl_clr_freg:
	.word	0
	.align	16

	.align	MMU_PAGESIZE
opl_cpu0_err_log:
	.skip	MMU_PAGESIZE
/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    that the SFSR.FV bit is set. If SFSR.FV is not set, the
 *    error cases cannot be decoded/determined and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also, the SFPAR is only valid for UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all the tlbs.
 *      Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport. Else
 *      restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>1 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *      in the handler and the system is assumed to eventually enter
 *      Red mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *      don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in the user stack and
 *        the spill trap handler taken from sys_trap() so happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning
 *        back.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 *      will apply there.
 */
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
	rdpr	%tt, %g1
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	bne,pt	%xcc, 0f
	mov	MMU_SFSR, %g3
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn	%xcc, 2f		! Branch if SFSR is invalid and
	rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt	%xcc, 1f		! Branch if not UE/BERR/TO and
	rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	ldxa	[%g3]ASI_IMMU, %g2
0:
	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt	%xcc, 7f		! branch if SFSR.FV is valid
	mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr
7:
	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2	! faultaddr
2:
	sethi	%hi(SFSR_TLB_PRT), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	nop
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	FLUSH_ALL_TLB(%g3)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract the error count
	subcc	%g3, 1, %g0		! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
	retry
8:
	sethi	%hi(SFSR_TLB_MUL), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors
	nop
	FLUSH_ALL_TLB(%g3)
2:
	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log

	/*
	 * Special case for UE on user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in the spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * Conditions for handling this case are:
	 * - SFSR_FV is valid and SFSR_UE is set
	 * - we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent spill trap in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0
	nop
	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE
	nop
	RESET_WINREG(%g1)		! reset windows to prevent spills
4:
	RESET_USER_RTT_REGS(%g2, %g3, opl_sync_trap_resetskip)
opl_sync_trap_resetskip:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
	ba,pt	%icc, 6f
	nop
3:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
6:
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	mov	PIL_15, %g4
	SET_SIZE(opl_sync_trap)

#endif	/* lint */
#if defined(lint)
void
opl_uger_trap(void)
{}
#else	/* lint */

/*
 * Common Urgent error trap handler (tt=0x40)
 * All TL=0 and TL>0 0x40 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 1. Read the Urgent error status register (UGERSR)
 *    Faultaddress is N/A here and it is not collected.
 * 2. Check to see if we have a multiple errors case
 *    If so, we enable the WEAK_ED (weak error detection) bit
 *    to prevent any potential error storms and branch directly
 *    to generate ereport. (we don't decode/handle individual
 *    error cases when we get a multiple error situation)
 * 3. Now look for the recoverable error cases, which include
 *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
 *    recoverable errors are detected, do the following:
 *    - Flush all tlbs.
 *    - Verify that we came from TL=0; if not, generate
 *      ereport. Note that the reason we don't recover
 *      at TL>0 is because the AGs might be corrupted or
 *      inconsistent. We can't save/restore them into
 *      the scratchpad regs like we did for opl_sync_trap().
 *    - Check the INSTEND[5:4] bits in the UGERSR. If the
 *      value is 0x3 (11b), this error is not recoverable.
 *      Generate ereport.
 *    - Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport.
 *    - If the count is within the limit, update the count
 *      in the error log register (subtract one). Log the error
 *      info in the log buffer. Capture traptrace if enabled.
 *      Retry (no ereport generated).
 * 4. The rest of the error cases are unrecoverable and will
 *    be handled accordingly (flushing regs, etc. as required).
 *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
 *    consult the OPL cpu/mem philosophy doc.
 *    Ereport will be generated for these errors.
 * 5. Ereport generation.
 *    - Ereport generation for the urgent error trap always
 *      results in a panic when we unwind to the TL=0 handling
 *      code via sys_trap(). on_trap()/lofault protection does
 *      not apply there.
 */
	ENTRY_NP(opl_uger_trap)
	set	ASI_UGERSR, %g2
	ldxa	[%g2]ASI_AFSR, %g1		! Read the UGERSR reg

	set	UGESR_MULTI, %g2
	andcc	%g1, %g2, %g0			! Check for Multi-errs
	bz,pt	%xcc, opl_uger_is_recover	! branch if not Multi-errs
	nop
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3		! Enable Weak error
	or	%g3, ASI_ECR_WEAK_ED, %g3	! detect mode to prevent
	stxa	%g3, [%g2]ASI_AFSR		! potential error storms
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_is_recover:
	set	UGESR_CAN_RECOVER, %g2		! Check for recoverable
	andcc	%g1, %g2, %g0			! errors i.e. IUG_DTLB,
	bz,pt	%xcc, opl_uger_cre		! IUG_ITLB or COREERR
	nop

	/*
	 * Fall thru to handle recoverable case
	 * Need to do the following additional checks to determine
	 * if this is indeed recoverable.
	 * 1. Error trap came from TL=0 and
	 * 2. INSTEND[5:4] bits in UGERSR is not 0x3
	 * 3. Recoverable error count limit not reached
	 */
	FLUSH_ALL_TLB(%g3)
	rdpr	%tl, %g3		! Read TL
	cmp	%g3, 1			! Check if we came from TL=0
	bne,pt	%xcc, opl_uger_panic	! branch if came from TL>0
	nop
	srlx	%g1, 4, %g2		! shift INSTEND[5:4] -> [1:0]
	and	%g2, 3, %g2		! extract the shifted [1:0] bits
	cmp	%g2, 3			! check if INSTEND is recoverable
	be,pt	%xcc, opl_uger_panic	! panic if ([1:0] = 11b)
	nop
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2	! Read errlog scratch reg
	and	%g2, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract error count and
	subcc	%g3, 1, %g3			! subtract one from it
	bz,pt	%xcc, opl_uger_panic	! If count reached zero, too many
	nop				! errors, branch to generate ereport
	sub	%g2, 1, %g2		! Subtract one from the count
	set	OPL_SCRATCHPAD_ERRLOG, %g3 ! and write back the updated
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! count into the errlog reg
	LOG_UGER_REG(%g1, %g2, %g3)	! Log the error info
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
#endif	/* TRAPTRACE */
	retry				! retry - no ereport

	/*
	 * Process the rest of the unrecoverable error cases
	 * All error cases below ultimately branch to either
	 * opl_uger_panic or opl_uger_panic1.
	 * opl_uger_panic1 is the same as opl_uger_panic except
	 * for the additional execution of the RESET_TO_PRIV()
	 * macro that does a heavy handed reset. Read the
	 * comments for RESET_TO_PRIV() macro for more info.
	 */
opl_uger_cre:
	set	UGESR_IAUG_CRE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_ctxt
	nop
	IAG_CRE(%g2, %g3)
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3
	or	%g3, ASI_ECR_WEAK_ED, %g3
	stxa	%g3, [%g2]ASI_AFSR
	ba	%xcc, opl_uger_panic
	nop

opl_uger_ctxt:
	set	UGESR_IAUG_TSBCTXT, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tsbp
	nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	nop
	RESET_SHARED_CTXT(%g2)
1:
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_tsbp:
	set	UGESR_IUG_TSBP, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_pstate
	nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	nop
	RESET_TSB_PREFETCH(%g2)
1:
	RESET_TSB_TAGPTR(%g2)

	/*
	 * IUG_TSBP error may corrupt MMU registers
	 * Reset them here.
	 */
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_pstate:
	set	UGESR_IUG_PSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tstate
	nop
	RESET_CUR_TSTATE(%g2)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_tstate:
	set	UGESR_IUG_TSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_f
	nop
	RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_f:
	set	UGESR_IUG_F, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_r
	nop
	CLEAR_FPREGS(%g2)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_r:
	set	UGESR_IUG_R, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_panic1
	nop
	CLEAR_GEN_REGS(%g2, opl_uger_r_1)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_panic:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	ba	%xcc, opl_uger_panic_cmn
	nop

opl_uger_panic1:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	RESET_TO_PRIV(%g1, %g3, %g4, %l0)

	/*
	 * Set up the argument for sys_trap.
	 * %g2 = arg #1 already set above
	 */
opl_uger_panic_cmn:
	RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
opl_uger_panic_resetskip:
	rdpr	%tl, %g3			! arg #2
	set	opl_cpu_urgent_error, %g1	! pc
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	mov	PIL_15, %g4
	SET_SIZE(opl_uger_trap)

#endif	/* lint */
#if defined(lint)
void
opl_ta3_trap(void)
{}
void
opl_cleanw_subr(void)
{}
#else	/* lint */

/*
 * OPL ta3 support (note that the win_reg save area for each cpu
 * is 2^7 bytes)
 */
#define	RESTORE_WREGS(tmp1, tmp2)		\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2 ;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	ldx	[tmp2 + 0], %l0			;\
	ldx	[tmp2 + 8], %l1			;\
	ldx	[tmp2 + 16], %l2		;\
	ldx	[tmp2 + 24], %l3		;\
	ldx	[tmp2 + 32], %l4		;\
	ldx	[tmp2 + 40], %l5		;\
	ldx	[tmp2 + 48], %l6		;\
	ldx	[tmp2 + 56], %l7		;\
	ldx	[tmp2 + 64], %i0		;\
	ldx	[tmp2 + 72], %i1		;\
	ldx	[tmp2 + 80], %i2		;\
	ldx	[tmp2 + 88], %i3		;\
	ldx	[tmp2 + 96], %i4		;\
	ldx	[tmp2 + 104], %i5		;\
	ldx	[tmp2 + 112], %i6		;\
	ldx	[tmp2 + 120], %i7
#define	SAVE_WREGS(tmp1, tmp2)			\
	CPU_INDEX(tmp1, tmp2)			;\
	sethi	%hi(opl_ta3_save), tmp2		;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2 ;\
	sllx	tmp1, 7, tmp1			;\
	add	tmp2, tmp1, tmp2		;\
	stx	%l0, [tmp2 + 0]			;\
	stx	%l1, [tmp2 + 8]			;\
	stx	%l2, [tmp2 + 16]		;\
	stx	%l3, [tmp2 + 24]		;\
	stx	%l4, [tmp2 + 32]		;\
	stx	%l5, [tmp2 + 40]		;\
	stx	%l6, [tmp2 + 48]		;\
	stx	%l7, [tmp2 + 56]		;\
	stx	%i0, [tmp2 + 64]		;\
	stx	%i1, [tmp2 + 72]		;\
	stx	%i2, [tmp2 + 80]		;\
	stx	%i3, [tmp2 + 88]		;\
	stx	%i4, [tmp2 + 96]		;\
	stx	%i5, [tmp2 + 104]		;\
	stx	%i6, [tmp2 + 112]		;\
	stx	%i7, [tmp2 + 120]
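/*
 * opl_ta3_save points to a per-strand save area of sixteen 8-byte
 * registers (%l0-%l7, %i0-%i7); CPU_INDEX shifted left by 7 selects
 * this strand's 2^7-byte slot.
 */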
/*
 * The purpose of this function is to make sure that the restore
 * instruction after the flushw does not cause a fill trap. The sun4u
 * fill trap handler cannot handle a tlb fault of an unmapped stack
 * except at the restore instruction at user_rtt. On OPL systems the
 * stack can get unmapped between the flushw and restore instructions
 * since multiple strands share the tlb.
 */
	ENTRY_NP(opl_ta3_trap)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore

	ba,a	fast_trap_done
	SET_SIZE(opl_ta3_trap)

	ENTRY_NP(opl_cleanw_subr)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore
	jmp	%g7
	nop
	SET_SIZE(opl_cleanw_subr)

#endif	/* lint */
#if defined(lint)

void
opl_serr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 */
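/*
 * These instruction sequences are patched into the trap table at start
 * of day (see the async_err comment above); the trailing .align 32
 * keeps each within a single 32-byte trap-table slot. %g1-%g3 are
 * stashed in the scratchpad registers first because opl_sync_trap
 * clobbers them.
 */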
	ENTRY_NP(opl_serr_instr)
	OPL_SAVE_GLOBAL(%g1,%g2,%g3)
	sethi	%hi(opl_sync_trap), %g3
	jmp	%g3 + %lo(opl_sync_trap)
	rdpr	%tt, %g1
	.align	32
	SET_SIZE(opl_serr_instr)

#endif	/* lint */
#if defined(lint)

void
opl_ugerr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x40
 */
	ENTRY_NP(opl_ugerr_instr)
	sethi	%hi(opl_uger_trap), %g3
	jmp	%g3 + %lo(opl_uger_trap)
	nop
	.align	32
	SET_SIZE(opl_ugerr_instr)

#endif	/* lint */
#if defined(lint)

void
opl_ta3_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x103 (flushw)
 */
	ENTRY_NP(opl_ta3_instr)
	sethi	%hi(opl_ta3_trap), %g3
	jmp	%g3 + %lo(opl_ta3_trap)
	nop
	.align	32
	SET_SIZE(opl_ta3_instr)

#endif	/* lint */
#if defined(lint)

void
opl_ta4_instr(void)
{}

#else	/* lint */
/*
 * The patch for the .clean_windows code
 */
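/*
 * jmpl leaves the address of the patch site in %g7; the delay-slot add
 * advances it past the delay slot so that opl_cleanw_subr's closing
 * "jmp %g7" resumes at the instruction that follows the patched
 * sequence. The trailing nops pad the patch area.
 */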
	ENTRY_NP(opl_ta4_instr)
	sethi	%hi(opl_cleanw_subr), %g3
	add	%g3, %lo(opl_cleanw_subr), %g3
	jmpl	%g3, %g7
	add	%g7, 8, %g7
	nop
	nop
	nop
	SET_SIZE(opl_ta4_instr)

#endif	/* lint */
#if defined(lint)

/*
 * Get timestamp (stick).
 */
/* ARGSUSED */
void
stick_timestamp(int64_t *ts)
{}

#else	/* lint */

	ENTRY_NP(stick_timestamp)
	rd	STICK, %g1	! read stick reg
	sllx	%g1, 1, %g1
	srlx	%g1, 1, %g1	! clear npt bit

	retl
	stx	%g1, [%o0]	! store the timestamp
	SET_SIZE(stick_timestamp)

#endif	/* lint */
#if defined(lint)

/*
 * Set STICK adjusted by skew.
 */
/* ARGSUSED */
void
stick_adj(int64_t skew)
{}

#else	/* lint */

	ENTRY_NP(stick_adj)
	rdpr	%pstate, %g1		! save processor state
	andn	%g1, PSTATE_IE, %g3
	ba	1f			! cache align stick adj
	wrpr	%g0, %g3, %pstate	! turn off interrupts

	.align	16
1:	nop

	rd	STICK, %g4		! read stick reg
	add	%g4, %o0, %o1		! adjust stick with skew
	wr	%o1, %g0, STICK		! write stick reg

	retl
	wrpr	%g1, %pstate		! restore processor state
	SET_SIZE(stick_adj)

#endif	/* lint */
#if defined(lint)

/*
 * Debugger-specific stick retrieval
 */
/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (0);
}

#else	/* lint */

	ENTRY_NP(kdi_get_stick)
	rd	STICK, %g1
	stx	%g1, [%o0]
	retl
	mov	%g0, %o0
	SET_SIZE(kdi_get_stick)

#endif	/* lint */
#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	membar	#Sync
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	membar	#Sync
	flush	%i0				! flush instruction pipeline
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	ret
	restore	%g0, %g0, %o0

0:
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)

#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
ras_cntr_reset(void *arg)
{}
#else
	ENTRY_NP(ras_cntr_reset)
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	ldxa	[%o1]ASI_SCRATCHPAD, %o0
	or	%o0, ERRLOG_REG_NUMERR_MASK, %o0
	retl
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	SET_SIZE(ras_cntr_reset)
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
opl_error_setup(uint64_t cpu_err_log_pa)
{}

#else	/* lint */
	ENTRY_NP(opl_error_setup)
	/*
	 * Initialize the error log scratchpad register
	 */
	ldxa	[%g0]ASI_EIDR, %o2
	sethi	%hi(ERRLOG_REG_EIDR_MASK), %o1
	or	%o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
	and	%o2, %o1, %o3
	sllx	%o3, ERRLOG_REG_EIDR_SHIFT, %o2
	or	%o2, %o0, %o3
	or	%o3, ERRLOG_REG_NUMERR_MASK, %o0
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	/*
	 * Disable all restrainable error traps
	 */
	mov	AFSR_ECR, %o1
	ldxa	[%o1]ASI_AFSR, %o0
	andn	%o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
	retl
	stxa	%o0, [%o1]ASI_AFSR
	SET_SIZE(opl_error_setup)
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
cpu_early_feature_init(void)
{}
#else	/* lint */
	ENTRY_NP(cpu_early_feature_init)
	/*
	 * Enable MMU translating multiple page sizes for
	 * sITLB and sDTLB.
	 */
	mov	LSU_MCNTL, %o0
	ldxa	[%o0] ASI_MCNTL, %o1
	or	%o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
	stxa	%o1, [%o0] ASI_MCNTL

	/*
	 * Demap all previous entries.
	 */
	sethi	%hi(FLUSH_ADDR), %o1
	set	DEMAP_ALL_TYPE, %o0
	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	retl
	flush	%o1
	SET_SIZE(cpu_early_feature_init)
#endif	/* lint */
#if defined(lint)
/*
 * This function is called for each (enabled) CPU. We use it to
 * initialize error handling related registers.
 */
/*ARGSUSED*/
void
cpu_feature_init(void)
{}
#else	/* lint */
	ENTRY(cpu_feature_init)
	!
	! get the device_id and store the device_id
	! in the appropriate cpunodes structure
	! given the cpus index
	!
	CPU_INDEX(%o0, %o1)
	mulx	%o0, CPU_NODE_SIZE, %o0
	set	cpunodes + DEVICE_ID, %o1
	ldxa	[%g0] ASI_DEVICE_SERIAL_ID, %o2
	stx	%o2, [%o0 + %o1]
	!
	! initialize CPU registers
	!
	ba	opl_cpu_reg_init
	nop
	SET_SIZE(cpu_feature_init)
#endif	/* lint */
#if defined(lint)

void
cpu_clearticknpt(void)
{}

#else	/* lint */
	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
	 * registers. In an effort to make the change in the
	 * tick/stick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/*   interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	8			/* Ensure rd/wr in same i$ line */
2:
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/*   clearing NPT bit   */
1:
	rd	STICK, %g2		/* get stick register */
	brgez,pn %g2, 3f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 4f
	.align	8			/* Ensure rd/wr in same i$ line */
4:
	rd	STICK, %g2		/* get stick register */
	wr	%g3, %g2, STICK		/* write stick register, */
					/*   clearing NPT bit   */
3:
	jmp	%g4 + 4
	wrpr	%g0, %g1, %pstate	/* restore processor state */

	SET_SIZE(cpu_clearticknpt)

#endif	/* lint */
#if defined(lint)

void
cpu_halt_cpu(void)
{}

void
cpu_smt_pause(void)
{}

#else	/* lint */

/*
 * Halt the current strand with the suspend instruction.
 * The compiler/asm currently does not support this suspend
 * instruction mnemonic, use byte code for now.
 */
	ENTRY_NP(cpu_halt_cpu)
	.word	0x81b01040
	retl
	nop
	SET_SIZE(cpu_halt_cpu)

/*
 * Pause the current strand with the sleep instruction.
 * The compiler/asm currently does not support this sleep
 * instruction mnemonic, use byte code for now.
 */
	ENTRY_NP(cpu_smt_pause)
	.word	0x81b01060
	retl
	nop
	SET_SIZE(cpu_smt_pause)

#endif /* lint */