/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/types.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/thread.h>
#include <sys/procfs_isa.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/machpcb.h>
#include <sys/machasi.h>
#include <sys/fpu/fpusystm.h>
#include <sys/cpu_module.h>
#include <sys/privregs.h>
#include <sys/archsystm.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/nvpair.h>
#include <sys/kdi_impl.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/pool_pset.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <sys/hold_page.h>
#include <sys/ivintr.h>
#include <sys/clock_impl.h>
#include <sys/machclock.h>
int maxphys = MMU_PAGESIZE * 16;	/* 128k */
int klustsize = MMU_PAGESIZE * 16;	/* 128k */
/*
 * Initialize kernel thread's stack.
 */
caddr_t
thread_stk_init(caddr_t stk)
{
        kfpu_t *fp;
        ulong_t align;

        /* allocate extra space for floating point state */
        stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
        align = (uintptr_t)stk & 0x3f;
        stk -= align;           /* force v9_fpu to be 16 byte aligned */
        fp = (kfpu_t *)stk;
        fp->fpu_fprs = 0;

        stk -= SA(MINFRAME);
        return (stk);
}
#define	WIN32_SIZE	(MAXWIN * sizeof (struct rwindow32))
#define	WIN64_SIZE	(MAXWIN * sizeof (struct rwindow64))

kmem_cache_t	*wbuf32_cache;
kmem_cache_t	*wbuf64_cache;

void
lwp_stk_cache_init(void)
{
        /*
         * Window buffers are allocated from the static arena
         * because they are accessed at TL>0. We also must use
         * KMC_NOHASH to prevent them from straddling page
         * boundaries as they are accessed by physical address.
         */
        wbuf32_cache = kmem_cache_create("wbuf32_cache", WIN32_SIZE,
            0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
        wbuf64_cache = kmem_cache_create("wbuf64_cache", WIN64_SIZE,
            0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
}
/*
 * Initialize lwp's kernel stack.
 * Note that now that the floating point register save area (kfpu_t)
 * has been broken out from machpcb and aligned on a 64 byte boundary so that
 * we can do block load/stores to/from it, there are a couple of potential
 * optimizations to save stack space. 1. The floating point register save
 * area could be aligned on a 16 byte boundary, and the floating point code
 * changed to (a) check the alignment and (b) use different save/restore
 * macros depending upon the alignment. 2. The lwp_stk_init code below
 * could be changed to calculate if less space would be wasted if machpcb
 * was first instead of second. However there is a REGOFF macro used in
 * locore, syscall_trap, machdep and mlsetup that assumes that the saved
 * register area is a fixed distance from the %sp, and would have to be
 * changed to a pointer or something...JJ said later.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
        struct machpcb *mpcb;
        kfpu_t *fp;
        uintptr_t aln;

        stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
        aln = (uintptr_t)stk & 0x3F;
        stk -= aln;
        fp = (kfpu_t *)stk;
        stk -= SA(sizeof (struct machpcb));
        mpcb = (struct machpcb *)stk;
        bzero(mpcb, sizeof (struct machpcb));
        bzero(fp, sizeof (kfpu_t) + GSR_SIZE);
        lwp->lwp_regs = (void *)&mpcb->mpcb_regs;
        lwp->lwp_fpu = (void *)fp;
        mpcb->mpcb_fpu = fp;
        mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q;
        mpcb->mpcb_thread = lwp->lwp_thread;
        mpcb->mpcb_wbcnt = 0;
        if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) {
                mpcb->mpcb_wstate = WSTATE_USER32;
                mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
        } else {
                mpcb->mpcb_wstate = WSTATE_USER64;
                mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
        }
        ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
        mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
        mpcb->mpcb_pa = va_to_pa(mpcb);
        return (stk);
}
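/*
 * Illustrative sketch only (not compiled): assuming the layout set up by
 * lwp_stk_init() above, the top of an lwp's kernel stack looks roughly like
 * the diagram below, which is why the REGOFF macro mentioned in the comment
 * above can treat the saved register area as a fixed distance from %sp:
 *
 *	high addresses	+------------------------------+
 *			| kfpu_t + GSR save area       |  64-byte aligned
 *			+------------------------------+  <- lwp->lwp_fpu
 *			| struct machpcb               |
 *			| (mpcb_regs, wbuf ptrs, ...)  |
 *			+------------------------------+  <- lwp->lwp_regs, stk
 *	low addresses	| normal kernel stack growth   |
 */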
void
lwp_stk_fini(klwp_t *lwp)
{
        struct machpcb *mpcb = lwptompcb(lwp);

        /*
         * there might be windows still in the wbuf due to unmapped
         * stack, misaligned stack pointer, etc. We just free it.
         */
        mpcb->mpcb_wbcnt = 0;
        if (mpcb->mpcb_wstate == WSTATE_USER32)
                kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
        else
                kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
        mpcb->mpcb_wbuf = NULL;
        mpcb->mpcb_wbuf_pa = -1;
}
/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
        kthread_t *t, *pt = lwptot(lwp);
        struct machpcb *mpcb = lwptompcb(clwp);
        struct machpcb *pmpcb = lwptompcb(lwp);
        kfpu_t *fp, *pfp = lwptofpu(lwp);
        caddr_t wbuf;
        uint_t wstate;

        t = mpcb->mpcb_thread;
        /*
         * remember child's fp and wbuf since they will get erased during
         * the bcopy.
         */
        fp = mpcb->mpcb_fpu;
        wbuf = mpcb->mpcb_wbuf;
        wstate = mpcb->mpcb_wstate;
        /*
         * Don't copy mpcb_frame since we hand-crafted it
         * in thread_load().
         */
        bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
        mpcb->mpcb_thread = t;
        mpcb->mpcb_fpu = fp;
        fp->fpu_q = mpcb->mpcb_fpu_q;

        /*
         * It is theoretically possibly for the lwp's wstate to
         * be different from its value assigned in lwp_stk_init,
         * since lwp_stk_init assumed the data model of the process.
         * Here, we took on the data model of the cloned lwp.
         */
        if (mpcb->mpcb_wstate != wstate) {
                if (wstate == WSTATE_USER32) {
                        kmem_cache_free(wbuf32_cache, wbuf);
                        wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
                        wstate = WSTATE_USER64;
                } else {
                        kmem_cache_free(wbuf64_cache, wbuf);
                        wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
                        wstate = WSTATE_USER32;
                }
        }

        mpcb->mpcb_pa = va_to_pa(mpcb);
        mpcb->mpcb_wbuf = wbuf;
        mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

        ASSERT(mpcb->mpcb_wstate == wstate);

        if (mpcb->mpcb_wbcnt != 0) {
                bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
                    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
                    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
        }

        if (pt == curthread)
                pfp->fpu_fprs = _fp_read_fprs();
        if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
                if (pt == curthread && fpu_exists) {
                        save_gsr(clwp->lwp_fpu);
                } else {
                        uint64_t gsr;
                        gsr = get_gsr(lwp->lwp_fpu);
                        set_gsr(gsr, clwp->lwp_fpu);
                }
                fp_fork(lwp, clwp);
        }
}
void
lwp_freeregs(klwp_t *lwp, int isexec)
{
        kfpu_t *fp = lwptofpu(lwp);

        if (lwptot(lwp) == curthread)
                fp->fpu_fprs = _fp_read_fprs();
        if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF))
                fp_free(fp, isexec);
}
/*
 * These functions are currently unused on sparc.
 */
/*ARGSUSED*/
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{}

/*ARGSUSED*/
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{}
/*
 * fill in the extra register state area specified with the
 * specified lwp's platform-dependent non-floating-point extra
 * register state information
 */
/*ARGSUSED*/
void
xregs_getgfiller(klwp_id_t lwp, caddr_t xrp)
{
        /* for sun4u nothing to do here, added for symmetry */
}
/*
 * fill in the extra register state area specified with the specified lwp's
 * platform-dependent floating-point extra register state information.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp)
{
        prxregset_t *xregs = (prxregset_t *)xrp;
        kfpu_t *fp = lwptofpu(lwp);
        uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
        uint64_t gsr;

        /*
         * fp_fksave() does not flush the GSR register into
         * the lwp area, so do it now
         */
        kpreempt_disable();
        if (ttolwp(curthread) == lwp && fpu_exists) {
                fp->fpu_fprs = _fp_read_fprs();
                if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
                        _fp_write_fprs(fprs);
                        fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
                }
                save_gsr(fp);
        }
        gsr = get_gsr(fp);
        kpreempt_enable();
        PRXREG_GSR(xregs) = gsr;
}
/*
 * set the specified lwp's platform-dependent non-floating-point
 * extra register state based on the specified input
 */
/*ARGSUSED*/
void
xregs_setgfiller(klwp_id_t lwp, caddr_t xrp)
{
        /* for sun4u nothing to do here, added for symmetry */
}
/*
 * set the specified lwp's platform-dependent floating-point
 * extra register state based on the specified input
 */
void
xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp)
{
        prxregset_t *xregs = (prxregset_t *)xrp;
        kfpu_t *fp = lwptofpu(lwp);
        uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
        uint64_t gsr = PRXREG_GSR(xregs);

        kpreempt_disable();
        set_gsr(gsr, lwptofpu(lwp));

        if ((lwp == ttolwp(curthread)) && fpu_exists) {
                fp->fpu_fprs = _fp_read_fprs();
                if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
                        _fp_write_fprs(fprs);
                        fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
                }
                restore_gsr(lwptofpu(lwp));
        }
        kpreempt_enable();
}
/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/*ARGSUSED*/
void
getasrs(klwp_t *lwp, asrset_t asr)
{
        /* for sun4u nothing to do here, added for symmetry */
}
/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * floating-point extra register state information
 */
void
getfpasrs(klwp_t *lwp, asrset_t asr)
{
        kfpu_t *fp = lwptofpu(lwp);
        uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

        kpreempt_disable();
        if (ttolwp(curthread) == lwp)
                fp->fpu_fprs = _fp_read_fprs();
        if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
                if (fpu_exists && ttolwp(curthread) == lwp) {
                        if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
                                _fp_write_fprs(fprs);
                                fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
                        }
                        save_gsr(fp);
                }
                asr[ASR_GSR] = (int64_t)get_gsr(fp);
        }
        kpreempt_enable();
}
/*
 * set the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/*ARGSUSED*/
void
setasrs(klwp_t *lwp, asrset_t asr)
{
        /* for sun4u nothing to do here, added for symmetry */
}
void
setfpasrs(klwp_t *lwp, asrset_t asr)
{
        kfpu_t *fp = lwptofpu(lwp);
        uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

        kpreempt_disable();
        if (ttolwp(curthread) == lwp)
                fp->fpu_fprs = _fp_read_fprs();
        if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
                set_gsr(asr[ASR_GSR], fp);
                if (fpu_exists && ttolwp(curthread) == lwp) {
                        if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
                                _fp_write_fprs(fprs);
                                fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
                        }
                        restore_gsr(fp);
                }
        }
        kpreempt_enable();
}
/*
 * Create interrupt kstats for this CPU.
 */
void
cpu_create_intrstat(cpu_t *cp)
{
        int             i;
        kstat_t         *intr_ksp;
        kstat_named_t   *knp;
        char            name[KSTAT_STRLEN];
        zoneid_t        zoneid;

        ASSERT(MUTEX_HELD(&cpu_lock));

        if (pool_pset_enabled())
                zoneid = GLOBAL_ZONEID;
        else
                zoneid = ALL_ZONES;

        intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
            KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

        /*
         * Initialize each PIL's named kstat
         */
        if (intr_ksp != NULL) {
                intr_ksp->ks_update = cpu_kstat_intrstat_update;
                knp = (kstat_named_t *)intr_ksp->ks_data;
                intr_ksp->ks_private = cp;
                for (i = 0; i < PIL_MAX; i++) {
                        (void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
                            i + 1);
                        kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
                        (void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
                            i + 1);
                        kstat_named_init(&knp[(i * 2) + 1], name,
                            KSTAT_DATA_UINT64);
                }
                kstat_install(intr_ksp);
        }
}
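/*
 * Hedged usage sketch (not part of the kernel code): the kstats created
 * above can be read from userland with kstat(1M), e.g.
 *
 *	$ kstat -p cpu:0:intrstat:level-10-time
 *	$ kstat -p cpu:0:intrstat:level-10-count
 *
 * where "0" is a cpu_id and "10" is a PIL. The -time statistic is reported
 * in nanoseconds after conversion by cpu_kstat_intrstat_update() below.
 */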
/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
        kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}
/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
        kstat_named_t   *knp = ksp->ks_data;
        cpu_t           *cpup = (cpu_t *)ksp->ks_private;
        int             i;

        if (rw == KSTAT_WRITE)
                return (EACCES);

        /*
         * We use separate passes to copy and convert the statistics to
         * nanoseconds. This assures that the snapshot of the data is as
         * self-consistent as possible.
         */

        for (i = 0; i < PIL_MAX; i++) {
                knp[i * 2].value.ui64 = cpup->cpu_m.intrstat[i + 1][0];
                knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
        }

        for (i = 0; i < PIL_MAX; i++) {
                knp[i * 2].value.ui64 =
                    (uint64_t)tick2ns((hrtime_t)knp[i * 2].value.ui64,
                    cpup->cpu_id);
        }

        return (0);
}
/*
 * Called by common/os/cpu.c for psrinfo(1m) kstats
 */
char *
cpu_fru_fmri(cpu_t *cp)
{
        return (cpunodes[cp->cpu_id].fru_fmri);
}
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
        uint64_t        interval;
        uint64_t        start;
        cpu_t           *cpu;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        /*
         * We could be here with a zero timestamp. This could happen if:
         * an interrupt thread which no longer has a pinned thread underneath
         * it (i.e. it blocked at some point in its past) has finished running
         * its handler. intr_thread() updated the interrupt statistic for its
         * PIL and zeroed its timestamp. Since there was no pinned thread to
         * return to, swtch() gets called and we end up here.
         *
         * It can also happen if an interrupt thread in intr_thread() calls
         * preempt. It will have already taken care of updating stats. In
         * this event, the interrupt thread will be runnable.
         */
        if (t->t_intr_start) {
                do {
                        start = t->t_intr_start;
                        interval = CLOCK_TICK_COUNTER() - start;
                } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
                cpu = CPU;
                if (cpu->cpu_m.divisor > 1)
                        interval *= cpu->cpu_m.divisor;
                cpu->cpu_m.intrstat[t->t_pil][0] += interval;

                atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
                    interval);
        } else
                ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}
/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
        uint64_t ts;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        do {
                ts = t->t_intr_start;
        } while (atomic_cas_64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) !=
            ts);
}
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
        if (&plat_blacklist)
                return (plat_blacklist(cmd, scheme, fmri, class));

        return (ENOTSUP);
}
int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
        extern void kdi_flush_caches(void);
        size_t nread = 0;
        uint32_t word;
        int slop, i;

        kdi_flush_caches();
        while (nbytes > 0) {
                /* We might not begin on a word boundary. */
                if ((slop = addr & 3) != 0) {
                        word = ldphys(addr & ~3);
                        for (i = slop; i < 4 && nbytes > 0; i++, nbytes--,
                            nread++)
                                *buf++ = ((uchar_t *)&word)[i];
                        addr = roundup(addr, 4);
                        continue;
                }

                word = ldphys(addr);
                for (i = 0; i < 4 && nbytes > 0; i++, nbytes--, nread++, addr++)
                        *buf++ = ((uchar_t *)&word)[i];
        }

        kdi_flush_caches();

        *ncopiedp = nread;
        return (0);
}
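/*
 * Worked example (illustrative only, assuming the byte indexing used above):
 * a 3-byte kdi_pread() starting at physical address 0x1002 takes the slop
 * path with slop = 2, loads the word at 0x1000 and copies bytes 2 and 3 of
 * it, rounds addr up to 0x1004, then loads the next word and copies its
 * byte 0 to satisfy the remaining count. kdi_pwrite() below handles
 * misaligned and short tails the same way, but with a read-modify-write
 * of the containing word.
 */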
int
kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
        extern void kdi_flush_caches(void);
        size_t nwritten = 0;
        uint32_t word;
        int slop, i;

        kdi_flush_caches();

        while (nbytes > 0) {
                /* We might not begin on a word boundary. */
                if ((slop = addr & 3) != 0) {
                        word = ldphys(addr & ~3);
                        for (i = slop; i < 4 && nbytes > 0; i++, nbytes--,
                            nwritten++)
                                ((uchar_t *)&word)[i] = *buf++;
                        stphys(addr & ~3, word);
                        addr = roundup(addr, 4);
                        continue;
                }

                if (nbytes >= 4) {
                        for (word = 0, i = 0; i < 4; i++, nbytes--, nwritten++)
                                ((uchar_t *)&word)[i] = *buf++;
                        stphys(addr, word);
                        addr += 4;
                        continue;
                }

                /* We might not end with a whole word. */
                word = ldphys(addr);
                for (i = 0; nbytes > 0; i++, nbytes--, nwritten++)
                        ((uchar_t *)&word)[i] = *buf++;
                stphys(addr, word);
        }

        kdi_flush_caches();

        *ncopiedp = nwritten;
        return (0);
}
static void
kdi_kernpanic(struct regs *regs, uint_t tt)
{
        sync_reg_buf = *regs;
        sync_tt = tt;

        sync_handler();
}
static void
kdi_plat_call(void (*platfn)(void))
{
        if (platfn != NULL) {
                prom_suspend_prepost();
                platfn();
                prom_resume_prepost();
        }
}
/*
 * kdi_system_claim and release are defined here for all sun4 platforms and
 * pointed to by mach_kdi_init() to provide default callbacks for such systems.
 * Specific sun4u or sun4v platforms may implement their own claim and release
 * routines, at which point their respective callbacks will be updated.
 */
static void
kdi_system_claim(void)
{
        lbolt_debug_entry();
}

static void
kdi_system_release(void)
{
        lbolt_debug_return();
}
void
mach_kdi_init(kdi_t *kdi)
{
        kdi->kdi_plat_call = kdi_plat_call;
        kdi->kdi_kmdb_enter = kmdb_enter;
        kdi->pkdi_system_claim = kdi_system_claim;
        kdi->pkdi_system_release = kdi_system_release;
        kdi->mkdi_cpu_index = kdi_cpu_index;
        kdi->mkdi_trap_vatotte = kdi_trap_vatotte;
        kdi->mkdi_kernpanic = kdi_kernpanic;
}
/*
 * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
 * long, and it fills in the array with the time spent on cpu in
 * each of the mstates, where time is returned in nsec.
 *
 * No guarantee is made that the returned values in times[] will
 * monotonically increase on sequential calls, although this will
 * be true in the long run. Any such guarantee must be handled by
 * the caller, if needed. This can happen if we fail to account
 * for elapsed time due to a generation counter conflict, yet we
 * did account for it on a prior call (see below).
 *
 * The complication is that the cpu in question may be updating
 * its microstate at the same time that we are reading it.
 * Because the microstate is only updated when the CPU's state
 * changes, the values in cpu_intracct[] can be indefinitely out
 * of date. To determine true current values, it is necessary to
 * compare the current time with cpu_mstate_start, and add the
 * difference to times[cpu_mstate].
 *
 * This can be a problem if those values are changing out from
 * under us. Because the code path in new_cpu_mstate() is
 * performance critical, we have not added a lock to it. Instead,
 * we have added a generation counter. Before beginning
 * modifications, the counter is set to 0. After modifications,
 * it is set to the old value plus one.
 *
 * get_cpu_mstate() will not consider the values of cpu_mstate
 * and cpu_mstate_start to be usable unless the value of
 * cpu_mstate_gen is both non-zero and unchanged, both before and
 * after reading the mstate information. Note that we must
 * protect against out-of-order loads around accesses to the
 * generation counter. Also, this is a best effort approach in
 * that we do not retry should the counter be found to have
 * changed.
 *
 * cpu_intracct[] is used to identify time spent in each CPU
 * mstate while handling interrupts. Such time should be reported
 * against system time, and so is subtracted out from its
 * corresponding cpu_acct[] time and added to
 * cpu_acct[CMS_SYSTEM]. Additionally, intracct time is stored in
 * %ticks, but acct time may be stored as %sticks, thus requiring
 * different conversions before they can be compared.
 */
void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
        int i;
        hrtime_t now, start;
        uint16_t gen;
        uint16_t state;
        hrtime_t intracct[NCMSTATES];

        /*
         * Load all volatile state under the protection of membar.
         * cpu_acct[cpu_mstate] must be loaded to avoid double counting
         * of (now - cpu_mstate_start) by a change in CPU mstate that
         * arrives after we make our last check of cpu_mstate_gen.
         */

        now = gethrtime_unscaled();
        gen = cpu->cpu_mstate_gen;

        membar_consumer();      /* guarantee load ordering */
        start = cpu->cpu_mstate_start;
        state = cpu->cpu_mstate;
        for (i = 0; i < NCMSTATES; i++) {
                intracct[i] = cpu->cpu_intracct[i];
                times[i] = cpu->cpu_acct[i];
        }
        membar_consumer();      /* guarantee load ordering */

        if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
                times[state] += now - start;

        for (i = 0; i < NCMSTATES; i++) {
                scalehrtime(&times[i]);
                intracct[i] = tick2ns((hrtime_t)intracct[i], cpu->cpu_id);
        }

        for (i = 0; i < NCMSTATES; i++) {
                if (i == CMS_SYSTEM)
                        continue;
                times[i] -= intracct[i];
                if (times[i] < 0) {
                        intracct[i] += times[i];
                        times[i] = 0;
                }
                times[CMS_SYSTEM] += intracct[i];
        }
}
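/*
 * For reference, a minimal sketch of the writer-side discipline that the
 * reader above depends on. The real update is performed by new_cpu_mstate()
 * in common code; the ordering shown here is only an illustration of the
 * generation-counter protocol described in the block comment above:
 *
 *	cpu->cpu_mstate_gen = 0;		-- mark update in progress
 *	membar_producer();
 *	cpu->cpu_mstate = new_state;
 *	cpu->cpu_mstate_start = now;
 *	(update cpu_intracct[] / cpu_acct[])
 *	membar_producer();
 *	cpu->cpu_mstate_gen = gen + 1;		-- publish; never left at 0
 *
 * The reader treats gen == 0 or a changed gen as "values unusable" and
 * simply skips the (now - cpu_mstate_start) adjustment on that call.
 */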
void
mach_cpu_pause(volatile char *safe)
{
        /*
         * This cpu is now safe.
         */
        *safe = PAUSE_WAIT;
        membar_enter(); /* make sure stores are flushed */

        /*
         * Now we wait. When we are allowed to continue, safe
         * will be set to PAUSE_IDLE.
         */
        while (*safe != PAUSE_IDLE)
                SMT_PAUSE();
}
/*ARGSUSED*/
int
plat_mem_do_mmio(struct uio *uio, enum uio_rw rw)
{
        return (ENOTSUP);
}

/* cpu threshold for compressed dumps */
#ifdef sun4v
uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4V_MINCPU;
#else
uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4U_MINCPU;
#endif
/*ARGSUSED*/
int
dump_plat_data(void *dump_cdata)
{
        return (0);
}

/*ARGSUSED*/
int
plat_hold_page(pfn_t pfn, int lock, page_t **pp_ret)
{
        return (PLAT_HOLD_OK);
}

/*ARGSUSED*/
void
plat_release_page(page_t *pp)
{
}
/*ARGSUSED*/
void
progressbar_key_abort(ldi_ident_t li)
{
}
/*
 * We need to post a soft interrupt to reprogram the lbolt cyclic when
 * switching from event to cyclic driven lbolt. The following code adds
 * and posts the softint for sun4 platforms.
 */
static uint64_t lbolt_softint_inum;

void
lbolt_softint_add(void)
{
        lbolt_softint_inum = add_softintr(LOCK_LEVEL,
            (softintrfunc)lbolt_ev_to_cyclic, NULL, SOFTINT_MT);
}

void
lbolt_softint_post(void)
{
        setsoftint(lbolt_softint_inum);
}