/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * CPU support routines for DR
 */

#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/x_call.h>
#include <sys/cpu_module.h>
#include <sys/cpu_impl.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

#ifdef _STARFIRE
#include <sys/starfire.h>

extern struct cpu	*SIGBCPU;
#else
/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";
#endif /* _STARFIRE */
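
/*
 * dr_cpu_unit_is_sane
 *
 * DEBUG-only consistency check: a CPU unit's cached cpuid, board
 * pointer, and component type must still match what was recorded
 * at discovery/connect time.
 */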
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * cpuid and unit number should never be different
	 * than they were at discovery/connect time
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#endif

	return (1);
}
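
/*
 * dr_errno2ecode
 *
 * Translate an errno returned by cpu_configure()/cpu_unconfigure()
 * into the ESBD_* code expected by dr_dev_err().
 */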
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}
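
/*
 * dr_cpu_set_prop
 *
 * Cache a CPU unit's clock speed and external cache size, reading
 * the "clock-frequency" and ecache size properties from the CPU's
 * devinfo node (or its core/cmp parent).
 */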
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
#ifndef _STARFIRE
		/*
		 * Do not report an error on Starfire since
		 * the dip will not be created until after
		 * the CPU has been configured.
		 */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
#endif /* !_STARFIRE */
		return;
	}

	/* read in the CPU speed */

	/*
	 * If the property is not found in the CPU node, it has to be
	 * kept in the core or cmp node so we just keep looking.
	 */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it has to be kept in the core or cmp node so
		 * we just keep looking.
		 */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
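
/*
 * dr_init_cpu_unit
 *
 * Set a CPU unit's initial DR state and condition from its
 * attached/present status, cache its cpuid and implementation
 * number, and snapshot the live cpu_flags under cpu_lock.
 */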
static void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
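
/*
 * dr_pre_attach_cpu
 *
 * Announce each attachment point on the console (one message per
 * CMP, no matter how many cores), unmap stale sigblocks for units
 * coming from the UNCONFIGURED state, and take the locks that the
 * attach and post-attach phases expect to be held.
 */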
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in. Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n", f,
			    up->sbc_cpu_id);

			CPU_SGN_MAPOUT(up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}
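
/*
 * dr_attach_cpu
 *
 * Configure the devinfo branch for one CPU and add the processor
 * to the OS with cpu_configure(); on failure, tear the branch back
 * down with drmach_unconfigure().  Called with cpu_lock held.
 */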
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing. These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);

			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else {
				lastoffline = i;
			}
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);

			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}

		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}
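
/*
 * dr_detach_cpu
 *
 * Remove one processor from the OS with cpu_unconfigure() and
 * destroy its devinfo branch.  Called with cpu_lock held; cpu_lock
 * and the status lock taken in dr_pre_detach_cpu() are dropped
 * later in dr_post_detach_cpu().
 */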
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}
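
/*
 * dr_post_detach_cpu
 *
 * Drop the locks acquired in dr_pre_detach_cpu().
 */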
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}
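
/*
 * dr_fill_cpu_stat
 *
 * Fill in one sbd_cpu_stat_t from the cached unit state and the
 * platform status returned by drmach_status().
 */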
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

#ifdef _STARFIRE
	csp->cs_isbootproc = (SIGBCPU->cpu_id == cp->sbc_cpu_id) ? 1 : 0;
#endif /* _STARFIRE */

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}
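
/*
 * dr_fill_cmp_stat
 *
 * Fold the per-core status for a CMP into a single sbd_cmp_stat_t:
 * common fields come from the first core, split ecaches (Jaguar)
 * are summed, and busy/time/ostate are merged across cores.
 */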
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}
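
/*
 * dr_cpu_status
 *
 * Gather status for every CPU/CMP unit in devset, writing one stat
 * structure per unit into dsp.  Returns the number of entries
 * filled in.
 */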
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}

/*
 * Cancel previous release operation for cpu.
 * For cpus this means simply bringing cpus that
 * were offline back online. Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online, go ahead
		 * and bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}
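
/*
 * dr_disconnect_cpu
 *
 * Transition a CPU unit back to the disconnected state.  A unit
 * still in the CONNECTED state was never brought in, so there is
 * nothing to do; otherwise ask the platform layer to disconnect it.
 */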
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL) {
		return (0);
	} else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
}