/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * CPU support routines for DR
 */
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>
/* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";
static int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);

	return (1);
}
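/*
 * Map a kernel errno value (e.g. from cpu_configure()/cpu_unconfigure())
 * to an sbd error code for dr_dev_err().
 */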
int
dr_errno2ecode(int error)
/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU starts. If they are unavailable, just set
 * them to zero.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it has to be kept in the core or cmp node so
		 * we just keep looking.
		 */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
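/*
 * Initialize the DR state, condition, CPU id, implementation and CPU
 * flags of a CPU unit based on whether it is present and attached.
 */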
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
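/*
 * First phase of CPU attach: announce the operation on the console and
 * take the locks (status lock, devinfo tree, cpu_lock) that the attach
 * and post-attach phases rely on.
 */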
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}
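/*
 * Configure the devinfo branch for a CPU and add the processor to the
 * OS with cpu_configure().  On failure the devinfo branch is destroyed
 * again.
 */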
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;

		up->sbc_cpu_id = cpuid;
	}
}
/*
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}
/*
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	int		cpu_flags = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	sbd_error_t	*err;
	static fn_t	f = "dr_pre_release_cpu";

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (rv != 0)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);

			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else {
				lastoffline = i;
			}
		}

		err = drmach_release(up->sbc_cm.sbdev_id);
		if (err) {
			DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
			rv = -1;
			break;
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
/*
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release. Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}
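/*
 * Remove a CPU from the OS with cpu_unconfigure() and destroy its
 * devinfo branch.  Called with cpu_lock held.
 */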
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;
	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}
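/*
 * Final phase of CPU detach: drop cpu_lock and release the status lock
 * taken in dr_pre_detach_cpu().
 */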
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}
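/*
 * Fill in an sbd_cpu_stat_t for a single CPU unit from the cached unit
 * state and the platform status returned by drmach_status().
 */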
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}
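/*
 * Combine the per-core sbd_cpu_stat_t entries of a CMP into a single
 * sbd_cmp_stat_t: take the largest speed and ecache values, the most
 * recent time, and mark the CMP busy or configured if any core is.
 */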
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));

		if (csp[core].cs_speed > psp->ps_speed)
			psp->ps_speed = csp[core].cs_speed;
		if (csp[core].cs_ecache > psp->ps_ecache)
			psp->ps_ecache = csp[core].cs_ecache;

		psp->ps_cpuid[core] = csp[core].cs_cpuid;

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}
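/*
 * Walk the CPU units present on the board and fill in status for each
 * CMP (a standalone CPU is treated as a one-core CMP).  Returns the
 * number of status entries written to the outgoing array.
 */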
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu = 0;
	dr_board_t	*bp = hp->h_bd;
	sbd_cpu_stat_t	*cstat;
	int		impl;

	devset &= DR_DEVS_PRESENT(bp);
	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
	    KM_SLEEP);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores = 0;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);

	return (ncpu);
}
/*
 * Cancel previous release operation for cpu.
 * For cpus this means simply bringing cpus that
 * were offline back online. Note that they had
 * to have been online at the time they were released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online, go ahead
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}
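/*
 * Disconnect a CPU unit from the board.  Nothing to do if the CPUs were
 * never configured; otherwise let the platform layer disconnect it.
 */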
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL) {
		return (0);
	} else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
}