usr/src/uts/i86pc/io/dr/dr_cpu.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

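/* Map a generic errno value to the corresponding sbd error code. */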
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU starts.  If they are unavailable, just set
 * them to zero.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it has to be kept in the core or cmp node so
		 * we just keep looking.
		 */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

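/*
 * Initialize the software state of a CPU unit: derive its DR state from
 * whether it is attached or present, cache its cpuid, implementation and
 * cpu_flags, and read in its speed and ecache properties.
 */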
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

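/*
 * Log an "OS configure" message for each attachment point, then take the
 * locks needed while the devinfo tree branches are created.
 */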
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}

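/*
 * Configure the devinfo branch for a CPU and add the processor to the OS.
 * If adding the processor fails, the devinfo branch is torn down again.
 */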
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
		up->sbc_cpu_id = cpuid;
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release. Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

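/*
 * Remove an attached processor from the OS and destroy its devinfo branch.
 */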
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	cpuid;
	int		rv;
	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		} else {
			up->sbc_cpu_id = -1;
		}
	}
}

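/* Release the locks taken in dr_pre_detach_cpu. */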
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

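/*
 * Fill in the status structure for a single CPU unit, combining the cached
 * DR state with the platform status reported by drmach.
 */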
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

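/*
 * Combine the per-core status structures for a CMP into a single
 * CMP status structure.
 */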
/*ARGSUSED2*/
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));

		if (csp[core].cs_speed > psp->ps_speed)
			psp->ps_speed = csp[core].cs_speed;
		if (csp[core].cs_ecache > psp->ps_ecache)
			psp->ps_ecache = csp[core].cs_ecache;

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

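/*
 * Produce status structures for the present CPU/CMP devices on the board.
 * Returns the number of status entries written to dsp.
 */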
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	*cstat;
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);
	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
	    KM_SLEEP);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);

	return (ncpu);
}

/*
 * Cancel previous release operation for cpu.
 * For cpus this means simply bringing cpus that
 * were offline back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu *cp;

		/*
		 * CPU had been online, go ahead
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

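/*
 * Disconnect a CPU unit from the board.  A unit that is still in the
 * CONNECTED state was never brought in, so there is nothing to do for it;
 * otherwise ask drmach to disconnect it.
 */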
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);
	else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}