4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
27 * A CPR derivative specifically for starfire/starcat
30 #include <sys/types.h>
31 #include <sys/systm.h>
32 #include <sys/machparam.h>
33 #include <sys/machsystm.h>
36 #include <sys/sunddi.h>
37 #include <sys/sunndi.h>
38 #include <sys/devctl.h>
42 #include <sys/ddi_impldefs.h>
43 #include <sys/ndi_impldefs.h>
44 #include <sys/obpdefs.h>
45 #include <sys/cmn_err.h>
46 #include <sys/debug.h>
47 #include <sys/errno.h>
48 #include <sys/callb.h>
49 #include <sys/clock.h>
50 #include <sys/x_call.h>
51 #include <sys/cpuvar.h>
55 #include <sys/cpu_sgnblk_defs.h>
57 #include <sys/dr_util.h>
59 #include <sys/promif.h>
61 #include <sys/cyclic.h>
/*
 * Externs resolved elsewhere in the kernel: DDI driver-list bracketing
 * helpers, the pseudo-device predicate, the global cpu_lock, and the
 * administrator-maintained table of drivers known to be DR-unsafe.
 */
63 extern void e_ddi_enter_driver_list(struct devnames
*dnp
, int *listcnt
);
64 extern void e_ddi_exit_driver_list(struct devnames
*dnp
, int listcnt
);
65 extern int is_pseudo_device(dev_info_t
*dip
);
67 extern kmutex_t cpu_lock
;
68 extern dr_unsafe_devs_t dr_unsafe_devs
;
/*
 * Forward declarations for the file-local helpers defined below.
 * NOTE(review): several prototypes are truncated in this extract
 * (trailing parameters are missing from the visible text).
 */
70 static int dr_is_real_device(dev_info_t
*dip
);
71 static int dr_is_unsafe_major(major_t major
);
72 static int dr_bypass_device(char *dname
);
73 static int dr_check_dip(dev_info_t
*dip
, void *arg
, uint_t ref
);
74 static int dr_resolve_devname(dev_info_t
*dip
, char *buffer
,
76 static sbd_error_t
*drerr_int(int e_code
, uint64_t *arr
, int idx
,
78 static int dr_add_int(uint64_t *arr
, int idx
, int len
,
/* Exported entry point used by the DR passthru test path. */
81 int dr_pt_test_suspend(dr_handle_t
*hp
);
84 * dr_quiesce.c interface
85 * NOTE: states used internally by dr_suspend and dr_resume
/*
 * Suspend/resume progress state machine and the per-operation handle.
 * NOTE(review): enum members and some struct fields are missing from
 * this extract; the visible fields are the owning DR handle, the dip
 * that failed to suspend (if any), the current state, and an array of
 * error integers (driver majors / pids) capped at DR_MAX_ERR_INT.
 */
87 typedef enum dr_suspend_state
{
95 dr_handle_t
*sr_dr_handlep
;
96 dev_info_t
*sr_failed_dip
;
97 suspend_state_t sr_suspend_state
;
99 uint64_t sr_err_ints
[DR_MAX_ERR_INT
];
/* sr_flags bit: hardware watchdog was disabled at suspend, re-arm at resume. */
103 #define SR_FLAG_WATCHDOG 0x1
107 * This hack will go away before RTI. Just for testing.
108 * List of drivers to bypass when performing a suspend.
110 static char *dr_bypass_list
[] = {
115 #define SKIP_SYNC /* bypass sync ops in dr_suspend */
118 * dr_skip_user_threads is used to control if user threads should
119 * be suspended. If dr_skip_user_threads is true, the rest of the
120 * flags are not used; if it is false, dr_check_user_stop_result
121 * will be used to control whether or not we need to check suspend
122 * result, and dr_allow_blocked_threads will be used to control
123 * whether or not we allow suspend to continue if there are blocked
124 * threads. We allow all combinations of dr_check_user_stop_result
125 * and dr_allow_blocked_threads, even though it might not make much
126 * sense to not allow blocked threads when we don't even check stop
129 static int dr_skip_user_threads
= 0; /* default to FALSE */
130 static int dr_check_user_stop_result
= 1; /* default to TRUE */
131 static int dr_allow_blocked_threads
= 1; /* default to TRUE */
133 #define DR_CPU_LOOP_MSEC 1000
/*
 * NOTE(review): these two ASSERTs belong to two short functions whose
 * headers are missing from this extract (original lines ~135-148) —
 * presumably the interrupt stop/enable pair (dr_resume below calls
 * dr_enable_intr()). Both require the caller to hold cpu_lock — TODO
 * confirm against the full source.
 */
138 ASSERT(MUTEX_HELD(&cpu_lock
));
147 ASSERT(MUTEX_HELD(&cpu_lock
));
/*
 * Allocate a suspend/resume handle (GETSTRUCT of one dr_sr_handle_t)
 * and bind it to the caller's DR handle. Released by
 * dr_release_sr_handle(). Return statement not visible in this extract.
 */
154 dr_get_sr_handle(dr_handle_t
*hp
)
158 srh
= GETSTRUCT(dr_sr_handle_t
, 1);
159 srh
->sr_dr_handlep
= hp
;
/*
 * Free a suspend/resume handle obtained from dr_get_sr_handle().
 * Asserts no failed dip is still recorded (dr_resume_devices() clears
 * sr_failed_dip when it releases the hold taken at suspend time).
 */
165 dr_release_sr_handle(dr_sr_handle_t
*srh
)
167 ASSERT(srh
->sr_failed_dip
== NULL
);
168 FREESTRUCT(srh
, dr_sr_handle_t
, 1);
/*
 * Decide whether a dip is a "real" (hardware-backed) device for
 * suspend/resume purposes. Visible criteria: it must have a bound
 * driver, its power-management flags are consulted (PMC_NEEDS_SR /
 * PMC_PARENTAL_SR force yes, PMC_NO_SR forces no), and otherwise the
 * presence of a "reg" property decides. Return statements are not
 * visible in this extract.
 */
172 dr_is_real_device(dev_info_t
*dip
)
174 struct regspec
*regbuf
= NULL
;
178 if (ddi_get_driver(dip
) == NULL
)
181 if (DEVI(dip
)->devi_pm_flags
& (PMC_NEEDS_SR
|PMC_PARENTAL_SR
))
183 if (DEVI(dip
)->devi_pm_flags
& PMC_NO_SR
)
187 * now the general case
/*
 * NOTE(review): ")®buf" below is a mojibake artifact — "&reg" was
 * eaten by an HTML entity; the original reads "(caddr_t)&regbuf".
 * Left byte-identical here; must be repaired against the real source.
 */
189 rc
= ddi_getlongprop(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
, "reg",
190 (caddr_t
)®buf
, &length
);
191 ASSERT(rc
!= DDI_PROP_NO_MEMORY
);
192 if (rc
!= DDI_PROP_SUCCESS
) {
/* ddi_getlongprop() allocates; free the property buffer we were given. */
195 if ((length
> 0) && (regbuf
!= NULL
))
196 kmem_free(regbuf
, length
);
/*
 * Return whether the given driver major number appears in the
 * dr_unsafe_devs name table (drivers known not to tolerate DR).
 * Looks the major up by name, then does a linear strcmp scan of
 * dr_unsafe_devs.devnames. Return statements are not visible in this
 * extract.
 */
202 dr_is_unsafe_major(major_t major
)
207 if ((dname
= ddi_major_to_name(major
)) == NULL
) {
208 PR_QR("dr_is_unsafe_major: invalid major # %d\n", major
);
212 ndevs
= dr_unsafe_devs
.ndevs
;
213 for (i
= 0, cpp
= dr_unsafe_devs
.devnames
; i
< ndevs
; i
++) {
214 if (strcmp(dname
, *cpp
++) == 0)
/*
 * Return whether driver name 'dname' is on the static dr_bypass_list
 * (drivers skipped entirely during suspend — see the "hack" note at
 * the list's definition). Linear scan terminated by the "" sentinel.
 */
221 dr_bypass_device(char *dname
)
229 /* check the bypass list */
230 for (i
= 0, lname
= &dr_bypass_list
[i
]; **lname
!= '\0'; lname
++) {
231 if (strcmp(dname
, dr_bypass_list
[i
++]) == 0)
/*
 * Fill 'buffer' with the dip's node name and 'alias' with its binding
 * alias when the two differ (alias left empty otherwise). Both output
 * buffers are zeroed first; a NULL node name is reported as
 * "<null name>". Return value not visible in this extract — callers
 * treat 0 as success.
 */
238 dr_resolve_devname(dev_info_t
*dip
, char *buffer
, char *alias
)
243 *buffer
= *alias
= 0;
248 if ((name
= ddi_get_name(dip
)) == NULL
)
249 name
= "<null name>";
/* Canonicalize the alias through major-number round-trip when bound. */
253 if ((devmajor
= ddi_name_to_major(aka
)) != -1)
254 aka
= ddi_major_to_name(devmajor
);
256 (void) strcpy(buffer
, name
);
/* Only report an alias when it actually differs from the node name. */
258 if (strcmp(name
, aka
))
259 (void) strcpy(alias
, aka
);
/*
 * struct dr_ref (fragment): walk context for the device checks below.
 * Only the non-GLDv3 network-driver reference counter field is visible
 * here; the full struct (refcount, arr, idx, len) is used by
 * dr_check_dip/dr_check_devices.
 */
268 int *refcount_non_gldv3
;
/*
 * Device-tree walk callback: examine one dip and accumulate reference
 * counts and "unsafe major" records into the struct dr_ref passed via
 * 'arg'. Skips non-real devices and bypass-listed drivers; records the
 * major number of any attached, DR-unsafe driver via dr_add_int().
 * Always continues the walk (DDI_WALK_CONTINUE).
 */
276 dr_check_dip(dev_info_t
*dip
, void *arg
, uint_t ref
)
280 struct dr_ref
*rp
= (struct dr_ref
*)arg
;
283 return (DDI_WALK_CONTINUE
);
285 if (!dr_is_real_device(dip
))
286 return (DDI_WALK_CONTINUE
);
288 dname
= ddi_binding_name(dip
);
290 if (dr_bypass_device(dname
))
291 return (DDI_WALK_CONTINUE
);
293 if (dname
&& ((major
= ddi_name_to_major(dname
)) != (major_t
)-1)) {
294 if (ref
&& rp
->refcount
) {
295 *rp
->refcount
+= ref
;
296 PR_QR("\n %s (major# %d) is referenced(%u)\n", dname
,
/* Separately count referenced non-GLDv3 physical network drivers. */
299 if (ref
&& rp
->refcount_non_gldv3
) {
300 if (NETWORK_PHYSDRV(major
) && !GLDV3_DRV(major
))
301 *rp
->refcount_non_gldv3
+= ref
;
303 if (dr_is_unsafe_major(major
) && i_ddi_devi_attached(dip
)) {
304 PR_QR("\n %s (major# %d) not hotpluggable\n", dname
,
306 if (rp
->arr
!= NULL
&& rp
->idx
!= NULL
)
307 *rp
->idx
= dr_add_int(rp
->arr
, *rp
->idx
,
308 rp
->len
, (uint64_t)major
);
311 return (DDI_WALK_CONTINUE
);
/*
 * ddi_walk_devs() adapter: run dr_check_dip() with a zero reference
 * count, so only the unsafe-major check (not ref accounting) applies.
 */
315 dr_check_unsafe_major(dev_info_t
*dip
, void *arg
)
317 return (dr_check_dip(dip
, arg
, 0));
/*
 * Count references on the branch rooted at 'dip' by packaging the
 * caller's output pointers into a struct dr_ref and handing
 * dr_check_dip() to e_ddi_branch_referenced(). The branch must already
 * be held (asserted). Remaining dr_ref fields (arr/idx/len setup) are
 * not visible in this extract.
 */
323 dr_check_devices(dev_info_t
*dip
, int *refcount
, dr_handle_t
*handle
,
324 uint64_t *arr
, int *idx
, int len
, int *refcount_non_gldv3
)
326 struct dr_ref bref
= {0};
331 bref
.refcount
= refcount
;
332 bref
.refcount_non_gldv3
= refcount_non_gldv3
;
337 ASSERT(e_ddi_branch_held(dip
));
338 (void) e_ddi_branch_referenced(dip
, dr_check_dip
, &bref
);
/*
 * Depth-first, siblings-then-children suspend of the device subtree at
 * 'dip': recurse into each child under ndi_devi_enter/exit, then
 * DDI_SUSPEND the node itself via devi_detach(). Non-real devices,
 * bypass-listed drivers, and drmach-vetoed nodes are skipped. On a
 * detach failure the driver major is appended to srh->sr_err_ints, the
 * failing dip is remembered in sr_failed_dip (hold released later by
 * dr_resume_devices), an ESBD_SUSPEND error is posted on the DR handle,
 * and DDI_FAILURE is returned; otherwise DDI_SUCCESS.
 */
342 * The "dip" argument's parent (if it exists) must be held busy.
345 dr_suspend_devices(dev_info_t
*dip
, dr_sr_handle_t
*srh
)
353 * If dip is the root node, it has no siblings and it is
354 * always held. If dip is not the root node, dr_suspend_devices()
355 * will be invoked with the parent held busy.
357 for (; dip
!= NULL
; dip
= ddi_get_next_sibling(dip
)) {
358 char d_name
[40], d_alias
[40], *d_info
;
/* Suspend the children first, holding this dip busy for the walk. */
360 ndi_devi_enter(dip
, &circ
);
361 if (dr_suspend_devices(ddi_get_child(dip
), srh
)) {
362 ndi_devi_exit(dip
, circ
);
365 ndi_devi_exit(dip
, circ
);
367 if (!dr_is_real_device(dip
))
371 if ((dname
= ddi_binding_name(dip
)) != NULL
)
372 major
= ddi_name_to_major(dname
);
374 if (dr_bypass_device(dname
)) {
375 PR_QR(" bypassed suspend of %s (major# %d)\n", dname
,
/* Let the platform (drmach) veto suspension of this node. */
380 if (drmach_verify_sr(dip
, 1)) {
381 PR_QR(" bypassed suspend of %s (major# %d)\n", dname
,
386 if ((d_info
= ddi_get_name_addr(dip
)) == NULL
)
390 if (dr_resolve_devname(dip
, d_name
, d_alias
) == 0) {
391 if (d_alias
[0] != 0) {
392 prom_printf("\tsuspending %s@%s (aka %s)\n",
393 d_name
, d_info
, d_alias
);
395 prom_printf("\tsuspending %s@%s\n", d_name
,
399 prom_printf("\tsuspending %s@%s\n", dname
, d_info
);
402 if (devi_detach(dip
, DDI_SUSPEND
) != DDI_SUCCESS
) {
403 prom_printf("\tFAILED to suspend %s@%s\n",
404 d_name
[0] ? d_name
: dname
, d_info
);
/* Record the offending driver major for the error report. */
406 srh
->sr_err_idx
= dr_add_int(srh
->sr_err_ints
,
407 srh
->sr_err_idx
, DR_MAX_ERR_INT
, (uint64_t)major
);
410 srh
->sr_failed_dip
= dip
;
412 handle
= srh
->sr_dr_handlep
;
413 dr_op_err(CE_IGNORE
, handle
, ESBD_SUSPEND
, "%s@%s",
414 d_name
[0] ? d_name
: dname
, d_info
);
416 return (DDI_FAILURE
);
420 return (DDI_SUCCESS
);
/*
 * Re-attach (DDI_RESUME) the subtree under 'start' in reverse device
 * tree order, mirroring dr_suspend_devices(). Walks siblings from the
 * end toward 'start'; stops resuming past srh->sr_failed_dip (the node
 * whose suspend failed) and releases the hold recorded there. Attach
 * failures append the driver major to sr_err_ints and post an
 * ESBD_RESUME error on the DR handle, but the walk continues so as many
 * devices as possible come back.
 */
424 dr_resume_devices(dev_info_t
*start
, dr_sr_handle_t
*srh
)
427 dev_info_t
*dip
, *next
, *last
= NULL
;
434 /* attach in reverse device tree order */
435 while (last
!= start
) {
/* Find the last not-yet-resumed sibling (or the failed dip). */
437 next
= ddi_get_next_sibling(dip
);
438 while (next
!= last
&& dip
!= srh
->sr_failed_dip
) {
440 next
= ddi_get_next_sibling(dip
);
442 if (dip
== srh
->sr_failed_dip
) {
443 /* release hold acquired in dr_suspend_devices() */
444 srh
->sr_failed_dip
= NULL
;
446 } else if (dr_is_real_device(dip
) &&
447 srh
->sr_failed_dip
== NULL
) {
449 if ((bn
= ddi_binding_name(dip
)) != NULL
) {
450 major
= ddi_name_to_major(bn
);
454 if (!dr_bypass_device(bn
) &&
455 !drmach_verify_sr(dip
, 0)) {
456 char d_name
[40], d_alias
[40], *d_info
;
459 d_info
= ddi_get_name_addr(dip
);
463 if (!dr_resolve_devname(dip
, d_name
, d_alias
)) {
464 if (d_alias
[0] != 0) {
465 prom_printf("\tresuming "
466 "%s@%s (aka %s)\n", d_name
,
469 prom_printf("\tresuming "
470 "%s@%s\n", d_name
, d_info
);
473 prom_printf("\tresuming %s@%s\n", bn
,
477 if (devi_attach(dip
, DDI_RESUME
) !=
480 * Print a console warning,
481 * set an e_code of ESBD_RESUME,
482 * and save the driver major
483 * number in the e_rsc.
485 prom_printf("\tFAILED to resume %s@%s",
486 d_name
[0] ? d_name
: bn
, d_info
);
489 dr_add_int(srh
->sr_err_ints
,
490 srh
->sr_err_idx
, DR_MAX_ERR_INT
,
493 handle
= srh
->sr_dr_handlep
;
495 dr_op_err(CE_IGNORE
, handle
,
496 ESBD_RESUME
, "%s@%s",
497 d_name
[0] ? d_name
: bn
, d_info
);
502 /* Hold parent busy while walking its children */
503 ndi_devi_enter(dip
, &circ
);
504 dr_resume_devices(ddi_get_child(dip
), srh
);
505 ndi_devi_exit(dip
, circ
);
511 * True if thread is virtually stopped. Similar to CPR_VSTOPPED
512 * but from DR point of view. These user threads are waiting in
513 * the kernel. Once they complete in the kernel, they will process
514 * the stop signal and stop.
/* Macro: sleeping on a wchan with the checkpoint flag set counts as stopped. */
516 #define DR_VSTOPPED(t) \
517 ((t)->t_state == TS_SLEEP && \
518 (t)->t_wchan != NULL && \
520 ((t)->t_proc_flag & TP_CHKPT))
/*
 * Stop all user threads ahead of a DR suspend. Up to DR_UTSTOP_RETRY
 * passes: each pass walks the whole thread list under pidlock, marks
 * every user thread with TP_CHKPT (clearing TS_RESUME on already-stopped
 * threads), pokes cross-CPU on-proc threads, then waits with a
 * quadratically growing backoff (count*count*DR_UTSTOP_WAIT). A second
 * walk verifies the stop; stragglers on the final retry have their pids
 * recorded in srh->sr_err_ints and are reported via cmn_err/PR_QR.
 * Returns DDI_SUCCESS when everything stopped, and posts an
 * ESBD_UTHREAD error on the handle otherwise. No-op (DDI_SUCCESS) when
 * the dr_skip_user_threads tunable is set.
 */
524 dr_stop_user_threads(dr_sr_handle_t
*srh
)
528 dr_handle_t
*handle
= srh
->sr_dr_handlep
;
529 static fn_t f
= "dr_stop_user_threads";
532 extern void add_one_utstop();
533 extern void utstop_timedwait(clock_t);
534 extern void utstop_init(void);
536 #define DR_UTSTOP_RETRY 4
537 #define DR_UTSTOP_WAIT hz
539 if (dr_skip_user_threads
)
540 return (DDI_SUCCESS
);
544 /* we need to try a few times to get past fork, etc. */
546 for (count
= 0; count
< DR_UTSTOP_RETRY
; count
++) {
547 /* walk the entire threadlist */
548 mutex_enter(&pidlock
);
549 for (tp
= curthread
->t_next
; tp
!= curthread
; tp
= tp
->t_next
) {
550 proc_t
*p
= ttoproc(tp
);
552 /* handle kernel threads separately */
553 if (p
->p_as
== &kas
|| p
->p_stat
== SZOMB
)
556 mutex_enter(&p
->p_lock
);
559 if (tp
->t_state
== TS_STOPPED
) {
560 /* add another reason to stop this thread */
561 tp
->t_schedflag
&= ~TS_RESUME
;
/* Flag the thread to park at its next checkpoint opportunity. */
563 tp
->t_proc_flag
|= TP_CHKPT
;
566 mutex_exit(&p
->p_lock
);
568 mutex_enter(&p
->p_lock
);
573 if (ISWAKEABLE(tp
) || ISWAITING(tp
)) {
579 /* grab thread if needed */
580 if (tp
->t_state
== TS_ONPROC
&& tp
->t_cpu
!= CPU
)
581 poke_cpu(tp
->t_cpu
->cpu_id
);
585 mutex_exit(&p
->p_lock
);
587 mutex_exit(&pidlock
);
590 /* let everything catch up */
591 utstop_timedwait(count
* count
* DR_UTSTOP_WAIT
);
594 /* now, walk the threadlist again to see if we are done */
595 mutex_enter(&pidlock
);
596 for (tp
= curthread
->t_next
, bailout
= 0;
597 tp
!= curthread
; tp
= tp
->t_next
) {
598 proc_t
*p
= ttoproc(tp
);
600 /* handle kernel threads separately */
601 if (p
->p_as
== &kas
|| p
->p_stat
== SZOMB
)
605 * If this thread didn't stop, and we don't allow
606 * unstopped blocked threads, bail.
609 if (!CPR_ISTOPPED(tp
) &&
610 !(dr_allow_blocked_threads
&&
613 if (count
== DR_UTSTOP_RETRY
- 1) {
615 * save the pid for later reporting
618 dr_add_int(srh
->sr_err_ints
,
619 srh
->sr_err_idx
, DR_MAX_ERR_INT
,
622 cmn_err(CE_WARN
, "%s: "
623 "failed to stop thread: "
624 "process=%s, pid=%d",
625 f
, p
->p_user
.u_psargs
, p
->p_pid
);
627 PR_QR("%s: failed to stop thread: "
628 "process=%s, pid=%d, t_id=0x%p, "
629 "t_state=0x%x, t_proc_flag=0x%x, "
630 "t_schedflag=0x%x\n",
631 f
, p
->p_user
.u_psargs
, p
->p_pid
,
632 (void *)tp
, tp
->t_state
,
633 tp
->t_proc_flag
, tp
->t_schedflag
);
639 mutex_exit(&pidlock
);
641 /* were all the threads stopped? */
646 /* were we unable to stop all threads after a few tries? */
648 handle
->h_err
= drerr_int(ESBD_UTHREAD
, srh
->sr_err_ints
,
653 return (DDI_SUCCESS
);
/*
 * Undo dr_stop_user_threads(): walk the whole thread list under
 * pidlock, clear TP_CHKPT on every user thread, and put checkpoint-
 * stopped threads back on the run queue by restoring TS_RESUME.
 * Kernel threads (p_as == &kas) are skipped.
 */
657 dr_start_user_threads(void)
661 mutex_enter(&pidlock
);
663 /* walk all threads and release them */
664 for (tp
= curthread
->t_next
; tp
!= curthread
; tp
= tp
->t_next
) {
665 proc_t
*p
= ttoproc(tp
);
667 /* skip kernel threads */
668 if (ttoproc(tp
)->p_as
== &kas
)
671 mutex_enter(&p
->p_lock
);
672 tp
->t_proc_flag
&= ~TP_CHKPT
;
673 mutex_exit(&p
->p_lock
);
676 if (CPR_ISTOPPED(tp
)) {
677 /* back on the runq */
678 tp
->t_schedflag
|= TS_RESUME
;
684 mutex_exit(&pidlock
);
/*
 * Post signal 'sig' to every user process: walk practive under pidlock,
 * skipping kernel-only processes (no p_exec), zombies, init, and the
 * calling process itself. Used at resume to broadcast SIGTHAW.
 */
688 dr_signal_user(int sig
)
692 mutex_enter(&pidlock
);
694 for (p
= practive
; p
!= NULL
; p
= p
->p_next
) {
695 /* only user threads */
696 if (p
->p_exec
== NULL
|| p
->p_stat
== SZOMB
||
697 p
== proc_init
|| p
== ttoproc(curthread
))
700 mutex_enter(&p
->p_lock
);
701 sigtoproc(p
, NULL
, sig
);
702 mutex_exit(&p
->p_lock
);
705 mutex_exit(&pidlock
);
707 /* add a bit of delay */
/*
 * Resume after a (possibly partial) dr_suspend(), unwinding in reverse
 * suspend order. The switch on srh->sr_suspend_state falls through the
 * states so a suspend that stopped early only undoes what it did:
 *   FULL   — fix up TOD, re-enable interrupts, drop cpu_lock, update the
 *            CPU signature (only after cpus run again — see deadlock
 *            note), re-arm the HW watchdog if we disabled it, and call
 *            drmach_resume_first();
 *   DRIVER — DDI_RESUME the whole tree via dr_resume_devices(), posting
 *            an ESBD_RESUME error if any device failed;
 *   USER   — restart user threads (unless dr_skip_user_threads);
 *   BEGIN  — broadcast SIGTHAW and mark the CPU signature SIGST_RUN.
 */
712 dr_resume(dr_sr_handle_t
*srh
)
714 if (srh
->sr_suspend_state
< DR_SRSTATE_FULL
) {
716 * Update the signature block.
717 * If cpus are not paused, this can be done now.
718 * See comments below.
720 CPU_SIGNATURE(OS_SIG
, SIGST_RESUME_INPROGRESS
, SIGSUBST_NULL
,
724 switch (srh
->sr_suspend_state
) {
725 case DR_SRSTATE_FULL
:
727 ASSERT(MUTEX_HELD(&cpu_lock
));
730 * Prevent false alarm in tod_validate() due to tod
731 * value change between suspend and resume
733 mutex_enter(&tod_lock
);
734 tod_status_set(TOD_DR_RESUME_DONE
);
735 mutex_exit(&tod_lock
);
737 dr_enable_intr(); /* enable intr & clock */
740 mutex_exit(&cpu_lock
);
743 * Update the signature block.
744 * This must not be done while cpus are paused, since on
745 * Starcat the cpu signature update acquires an adaptive
746 * mutex in the iosram driver. Blocking with cpus paused
747 * can lead to deadlock.
749 CPU_SIGNATURE(OS_SIG
, SIGST_RESUME_INPROGRESS
, SIGSUBST_NULL
,
753 * If we suspended hw watchdog at suspend,
756 if (srh
->sr_flags
& (SR_FLAG_WATCHDOG
)) {
757 mutex_enter(&tod_lock
);
758 tod_ops
.tod_set_watchdog_timer(
759 watchdog_timeout_seconds
);
760 mutex_exit(&tod_lock
);
764 * This should only be called if drmach_suspend_last()
765 * was called and state transitioned to DR_SRSTATE_FULL
766 * to prevent resume attempts on device instances that
767 * were not previously suspended.
769 drmach_resume_first();
773 case DR_SRSTATE_DRIVER
:
779 /* no parent dip to hold busy */
780 dr_resume_devices(ddi_root_node(), srh
);
782 if (srh
->sr_err_idx
&& srh
->sr_dr_handlep
) {
783 (srh
->sr_dr_handlep
)->h_err
= drerr_int(ESBD_RESUME
,
784 srh
->sr_err_ints
, srh
->sr_err_idx
, 1);
788 * resume the lock manager
794 case DR_SRSTATE_USER
:
796 * finally, resume user threads
798 if (!dr_skip_user_threads
) {
799 prom_printf("DR: resuming user threads...\n");
800 dr_start_user_threads();
804 case DR_SRSTATE_BEGIN
:
807 * let those who care know that we've just resumed
809 PR_QR("sending SIGTHAW...\n");
810 dr_signal_user(SIGTHAW
);
815 * update the signature block
817 CPU_SIGNATURE(OS_SIG
, SIGST_RUN
, SIGSUBST_NULL
, CPU
->cpu_id
);
819 prom_printf("DR: resume COMPLETED\n");
/*
 * Quiesce the machine for DR, advancing srh->sr_suspend_state so
 * dr_resume() can unwind a partial suspend:
 *   USER   — stop user threads (failure is fatal unless the force flag
 *            is set or dr_check_user_stop_result is clear); then walk
 *            the tree for DR-unsafe majors and fail with ESBD_UNSAFE if
 *            any are found;
 *   DRIVER — DDI_SUSPEND the device tree, posting ESBD_SUSPEND errors
 *            on failure; then drmach_suspend_last();
 *   FULL   — disable the HW watchdog if armed (remembered in
 *            SR_FLAG_WATCHDOG for resume), update the CPU signature
 *            BEFORE pausing cpus (deadlock note below), take cpu_lock
 *            and pause_cpus().
 * Returns a DDI status; error detail is posted on the DR handle.
 */
823 dr_suspend(dr_sr_handle_t
*srh
)
828 uint64_t dev_errs
[DR_MAX_ERR_INT
];
829 int rc
= DDI_SUCCESS
;
831 handle
= srh
->sr_dr_handlep
;
833 force
= dr_cmd_flags(handle
) & SBD_FLAG_FORCE
;
836 * update the signature block
838 CPU_SIGNATURE(OS_SIG
, SIGST_QUIESCE_INPROGRESS
, SIGSUBST_NULL
,
841 prom_printf("\nDR: suspending user threads...\n");
842 srh
->sr_suspend_state
= DR_SRSTATE_USER
;
843 if (((rc
= dr_stop_user_threads(srh
)) != DDI_SUCCESS
) &&
844 dr_check_user_stop_result
) {
850 struct dr_ref drc
= {0};
852 prom_printf("\nDR: checking devices...\n");
856 drc
.idx
= &dev_errs_idx
;
857 drc
.len
= DR_MAX_ERR_INT
;
860 * Since the root node can never go away, it
861 * doesn't have to be held.
863 ddi_walk_devs(ddi_root_node(), dr_check_unsafe_major
, &drc
);
865 handle
->h_err
= drerr_int(ESBD_UNSAFE
, dev_errs
,
868 return (DDI_FAILURE
);
872 prom_printf("\nDR: dr_suspend invoked with force flag\n");
877 * This sync swaps out all user pages
883 * special treatment for lock manager
889 * sync the file system in case we never make it back
895 * now suspend drivers
897 prom_printf("DR: suspending drivers...\n");
898 srh
->sr_suspend_state
= DR_SRSTATE_DRIVER
;
900 /* No parent to hold busy */
901 if ((rc
= dr_suspend_devices(ddi_root_node(), srh
)) != DDI_SUCCESS
) {
902 if (srh
->sr_err_idx
&& srh
->sr_dr_handlep
) {
903 (srh
->sr_dr_handlep
)->h_err
= drerr_int(ESBD_SUSPEND
,
904 srh
->sr_err_ints
, srh
->sr_err_idx
, 1);
910 drmach_suspend_last();
913 * finally, grab all cpus
915 srh
->sr_suspend_state
= DR_SRSTATE_FULL
;
918 * if watchdog was activated, disable it
920 if (watchdog_activated
) {
921 mutex_enter(&tod_lock
);
922 tod_ops
.tod_clear_watchdog_timer();
923 mutex_exit(&tod_lock
);
924 srh
->sr_flags
|= SR_FLAG_WATCHDOG
;
926 srh
->sr_flags
&= ~(SR_FLAG_WATCHDOG
);
930 * Update the signature block.
931 * This must be done before cpus are paused, since on Starcat the
932 * cpu signature update acquires an adaptive mutex in the iosram driver.
933 * Blocking with cpus paused can lead to deadlock.
935 CPU_SIGNATURE(OS_SIG
, SIGST_QUIESCED
, SIGSUBST_NULL
, CPU
->cpu_id
);
937 mutex_enter(&cpu_lock
);
938 pause_cpus(NULL
, NULL
);
/*
 * Passthru test entry point: exercise a full suspend/resume cycle
 * without actually reconfiguring hardware. Allocates an sr handle,
 * runs dr_suspend() (and on success, implicitly the resume path —
 * intermediate lines are missing from this extract), then decodes and
 * PR_ALL/PR_QR-prints the resulting sbd_error_t e_code (unsafe devices,
 * unstoppable RT/user threads, non-suspendable or non-resumable
 * devices, ESBD_KTHREAD, or unknown) before releasing the handle.
 */
945 dr_pt_test_suspend(dr_handle_t
*hp
)
950 static fn_t f
= "dr_pt_test_suspend";
954 srh
= dr_get_sr_handle(hp
);
955 if ((err
= dr_suspend(srh
)) == DDI_SUCCESS
) {
957 if ((hp
->h_err
) && ((psmerr
= hp
->h_err
->e_code
) != 0)) {
958 PR_QR("%s: error on dr_resume()", f
);
961 PR_QR("Couldn't resume devices: %s\n",
962 DR_GET_E_RSC(hp
->h_err
));
966 PR_ALL("psmerr is ESBD_KTHREAD\n");
969 PR_ALL("Resume error unknown = %d\n", psmerr
);
974 PR_ALL("%s: dr_suspend() failed, err = 0x%x\n", f
, err
);
975 psmerr
= hp
->h_err
? hp
->h_err
->e_code
: ESBD_NOERROR
;
978 PR_ALL("Unsafe devices (major #): %s\n",
979 DR_GET_E_RSC(hp
->h_err
));
983 PR_ALL("RT threads (PIDs): %s\n",
984 DR_GET_E_RSC(hp
->h_err
));
988 PR_ALL("User threads (PIDs): %s\n",
989 DR_GET_E_RSC(hp
->h_err
));
993 PR_ALL("Non-suspendable devices (major #): %s\n",
994 DR_GET_E_RSC(hp
->h_err
));
998 PR_ALL("Could not resume devices (major #): %s\n",
999 DR_GET_E_RSC(hp
->h_err
));
1003 PR_ALL("psmerr is ESBD_KTHREAD\n");
1007 PR_ALL("sbd_error_t error code not set\n");
1011 PR_ALL("Unknown error psmerr = %d\n", psmerr
);
1015 dr_release_sr_handle(srh
);
1021 * Add a new integer value to the end of an array. Don't allow duplicates to
1022 * appear in the array, and don't allow the array to overflow. Return the new
1023 * total number of entries in the array.
/*
 * Parameters: arr = array, idx = current entry count, len = capacity,
 * val = value to append. Only the duplicate-scan loop header is visible
 * in this extract; the append/overflow logic is missing.
 */
1026 dr_add_int(uint64_t *arr
, int idx
, int len
, uint64_t val
)
1036 for (i
= 0; i
< idx
; i
++) {
1047 * Construct an sbd_error_t featuring a string representation of an array of
1048 * integers as its e_rsc.
/*
 * e_code   — sbd error code for the new error;
 * arr/idx  — values to render (returns early if NULL/empty);
 * majors   — nonzero: render entries as driver names via
 *            ddi_major_to_name() (falling back to "major %lu" when the
 *            major is unbound), zero: render as plain "%lu".
 * The work buffer is a kmem_zalloc'd MAXPATHLEN region (the size of the
 * e_rsc field); buf_len reserves room for the "..." ellipsis plus NUL,
 * appended when an entry no longer fits. The trailing ", " is trimmed
 * when everything fit. Caller owns the returned sbd_error_t from
 * drerr_new(); the local buffer is freed here.
 */
1050 static sbd_error_t
*
1051 drerr_int(int e_code
, uint64_t *arr
, int idx
, int majors
)
1053 int i
, n
, buf_len
, buf_idx
, buf_avail
;
1056 sbd_error_t
*new_sbd_err
;
1057 static char s_ellipsis
[] = "...";
1059 if (arr
== NULL
|| idx
<= 0)
1062 /* MAXPATHLEN is the size of the e_rsc field in sbd_error_t. */
1063 buf
= (char *)kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
1066 * This is the total working area of the buffer. It must be computed
1067 * as the size of 'buf', minus reserved space for the null terminator
1068 * and the ellipsis string.
1070 buf_len
= MAXPATHLEN
- (strlen(s_ellipsis
) + 1);
1072 /* Construct a string representation of the array values */
1073 for (buf_idx
= 0, i
= 0; i
< idx
; i
++) {
1074 buf_avail
= buf_len
- buf_idx
;
1076 dname
= ddi_major_to_name(arr
[i
]);
1078 n
= snprintf(&buf
[buf_idx
], buf_avail
, "%s, ",
1081 n
= snprintf(&buf
[buf_idx
], buf_avail
,
1082 "major %lu, ", arr
[i
]);
1085 n
= snprintf(&buf
[buf_idx
], buf_avail
, "%lu, ", arr
[i
]);
1088 /* An ellipsis gets appended when no more values fit */
1089 if (n
>= buf_avail
) {
1090 (void) strcpy(&buf
[buf_idx
], s_ellipsis
);
1097 /* If all the contents fit, remove the trailing comma */
1098 if (n
< buf_avail
) {
1099 buf
[--buf_idx
] = '\0';
1100 buf
[--buf_idx
] = '\0';
1103 /* Return an sbd_error_t with the buffer and e_code */
1104 new_sbd_err
= drerr_new(1, e_code
, buf
);
1105 kmem_free(buf
, MAXPATHLEN
);
1106 return (new_sbd_err
);