 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * A CPR derivative specifically for starfire/starcat.
 * X86 doesn't make use of the quiesce interfaces; it's kept for simplicity.
 */
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/devctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/callb.h>
#include <sys/clock.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/cyclic.h>
#include <sys/dr_util.h>
extern void e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt);
extern void e_ddi_exit_driver_list(struct devnames *dnp, int listcnt);
extern int is_pseudo_device(dev_info_t *dip);

extern kmutex_t cpu_lock;
extern dr_unsafe_devs_t dr_unsafe_devs;
static int dr_is_real_device(dev_info_t *dip);
static int dr_is_unsafe_major(major_t major);
static int dr_bypass_device(char *dname);
static int dr_check_dip(dev_info_t *dip, void *arg, uint_t ref);
static int dr_resolve_devname(dev_info_t *dip, char *buffer,
    char *alias);
static sbd_error_t *drerr_int(int e_code, uint64_t *arr, int idx,
    int majors);
static int dr_add_int(uint64_t *arr, int idx, int len, uint64_t val);

int dr_pt_test_suspend(dr_handle_t *hp);
/*
 * dr_quiesce.c interface
 * NOTE: states used internally by dr_suspend and dr_resume
 */
typedef enum dr_suspend_state {
    DR_SRSTATE_BEGIN = 0,
    DR_SRSTATE_USER,
    DR_SRSTATE_DRIVER,
    DR_SRSTATE_FULL
} suspend_state_t;

struct dr_sr_handle {
    dr_handle_t     *sr_dr_handlep;
    dev_info_t      *sr_failed_dip;
    suspend_state_t sr_suspend_state;
    uint64_t        sr_err_ints[DR_MAX_ERR_INT];
    int             sr_err_idx;
};

#define SR_FLAG_WATCHDOG    0x1
/*
 * This hack will go away before RTI.  Just for testing.
 * List of drivers to bypass when performing a suspend.
 */
static char *dr_bypass_list[] = {
    ""
};

#define SKIP_SYNC    /* bypass sync ops in dr_suspend */
/*
 * dr_skip_user_threads is used to control if user threads should
 * be suspended.  If dr_skip_user_threads is true, the rest of the
 * flags are not used; if it is false, dr_check_user_stop_result
 * will be used to control whether or not we need to check the suspend
 * result, and dr_allow_blocked_threads will be used to control
 * whether or not we allow suspend to continue if there are blocked
 * threads.  We allow all combinations of dr_check_user_stop_result
 * and dr_allow_blocked_threads, even though it might not make much
 * sense to disallow blocked threads when we don't even check stop
 * results.
 */
static int dr_skip_user_threads = 0;        /* default to FALSE */
static int dr_check_user_stop_result = 1;   /* default to TRUE */
static int dr_allow_blocked_threads = 1;    /* default to TRUE */
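
/*
 * Added note (not from the original source): since these are ordinary
 * global ints, they can be tuned at boot time via /etc/system, e.g.
 * "set <dr-module>:dr_skip_user_threads = 1", where <dr-module> stands
 * for whatever kernel module this file is built into.
 */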
#define DR_CPU_LOOP_MSEC    1000
    ASSERT(MUTEX_HELD(&cpu_lock));

    ASSERT(MUTEX_HELD(&cpu_lock));
dr_sr_handle_t *
dr_get_sr_handle(dr_handle_t *hp)
{
    dr_sr_handle_t *srh;

    srh = GETSTRUCT(dr_sr_handle_t, 1);
    srh->sr_dr_handlep = hp;

    return (srh);
}

void
dr_release_sr_handle(dr_sr_handle_t *srh)
{
    ASSERT(srh->sr_failed_dip == NULL);
    FREESTRUCT(srh, dr_sr_handle_t, 1);
}
static int
dr_is_real_device(dev_info_t *dip)
{
    struct regspec *regbuf = NULL;
    int length = 0;
    int rc;

    if (ddi_get_driver(dip) == NULL)
        return (0);

    if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR|PMC_PARENTAL_SR))
        return (1);
    if (DEVI(dip)->devi_pm_flags & PMC_NO_SR)
        return (0);

    /* now the general case */
    rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
        (caddr_t)&regbuf, &length);
    ASSERT(rc != DDI_PROP_NO_MEMORY);
    if (rc != DDI_PROP_SUCCESS) {
        return (0);
    }

    if ((length > 0) && (regbuf != NULL))
        kmem_free(regbuf, length);

    return (1);
}
static int
dr_is_unsafe_major(major_t major)
{
    char *dname, **cpp;
    int i, ndevs;

    if ((dname = ddi_major_to_name(major)) == NULL) {
        PR_QR("dr_is_unsafe_major: invalid major # %d\n", major);
        return (0);
    }

    ndevs = dr_unsafe_devs.ndevs;
    for (i = 0, cpp = dr_unsafe_devs.devnames; i < ndevs; i++) {
        if (strcmp(dname, *cpp++) == 0)
            return (1);
    }
    return (0);
}
static int
dr_bypass_device(char *dname)
{
    int i;
    char **lname;

    if (dname == NULL)
        return (0);

    /* check the bypass list */
    for (i = 0, lname = &dr_bypass_list[i]; **lname != '\0'; lname++) {
        if (strcmp(dname, dr_bypass_list[i++]) == 0)
            return (1);
    }
    return (0);
}
static int
dr_resolve_devname(dev_info_t *dip, char *buffer, char *alias)
{
    major_t devmajor;
    char *aka, *name;

    *buffer = *alias = 0;

    if ((name = ddi_get_name(dip)) == NULL)
        name = "<null name>";

    aka = name;

    if ((devmajor = ddi_name_to_major(aka)) != DDI_MAJOR_T_NONE)
        aka = ddi_major_to_name(devmajor);

    (void) strcpy(buffer, name);

    if (strcmp(name, aka))
        (void) strcpy(alias, aka);

    return (0);
}
struct dr_ref {
    int         *refcount;
    int         *refcount_non_gldv3;
    uint64_t    *arr;
    int         *idx;
    int         len;
};
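
/*
 * Added descriptive note: dr_check_dip() is the per-node callback used by
 * dr_check_devices() (via e_ddi_branch_referenced()) and, with a zero
 * reference count, by dr_check_unsafe_major() during the device tree walk.
 */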
static int
dr_check_dip(dev_info_t *dip, void *arg, uint_t ref)
{
    major_t major;
    char *dname;
    struct dr_ref *rp = (struct dr_ref *)arg;

    if (dip == NULL)
        return (DDI_WALK_CONTINUE);

    if (!dr_is_real_device(dip))
        return (DDI_WALK_CONTINUE);

    dname = ddi_binding_name(dip);

    if (dr_bypass_device(dname))
        return (DDI_WALK_CONTINUE);

    if (dname && ((major = ddi_name_to_major(dname)) != (major_t)-1)) {
        if (ref && rp->refcount) {
            *rp->refcount += ref;
            PR_QR("\n %s (major# %d) is referenced(%u)\n", dname,
                major, ref);
        }
        if (ref && rp->refcount_non_gldv3) {
            if (NETWORK_PHYSDRV(major) && !GLDV3_DRV(major))
                *rp->refcount_non_gldv3 += ref;
        }
        if (dr_is_unsafe_major(major) && i_ddi_devi_attached(dip)) {
            PR_QR("\n %s (major# %d) not hotpluggable\n", dname,
                major);
            if (rp->arr != NULL && rp->idx != NULL)
                *rp->idx = dr_add_int(rp->arr, *rp->idx,
                    rp->len, (uint64_t)major);
        }
    }
    return (DDI_WALK_CONTINUE);
}
static int
dr_check_unsafe_major(dev_info_t *dip, void *arg)
{
    return (dr_check_dip(dip, arg, 0));
}
void
dr_check_devices(dev_info_t *dip, int *refcount, dr_handle_t *handle,
    uint64_t *arr, int *idx, int len, int *refcount_non_gldv3)
{
    struct dr_ref bref = {0};

    bref.refcount = refcount;
    bref.refcount_non_gldv3 = refcount_non_gldv3;
    bref.arr = arr;
    bref.idx = idx;
    bref.len = len;

    ASSERT(e_ddi_branch_held(dip));
    (void) e_ddi_branch_referenced(dip, dr_check_dip, &bref);
}
/*
 * The "dip" argument's parent (if it exists) must be held busy.
 */
static int
dr_suspend_devices(dev_info_t *dip, dr_sr_handle_t *srh)
{
    dr_handle_t *handle;
    major_t major;
    char *dname;
    int circ;

    /*
     * If dip is the root node, it has no siblings and it is
     * always held. If dip is not the root node, dr_suspend_devices()
     * will be invoked with the parent held busy.
     */
    for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
        char d_name[40], d_alias[40], *d_info;

        ndi_devi_enter(dip, &circ);
        if (dr_suspend_devices(ddi_get_child(dip), srh)) {
            ndi_devi_exit(dip, circ);
            return (ENXIO);
        }
        ndi_devi_exit(dip, circ);

        if (!dr_is_real_device(dip))
            continue;

        major = (major_t)-1;
        if ((dname = ddi_binding_name(dip)) != NULL)
            major = ddi_name_to_major(dname);

        if (dr_bypass_device(dname)) {
            PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
                major);
            continue;
        }

        if (drmach_verify_sr(dip, 1)) {
            PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
                major);
            continue;
        }

        if ((d_info = ddi_get_name_addr(dip)) == NULL)
            d_info = "<null>";

        if (dr_resolve_devname(dip, d_name, d_alias) == 0) {
            if (d_alias[0] != 0) {
                prom_printf("\tsuspending %s@%s (aka %s)\n",
                    d_name, d_info, d_alias);
            } else {
                prom_printf("\tsuspending %s@%s\n", d_name,
                    d_info);
            }
        } else {
            prom_printf("\tsuspending %s@%s\n", dname, d_info);
        }

        if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
            prom_printf("\tFAILED to suspend %s@%s\n",
                d_name[0] ? d_name : dname, d_info);

            srh->sr_err_idx = dr_add_int(srh->sr_err_ints,
                srh->sr_err_idx, DR_MAX_ERR_INT, (uint64_t)major);

            ndi_hold_devi(dip);
            srh->sr_failed_dip = dip;

            handle = srh->sr_dr_handlep;
            dr_op_err(CE_IGNORE, handle, ESBD_SUSPEND, "%s@%s",
                d_name[0] ? d_name : dname, d_info);

            return (DDI_FAILURE);
        }
    }

    return (DDI_SUCCESS);
}
static void
dr_resume_devices(dev_info_t *start, dr_sr_handle_t *srh)
{
    dr_handle_t *handle;
    dev_info_t *dip, *next, *last = NULL;
    major_t major;
    char *bn;
    int circ;

    /* attach in reverse device tree order */
    while (last != start) {
        dip = start;
        next = ddi_get_next_sibling(dip);
        while (next != last && dip != srh->sr_failed_dip) {
            dip = next;
            next = ddi_get_next_sibling(dip);
        }
        if (dip == srh->sr_failed_dip) {
            /* release hold acquired in dr_suspend_devices() */
            srh->sr_failed_dip = NULL;
            ndi_rele_devi(dip);
        } else if (dr_is_real_device(dip) &&
            srh->sr_failed_dip == NULL) {

            if ((bn = ddi_binding_name(dip)) != NULL) {
                major = ddi_name_to_major(bn);
            }

            if (!dr_bypass_device(bn) &&
                !drmach_verify_sr(dip, 0)) {
                char d_name[40], d_alias[40], *d_info;

                d_info = ddi_get_name_addr(dip);

                if (!dr_resolve_devname(dip, d_name, d_alias)) {
                    if (d_alias[0] != 0) {
                        prom_printf("\tresuming "
                            "%s@%s (aka %s)\n", d_name,
                            d_info, d_alias);
                    } else {
                        prom_printf("\tresuming "
                            "%s@%s\n", d_name, d_info);
                    }
                } else {
                    prom_printf("\tresuming %s@%s\n", bn,
                        d_info);
                }

                if (devi_attach(dip, DDI_RESUME) !=
                    DDI_SUCCESS) {
                    /*
                     * Print a console warning,
                     * set an e_code of ESBD_RESUME,
                     * and save the driver major
                     * number in the e_rsc.
                     */
                    prom_printf("\tFAILED to resume %s@%s",
                        d_name[0] ? d_name : bn, d_info);

                    srh->sr_err_idx =
                        dr_add_int(srh->sr_err_ints,
                        srh->sr_err_idx, DR_MAX_ERR_INT,
                        (uint64_t)major);

                    handle = srh->sr_dr_handlep;
                    dr_op_err(CE_IGNORE, handle,
                        ESBD_RESUME, "%s@%s",
                        d_name[0] ? d_name : bn, d_info);
                }
            }
        }

        /* Hold parent busy while walking its children */
        ndi_devi_enter(dip, &circ);
        dr_resume_devices(ddi_get_child(dip), srh);
        ndi_devi_exit(dip, circ);
        last = dip;
    }
}
/*
 * True if thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from DR point of view.  These user threads are waiting in
 * the kernel.  Once they complete in the kernel, they will process
 * the stop signal and stop.
 */
#define DR_VSTOPPED(t)                  \
    ((t)->t_state == TS_SLEEP &&        \
    (t)->t_wchan != NULL &&             \
    ((t)->t_proc_flag & TP_CHKPT))
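
/*
 * Added note: DR_VSTOPPED() pairs with CPR_ISTOPPED() in
 * dr_stop_user_threads() below; when dr_allow_blocked_threads is set,
 * a thread that is only "virtually" stopped in the kernel is tolerated
 * rather than causing the suspend to bail out.
 */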
static int
dr_stop_user_threads(dr_sr_handle_t *srh)
{
    int count;
    int bailout;
    dr_handle_t *handle = srh->sr_dr_handlep;
    static fn_t f = "dr_stop_user_threads";
    kthread_id_t tp;

    extern void add_one_utstop();
    extern void utstop_timedwait(clock_t);
    extern void utstop_init(void);

#define DR_UTSTOP_RETRY 4
#define DR_UTSTOP_WAIT  hz

    if (dr_skip_user_threads)
        return (DDI_SUCCESS);

    utstop_init();

    /* we need to try a few times to get past fork, etc. */
    for (count = 0; count < DR_UTSTOP_RETRY; count++) {
        /* walk the entire threadlist */
        mutex_enter(&pidlock);
        for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
            proc_t *p = ttoproc(tp);

            /* handle kernel threads separately */
            if (p->p_as == &kas || p->p_stat == SZOMB)
                continue;

            mutex_enter(&p->p_lock);

            if (tp->t_state == TS_STOPPED) {
                /* add another reason to stop this thread */
                tp->t_schedflag &= ~TS_RESUME;
            } else {
                tp->t_proc_flag |= TP_CHKPT;

                mutex_exit(&p->p_lock);
                add_one_utstop();
                mutex_enter(&p->p_lock);

                if (ISWAKEABLE(tp) || ISWAITING(tp)) {
                    setrun(tp);
                }
            }

            /* grab thread if needed */
            if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
                poke_cpu(tp->t_cpu->cpu_id);

            mutex_exit(&p->p_lock);
        }
        mutex_exit(&pidlock);

        /* let everything catch up */
        utstop_timedwait(count * count * DR_UTSTOP_WAIT);
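
        /*
         * Added note: with DR_UTSTOP_RETRY = 4 and DR_UTSTOP_WAIT = hz,
         * the settle delay above grows quadratically per pass:
         * 0, hz, 4*hz, then 9*hz clock ticks (roughly 0, 1, 4 and
         * 9 seconds).
         */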
        /* now, walk the threadlist again to see if we are done */
        mutex_enter(&pidlock);
        for (tp = curthread->t_next, bailout = 0;
            tp != curthread; tp = tp->t_next) {
            proc_t *p = ttoproc(tp);

            /* handle kernel threads separately */
            if (p->p_as == &kas || p->p_stat == SZOMB)
                continue;

            /*
             * If this thread didn't stop, and we don't allow
             * unstopped blocked threads, bail.
             */
            if (!CPR_ISTOPPED(tp) &&
                !(dr_allow_blocked_threads &&
                DR_VSTOPPED(tp))) {
                bailout = 1;
                if (count == DR_UTSTOP_RETRY - 1) {
                    /*
                     * save the pid for later reporting
                     */
                    srh->sr_err_idx =
                        dr_add_int(srh->sr_err_ints,
                        srh->sr_err_idx, DR_MAX_ERR_INT,
                        (uint64_t)p->p_pid);

                    cmn_err(CE_WARN, "%s: "
                        "failed to stop thread: "
                        "process=%s, pid=%d",
                        f, p->p_user.u_psargs, p->p_pid);

                    PR_QR("%s: failed to stop thread: "
                        "process=%s, pid=%d, t_id=0x%p, "
                        "t_state=0x%x, t_proc_flag=0x%x, "
                        "t_schedflag=0x%x\n",
                        f, p->p_user.u_psargs, p->p_pid,
                        (void *)tp, tp->t_state,
                        tp->t_proc_flag, tp->t_schedflag);
                }
            }
        }
        mutex_exit(&pidlock);

        /* were all the threads stopped? */
        if (!bailout)
            break;
    }

    /* were we unable to stop all threads after a few tries? */
    if (bailout) {
        handle->h_err = drerr_int(ESBD_UTHREAD, srh->sr_err_ints,
            srh->sr_err_idx, 0);
        return (ESRCH);
    }

    return (DDI_SUCCESS);
}
static void
dr_start_user_threads(void)
{
    kthread_id_t tp;

    mutex_enter(&pidlock);

    /* walk all threads and release them */
    for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
        proc_t *p = ttoproc(tp);

        /* skip kernel threads */
        if (ttoproc(tp)->p_as == &kas)
            continue;

        mutex_enter(&p->p_lock);
        tp->t_proc_flag &= ~TP_CHKPT;
        mutex_exit(&p->p_lock);

        if (CPR_ISTOPPED(tp)) {
            /* back on the runq */
            tp->t_schedflag |= TS_RESUME;
            setrun(tp);
        }
    }

    mutex_exit(&pidlock);
}
static void
dr_signal_user(int sig)
{
    struct proc *p;

    mutex_enter(&pidlock);

    for (p = practive; p != NULL; p = p->p_next) {
        /* only user threads */
        if (p->p_exec == NULL || p->p_stat == SZOMB ||
            p == proc_init || p == ttoproc(curthread))
            continue;

        mutex_enter(&p->p_lock);
        sigtoproc(p, NULL, sig);
        mutex_exit(&p->p_lock);
    }

    mutex_exit(&pidlock);

    /* add a bit of delay */
    delay(hz);
}
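
/*
 * Added descriptive note: dr_resume() unwinds dr_suspend() in reverse.
 * The switch below falls through from the deepest suspend state reached
 * (DR_SRSTATE_FULL) back toward DR_SRSTATE_BEGIN, so a partially
 * completed suspend is resumed from exactly the point it stopped at.
 */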
void
dr_resume(dr_sr_handle_t *srh)
{
    switch (srh->sr_suspend_state) {
    case DR_SRSTATE_FULL:

        ASSERT(MUTEX_HELD(&cpu_lock));

        /*
         * Prevent false alarm in tod_validate() due to tod
         * value change between suspend and resume
         */
        mutex_enter(&tod_lock);
        tod_status_set(TOD_DR_RESUME_DONE);
        mutex_exit(&tod_lock);

        dr_enable_intr();   /* enable intr & clock */

        start_cpus();
        mutex_exit(&cpu_lock);

        /*
         * This should only be called if drmach_suspend_last()
         * was called and state transitioned to DR_SRSTATE_FULL
         * to prevent resume attempts on device instances that
         * were not previously suspended.
         */
        drmach_resume_first();

    case DR_SRSTATE_DRIVER:

        /* no parent dip to hold busy */
        dr_resume_devices(ddi_root_node(), srh);

        if (srh->sr_err_idx && srh->sr_dr_handlep) {
            (srh->sr_dr_handlep)->h_err = drerr_int(ESBD_RESUME,
                srh->sr_err_ints, srh->sr_err_idx, 1);
        }

        /*
         * resume the lock manager
         */
        lm_cprresume();

    case DR_SRSTATE_USER:
        /*
         * finally, resume user threads
         */
        if (!dr_skip_user_threads) {
            prom_printf("DR: resuming user threads...\n");
            dr_start_user_threads();
        }

    case DR_SRSTATE_BEGIN:
        /*
         * let those who care know that we've just resumed
         */
        PR_QR("sending SIGTHAW...\n");
        dr_signal_user(SIGTHAW);
        break;
    }

    prom_printf("DR: resume COMPLETED\n");
}
int
dr_suspend(dr_sr_handle_t *srh)
{
    dr_handle_t *handle;
    int force;
    int dev_errs_idx;
    uint64_t dev_errs[DR_MAX_ERR_INT];
    int rc = DDI_SUCCESS;

    handle = srh->sr_dr_handlep;

    force = dr_cmd_flags(handle) & SBD_FLAG_FORCE;

    prom_printf("\nDR: suspending user threads...\n");
    srh->sr_suspend_state = DR_SRSTATE_USER;
    if (((rc = dr_stop_user_threads(srh)) != DDI_SUCCESS) &&
        dr_check_user_stop_result) {
        dr_resume(srh);
        return (rc);
    }

    if (!force) {
        struct dr_ref drc = {0};

        prom_printf("\nDR: checking devices...\n");
        dev_errs_idx = 0;

        drc.arr = dev_errs;
        drc.idx = &dev_errs_idx;
        drc.len = DR_MAX_ERR_INT;

        /*
         * Since the root node can never go away, it
         * doesn't have to be held.
         */
        ddi_walk_devs(ddi_root_node(), dr_check_unsafe_major, &drc);
        if (dev_errs_idx) {
            handle->h_err = drerr_int(ESBD_UNSAFE, dev_errs,
                dev_errs_idx, 1);
            dr_resume(srh);
            return (DDI_FAILURE);
        }
    } else {
        prom_printf("\nDR: dr_suspend invoked with force flag\n");
    }

#ifndef SKIP_SYNC
    /* this sync swaps out all user pages */
    vfs_sync(SYNC_ALL);
#endif

    /* special treatment for lock manager */
    lm_cprsuspend();

#ifndef SKIP_SYNC
    /* sync the file system in case we never make it back */
    sync();
#endif

    /* now suspend drivers */
    prom_printf("DR: suspending drivers...\n");
    srh->sr_suspend_state = DR_SRSTATE_DRIVER;

    /* No parent to hold busy */
    if ((rc = dr_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
        if (srh->sr_err_idx && srh->sr_dr_handlep) {
            (srh->sr_dr_handlep)->h_err = drerr_int(ESBD_SUSPEND,
                srh->sr_err_ints, srh->sr_err_idx, 1);
        }
        dr_resume(srh);
        return (rc);
    }

    drmach_suspend_last();

    /* finally, grab all cpus */
    srh->sr_suspend_state = DR_SRSTATE_FULL;

    mutex_enter(&cpu_lock);
    pause_cpus(NULL, NULL);

    return (rc);
}
int
dr_pt_test_suspend(dr_handle_t *hp)
{
    dr_sr_handle_t *srh;
    int err;
    int psmerr;
    static fn_t f = "dr_pt_test_suspend";

    srh = dr_get_sr_handle(hp);
    if ((err = dr_suspend(srh)) == DDI_SUCCESS) {
        dr_resume(srh);
        if ((hp->h_err) && ((psmerr = hp->h_err->e_code) != 0)) {
            PR_QR("%s: error on dr_resume()", f);
            switch (psmerr) {
            case ESBD_RESUME:
                PR_QR("Couldn't resume devices: %s\n",
                    DR_GET_E_RSC(hp->h_err));
                break;
            case ESBD_KTHREAD:
                PR_ALL("psmerr is ESBD_KTHREAD\n");
                break;
            default:
                PR_ALL("Resume error unknown = %d\n", psmerr);
                break;
            }
        }
    } else {
        PR_ALL("%s: dr_suspend() failed, err = 0x%x\n", f, err);
        psmerr = hp->h_err ? hp->h_err->e_code : ESBD_NOERROR;
        switch (psmerr) {
        case ESBD_UNSAFE:
            PR_ALL("Unsafe devices (major #): %s\n",
                DR_GET_E_RSC(hp->h_err));
            break;
        case ESBD_RTTHREAD:
            PR_ALL("RT threads (PIDs): %s\n",
                DR_GET_E_RSC(hp->h_err));
            break;
        case ESBD_UTHREAD:
            PR_ALL("User threads (PIDs): %s\n",
                DR_GET_E_RSC(hp->h_err));
            break;
        case ESBD_SUSPEND:
            PR_ALL("Non-suspendable devices (major #): %s\n",
                DR_GET_E_RSC(hp->h_err));
            break;
        case ESBD_RESUME:
            PR_ALL("Could not resume devices (major #): %s\n",
                DR_GET_E_RSC(hp->h_err));
            break;
        case ESBD_KTHREAD:
            PR_ALL("psmerr is ESBD_KTHREAD\n");
            break;
        case ESBD_NOERROR:
            PR_ALL("sbd_error_t error code not set\n");
            break;
        default:
            PR_ALL("Unknown error psmerr = %d\n", psmerr);
            break;
        }
    }
    dr_release_sr_handle(srh);

    return (0);
}
/*
 * Add a new integer value to the end of an array.  Don't allow duplicates to
 * appear in the array, and don't allow the array to overflow.  Return the new
 * total number of entries in the array.
 */
static int
dr_add_int(uint64_t *arr, int idx, int len, uint64_t val)
{
    int i;

    if (idx >= len)
        return (idx);

    for (i = 0; i < idx; i++) {
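        /*
         * Reconstructed sketch (an assumption, not the verbatim
         * original): the elided remainder follows the block comment
         * above -- skip the add if val is already present, otherwise
         * append it and return the updated entry count.
         */
        if (arr[i] == val)
            return (idx);
    }

    arr[idx++] = val;

    return (idx);
}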
/*
 * Construct an sbd_error_t featuring a string representation of an array of
 * integers as its e_rsc.
 */
static sbd_error_t *
drerr_int(int e_code, uint64_t *arr, int idx, int majors)
{
    int i, n, buf_len, buf_idx, buf_avail;
    char *dname;
    char *buf;
    sbd_error_t *new_sbd_err;
    static char s_ellipsis[] = "...";

    if (arr == NULL || idx <= 0)
        return (NULL);

    /* MAXPATHLEN is the size of the e_rsc field in sbd_error_t. */
    buf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

    /*
     * This is the total working area of the buffer.  It must be computed
     * as the size of 'buf', minus reserved space for the null terminator
     * and the ellipsis string.
     */
    buf_len = MAXPATHLEN - (strlen(s_ellipsis) + 1);

    /* Construct a string representation of the array values */
    for (buf_idx = 0, i = 0; i < idx; i++) {
        buf_avail = buf_len - buf_idx;
        if (majors) {
            dname = ddi_major_to_name(arr[i]);
            if (dname) {
                n = snprintf(&buf[buf_idx], buf_avail, "%s, ",
                    dname);
            } else {
                n = snprintf(&buf[buf_idx], buf_avail,
                    "major %" PRIu64 ", ", arr[i]);
            }
        } else {
            n = snprintf(&buf[buf_idx], buf_avail, "%" PRIu64 ", ",
                arr[i]);
        }

        /* An ellipsis gets appended when no more values fit */
        if (n >= buf_avail) {
            (void) strcpy(&buf[buf_idx], s_ellipsis);
            break;
        }

        buf_idx += n;
    }

    /* If all the contents fit, remove the trailing comma */
    if (n < buf_avail) {
        buf[--buf_idx] = '\0';
        buf[--buf_idx] = '\0';
    }

    /* Return an sbd_error_t with the buffer and e_code */
    new_sbd_err = drerr_new(1, e_code, buf);

    kmem_free(buf, MAXPATHLEN);

    return (new_sbd_err);
}