/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <synch.h>
#include <door.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>

#include "libsysevent.h"
#include "libsysevent_impl.h"
/*
 * The functions below deal with the General Purpose Event Handling framework:
 *
 * sysevent_evc_bind        - create/bind application to named channel
 * sysevent_evc_unbind      - unbind from previously bound/created channel
 * sysevent_evc_subscribe   - subscribe to existing event channel
 * sysevent_evc_unsubscribe - unsubscribe from existing event channel
 * sysevent_evc_publish     - generate a system event via an event channel
 * sysevent_evc_control     - various channel-based control operations
 */
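/*
 * Illustrative usage sketch (not part of the library source): a typical
 * consumer binds a named channel, publishes an event and unbinds.  The
 * channel name and the class/subclass/vendor/publisher strings are made-up
 * placeholders, and error handling is abbreviated.
 *
 *	evchan_t *ch;
 *
 *	if (sysevent_evc_bind("com.example.mychan", &ch,
 *	    EVCH_CREAT | EVCH_HOLD_PEND) != 0)
 *		return (errno);
 *
 *	(void) sysevent_evc_publish(ch, "class.example", "subclass.example",
 *	    "vendor.example", "examplepub", NULL, EVCH_NOSLEEP);
 *
 *	(void) sysevent_evc_unbind(ch);
 */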
static void kill_door_servers(evchan_subscr_t *);

#define	misaligned(p)	((uintptr_t)(p) & 3)	/* 4-byte alignment required */

static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;
/*
 * If the current thread is a door server thread servicing a door created
 * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
 * within door invocation context on the same channel will deadlock in the
 * kernel waiting for our own invocation to complete.  Such calls are
 * forbidden, and we abort if they are encountered (better than hanging).
 *
 * We'd like to offer this detection to subscriptions established with
 * sysevent_evc_subscribe, but we don't have control over the door service
 * threads in that case.  Perhaps the fix is to always use door_xcreate
 * even for sysevent_evc_subscribe?
 */
static boolean_t
will_deadlock(evchan_t *scp)
{
	evchan_subscr_t *subp = pthread_getspecific(nrkey);
	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);

	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
}
/*
 * Check syntax of a channel name
 */
static int
sysevent_is_chan_name(const char *str)
{
	for (; *str != '\0'; str++) {
		if (!EVCH_ISCHANCHAR(*str))
			return (0);
	}

	return (1);
}
/*
 * Check for printable characters
 */
static int
strisprint(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s < ' ' || *s > '~')
			return (0);
	}

	return (1);
}
/*
 * sysevent_evc_bind - Create/bind application to named channel
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable sysevent driver.  Fall back if the device link doesn't exist;
	 * this situation can arise if a channel is bound early in system
	 * startup, prior to devfsadm(1M) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force the fd to be closed when the process does exec.
	 * The driver will then release stale binding handles.
	 * The driver will also release the associated subscriptions
	 * if the EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno == EACCES ? EPERM : errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}
/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls have
		 * drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	while ((subp = EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the last
	 * close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}
/*
 * sysevent_evc_publish - Generate a system event via an event channel
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (errno = ec);
	}

	return (0);
}
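/*
 * Illustrative usage sketch (not part of the library source): publishing an
 * event that carries an attribute list.  The channel handle "ch" is assumed
 * to come from a prior sysevent_evc_bind(); names and values are placeholders.
 *
 *	nvlist_t *nvl = NULL;
 *
 *	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0) {
 *		(void) nvlist_add_string(nvl, "device", "/dev/example");
 *		(void) nvlist_add_uint32(nvl, "state", 1);
 *		(void) sysevent_evc_publish(ch, "class.example",
 *		    "subclass.example", "vendor.example", "examplepub",
 *		    nvl, EVCH_NOSLEEP);
 *		nvlist_free(nvl);
 *	}
 */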
/*
 * Generic callback which catches events from the kernel and calls the
 * subscriber's callback routine.
 *
 * The kernel guarantees that door upcalls are disabled once an unsubscription
 * has been issued; that's why cookie always points to a valid
 * evchan_subscr_t *.
 *
 * Furthermore it's not necessary to lock subp because the sysevent
 * framework guarantees no unsubscription until door_return.
 */
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	int rval = 0;

	/*
	 * If we've been invoked simply to kill the thread then
	 * drop the door return as quickly as possible.
	 */
	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		(void) door_return(NULL, 0, NULL, 0);

	if (args == NULL || alen <= (size_t)0) {
		/* Skip callback execution */
		rval = EINVAL;
	} else {
		rval = subp->evsub_func((sysevent_t *)(void *)args,
		    subp->evsub_cookie);
	}

	/*
	 * Fill in return values for door_return
	 */
	alen = sizeof (rval);
	bcopy(&rval, args, alen);

	(void) door_return(args, alen, NULL, 0);
}
static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

static void
xsub_thrattr_init(void)
{
	(void) pthread_attr_init(&xsub_thrattr);
	(void) pthread_attr_setdetachstate(&xsub_thrattr,
	    PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(&xsub_thrattr, PTHREAD_SCOPE_SYSTEM);
}
/*
 * Our door server create function is only called during initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0);	/* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}
void
xsub_door_server_setup(void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;

	if (xsa->xs_thrsetup == NULL) {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	(void) pthread_setspecific(nrkey, (void *)subp);

	if (xsa->xs_thrsetup)
		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
}
/*
 * Cause private door server threads to exit.  We have already performed the
 * unsubscribe ioctl which stops new invocations and waits until all
 * existing invocations are complete.  So all server threads should be
 * blocked in door_return.  The door has not yet been revoked.  We will
 * invoke repeatedly after setting the evsub_state to be noticed on
 * wakeup; each invocation will result in the death of one server thread.
 *
 * You'd think it would be easier to kill these threads, such as through
 * pthread_cancel.  Unfortunately door_return is not a cancellation point,
 * and if you do cancel a thread blocked in door_return the EINTR check in
 * the door_return assembly logic causes us to loop with EINTR forever!
 */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	bzero(&da, sizeof (da));
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;

	(void) door_call(subp->evsub_door_desc, &da);
}
static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno);	/* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (errno = ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (errno = ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		if (xsa != NULL)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (errno = ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}
/*
 * sysevent_evc_subscribe - subscribe to an existing event channel
 * using a non-private door (which will create as many server threads
 * as the apparent maximum concurrency requirements suggest).
 */
int
sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags)
{
	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, NULL));
}
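/*
 * Illustrative usage sketch (not part of the library source): registering a
 * handler for all event classes on an already-bound channel "ch".  The
 * handler name, the subscriber id "mysub" and the NULL cookie are
 * placeholders.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		char *cl = sysevent_get_class_name(ev);
 *		...
 *		return (0);
 *	}
 *
 *	if (sysevent_evc_subscribe(ch, "mysub", EC_ALL, my_handler,
 *	    NULL, 0) != 0)
 *		return (errno);
 */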
static void
subattr_dfltinit(struct sysevent_subattr_impl *xsa)
{
	(void) sigfillset(&xsa->xs_sigmask);
	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
}

static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}
/*
 * sysevent_evc_xsubscribe - subscribe to an existing event channel
 * using a private door with control over thread creation.
 */
int
sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa;

	if (attr != NULL) {
		xsa = (struct sysevent_subattr_impl *)attr;
	} else {
		xsa = &dfltsa;
		(void) pthread_once(&dfltsa_inited, init_dfltsa);
	}

	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, xsa));
}
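/*
 * Illustrative usage sketch (not part of the library source): subscribing
 * through sysevent_evc_xsubscribe so that the single private door server
 * thread runs with an explicit signal mask.  "ch", "mysub" and my_handler
 * are placeholders carried over from the sketch above.
 *
 *	sysevent_subattr_t *attr;
 *	sigset_t mask;
 *
 *	if ((attr = sysevent_subattr_alloc()) == NULL)
 *		return (errno);
 *
 *	(void) sigfillset(&mask);
 *	sysevent_subattr_sigmask(attr, &mask);
 *
 *	if (sysevent_evc_xsubscribe(ch, "mysub", EC_ALL, my_handler,
 *	    NULL, 0, attr) != 0) {
 *		sysevent_subattr_free(attr);
 *		return (errno);
 *	}
 */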
sysevent_subattr_t *
sysevent_subattr_alloc(void)
{
	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));

	if (xsa != NULL)
		subattr_dfltinit(xsa);

	return (xsa != NULL ? (sysevent_subattr_t *)xsa : NULL);
}
void
sysevent_subattr_free(sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	free(xsa);
}
void
sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
    door_xcreate_server_func_t *thrcreate, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrcreate = thrcreate;
	xsa->xs_thrcreate_cookie = cookie;
}
void
sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
    door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrsetup = thrsetup;
	xsa->xs_thrsetup_cookie = cookie;
}
void
sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	if (set != NULL) {
		xsa->xs_sigmask = *set;
	} else {
		(void) sigfillset(&xsa->xs_sigmask);
		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
	}
}
void
sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrattr = thrattr;
}
/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber id's for this channel */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp);	/* EFAULT, ENXIO, EINVAL possible */
	}

	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = tofree = NULL;
	subp = EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Freed single subscriber already? */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}
/*
 * sysevent_evc_control - Various channel-based control operations
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;

	case EVCH_SET_CHAN_LEN:
		/* Range check will be handled in framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;

	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	va_end(ap);

	if (rc == -1)
		rc = errno;

	return (rc == 0 ? 0 : (errno = rc));
}
int
sysevent_evc_setpropnvl(evchan_t *scp, nvlist_t *nvl)
{
	sev_propnvl_args_t uargs;
	char *buf = NULL;
	size_t nvlsz = 0;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (nvl != NULL &&
	    nvlist_pack(nvl, &buf, &nvlsz, NV_ENCODE_NATIVE, 0) != 0)
		return (errno = EINVAL);

	uargs.packednvl.name = (uint64_t)(uintptr_t)buf;
	uargs.packednvl.len = (uint32_t)nvlsz;

	rc = ioctl(EV_FD(scp), SEV_SETPROPNVL, (intptr_t)&uargs);

	if (buf != NULL)
		free(buf);

	return (rc);
}
int
sysevent_evc_getpropnvl(evchan_t *scp, nvlist_t **nvlp)
{
	sev_propnvl_args_t uargs;
	char buf[1024], *bufp = buf;	/* stack buffer */
	size_t sz = sizeof (buf);
	char *buf2 = NULL;	/* allocated if stack buf too small */
	uint64_t expgen = 0;
	int rc;

	if (scp == NULL || misaligned(scp) || nvlp == NULL)
		return (errno = EINVAL);

	*nvlp = NULL;

again:
	uargs.packednvl.name = (uint64_t)(uintptr_t)bufp;
	uargs.packednvl.len = (uint32_t)sz;

	rc = ioctl(EV_FD(scp), SEV_GETPROPNVL, (intptr_t)&uargs);

	if (rc == E2BIG)
		return (errno = E2BIG);	/* driver refuses to copyout */

	/*
	 * If the packed nvlist is too big for the buffer size we offered
	 * then the ioctl returns EOVERFLOW and indicates in the 'len'
	 * the size required for the current property nvlist generation
	 * (itself returned in the generation member).
	 */
	if (rc == EOVERFLOW &&
	    (buf2 == NULL || uargs.generation != expgen)) {
		if (buf2 != NULL)
			free(buf2);

		if ((sz = uargs.packednvl.len) > 1024 * 1024)
			return (errno = E2BIG);

		bufp = buf2 = malloc(sz);

		if (bufp == NULL)
			return (errno = ENOMEM);

		expgen = uargs.generation;
		goto again;
	}

	/*
	 * The chan prop nvlist can be absent, in which case the ioctl
	 * returns success and uargs.packednvl.len of 0; we have already
	 * set *nvlp to NULL.  Otherwise we must unpack the nvl.
	 */
	if (rc == 0 && uargs.packednvl.len != 0 &&
	    nvlist_unpack(bufp, uargs.packednvl.len, nvlp, 0) != 0)
		rc = EINVAL;

	if (buf2 != NULL)
		free(buf2);

	return (rc ? errno = rc : 0);
}
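/*
 * Illustrative usage sketch (not part of the library source): attaching a
 * property nvlist to a channel and reading it back.  The channel handle "ch"
 * and the property names are placeholders.
 *
 *	nvlist_t *props = NULL, *rdprops = NULL;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0) {
 *		(void) nvlist_add_int32(props, "version", 1);
 *		(void) sysevent_evc_setpropnvl(ch, props);
 *		nvlist_free(props);
 *	}
 *
 *	if (sysevent_evc_getpropnvl(ch, &rdprops) == 0 && rdprops != NULL) {
 *		...
 *		nvlist_free(rdprops);
 *	}
 */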