/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * UNIX Device Driver Interface functions
 *
 * This file contains functions that are to be added to the kernel
 * to put the interface presented to drivers in conformance with
 * the DDI standard. Of the functions added to the kernel, 17 are
 * function equivalents of existing macros in sysmacros.h,
 * stream.h, and param.h.
 *
 * 13 additional functions -- drv_getparm(), drv_setparm(),
 * getrbuf(), freerbuf(),
 * getemajor(), geteminor(), etoimajor(), itoemajor(), drv_usectohz(),
 * drv_hztousec(), drv_usecwait(), drv_priv(), and kvtoppid() --
 * are specified by the DDI to exist in the kernel and are implemented here.
 *
 * Note that putnext() and put() are not in this file. The C versions of
 * these routines are in kernel/os/putnext.c; assembly versions
 * might exist for some architectures.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/session.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/vtrace.h>

/*
 * return internal major number corresponding to device
 * number (new format) argument
 */
major_t
getmajor(dev_t dev)
{
#ifdef _LP64
	return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
#else
	return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
#endif
}

/*
 * return external major number corresponding to device
 * number (new format) argument
 */
major_t
getemajor(dev_t dev)
{
#ifdef _LP64
	return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
#else
	return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
#endif
}

/*
 * return internal minor number corresponding to device
 * number (new format) argument
 */
minor_t
getminor(dev_t dev)
{
#ifdef _LP64
	return ((minor_t)(dev & MAXMIN64));
#else
	return ((minor_t)(dev & MAXMIN));
#endif
}

/*
 * return external minor number corresponding to device
 * number (new format) argument
 */
minor_t
geteminor(dev_t dev)
{
#ifdef _LP64
	return ((minor_t)(dev & MAXMIN64));
#else
	return ((minor_t)(dev & MAXMIN));
#endif
}

/*
 * return internal major number corresponding to external
 * major number.
 */
int
etoimajor(major_t emajnum)
{
#ifdef _LP64
	if (emajnum >= devcnt)
		return (-1); /* invalid external major */
#else
	if (emajnum > MAXMAJ || emajnum >= devcnt)
		return (-1); /* invalid external major */
#endif
	return ((int)emajnum);
}

/*
 * return external major number corresponding to internal
 * major number argument or -1 if no external major number
 * can be found after lastemaj that maps to the internal
 * major number. Pass a lastemaj val of -1 to start
 * the search initially. (Typical use of this function is
 * of the form:
 *
 *	lastemaj = -1;
 *	while ((lastemaj = itoemajor(imag, lastemaj)) != -1)
 *		{ process major number }
 */
int
itoemajor(major_t imajnum, int lastemaj)
{
	if (imajnum >= devcnt)
		return (-1);

	/*
	 * if lastemaj == -1 then start from beginning of
	 * the (imaginary) MAJOR table
	 */
	if (lastemaj < -1)
		return (-1);

	/*
	 * given that there's a 1-1 mapping of internal to external
	 * major numbers, searching is somewhat pointless ... let's
	 * just go there directly.
	 */
	if (++lastemaj < devcnt && imajnum < devcnt)
		return (imajnum);

	return (-1);
}

/*
 * encode external major and minor number arguments into a
 * new format device number
 */
dev_t
makedevice(major_t maj, minor_t minor)
{
#ifdef _LP64
	return (((dev_t)maj << NBITSMINOR64) | (minor & MAXMIN64));
#else
	return (((dev_t)maj << NBITSMINOR) | (minor & MAXMIN));
#endif
}

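/*
 * Illustrative sketch (not from the original source; "my_major" and
 * "unit" are hypothetical): a driver composing a device number for a
 * minor node, and decomposing it again:
 *
 *	dev_t dev = makedevice(my_major, (minor_t)unit);
 *
 *	ASSERT(getmajor(dev) == my_major);
 *	ASSERT(getminor(dev) == (minor_t)unit);
 */
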
/*
 * cmpdev - compress new device format to old device format
 */
o_dev_t
cmpdev(dev_t dev)
{
	major_t major_d;
	minor_t minor_d;

#ifdef _LP64
	major_d = dev >> NBITSMINOR64;
	minor_d = dev & MAXMIN64;
#else
	major_d = dev >> NBITSMINOR;
	minor_d = dev & MAXMIN;
#endif
	if (major_d > OMAXMAJ || minor_d > OMAXMIN)
		return ((o_dev_t)NODEV);
	return ((o_dev_t)((major_d << ONBITSMINOR) | minor_d));
}

/*
 * expdev - expand old device format to new device format
 */
dev_t
expdev(dev_t dev)
{
	major_t major_d;
	minor_t minor_d;

	major_d = ((dev >> ONBITSMINOR) & OMAXMAJ);
	minor_d = (dev & OMAXMIN);
#ifdef _LP64
	return ((((dev_t)major_d << NBITSMINOR64) | minor_d));
#else
	return ((((dev_t)major_d << NBITSMINOR) | minor_d));
#endif
}

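/*
 * Illustrative sketch (hypothetical values): cmpdev() and expdev() are
 * inverses whenever the major and minor numbers fit in the old-format
 * field widths (OMAXMAJ/OMAXMIN); otherwise cmpdev() returns NODEV:
 *
 *	dev_t dev = makedevice(12, 34);
 *	o_dev_t odev = cmpdev(dev);
 *
 *	if (odev != (o_dev_t)NODEV)
 *		ASSERT(expdev((dev_t)odev) == dev);
 */
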
/*
 * return true (1) if the message type input is a data
 * message type, 0 otherwise
 */
#undef datamsg
int
datamsg(unsigned char db_type)
{
	return (db_type == M_DATA || db_type == M_PROTO ||
	    db_type == M_PCPROTO || db_type == M_DELAY);
}

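/*
 * Illustrative sketch (hypothetical module "xx"): a put(9E) routine can
 * use datamsg() to queue data messages for the service routine while
 * handling control messages immediately:
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (datamsg(mp->b_datap->db_type))
 *			(void) putq(q, mp);
 *		else
 *			xxwput_ctl(q, mp);	-- hypothetical control path
 *		return (0);
 *	}
 */
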
/*
 * return a pointer to the other queue in the queue pair of qp
 */
queue_t *
OTHERQ(queue_t *q)
{
	return (_OTHERQ(q));
}

/*
 * return a pointer to the read queue in the queue pair of qp.
 */
queue_t *
RD(queue_t *qp)
{
	return (_RD(qp));
}

/*
 * return 1 if the next queue is in the same stream as qp, 0 otherwise.
 */
int
SAMESTR(queue_t *q)
{
	return (_SAMESTR(q));
}

/*
 * return a pointer to the write queue in the queue pair of qp.
 */
queue_t *
WR(queue_t *qp)
{
	return (_WR(qp));
}

/*
 * store value of kernel parameter associated with parm
 */
int
drv_getparm(unsigned int parm, void *valuep)
{
	proc_t	*p = curproc;
	time_t	now;

	switch (parm) {
	case UPROCP:
		*(proc_t **)valuep = p;
		break;
	case PPGRP:
		mutex_enter(&p->p_lock);
		*(pid_t *)valuep = p->p_pgrp;
		mutex_exit(&p->p_lock);
		break;
	case LBOLT:
		*(clock_t *)valuep = ddi_get_lbolt();
		break;
	case TIME:
		if ((now = gethrestime_sec()) == 0) {
			timestruc_t ts;

			mutex_enter(&tod_lock);
			ts = tod_get();
			mutex_exit(&tod_lock);
			*(time_t *)valuep = ts.tv_sec;
		} else {
			*(time_t *)valuep = now;
		}
		break;
	case PPID:
		*(pid_t *)valuep = p->p_pid;
		break;
	case PSID:
		mutex_enter(&p->p_splock);
		*(pid_t *)valuep = p->p_sessp->s_sid;
		mutex_exit(&p->p_splock);
		break;
	case UCRED:
		*(cred_t **)valuep = CRED();
		break;
	default:
		return (-1);
	}

	return (0);
}

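/*
 * Illustrative sketch (hypothetical driver code): fetching the current
 * time in seconds and the caller's process ID, checking for failure:
 *
 *	time_t now;
 *	pid_t pid;
 *
 *	if (drv_getparm(TIME, &now) == -1 || drv_getparm(PPID, &pid) == -1)
 *		return (EINVAL);	-- hypothetical error handling
 */
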
/*
 * set value of kernel parameter associated with parm
 */
int
drv_setparm(unsigned int parm, unsigned long value)
{
	switch (parm) {
	case SYSRINT:
		CPU_STATS_ADDQ(CPU, sys, rcvint, value);
		break;
	case SYSXINT:
		CPU_STATS_ADDQ(CPU, sys, xmtint, value);
		break;
	case SYSMINT:
		CPU_STATS_ADDQ(CPU, sys, mdmint, value);
		break;
	case SYSRAWC:
		CPU_STATS_ADDQ(CPU, sys, rawch, value);
		break;
	case SYSCANC:
		CPU_STATS_ADDQ(CPU, sys, canch, value);
		break;
	case SYSOUTC:
		CPU_STATS_ADDQ(CPU, sys, outch, value);
		break;
	default:
		return (-1);
	}

	return (0);
}

/*
 * allocate space for buffer header and return pointer to it.
 * preferred means of obtaining space for a local buf header.
 * returns pointer to buf upon success, NULL for failure
 */
struct buf *
getrbuf(int sleep)
{
	struct buf *bp;

	bp = kmem_alloc(sizeof (struct buf), sleep);
	if (bp == NULL)
		return (NULL);
	bioinit(bp);

	return (bp);
}

/*
 * free up space allocated by getrbuf()
 */
void
freerbuf(struct buf *bp)
{
	biofini(bp);
	kmem_free(bp, sizeof (struct buf));
}

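/*
 * Illustrative sketch (hypothetical driver code): using a locally owned
 * buf header for a private transfer:
 *
 *	struct buf *bp;
 *
 *	if ((bp = getrbuf(KM_NOSLEEP)) == NULL)
 *		return (ENOMEM);
 *	-- ... fill in bp and perform the transfer ...
 *	freerbuf(bp);
 */
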
/*
 * convert byte count input to logical page units
 * (byte counts that are not a page-size multiple
 * are rounded down)
 */
pgcnt_t
btop(size_t numbytes)
{
	return (numbytes >> PAGESHIFT);
}

/*
 * convert byte count input to logical page units
 * (byte counts that are not a page-size multiple
 * are rounded up)
 */
pgcnt_t
btopr(size_t numbytes)
{
	return ((numbytes + PAGEOFFSET) >> PAGESHIFT);
}

/*
 * convert size in pages to bytes.
 */
size_t
ptob(pgcnt_t numpages)
{
	return (numpages << PAGESHIFT);
}

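/*
 * Illustrative examples (assuming a hypothetical 4K page size, i.e.
 * PAGESHIFT == 12, PAGEOFFSET == 0xfff):
 *
 *	btop(8192) == 2		btop(8193) == 2		-- rounded down
 *	btopr(8192) == 2	btopr(8193) == 3	-- rounded up
 *	ptob(3) == 12288
 */
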
#define	MAXCLOCK_T LONG_MAX

/*
 * Convert from system time units (hz) to microseconds.
 *
 * If ticks <= 0, return 0.
 * If converting ticks to usecs would overflow, return MAXCLOCK_T.
 * Otherwise, convert ticks to microseconds.
 */
clock_t
drv_hztousec(clock_t ticks)
{
	if (ticks <= 0)
		return (0);

	if (ticks > MAXCLOCK_T / usec_per_tick)
		return (MAXCLOCK_T);

	return (TICK_TO_USEC(ticks));
}

/*
 * Convert from microseconds to system time units (hz), rounded up.
 *
 * If microsecs <= 0, return 0.
 * Otherwise, convert microseconds to ticks, rounding up.
 */
clock_t
drv_usectohz(clock_t microsecs)
{
	if (microsecs <= 0)
		return (0);

	return (USEC_TO_TICK_ROUNDUP(microsecs));
}

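/*
 * Illustrative sketch (hypothetical driver code): converting a 10 ms
 * interval into ticks for use with timeout(9F):
 *
 *	clock_t ticks = drv_usectohz(10 * 1000);	-- 10 ms, rounded up
 *
 *	(void) timeout(xx_callback, xx, ticks);		-- hypothetical callback
 */
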
/*
 * drv_usecwait implemented in each architecture's machine
 * specific code somewhere. For sparc, it is the alternate entry
 * to usec_delay (eventually usec_delay goes away). See
 * sparc/os/ml/sparc_subr.s
 */

/*
 * bcanputnext, canputnext assume called from timeout, bufcall,
 * or esballoc free routines. Since these are driven by clock
 * interrupts instead of system calls, the appropriate plumbing
 * locks have not been acquired.
 */
int
bcanputnext(queue_t *q, unsigned char band)
{
	int	ret;

	claimstr(q);
	ret = bcanput(q->q_next, band);
	releasestr(q);

	return (ret);
}

int
canputnext(queue_t *q)
{
	queue_t	*qofsq = q;
	struct stdata *stp = STREAM(q);
	kmutex_t *sdlock;

	TRACE_1(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_IN,
	    "canputnext?:%p\n", q);

	if (stp->sd_ciputctrl != NULL) {
		int ix = CPU->cpu_seqid & stp->sd_nciputctrl;
		sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
		mutex_enter(sdlock);
	} else
		mutex_enter(sdlock = &stp->sd_reflock);

	/* get next module forward with a service queue */
	q = q->q_next->q_nfsrv;
	ASSERT(q != NULL);

	/* this is for loopback transports, they should not do a canputnext */
	ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(qofsq));

	if (!(q->q_flag & QFULL)) {
		mutex_exit(sdlock);
		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
		    "canputnext:%p %d", q, 1);
		return (1);
	}

	if (sdlock != &stp->sd_reflock) {
		mutex_exit(sdlock);
		mutex_enter(&stp->sd_reflock);
	}

	/* the above is the most frequently used path */
	stp->sd_refcnt++;
	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
	mutex_exit(&stp->sd_reflock);

	mutex_enter(QLOCK(q));
	if (q->q_flag & QFULL) {
		q->q_flag |= QWANTW;
		mutex_exit(QLOCK(q));
		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
		    "canputnext:%p %d", q, 0);
		releasestr(qofsq);

		return (0);
	}
	mutex_exit(QLOCK(q));
	TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
	    "canputnext:%p %d", q, 1);
	releasestr(qofsq);

	return (1);
}

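/*
 * Illustrative sketch (hypothetical module "xx"): the canonical
 * flow-control pattern in a service routine -- forward messages only
 * while the next queue can absorb them; putbq() plus the QWANTW flag
 * set above ensure we are back-enabled when the congestion clears:
 *
 *	static int
 *	xxrsrv(queue_t *q)
 *	{
 *		mblk_t *mp;
 *
 *		while ((mp = getq(q)) != NULL) {
 *			if (canputnext(q)) {
 *				putnext(q, mp);
 *			} else {
 *				(void) putbq(q, mp);
 *				break;
 *			}
 *		}
 *		return (0);
 *	}
 */
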
/*
 * Open has progressed to the point where it is safe to send/receive messages.
 *
 * "qprocson enables the put and service routines of the driver
 * or module... Prior to the call to qprocson, the put and service
 * routines of a newly pushed module or newly opened driver are
 * disabled. For the module, messages flow around it as if it
 * were not present in the stream... qprocson must be called by
 * the first open of a module or driver after allocation and
 * initialization of any resource on which the put and service
 * routines depend."
 *
 * Note that before calling qprocson a module/driver could itself cause its
 * put or service procedures to be run by using put() or qenable().
 */
void
qprocson(queue_t *q)
{
	ASSERT(q->q_flag & QREADR);
	/*
	 * Do not call insertq() if it is a re-open. But if _QINSERTING
	 * is set, q_next will not be NULL and we need to call insertq().
	 */
	if ((q->q_next == NULL && WR(q)->q_next == NULL) ||
	    (q->q_flag & _QINSERTING))
		insertq(STREAM(q), q);
}

/*
 * Close has reached a point where it can no longer allow put/service
 * into the queue.
 *
 * "qprocsoff disables the put and service routines of the driver
 * or module... When the routines are disabled in a module, messages
 * flow around the module as if it were not present in the stream.
 * qprocsoff must be called by the close routine of a driver or module
 * before deallocating any resources on which the driver/module's
 * put and service routines depend. qprocsoff will remove the
 * queue's service routines from the list of service routines to be
 * run and waits until any concurrent put or service routines are
 * finished."
 *
 * Note that after calling qprocsoff a module/driver could itself cause its
 * put procedures to be run by using put().
 */
void
qprocsoff(queue_t *q)
{
	ASSERT(q->q_flag & QREADR);
	if (q->q_flag & QWCLOSE) {
		/* Called more than once */
		return;
	}
	disable_svc(q);
	removeq(q);
}

608 * "freezestr() freezes the state of the entire STREAM containing
609 * the queue pair q. A frozen STREAM blocks any thread
610 * attempting to enter any open, close, put or service routine
611 * belonging to any queue instance in the STREAM, and blocks
612 * any thread currently within the STREAM if it attempts to put
613 * messages onto or take messages off of any queue within the
614 * STREAM (with the sole exception of the caller). Threads
615 * blocked by this mechanism remain so until the STREAM is
616 * thawed by a call to unfreezestr().
618 * Use strblock to set SQ_FROZEN in all syncqs in the stream (prevents
619 * further entry into put, service, open, and close procedures) and
620 * grab (and hold) all the QLOCKs in the stream (to block putq, getq etc.)
622 * Note: this has to be the only code that acquires one QLOCK while holding
623 * another QLOCK (otherwise we would have locking hirarchy/ordering violations.)
626 freezestr(queue_t
*q
)
628 struct stdata
*stp
= STREAM(q
);
631 * Increment refcnt to prevent q_next from changing during the strblock
632 * as well as while the stream is frozen.
637 ASSERT(stp
->sd_freezer
== NULL
);
638 stp
->sd_freezer
= curthread
;
639 for (q
= stp
->sd_wrq
; q
!= NULL
; q
= SAMESTR(q
) ? q
->q_next
: NULL
) {
640 mutex_enter(QLOCK(q
));
641 mutex_enter(QLOCK(RD(q
)));
/*
 * Undo what freezestr did.
 * Have to drop the QLOCKs before the strunblock since strunblock will
 * potentially call other put procedures.
 */
void
unfreezestr(queue_t *q)
{
	struct stdata *stp = STREAM(q);
	queue_t	*q1;

	for (q1 = stp->sd_wrq; q1 != NULL;
	    q1 = SAMESTR(q1) ? q1->q_next : NULL) {
		mutex_exit(QLOCK(q1));
		mutex_exit(QLOCK(RD(q1)));
	}
	ASSERT(stp->sd_freezer == curthread);
	stp->sd_freezer = NULL;
	strunblock(q);
	releasestr(q);
}

/*
 * Used by open and close procedures to "sleep" waiting for messages to
 * arrive. Note: can only be used in open and close procedures.
 *
 * Lower the gate and let in either messages on the syncq (if there are
 * any) or put/service procedures.
 *
 * If the queue has an outer perimeter this will not prevent entry into this
 * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
 * exclusive access to the outer perimeter.)
 *
 * Return 0 if the cv_wait_sig was interrupted; otherwise 1.
 *
 * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
 * otherwise put entry points were not blocked in the first place. If this is
 * SQ_CIOC then qwait_sig is used to wait for the service procedure to run
 * since the syncq is always SQ_CIPUT if it is SQ_CIOC.
 *
 * Note that SQ_EXCL is dropped and SQ_WANTEXITWAKEUP set in sq_flags
 * atomically under sq_putlocks to make sure putnext will not miss a pending
 * wakeup.
 */
int
qwait_sig(queue_t *q)
{
	syncq_t		*sq, *outer;
	uint_t		flags;
	int		ret = 1;
	int		is_sq_cioc;

	/*
	 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
	 * while detecting all cases where the perimeter is entered
	 * so that qwait_sig can return to the caller.
	 *
	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
	 * wait for a thread to leave the syncq.
	 */
	sq = q->q_syncq;
	ASSERT(sq);
	is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
	ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
	outer = sq->sq_outer;

	/*
	 * XXX this does not work if there is only an outer perimeter.
	 * The semantics of qwait/qwait_sig are undefined in this case.
	 */
	if (outer)
		outer_exit(outer);

	mutex_enter(SQLOCK(sq));
	if (is_sq_cioc == 0) {
		SQ_PUTLOCKS_ENTER(sq);
	}
	flags = sq->sq_flags;
	/*
	 * Drop SQ_EXCL and sq_count but hold the SQLOCK
	 * to prevent any undetected entry and exit into the perimeter.
	 */
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	if (is_sq_cioc == 0) {
		ASSERT(flags & SQ_EXCL);
		flags &= ~SQ_EXCL;
	}
	/*
	 * Unblock any thread blocked in an entersq or outer_enter.
	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
	 * since that could lead to livelock with two threads in
	 * qwait for the same (per module) inner perimeter.
	 */
	if (flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		flags &= ~SQ_WANTWAKEUP;
	}
	sq->sq_flags = flags;
	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		/* drain_syncq() drops SQLOCK */
		drain_syncq(sq);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		entersq(sq, SQ_OPENCLOSE);
		return (1);
	}
	/*
	 * Sleep on sq_exitwait to only be woken up when threads leave the
	 * put or service procedures. We can not sleep on sq_wait since an
	 * outer_exit in a qwait running in the same outer perimeter would
	 * cause a livelock "ping-pong" between two or more qwait'ers.
	 */
	do {
		sq->sq_flags |= SQ_WANTEXWAKEUP;
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		ret = cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq));
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_ENTER(sq);
		}
	} while (ret && (sq->sq_flags & SQ_WANTEXWAKEUP));
	if (is_sq_cioc == 0) {
		SQ_PUTLOCKS_EXIT(sq);
	}
	mutex_exit(SQLOCK(sq));

	/*
	 * Re-enter the perimeters again
	 */
	entersq(sq, SQ_OPENCLOSE);
	return (ret);
}

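/*
 * Illustrative sketch (hypothetical module "xx"): an open or close
 * routine waiting for a reply message that the put procedure will
 * deposit; qwait_sig() lets the put/service routines run while we wait
 * and returns 0 if a signal interrupts the wait:
 *
 *	while (xx->xx_reply == NULL) {
 *		if (!qwait_sig(q))
 *			return (EINTR);
 *	}
 */
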
/*
 * Used by open and close procedures to "sleep" waiting for messages to
 * arrive. Note: can only be used in open and close procedures.
 *
 * Lower the gate and let in either messages on the syncq (if there are
 * any) or put/service procedures.
 *
 * If the queue has an outer perimeter this will not prevent entry into this
 * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
 * exclusive access to the outer perimeter.)
 *
 * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
 * otherwise put entry points were not blocked in the first place. If this is
 * SQ_CIOC then qwait is used to wait for the service procedure to run since
 * the syncq is always SQ_CIPUT if it is SQ_CIOC.
 *
 * Note that SQ_EXCL is dropped and SQ_WANTEXITWAKEUP set in sq_flags
 * atomically under sq_putlocks to make sure putnext will not miss a pending
 * wakeup.
 */
void
qwait(queue_t *q)
{
	syncq_t		*sq, *outer;
	uint_t		flags;
	int		is_sq_cioc;

	/*
	 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
	 * while detecting all cases where the perimeter is entered
	 * so that qwait can return to the caller.
	 *
	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
	 * wait for a thread to leave the syncq.
	 */
	sq = q->q_syncq;
	ASSERT(sq);
	is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
	ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
	outer = sq->sq_outer;

	/*
	 * XXX this does not work if there is only an outer perimeter.
	 * The semantics of qwait/qwait_sig are undefined in this case.
	 */
	if (outer)
		outer_exit(outer);

	mutex_enter(SQLOCK(sq));
	if (is_sq_cioc == 0) {
		SQ_PUTLOCKS_ENTER(sq);
	}
	flags = sq->sq_flags;
	/*
	 * Drop SQ_EXCL and sq_count but hold the SQLOCK
	 * to prevent any undetected entry and exit into the perimeter.
	 */
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	if (is_sq_cioc == 0) {
		ASSERT(flags & SQ_EXCL);
		flags &= ~SQ_EXCL;
	}
	/*
	 * Unblock any thread blocked in an entersq or outer_enter.
	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
	 * since that could lead to livelock with two threads in
	 * qwait for the same (per module) inner perimeter.
	 */
	if (flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		flags &= ~SQ_WANTWAKEUP;
	}
	sq->sq_flags = flags;
	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		/* drain_syncq() drops SQLOCK */
		drain_syncq(sq);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		entersq(sq, SQ_OPENCLOSE);
		return;
	}
	/*
	 * Sleep on sq_exitwait to only be woken up when threads leave the
	 * put or service procedures. We can not sleep on sq_wait since an
	 * outer_exit in a qwait running in the same outer perimeter would
	 * cause a livelock "ping-pong" between two or more qwait'ers.
	 */
	do {
		sq->sq_flags |= SQ_WANTEXWAKEUP;
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_exitwait, SQLOCK(sq));
		if (is_sq_cioc == 0) {
			SQ_PUTLOCKS_ENTER(sq);
		}
	} while (sq->sq_flags & SQ_WANTEXWAKEUP);
	if (is_sq_cioc == 0) {
		SQ_PUTLOCKS_EXIT(sq);
	}
	mutex_exit(SQLOCK(sq));

	/*
	 * Re-enter the perimeters again
	 */
	entersq(sq, SQ_OPENCLOSE);
}

/*
 * Used for the synchronous streams entrypoints when sleeping outside
 * the perimeters. Must never be called from a regular put entrypoint.
 *
 * There's no need to grab sq_putlocks here (which only exist for CIPUT
 * sync queues). If it is a CIPUT sync queue, put entry points were not
 * blocked in the first place by rwnext/infonext, which are treated as
 * put entrypoints for perimeter synchronization purposes.
 *
 * Consolidation private.
 */
boolean_t
qwait_rw(queue_t *q)
{
	syncq_t		*sq;
	uint_t		flags;
	boolean_t	gotsignal = B_FALSE;

	/*
	 * Perform the same operations as a leavesq(sq, SQ_PUT)
	 * while detecting all cases where the perimeter is entered
	 * so that qwait_rw can return to the caller.
	 *
	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
	 * wait for a thread to leave the syncq.
	 */
	sq = q->q_syncq;
	ASSERT(sq);

	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	/*
	 * Drop SQ_EXCL and sq_count but hold the SQLOCK to prevent any
	 * undetected entry and exit into the perimeter.
	 */
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	if (!(sq->sq_type & SQ_CIPUT)) {
		ASSERT(flags & SQ_EXCL);
		flags &= ~SQ_EXCL;
	}
	/*
	 * Unblock any thread blocked in an entersq or outer_enter.
	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
	 * since that could lead to livelock with two threads in
	 * qwait for the same (per module) inner perimeter.
	 */
	if (flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		flags &= ~SQ_WANTWAKEUP;
	}
	sq->sq_flags = flags;
	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
		/* drain_syncq() drops SQLOCK */
		drain_syncq(sq);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		entersq(sq, SQ_PUT);
		return (B_FALSE);
	}
	/*
	 * Sleep on sq_exitwait to only be woken up when threads leave the
	 * put or service procedures. We can not sleep on sq_wait since an
	 * outer_exit in a qwait running in the same outer perimeter would
	 * cause a livelock "ping-pong" between two or more qwait'ers.
	 */
	do {
		sq->sq_flags |= SQ_WANTEXWAKEUP;
		if (cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq)) <= 0) {
			sq->sq_flags &= ~SQ_WANTEXWAKEUP;
			gotsignal = B_TRUE;
			break;
		}
	} while (sq->sq_flags & SQ_WANTEXWAKEUP);
	mutex_exit(SQLOCK(sq));

	/*
	 * Re-enter the perimeters again
	 */
	entersq(sq, SQ_PUT);

	return (gotsignal);
}

/*
 * Asynchronously upgrade to exclusive access at either the inner or
 * outer perimeter.
 */
void
qwriter(queue_t *q, mblk_t *mp, void (*func)(), int perim)
{
	if (perim == PERIM_INNER)
		qwriter_inner(q, mp, func);
	else if (perim == PERIM_OUTER)
		qwriter_outer(q, mp, func);
	else
		panic("qwriter: wrong \"perimeter\" parameter");
}

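/*
 * Illustrative sketch (hypothetical module "xx"): a concurrent put
 * routine deferring work that needs writer (exclusive) access to the
 * inner perimeter; per qwriter(9F), the callback is invoked with the
 * queue and message once exclusive access has been obtained:
 *
 *	static void
 *	xx_set_ppa(queue_t *q, mblk_t *mp)
 *	{
 *		-- runs single-threaded; may modify shared state
 *	}
 *
 *	-- in xxwput():  qwriter(q, mp, xx_set_ppa, PERIM_INNER);
 */
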
/*
 * Schedule a synchronous streams timeout
 */
timeout_id_t
qtimeout(queue_t *q, void (*func)(void *), void *arg, clock_t tim)
{
	syncq_t		*sq;
	callbparams_t	*cbp;
	timeout_id_t	tid;

	sq = q->q_syncq;
	/*
	 * you don't want the timeout firing before its params are set up
	 * callbparams_alloc() acquires SQLOCK(sq)
	 * qtimeout() can't fail and can't sleep, so panic if memory is not
	 * available.
	 */
	cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP | KM_PANIC);
	/*
	 * the callbflags in the sq use the same flags. They get anded
	 * in the callbwrapper to determine if a qun* of this callback type
	 * is required. This is not a request to cancel.
	 */
	cbp->cbp_flags = SQ_CANCEL_TOUT;
	/* check new timeout version return codes */
	tid = timeout(qcallbwrapper, cbp, tim);
	cbp->cbp_id = (callbparams_id_t)tid;
	mutex_exit(SQLOCK(sq));
	/* use local id because the cbp memory could be free by now */
	return (tid);
}

bufcall_id_t
qbufcall(queue_t *q, size_t size, uint_t pri, void (*func)(void *), void *arg)
{
	syncq_t		*sq;
	callbparams_t	*cbp;
	bufcall_id_t	bid;

	sq = q->q_syncq;
	/*
	 * you don't want the bufcall firing before its params are set up
	 * callbparams_alloc() acquires SQLOCK(sq) if successful.
	 */
	cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP);
	if (cbp == NULL)
		return ((bufcall_id_t)0);

	/*
	 * the callbflags in the sq use the same flags. They get anded
	 * in the callbwrapper to determine if a qun* of this callback type
	 * is required. This is not a request to cancel.
	 */
	cbp->cbp_flags = SQ_CANCEL_BUFCALL;
	/* check new timeout version return codes */
	bid = bufcall(size, pri, qcallbwrapper, cbp);
	cbp->cbp_id = (callbparams_id_t)bid;
	if (bid == 0) {
		callbparams_free(sq, cbp);
	}
	mutex_exit(SQLOCK(sq));
	/* use local id because the params memory could be free by now */
	return (bid);
}

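/*
 * Illustrative sketch (hypothetical module "xx"): arming a synchronous
 * timeout and cancelling it before close completes; because the
 * callback runs inside the perimeter, it may touch xx state safely:
 *
 *	xx->xx_tid = qtimeout(q, xx_tick, xx, drv_usectohz(500000));
 *	...
 *	(void) quntimeout(q, xx->xx_tid);	-- before qprocsoff() in close
 */
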
/*
 * cancel a timeout callback which enters the inner perimeter.
 * cancelling of all callback types on a given syncq is serialized.
 * the SQ_CALLB_BYPASSED flag indicates that the callback fn did
 * not execute. The quntimeout return value needs to reflect this.
 * As with our existing callback programming model - callbacks must
 * be cancelled before a close completes - so ensuring that the sq
 * is valid when the callback wrapper is executed.
 */
clock_t
quntimeout(queue_t *q, timeout_id_t id)
{
	syncq_t *sq = q->q_syncq;
	clock_t ret;

	mutex_enter(SQLOCK(sq));
	/* callbacks are processed serially on each syncq */
	while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
	}
	sq->sq_cancelid = (callbparams_id_t)id;
	sq->sq_callbflags = SQ_CANCEL_TOUT;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		sq->sq_flags &= ~SQ_WANTWAKEUP;
	}
	mutex_exit(SQLOCK(sq));
	ret = untimeout(id);
	mutex_enter(SQLOCK(sq));
	if (ret != -1) {
		/* The wrapper was never called - need to free based on id */
		callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_TOUT);
	}
	if (sq->sq_callbflags & SQ_CALLB_BYPASSED) {
		ret = 0;	/* this was how much time left */
	}
	sq->sq_callbflags = 0;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		sq->sq_flags &= ~SQ_WANTWAKEUP;
	}
	mutex_exit(SQLOCK(sq));

	return (ret);
}

void
qunbufcall(queue_t *q, bufcall_id_t id)
{
	syncq_t *sq = q->q_syncq;

	mutex_enter(SQLOCK(sq));
	/* callbacks are processed serially on each syncq */
	while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
	}
	sq->sq_cancelid = (callbparams_id_t)id;
	sq->sq_callbflags = SQ_CANCEL_BUFCALL;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		sq->sq_flags &= ~SQ_WANTWAKEUP;
	}
	mutex_exit(SQLOCK(sq));
	unbufcall(id);
	mutex_enter(SQLOCK(sq));
	/*
	 * No indication from unbufcall if the callback has already run.
	 * Always attempt to free it.
	 */
	callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_BUFCALL);
	sq->sq_callbflags = 0;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		cv_broadcast(&sq->sq_wait);
		sq->sq_flags &= ~SQ_WANTWAKEUP;
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Associate the stream with an instance of the bottom driver. This
 * function is called by APIs that establish or modify the hardware
 * association (ppa) of an open stream. Two examples of such
 * post-open(9E) APIs are the dlpi(7p) DL_ATTACH_REQ message, and the
 * ndd(1M) "instance=" ioctl(2). This interface may be called from a
 * stream driver's wput procedure and from within syncq perimeters,
 * so it can't block.
 *
 * The qassociate() "model" is that it should drive attach(9E), yet it
 * can't really do that because driving attach(9E) is a blocking
 * operation. Instead, the qassociate() implementation has complex
 * dependencies on the implementation behavior of other parts of the
 * kernel to ensure all appropriate instances (ones that have not been
 * made inaccessible by DR) are attached at stream open() time, and
 * that they will not autodetach. The code relies on the fact that an
 * open() of a stream that ends up using qassociate() always occurs on
 * a minor node created with CLONE_DEV. The open() comes through
 * clnopen() and since clnopen() calls ddi_hold_installed_driver() we
 * attach all instances and mark them DN_NO_AUTODETACH (given
 * DN_DRIVER_HELD is maintained correctly).
 *
 * Since qassociate() can't really drive attach(9E), there are corner
 * cases where the compromise described above leads to qassociate()
 * returning failure. This can happen when administrative functions
 * that cause detach(9E), such as "update_drv" or "modunload -i", are
 * performed on the driver between the time the stream was opened and
 * the time its hardware association was established. Although this can
 * theoretically be an arbitrary amount of time, in practice the window
 * is usually quite small, since applications almost always issue their
 * hardware association request immediately after opening the stream,
 * and do not typically switch association while open. When these
 * corner cases occur, and qassociate() finds the requested instance
 * detached, it will return failure. This failure should be propagated
 * to the requesting administrative application using the appropriate
 * post-open(9E) API error mechanism.
 *
 * All qassociate() callers are expected to check for and gracefully handle
 * failure return, propagating errors back to the requesting administrative
 * application.
 */
int
qassociate(queue_t *q, int instance)
{
	dev_info_t *dip;
	major_t major;
	vnode_t *vp;

	if (instance == -1) {
		ddi_assoc_queue_with_devi(q, NULL);
		return (0);
	}

	vp = STREAM(q)->sd_vnode;
	major = getmajor(vp->v_rdev);
	dip = ddi_hold_devi_by_instance(major, instance,
	    E_DDI_HOLD_DEVI_NOATTACH);
	if (dip == NULL)
		return (-1);

	ddi_assoc_queue_with_devi(q, dip);
	ddi_release_devi(dip);
	return (0);
}

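/*
 * Illustrative sketch (hypothetical DLPI driver): handling DL_ATTACH_REQ
 * by associating the stream with the requested ppa and propagating a
 * failure through the DLPI error mechanism, as the comment above
 * requires:
 *
 *	if (qassociate(q, ppa) != 0) {
 *		dlerrorack(q, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
 *		return;
 *	}
 *	dlokack(q, mp, DL_ATTACH_REQ);
 */
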
/*
 * This routine is the SVR4MP 'replacement' for
 * hat_getkpfnum. The only major difference is
 * the return value for illegal addresses - since
 * sunm_getkpfnum() and srmmu_getkpfnum() both
 * return '-1' for bogus mappings, we can (more or
 * less) return the value directly.
 */
ppid_t
kvtoppid(caddr_t addr)
{
	return ((ppid_t)hat_getpfnum(kas.a_hat, addr));
}

/*
 * This is used to set the timeout value for cv_timedwait() or
 * cv_timedwait_sig().
 */
void
time_to_wait(clock_t *now, clock_t time)
{
	*now = ddi_get_lbolt() + time;
}

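/*
 * Illustrative sketch (hypothetical driver code): waiting up to one
 * second for a condition, using time_to_wait() to compute the absolute
 * deadline in lbolt ticks that cv_timedwait() expects:
 *
 *	clock_t deadline;
 *
 *	time_to_wait(&deadline, drv_usectohz(1000000));
 *	mutex_enter(&xx->xx_lock);
 *	while (!xx->xx_ready) {
 *		if (cv_timedwait(&xx->xx_cv, &xx->xx_lock, deadline) == -1)
 *			break;		-- timed out
 *	}
 *	mutex_exit(&xx->xx_lock);
 */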