/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/vnode.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/bitmap.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/sunldi.h>
#include <sys/netstack.h>
#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
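
/*
 * O_SAMESTR() is true only when q has a q_next neighbor whose QREADR flag
 * matches its own, i.e. both queues face the same direction and the q_next
 * link stays within a single stream rather than crossing into a stream head
 * or the twisted end of a pipe (compare the SAMESTR() macro in stream.h).
 */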
/*
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */
/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;
#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;
#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;
static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};
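
/*
 * Usage sketch: bumping one of the counters above is a single expression,
 * e.g. STRSTAT(taskqfails) expands to str_statistics.taskqfails.value.ui64++,
 * which is cheap enough to use directly in failure paths.
 */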
static kstat_t *str_kstat;
/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;			/* Unused */
/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service freebs
 * requests whose dispatch failed. Queues are put in the list specified by the
 * `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail' pointers
 * and freebs requests are put into `freebs_list' which has no tail pointer.
 * All three lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */
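
/*
 * Illustrative sketch (assumed shape, not verbatim from this file) of the
 * fallback pattern described above, for a queue qp and the lists/locks
 * declared below: when a non-sleeping taskq dispatch fails, the request is
 * queued for the backup thread instead, e.g.
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *	    qp, TQ_NOSLEEP) == TASKQID_INVALID) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(qp, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 */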
static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */
static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */
/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;
/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */
/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait till executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */
kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */
static void	*str_stack_init(netstackid_t stackid, netstack_t *ns);
static void	str_stack_shutdown(netstackid_t stackid, void *arg);
static void	str_stack_fini(netstackid_t stackid, void *arg);
/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer - IP - is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer to choose a large number
 * as the default value. For potential performance gain, this value is
 * tunable in /etc/system.
 */
int sq_max_size = 10000;
/*
 * The number of ciputctrl structures per syncq and stream we create when
 * ...
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;

/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;
/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_next --> dm_str == streamtab_2
 *                dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                    |       ______________/          |          |
 * dev_impl:    ...|x|y|...       module A         module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
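
/*
 * Illustrative note: the lifecycle described above is driven by
 * hold_dm()/rele_dm(). A driver attach path such as qattach() below does,
 * roughly,
 *
 *	if (NEED_DM(dp->d_dmp, qflag))
 *		dp->d_dmp = hold_dm(str, qflag, sqtype);
 *
 * taking (or creating) the shared perdm_t, and the matching rele_dm() at
 * detach/unload time drops dm_ref and frees the syncq when it reaches zero.
 */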
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;
static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
static int qprocsareon(queue_t *);

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);
struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;
/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/* Array of esballoc throttling queues, of length esbq_nelem */
static esb_queue_t *volatile system_esbq_array;
static int esbq_nelem;
static kmutex_t esbq_lock;
static int esbq_log2_cpus_per_q = 0;

/* Scale the system_esbq length by setting number of CPUs per queue. */
uint_t esbq_cpus_per_q = 1;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t esbq_timeout = 0x8;	/* timeout to process esb queue */
/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);
/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */
static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	/* ... (initializer elided in source extract) ... */
};

struct qinit passthru_rinit = {
	/* ... (initializer elided in source extract) ... */
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	/* ... (remaining fields elided in source extract) ... */
};
/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {					\
	EQUIV(head, tail);						\
	IMPLY(tail != NULL, tail->link == NULL);			\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail', using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {					\
	ASSERT(el->link == NULL);					\
	LISTCHECK(head, tail, link);					\
	if (head == NULL)						\
		head = el;						\
	else								\
		tail->link = el;					\
	tail = el;							\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail'
 * pointers using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {					\
	LISTCHECK(head, tail, link);					\
	el = head;							\
	if (head != NULL) {						\
		head = head->link;					\
		if (head == NULL)					\
			tail = NULL;					\
		el->link = NULL;					\
	}								\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * the result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {		\
	LISTCHECK(head, tail, link);					\
	chase = NULL;							\
	succeed = 0;							\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;						\
	if (curr != NULL) {						\
		succeed = 1;						\
		ASSERT(curr == el);					\
		if (chase != NULL)					\
			chase->link = curr->link;			\
		else							\
			head = curr->link;				\
		curr->link = NULL;					\
		if (curr == tail)					\
			tail = chase;					\
	}								\
	LISTCHECK(head, tail, link);					\
}
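
/*
 * Usage sketch (mirrors remove_runlist() below): removing a queue from the
 * background runlist looks like
 *
 *	mutex_enter(&service_queue);
 *	RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
 *	mutex_exit(&service_queue);
 *
 * where q_chase/q_curr are scratch queue_t pointers and `removed' reports
 * whether qp was actually found on the list.
 */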
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */
/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may be only enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/*							\
		 * Sanity check of priority field: empty queue should	\
		 * have zero priority					\
		 * and nqueues equal to zero.				\
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = ddi_get_lbolt();			\
		sq->sq_nqueues++;					\
	}								\
}
/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);				\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);		\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);			\
	/* Check that the queue is actually in the list */		\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);		\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);		\
	ASSERT(sq->sq_nqueues != 0);					\
	if (qp->q_sqprev == NULL) {					\
		/* First queue on list, make head q_sqnext */		\
		sq->sq_head = qp->q_sqnext;				\
	} else {							\
		/* Make prev->next == next */				\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;			\
	}								\
	if (qp->q_sqnext == NULL) {					\
		/* Last queue on list, make tail sqprev */		\
		sq->sq_tail = qp->q_sqprev;				\
	} else {							\
		/* Make next->prev == prev */				\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;			\
	}								\
	/* clear out references on this queue */			\
	qp->q_sqprev = qp->q_sqnext = NULL;				\
	qp->q_sqflags &= ~Q_SQQUEUED;					\
	/* If there is nothing queued, clear SQ_MESSAGES */		\
	if (sq->sq_head != NULL) {					\
		sq->sq_pri = sq->sq_head->q_spri;			\
	} else {							\
		sq->sq_flags &= ~SQ_MESSAGES;				\
		sq->sq_pri = 0;						\
	}								\
	sq->sq_nqueues--;						\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||		\
	    (sq->sq_flags & SQ_QUEUED) == 0);				\
}
/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif
/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
{									\
	ASSERT(MUTEX_HELD(QLOCK(qp)));					\
	ASSERT(qp->q_sqhead == NULL ||					\
	    (qp->q_sqtail != NULL &&					\
	    qp->q_sqtail->b_next == NULL));				\
	qp->q_syncqmsgs++;						\
	ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */		\
	if (qp->q_sqhead == NULL) {					\
		qp->q_sqhead = qp->q_sqtail = mp;			\
	} else {							\
		qp->q_sqtail->b_next = mp;				\
		qp->q_sqtail = mp;					\
	}								\
	ASSERT(qp->q_syncqmsgs > 0);					\
	set_qfull(qp);							\
}
#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count |= SQ_FASTPUT;		\
		}							\
	}								\
}

#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count &= ~SQ_FASTPUT;		\
		}							\
	}								\
}
/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		runservice(q);						\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}
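
/*
 * STR_SERVICE deliberately drops sd_qlock around its per-queue service call
 * so that service procedures may block or requeue work without holding the
 * stream head lock; sd_qhead is re-examined only after the lock is retaken.
 */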
/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);

	return (0);
}
/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}
/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	/* ... (read-queue field initialization elided in source extract) ... */
	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	/* ... (remaining write-queue fields elided in source extract) ... */
	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	/* ... (remaining syncq fields elided in source extract) ... */
	sq->sq_servcount = 0;
	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}
/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}
/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t *sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}
/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t *sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}
/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	return (0);
}
/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}
/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * stack.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}
void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	stdata_t *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;

	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}
/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		/* Not a supported event */
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}
/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t *proc;
	int sevent;

	info.si_signo = SIGPOLL;

	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;
		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}
		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}
/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t			major;
	cdevsw_impl_t		*dp;
	struct streamtab	*str;
	queue_t			*rq;
	queue_t			*wrq;
	uint32_t		qflag;
	uint32_t		sqtype;
	perdm_t			*dmp;
	int			error;
	int			sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error == 0) {
		leavesq(rq->q_syncq, SQ_OPENCLOSE);
		ASSERT(qprocsareon(rq));
		return (0);
	}

	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}
/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * successful open should have done qprocson()
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}
/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);

		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}
/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}
/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}
/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we can not reset QENAB unless we actually removed it from the service
 * list.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}
/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait till the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}
/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}

	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
	    curproc->p_pid)) == NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}
/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t n;
	int error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}
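
/*
 * l_index is handed back to user level as the mux id for I_LINK/I_PLINK;
 * because ++lnk_id happens under strresources, every link gets a unique,
 * monotonically increasing id, which findlinks() below uses to look a
 * link up again.
 */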
/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}
/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;

	/*
	 * if the lower stream is a pipe/FIFO, return, since link
	 * cycles can not happen on pipes/FIFOs
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
		}
		/*
		 * If ep->me_nodep is a FIFO (me_nodep == NULL),
		 * ignore the edge and move on. ep->me_nodep gets
		 * set to NULL in mux_addedge() if it is a FIFO.
		 */
		ep = np->mn_startp;
		np->mn_startp = ep->me_nextp;
		if (ep->me_nodep == NULL)
			continue;
		ep->me_nodep->mn_originp = np;
		np = ep->me_nodep;
	}
}
/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}
/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}
/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}
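
/*
 * The count checked above is the sum of sq_count and the per-CPU ciputctrl
 * put counts: SUM_SQ_PUTCOUNTS() folds each ciputctrl_count into `count'
 * while SQ_PUTLOCKS_ENTER() holds all of the ciputctrl locks, so a zero
 * result really means no thread is currently inside the perimeter.
 */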
/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) ||
		    (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}
int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;
	netstack_t *ns;
	str_stack_t *ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding mux_node entry past the end of mux_nodes[].
	 * For FIFO's we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared in the end of mlink when passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while passthru
	 * queue is in-place (it is not a proper module and doesn't have open
	 * entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set. After link_addpassthru()
	 * nothing may come from below since the pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change stream head. So, the
	 * correct sequence of actions is:
	 *
	 * 1) Set STRPLUMB
	 * 2) Call link_addpassthru()
	 * 3) Set STPLEX
	 * 4) Call setq and update the stream state
	 * 5) Clear STPLEX
	 * 6) Call link_rempassthru()
	 * 7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of lower muxs read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneous with
	 * setq processing, we disable the streamhead syncq and wait until
	 * existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
		cv_broadcast(&stpdown->sd_monitor);
		mutex_exit(&stpdown->sd_lock);

		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (error);
	}
	mutex_enter(&fpdown->f_tlock);
	fpdown->f_count++;
	mutex_exit(&fpdown->f_tlock);

	/*
	 * if we've made it here the linkage is all set up so we should also
	 * set up the layered driver linkages
	 */

	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
	if (cmd == I_LINK) {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
	} else {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
	}

	link_rempassthru(passq);

	mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);

	/*
	 * Mark the upper stream as having dependent links
	 * so that strclose can clean it up.
	 */
	if (cmd == I_LINK) {
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRHASLINKS;
		mutex_exit(&stp->sd_lock);
	}
	/*
	 * Wake up any other processes that may have been
	 * waiting on the lower stream. These will all
	 * proceed.
	 */
	mutex_enter(&stpdown->sd_lock);
	/* The passthru module is removed so we may release STRPLUMB */
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&rq->q_wait);
	cv_broadcast(&_WR(rq)->q_wait);
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);
	mutex_exit(&muxifier);
	*rvalp = linkp->li_lblk.l_index;
	netstack_rele(ss->ss_netstack);
	return (0);
}

int
mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
{
	int ret;
	struct file *fpdown;

	fpdown = getf(arg);
	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
	if (fpdown != NULL)
		releasef(arg);
	return (ret);
}
/*
 * Unlink a multiplexor link. Stp is the controlling stream for the
 * link, and linkp points to the link's entry in the linkinfo list.
 * The muxifier lock must be held on entry and is dropped on exit.
 *
 * NOTE : Currently it is assumed that mux would process all the messages
 * sitting on it's queue before ACKing the UNLINK. It is the responsibility
 * of the mux to handle all the messages that arrive before UNLINK.
 * If the mux has to send down messages on its lower stream before
 * ACKing I_UNLINK, then it *should* know to handle messages even
 * after the UNLINK is acked (actually it should be able to handle till we
 * re-block the read side of the pass queue here). If the mux does not
 * open up the lower stream, any messages that arrive during UNLINK
 * will be put in the stream head. In the case of lower stream opening
 * up, some messages might land in the stream head depending on when
 * the message arrived and when the read side of the pass queue was
 * re-blocked.
 */
int
munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
    str_stack_t *ss)
{
	struct strioctl strioc;
	struct stdata *stpdown;
	queue_t *rq, *wrq;
	queue_t	*passq;
	syncq_t *passyncq;
	int error = 0;
	file_t *fpdown;

	ASSERT(MUTEX_HELD(&muxifier));

	stpdown = linkp->li_fpdown->f_vnode->v_stream;

	/*
	 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);

	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of lower muxs read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	if ((flag & LINKTYPEMASK) == LINKNORMAL)
		strioc.ic_cmd = I_UNLINK;
	else
		strioc.ic_cmd = I_PUNLINK;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);

	/*
	 * If there was an error and this is not called via strclose,
	 * return to the user. Otherwise, pretend there was no error
	 * and close the link.
	 */
	if (error) {
		if (flag & LINKCLOSE) {
			cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
			    "unlink ioctl, closing anyway (%d)\n", error);
		} else {
			link_rempassthru(passq);
			mutex_enter(&stpdown->sd_lock);
			stpdown->sd_flag &= ~STRPLUMB;
			cv_broadcast(&stpdown->sd_monitor);
			mutex_exit(&stpdown->sd_lock);
			mutex_exit(&muxifier);
			return (error);
		}
	}

	mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
	fpdown = linkp->li_fpdown;
	lbfree(linkp);

	/*
	 * We go ahead and drop muxifier here--it's a nasty global lock that
	 * can slow others down. It's okay to since attempts to mlink() this
	 * stream will be stopped because STPLEX is still set in the stdata
	 * structure, and munlink() is stopped because mux_rmvedge() and
	 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
	 * respectively. Note that we defer the closef() of fpdown until
	 * after we drop muxifier since strclose() can call munlinkall().
	 */
	mutex_exit(&muxifier);

	wrq = stpdown->sd_wrq;
	rq = _RD(wrq);

	/*
	 * Get rid of outstanding service procedure runs, before we make
	 * it a stream head, since a stream head doesn't have any service
	 * procedure.
	 */
	disable_svc(rq);
	wait_svc(rq);

	/*
	 * Since we don't disable the syncq for QPERMOD, we wait for whatever
	 * is queued up to be finished. mux should take care that nothing is
	 * send down to this queue. We should do it now as we're going to block
	 * passyncq if it was unblocked.
	 */
	if (wrq->q_flag & QPERMOD) {
		syncq_t	*sq = wrq->q_syncq;

		mutex_enter(SQLOCK(sq));
		while (wrq->q_sqflags & Q_SQQUEUED) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED)) {

		syncq_t *sq, *outer;

		/*
		 * Messages could be flowing from underneath. We will
		 * block the read side of the passq. This would be
		 * sufficient for QPAIR and QPERQ muxes to ensure
		 * that no data is flowing up into this queue
		 * and hence no thread active in this instance of
		 * lower mux. But for QPERMOD and QMTOUTPERIM there
		 * could be messages on the inner and outer/inner
		 * syncqs respectively. We will wait for them to drain.
		 * Because passq is blocked messages end up in the syncq
		 * And qfill_syncq could possibly end up setting QFULL
		 * which will access the rq->q_flag. Hence, we have to
		 * acquire the QLOCK in setq.
		 *
		 * XXX Messages can also flow from top into this
		 * queue though the unlink is over (Ex. some instance
		 * in putnext() called from top that has still not
		 * accessed this queue. And also putq(lowerq) ?).
		 * Solution : How about blocking the l_qtop queue ?
		 * Do we really care about such pure D_MP muxes ?
		 */

		blocksq(passyncq, SQ_BLOCKED, 0);

		sq = rq->q_syncq;
		if ((outer = sq->sq_outer) != NULL) {

			/*
			 * We have to just wait for the outer sq_count
			 * drop to zero. As this does not prevent new
			 * messages to enter the outer perimeter, this
			 * is subject to starvation.
			 *
			 * NOTE :Because of blocksq above, messages could
			 * be in the inner syncq only because of some
			 * thread holding the outer perimeter exclusively.
			 * Hence it would be sufficient to wait for the
			 * exclusive holder of the outer perimeter to drain
			 * the inner and outer syncqs. But we will not depend
			 * on this feature and hence check the inner syncqs
			 * separately.
			 */
			wait_syncq(outer);
		}

		/*
		 * There could be messages destined for
		 * this queue. Let the exclusive holder
		 * drain it.
		 */
		wait_syncq(sq);

		ASSERT((rq->q_flag & QPERMOD) ||
		    ((rq->q_syncq->sq_head == NULL) &&
		    (_WR(rq)->q_syncq->sq_head == NULL)));
	}

	/*
	 * We haven't taken care of QPERMOD case yet. QPERMOD is a special
	 * case as we don't disable its syncq or remove it off the syncq
	 * list.
	 */
	if (rq->q_flag & QPERMOD) {
		syncq_t	*sq = rq->q_syncq;

		mutex_enter(SQLOCK(sq));
		while (rq->q_sqflags & Q_SQQUEUED) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}

	/*
	 * flush_syncq changes states only when there are some messages to
	 * free, i.e. when it returns non-zero value to return.
	 */
	ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
	ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);

	/*
	 * Nobody else should know about this queue now.
	 * If the mux did not process the messages before
	 * acking the I_UNLINK, free them now.
	 */

	flushq(rq, FLUSHALL);
	flushq(_WR(rq), FLUSHALL);

	/*
	 * Convert the mux lower queue into a stream head queue.
	 * Turn off STPLEX before we turn on the stream by removing the passq.
	 */
	rq->q_ptr = wrq->q_ptr = stpdown;
	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));

	enable_svc(rq);

	/*
	 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
	 * needs to be set to prevent reopen() of the stream - such reopen may
	 * try to call non-existent pass queue open routine and panic.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag &= ~STPLEX;
	mutex_exit(&stpdown->sd_lock);

	ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
	    ((flag & LINKTYPEMASK) == LINKPERSIST));

	/* clean up the layered driver linkages */
	if ((flag & LINKTYPEMASK) == LINKNORMAL) {
		ldi_munlink_fp(stp, fpdown, LINKNORMAL);
	} else {
		ldi_munlink_fp(stp, fpdown, LINKPERSIST);
	}

	link_rempassthru(passq);

	/*
	 * Now all plumbing changes are finished and STRPLUMB is no
	 * longer needed.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);

	(void) closef(fpdown);
	return (0);
}
/*
 * Unlink all multiplexor links for which stp is the controlling stream.
 * Return 0, or a non-zero errno on failure.
 */
int
munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
{
	linkinfo_t *linkp;
	int error = 0;

	mutex_enter(&muxifier);
	while (linkp = findlinks(stp, 0, flag, ss)) {
		/*
		 * munlink() releases the muxifier lock.
		 */
		if (error = munlink(stp, linkp, flag, crp, rvalp, ss))
			return (error);
		mutex_enter(&muxifier);
	}
	mutex_exit(&muxifier);
	return (0);
}
/*
 * A multiplexor link has been made. Add an
 * edge to the directed graph.
 */
void
mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	major_t upmaj;
	major_t lomaj;

	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[upmaj];
	if (np->mn_outp) {
		ep = np->mn_outp;
		while (ep->me_nextp)
			ep = ep->me_nextp;
		ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
		ep = ep->me_nextp;
	} else {
		np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
		ep = np->mn_outp;
	}
	ep->me_nextp = NULL;
	ep->me_muxid = muxid;
	/*
	 * Save the dev_t for the purposes of str_stack_shutdown.
	 * str_stack_shutdown assumes that the device allows reopen, since
	 * this dev_t is the one after any cloning by xx_open().
	 * Would prefer finding the dev_t from before any cloning,
	 * but specfs doesn't retain that.
	 */
	ep->me_dev = upstp->sd_vnode->v_rdev;
	if (lostp->sd_vnode->v_type == VFIFO)
		ep->me_nodep = NULL;
	else
		ep->me_nodep = &ss->ss_mux_nodes[lomaj];
}
/*
 * A multiplexor link has been removed. Remove the
 * edge in the directed graph.
 */
void
mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	struct mux_edge *pep = NULL;
	major_t upmaj;

	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[upmaj];
	ASSERT(np->mn_outp != NULL);
	ep = np->mn_outp;
	while (ep) {
		if (ep->me_muxid == muxid) {
			if (pep)
				pep->me_nextp = ep->me_nextp;
			else
				np->mn_outp = ep->me_nextp;
			kmem_free(ep, sizeof (struct mux_edge));
			return;
		}
		pep = ep;
		ep = ep->me_nextp;
	}
	ASSERT(0);	/* should not reach here */
}
/*
 * Translate the device flags (from conf.h) to the corresponding
 * qflag and sq_flag (type) values.
 */
void
devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
    uint32_t *sqtypep)
{
	uint32_t qflag = 0;
	uint32_t sqtype = 0;

	if (devflag & _D_OLD)
		goto bad;

	/* Inner perimeter presence and scope */
	switch (devflag & D_MTINNER_MASK) {
	case D_MP:
		qflag |= QMTSAFE;
		sqtype |= SQ_CI;
		break;
	case D_MTPERQ|D_MP:
		qflag |= QPERQ;
		break;
	case D_MTQPAIR|D_MP:
		qflag |= QPAIR;
		break;
	case D_MTPERMOD|D_MP:
		qflag |= QPERMOD;
		break;
	default:
		goto bad;
	}

	/* Outer perimeter */
	if (devflag & D_MTOUTPERIM) {
		switch (devflag & D_MTINNER_MASK) {
		case D_MP:
		case D_MTPERQ|D_MP:
		case D_MTQPAIR|D_MP:
			break;
		default:
			goto bad;
		}
		qflag |= QMTOUTPERIM;
	}

	/* Inner perimeter modifiers */
	if (devflag & D_MTINNER_MOD) {
		switch (devflag & D_MTINNER_MASK) {
		case D_MP:
			goto bad;
		default:
			break;
		}
		if (devflag & D_MTPUTSHARED)
			sqtype |= SQ_CIPUT;
		if (devflag & _D_MTOCSHARED) {
			/*
			 * The code in putnext assumes that it has the
			 * highest concurrency by not checking sq_count.
			 * Thus _D_MTOCSHARED can only be supported when
			 * D_MTPUTSHARED is set.
			 */
			if (!(devflag & D_MTPUTSHARED))
				goto bad;
			sqtype |= SQ_CIOC;
		}
		if (devflag & _D_MTCBSHARED) {
			/*
			 * The code in putnext assumes that it has the
			 * highest concurrency by not checking sq_count.
			 * Thus _D_MTCBSHARED can only be supported when
			 * D_MTPUTSHARED is set.
			 */
			if (!(devflag & D_MTPUTSHARED))
				goto bad;
			sqtype |= SQ_CICB;
		}
		if (devflag & _D_MTSVCSHARED) {
			/*
			 * The code in putnext assumes that it has the
			 * highest concurrency by not checking sq_count.
			 * Thus _D_MTSVCSHARED can only be supported when
			 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
			 * supported only for QPERMOD.
			 */
			if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
				goto bad;
			sqtype |= SQ_CISVC;
		}
	}

	/* Default outer perimeter concurrency */
	sqtype |= SQ_CO;

	/* Outer perimeter modifiers */
	if (devflag & D_MTOCEXCL) {
		if (!(devflag & D_MTOUTPERIM)) {
			/* No outer perimeter */
			goto bad;
		}
		sqtype &= ~SQ_COOC;
	}

	/* Synchronous Streams extended qinit structure */
	if (devflag & D_SYNCSTR)
		qflag |= QSYNCSTR;

	/*
	 * Private flag used by a transport module to indicate
	 * to sockfs that it supports direct-access mode without
	 * having to go through STREAMS.
	 */
	if (devflag & _D_DIRECT) {
		/* Reject unless the module is fully-MT (no perimeter) */
		if ((qflag & QMT_TYPEMASK) != QMTSAFE)
			goto bad;
		qflag |= _QDIRECT;
	}

	*qflagp = qflag;
	*sqtypep = sqtype;
	return;

bad:
	cmn_err(CE_WARN,
	    "stropen: bad MT flags (0x%x) in driver '%s'",
	    (int)(qflag & D_MTSAFETY_MASK),
	    stp->st_rdinit->qi_minfo->mi_idname);

	*qflagp = QUNSAFE;
	*sqtypep = SQ_CI|SQ_CO;
}
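
/*
 * For example, a module declaring D_MP|D_MTPERMOD|D_MTPUTSHARED comes out
 * of the translation above with QPERMOD set in *qflagp and SQ_CIPUT (plus
 * the default SQ_CO) set in *sqtypep; a hedged sketch of such a call:
 *
 *	uint32_t qflag, sqtype;
 *
 *	devflg_to_qflag(str, D_MP|D_MTPERMOD|D_MTPUTSHARED, &qflag, &sqtype);
 *	ASSERT(qflag & QPERMOD);
 *	ASSERT(sqtype & SQ_CIPUT);
 */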
/*
 * Set the interface values for a pair of queues (qinit structure,
 * packet sizes, water marks).
 * setq assumes that the caller does not have a claim (entersq or claimq)
 * on the queue.
 */
void
setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
    perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
{
	queue_t *wq;
	syncq_t	*sq, *outer;

	ASSERT(rq->q_flag & QREADR);
	ASSERT((qflag & QMT_TYPEMASK) != 0);
	IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);

	wq = _WR(rq);
	rq->q_qinfo = rinit;
	rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
	rq->q_lowat = rinit->qi_minfo->mi_lowat;
	rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
	rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
	wq->q_qinfo = winit;
	wq->q_hiwat = winit->qi_minfo->mi_hiwat;
	wq->q_lowat = winit->qi_minfo->mi_lowat;
	wq->q_minpsz = winit->qi_minfo->mi_minpsz;
	wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;

	/* Remove old syncqs */
	sq = rq->q_syncq;
	outer = sq->sq_outer;
	if (outer != NULL) {
		ASSERT(wq->q_syncq->sq_outer == outer);
		outer_remove(outer, rq->q_syncq);
		if (wq->q_syncq != rq->q_syncq)
			outer_remove(outer, wq->q_syncq);
	}
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);

	if (sq != SQ(rq)) {
		if (!(rq->q_flag & QPERMOD))
			free_syncq(sq);
		if (wq->q_syncq == rq->q_syncq)
			wq->q_syncq = NULL;
		rq->q_syncq = NULL;
	}
	if (wq->q_syncq != NULL && wq->q_syncq != sq &&
	    wq->q_syncq != SQ(rq)) {
		free_syncq(wq->q_syncq);
		wq->q_syncq = NULL;
	}
	ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
	    rq->q_syncq->sq_tail == NULL));
	ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
	    wq->q_syncq->sq_tail == NULL));

	if (!(rq->q_flag & QPERMOD) &&
	    rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
		ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
		    rq->q_syncq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
		rq->q_syncq->sq_ciputctrl = NULL;
		rq->q_syncq->sq_nciputctrl = 0;
	}

	if (!(wq->q_flag & QPERMOD) &&
	    wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
		ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
		    wq->q_syncq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
		wq->q_syncq->sq_ciputctrl = NULL;
		wq->q_syncq->sq_nciputctrl = 0;
	}

	sq = SQ(rq);
	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);

	/*
	 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
	 * bits in sq_flag based on the sqtype.
	 */
	ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);

	rq->q_syncq = wq->q_syncq = sq;
	sq->sq_type = sqtype;
	sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);

	/*
	 * We are making sq_svcflags zero,
	 * resetting SQ_DISABLED in case it was set by
	 * wait_svc() in the munlink path.
	 */
	ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
	sq->sq_svcflags = 0;

	/*
	 * We need to acquire the lock here for the mlink and munlink case,
	 * where canputnext, backenable, etc can access the q_flag.
	 */
	if (lock_needed) {
		mutex_enter(QLOCK(rq));
		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
		mutex_exit(QLOCK(rq));
		mutex_enter(QLOCK(wq));
		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
		mutex_exit(QLOCK(wq));
	} else {
		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
	}

	if (qflag & QPERQ) {
		/* Allocate a separate syncq for the write side */
		sq = new_syncq();
		sq->sq_type = rq->q_syncq->sq_type;
		sq->sq_flags = rq->q_syncq->sq_flags;
		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
		    sq->sq_oprev == NULL);
		wq->q_syncq = sq;
	}
	if (qflag & QPERMOD) {
		sq = dmp->dm_sq;

		/*
		 * Assert that we do have an inner perimeter syncq and that it
		 * does not have an outer perimeter associated with it.
		 */
		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
		    sq->sq_oprev == NULL);
		rq->q_syncq = wq->q_syncq = sq;
	}
	if (qflag & QMTOUTPERIM) {
		outer = dmp->dm_sq;

		ASSERT(outer->sq_outer == NULL);
		outer_insert(outer, rq->q_syncq);
		if (wq->q_syncq != rq->q_syncq)
			outer_insert(outer, wq->q_syncq);
	}
	ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
	    (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
	ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
	    (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
	ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));

	/*
	 * Initialize struio() types.
	 */
	rq->q_struiot =
	    (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
	wq->q_struiot =
	    (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
}
perdm_t *
hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
{
	syncq_t	*sq;
	perdm_t	**pp;
	perdm_t	*p;
	perdm_t	*dmp;

	ASSERT(str != NULL);
	ASSERT(qflag & (QPERMOD | QMTOUTPERIM));

	rw_enter(&perdm_rwlock, RW_READER);
	for (p = perdm_list; p != NULL; p = p->dm_next) {
		if (p->dm_str == str) {	/* found one */
			atomic_inc_32(&(p->dm_ref));
			rw_exit(&perdm_rwlock);
			return (p);
		}
	}
	rw_exit(&perdm_rwlock);

	sq = new_syncq();
	if (qflag & QPERMOD) {
		sq->sq_type = sqtype | SQ_PERMOD;
		sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
	} else {
		ASSERT(qflag & QMTOUTPERIM);
		sq->sq_onext = sq->sq_oprev = sq;
	}

	dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
	dmp->dm_sq = sq;
	dmp->dm_str = str;
	dmp->dm_ref = 1;
	dmp->dm_next = NULL;

	rw_enter(&perdm_rwlock, RW_WRITER);
	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
		if (p->dm_str == str) {	/* already present */
			p->dm_ref++;
			rw_exit(&perdm_rwlock);
			free_syncq(sq);
			kmem_free(dmp, sizeof (perdm_t));
			return (p);
		}
	}

	*pp = dmp;
	rw_exit(&perdm_rwlock);
	return (dmp);
}
void
rele_dm(perdm_t *dmp)
{
	perdm_t **pp;
	perdm_t *p;

	rw_enter(&perdm_rwlock, RW_WRITER);
	ASSERT(dmp->dm_ref > 0);

	if (--dmp->dm_ref > 0) {
		rw_exit(&perdm_rwlock);
		return;
	}

	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
		if (p == dmp)
			break;
	ASSERT(p == dmp);
	*pp = p->dm_next;
	rw_exit(&perdm_rwlock);

	/*
	 * Wait for any background processing that relies on the
	 * syncq to complete before it is freed.
	 */
	wait_sq_svc(p->dm_sq);
	free_syncq(p->dm_sq);
	kmem_free(p, sizeof (perdm_t));
}
/*
 * Make a protocol message given control and data buffers.
 * n.b., this can block; be careful of what locks you hold when calling it.
 *
 * If sd_maxblk is less than *iosize this routine can fail part way through
 * (due to an allocation failure). In this case on return *iosize will contain
 * the amount that was consumed. Otherwise *iosize will not be modified
 * i.e. it will contain the amount that was consumed.
 */
int
strmakemsg(
	struct strbuf *mctl,
	ssize_t *iosize,
	struct uio *uiop,
	stdata_t *stp,
	int32_t flag,
	mblk_t **mpp)
{
	mblk_t *mpctl = NULL;
	mblk_t *mpdata = NULL;
	int error;

	ASSERT(uiop != NULL);

	*mpp = NULL;
	/* Create control part, if any */
	if ((mctl != NULL) && (mctl->len >= 0)) {
		error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
		if (error)
			return (error);
	}
	/* Create data part, if any */
	if (*iosize >= 0) {
		error = strmakedata(iosize, uiop, stp, flag, &mpdata);
		if (error) {
			freemsg(mpctl);
			return (error);
		}
	}
	if (mpctl != NULL) {
		if (mpdata != NULL)
			linkb(mpctl, mpdata);
		*mpp = mpctl;
	} else {
		*mpp = mpdata;
	}
	return (0);
}
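
/*
 * Hedged usage sketch (hypothetical caller): the stream head builds a
 * message from a user-supplied control buffer and uio and sends it down:
 *
 *	mblk_t *mp;
 *	ssize_t iosize = uiop->uio_resid;
 *
 *	error = strmakemsg(&ctlbuf, &iosize, uiop, stp, flag, &mp);
 *	if (error == 0 && mp != NULL)
 *		putnext(stp->sd_wrq, mp);
 *
 * ctlbuf, uiop, stp, flag and error are assumed to be set up by the caller.
 */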
/*
 * Make the control part of a protocol message given a control buffer.
 * n.b., this can block; be careful of what locks you hold when calling it.
 */
int
strmakectl(
	struct strbuf *mctl,
	int32_t flag,
	int32_t fflag,
	mblk_t **mpp)
{
	mblk_t *bp = NULL;
	unsigned char msgtype;
	int error = 0;
	cred_t *cr = CRED();

	/* We do not support interrupt threads using the stream head to send */
	ASSERT(cr != NULL);

	*mpp = NULL;
	/*
	 * Create control part of message, if any.
	 */
	if ((mctl != NULL) && (mctl->len >= 0)) {
		caddr_t base;
		int ctlcount;
		int allocsz;

		if (flag & RS_HIPRI)
			msgtype = M_PCPROTO;
		else
			msgtype = M_PROTO;

		ctlcount = mctl->len;
		base = mctl->buf;

		/*
		 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
		 * blocks by increasing the size to something more usable.
		 */
		allocsz = MAX(ctlcount, 64);

		/*
		 * Range checking has already been done; simply try
		 * to allocate a message block for the ctl part.
		 */
		while ((bp = allocb_cred(allocsz, cr,
		    curproc->p_pid)) == NULL) {
			if (fflag & (FNDELAY|FNONBLOCK))
				return (EAGAIN);
			if (error = strwaitbuf(allocsz, BPRI_MED))
				return (error);
		}

		bp->b_datap->db_type = msgtype;
		if (copyin(base, bp->b_wptr, ctlcount)) {
			freeb(bp);
			return (EFAULT);
		}
		bp->b_wptr += ctlcount;
	}
	*mpp = bp;
	return (0);
}
/*
 * Make a protocol message given data buffers.
 * n.b., this can block; be careful of what locks you hold when calling it.
 *
 * If sd_maxblk is less than *iosize this routine can fail part way through
 * (due to an allocation failure). In this case on return *iosize will contain
 * the amount that was consumed. Otherwise *iosize will not be modified
 * i.e. it will contain the amount that was consumed.
 */
int
strmakedata(
	ssize_t *iosize,
	struct uio *uiop,
	stdata_t *stp,
	int32_t flag,
	mblk_t **mpp)
{
	mblk_t *mp = NULL;
	mblk_t *bp;
	int wroff = (int)stp->sd_wroff;
	int tail_len = (int)stp->sd_tail;
	int extra = wroff + tail_len;
	int error = 0;
	ssize_t maxblk;
	ssize_t count = *iosize;
	cred_t *cr;

	*mpp = NULL;
	if (count < 0)
		return (0);

	/* We do not support interrupt threads using the stream head to send */
	cr = CRED();
	ASSERT(cr != NULL);

	maxblk = stp->sd_maxblk;
	if (maxblk == INFPSZ)
		maxblk = count;

	/*
	 * Create data part of message, if any.
	 */
	do {
		ssize_t size;
		dblk_t  *dp;

		ASSERT(uiop);

		size = MIN(count, maxblk);

		while ((bp = allocb_cred(size + extra, cr,
		    curproc->p_pid)) == NULL) {
			error = EAGAIN;
			if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
			    (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
				if (count == *iosize) {
					freemsg(mp);
					return (error);
				} else {
					*iosize -= count;
					*mpp = mp;
					return (0);
				}
			}
		}
		dp = bp->b_datap;
		dp->db_cpid = curproc->p_pid;
		ASSERT(wroff <= dp->db_lim - bp->b_wptr);
		bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;

		if (flag & STRUIO_POSTPONE) {
			/*
			 * Setup the stream uio portion of the
			 * dblk for subsequent use by struioget().
			 */
			dp->db_struioflag = STRUIO_SPEC;
			dp->db_cksumstart = 0;
			dp->db_cksumstuff = 0;
			dp->db_cksumend = size;
			*(long long *)dp->db_struioun.data = 0ll;
			bp->b_wptr += size;
		} else {
			if (stp->sd_copyflag & STRCOPYCACHED)
				uiop->uio_extflg |= UIO_COPY_CACHED;

			if (size != 0) {
				error = uiomove(bp->b_wptr, size, UIO_WRITE,
				    uiop);
				if (error != 0) {
					freeb(bp);
					freemsg(mp);
					return (error);
				}
			}
			bp->b_wptr += size;

			if (stp->sd_wputdatafunc != NULL) {
				mblk_t *newbp;

				newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
				    bp, NULL, NULL, NULL, NULL);
				if (newbp == NULL) {
					freeb(bp);
					freemsg(mp);
					return (ECOMM);
				}
				bp = newbp;
			}
		}

		count -= size;

		if (mp == NULL)
			mp = bp;
		else
			linkb(mp, bp);
	} while (count > 0);

	*mpp = mp;
	return (0);
}
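
/*
 * Worked example of the chunking above: with sd_maxblk == 1024 and a
 * 2500-byte uio, the loop builds a three-block chain of 1024, 1024 and
 * 452 bytes (plus sd_wroff/sd_tail slop per block). If the third
 * allocation fails part way through, *iosize is reduced to the 2048
 * bytes already consumed and the partial chain is returned with error 0.
 */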
/*
 * Wait for a buffer to become available. Return non-zero errno
 * if not able to wait, 0 if buffer is probably there.
 */
int
strwaitbuf(size_t size, int pri)
{
	bufcall_id_t id;

	mutex_enter(&bcall_monitor);
	if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
	    &ttoproc(curthread)->p_flag_cv)) == 0) {
		mutex_exit(&bcall_monitor);
		return (ENOSR);
	}
	if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
		unbufcall(id);
		mutex_exit(&bcall_monitor);
		return (EINTR);
	}
	unbufcall(id);
	mutex_exit(&bcall_monitor);
	return (0);
}
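
/*
 * The intended caller pattern (as used by strmakectl() and strmakedata()
 * above) retries the allocation around strwaitbuf() until either the
 * allocation succeeds or the wait fails:
 *
 *	while ((bp = allocb_cred(allocsz, cr, curproc->p_pid)) == NULL) {
 *		if (error = strwaitbuf(allocsz, BPRI_MED))
 *			return (error);
 *	}
 */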
/*
 * This function waits for a read or write event to happen on a stream.
 * fmode can specify FNDELAY and/or FNONBLOCK.
 * The timeout is in ms with -1 meaning infinite.
 * The flag values work as follows:
 *	READWAIT	Check for read side errors, send M_READ
 *	GETWAIT		Check for read side errors, no M_READ
 *	WRITEWAIT	Check for write side errors.
 *	NOINTR		Do not return error if nonblocking or timeout.
 *	STR_NOERROR	Ignore all errors except STPLEX.
 *	STR_NOSIG	Ignore/hold signals during the duration of the call.
 *	STR_PEEK	Pass through the strgeterr().
 */
int
strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
    int *done)
{
	int slpflg, errs;
	int error;
	kcondvar_t *sleepon;
	mblk_t *mp;
	ssize_t *rd_count;
	clock_t rval;

	ASSERT(MUTEX_HELD(&stp->sd_lock));
	if ((flag & READWAIT) || (flag & GETWAIT)) {
		slpflg = RSLEEP;
		sleepon = &_RD(stp->sd_wrq)->q_wait;
		errs = STRDERR|STPLEX;
	} else {
		slpflg = WSLEEP;
		sleepon = &stp->sd_wrq->q_wait;
		errs = STWRERR|STRHUP|STPLEX;
	}
	if (flag & STR_NOERROR)
		errs = STPLEX;

	if (stp->sd_wakeq & slpflg) {
		/*
		 * A strwakeq() is pending, no need to sleep.
		 */
		stp->sd_wakeq &= ~slpflg;
		*done = 0;
		return (0);
	}

	if (stp->sd_flag & errs) {
		/*
		 * Check for errors before going to sleep since the
		 * caller might not have checked this while holding
		 * sd_lock.
		 */
		error = strgeterr(stp, errs, (flag & STR_PEEK));
		if (error != 0) {
			*done = 1;
			return (error);
		}
	}

	/*
	 * If any module downstream has requested read notification
	 * by setting SNDMREAD flag using M_SETOPTS, send a message
	 * down requesting the desired read amount.
	 */
	if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
		mutex_exit(&stp->sd_lock);
		if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
		    (flag & STR_NOSIG), &error))) {
			mutex_enter(&stp->sd_lock);
			*done = 1;
			return (error);
		}
		mp->b_datap->db_type = M_READ;
		rd_count = (ssize_t *)mp->b_wptr;
		*rd_count = count;
		mp->b_wptr += sizeof (ssize_t);
		/*
		 * Send the number of bytes requested by the
		 * read as the argument to M_READ.
		 */
		stream_willservice(stp);
		putnext(stp->sd_wrq, mp);
		stream_runservice(stp);
		mutex_enter(&stp->sd_lock);

		/*
		 * If any data arrived due to inline processing
		 * of putnext(), don't sleep.
		 */
		if (_RD(stp->sd_wrq)->q_first != NULL) {
			*done = 0;
			return (0);
		}
	}

	if (fmode & (FNDELAY|FNONBLOCK)) {
		if (!(flag & NOINTR))
			error = EAGAIN;
		else
			error = 0;
		*done = 1;
		return (error);
	}

	stp->sd_flag |= slpflg;
	TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
	    "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
	    stp, flag, count, fmode, done);

	rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
	if (rval > 0) {
		/* EMPTY */
		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
		    "strwaitq awakes(2):%X, %X, %X, %X, %X",
		    stp, flag, count, fmode, done);
	} else if (rval == 0) {
		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
		    "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
		    stp, flag, count, fmode, done);
		stp->sd_flag &= ~slpflg;
		cv_broadcast(sleepon);
		if (!(flag & NOINTR))
			error = EINTR;
		else
			error = 0;
		*done = 1;
		return (error);
	} else {
		/* timeout */
		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
		    "strwaitq timeout:%p, %X, %lX, %X, %p",
		    stp, flag, count, fmode, done);
		*done = 1;
		if (!(flag & NOINTR))
			return (ETIME);
		else
			return (0);
	}
	/*
	 * If the caller implements delayed errors (i.e. queued after data)
	 * we can not check for errors here since data as well as an
	 * error might have arrived at the stream head. We return to
	 * have the caller check the read queue before checking for errors.
	 */
	if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
		error = strgeterr(stp, errs, (flag & STR_PEEK));
		if (error != 0) {
			*done = 1;
			return (error);
		}
	}
	*done = 0;
	return (0);
}
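
/*
 * Hedged sketch of a typical caller: strwaitq() is called with sd_lock
 * held, inside a loop that re-checks for data after every wakeup:
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (_RD(stp->sd_wrq)->q_first == NULL) {
 *		error = strwaitq(stp, READWAIT, (ssize_t)0, fmode, -1, &done);
 *		if (error != 0 || done)
 *			break;
 *	}
 *	mutex_exit(&stp->sd_lock);
 */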
/*
 * Perform job control discipline access checks.
 * Return 0 for success and the errno for failure.
 */

#define	cantsend(p, t, sig) \
	(sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))

int
straccess(struct stdata *stp, enum jcaccess mode)
{
	extern kcondvar_t lbolt_cv;	/* XXX: should be in a header file */
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	sess_t *sp;

	ASSERT(mutex_owned(&stp->sd_lock));

	if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
		return (0);

	mutex_enter(&p->p_lock);		/* protects p_pgidp */

	for (;;) {
		mutex_enter(&p->p_splock);	/* protects p->p_sessp */
		sp = p->p_sessp;
		mutex_enter(&sp->s_lock);	/* protects sp->* */

		/*
		 * If this is not the calling process's controlling terminal
		 * or if the calling process is already in the foreground
		 * then allow access.
		 */
		if (sp->s_dev != stp->sd_vnode->v_rdev ||
		    p->p_pgidp == stp->sd_pgidp) {
			mutex_exit(&sp->s_lock);
			mutex_exit(&p->p_splock);
			mutex_exit(&p->p_lock);
			return (0);
		}

		/*
		 * Check to see if controlling terminal has been deallocated.
		 */
		if (sp->s_vp == NULL) {
			if (!cantsend(p, t, SIGHUP))
				sigtoproc(p, t, SIGHUP);
			mutex_exit(&sp->s_lock);
			mutex_exit(&p->p_splock);
			mutex_exit(&p->p_lock);
			return (EIO);
		}

		mutex_exit(&sp->s_lock);
		mutex_exit(&p->p_splock);

		if (mode == JCGETP) {
			mutex_exit(&p->p_lock);
			return (0);
		}

		if (mode == JCREAD) {
			if (p->p_detached || cantsend(p, t, SIGTTIN)) {
				mutex_exit(&p->p_lock);
				return (EIO);
			}
			mutex_exit(&p->p_lock);
			mutex_exit(&stp->sd_lock);
			pgsignal(p->p_pgidp, SIGTTIN);
			mutex_enter(&stp->sd_lock);
			mutex_enter(&p->p_lock);
		} else {			/* mode == JCWRITE or JCSETP */
			if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
			    cantsend(p, t, SIGTTOU)) {
				mutex_exit(&p->p_lock);
				return (0);
			}
			if (p->p_detached) {
				mutex_exit(&p->p_lock);
				return (EIO);
			}
			mutex_exit(&p->p_lock);
			mutex_exit(&stp->sd_lock);
			pgsignal(p->p_pgidp, SIGTTOU);
			mutex_enter(&stp->sd_lock);
			mutex_enter(&p->p_lock);
		}

		/*
		 * We call cv_wait_sig_swap() to cause the appropriate
		 * action for the jobcontrol signal to take place.
		 * If the signal is being caught, we will take the
		 * EINTR error return. Otherwise, the default action
		 * of causing the process to stop will take place.
		 * In this case, we rely on the periodic cv_broadcast() on
		 * &lbolt_cv to wake us up to loop around and test again.
		 * We can't get here if the signal is ignored or
		 * if the current thread is blocking the signal.
		 */
		mutex_exit(&stp->sd_lock);
		if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
			mutex_exit(&p->p_lock);
			mutex_enter(&stp->sd_lock);
			return (EINTR);
		}
		mutex_exit(&p->p_lock);
		mutex_enter(&stp->sd_lock);
		mutex_enter(&p->p_lock);
	}
}
/*
 * Return size of message of block type (bp->b_datap->db_type)
 */
size_t
xmsgsize(mblk_t *bp)
{
	unsigned char type;
	size_t count = 0;

	type = bp->b_datap->db_type;

	for (; bp; bp = bp->b_cont) {
		if (type != bp->b_datap->db_type)
			break;
		ASSERT(bp->b_wptr >= bp->b_rptr);
		count += bp->b_wptr - bp->b_rptr;
	}
	return (count);
}
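
/*
 * For example, a chain of M_PROTO (8 bytes) -> M_PROTO (12 bytes) ->
 * M_DATA (100 bytes) yields xmsgsize() == 20: counting stops at the first
 * block whose type differs from that of the leading block.
 */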
/*
 * Allocate a stream head.
 */
struct stdata *
shalloc(queue_t *qp)
{
	stdata_t *stp;

	stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);

	stp->sd_wrq = _WR(qp);
	stp->sd_strtab = NULL;
	stp->sd_iocblk = NULL;
	stp->sd_mate = NULL;
	stp->sd_freezer = NULL;
	stp->sd_refcnt = 0;
	stp->sd_wakeq = 0;
	stp->sd_anchor = 0;
	stp->sd_struiowrq = NULL;
	stp->sd_struiordq = NULL;
	stp->sd_struiodnak = 0;
	stp->sd_struionak = NULL;
	stp->sd_t_audit_data = NULL;
	stp->sd_rput_opt = 0;
	stp->sd_wput_opt = 0;
	stp->sd_read_opt = 0;
	stp->sd_rprotofunc = strrput_proto;
	stp->sd_rmiscfunc = strrput_misc;
	stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
	stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
	stp->sd_ciputctrl = NULL;
	stp->sd_nciputctrl = 0;
	stp->sd_qhead = NULL;
	stp->sd_qtail = NULL;
	stp->sd_servid = NULL;
	stp->sd_nqueues = 0;
	stp->sd_svcflags = 0;
	stp->sd_copyflag = 0;

	return (stp);
}
/*
 * Free a stream head.
 */
void
shfree(stdata_t *stp)
{
	ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));

	stp->sd_wrq = NULL;

	mutex_enter(&stp->sd_qlock);
	while (stp->sd_svcflags & STRS_SCHEDULED) {
		STRSTAT(strwaits);
		cv_wait(&stp->sd_qcv, &stp->sd_qlock);
	}
	mutex_exit(&stp->sd_qlock);

	if (stp->sd_ciputctrl != NULL) {
		ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
		    stp->sd_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
		stp->sd_ciputctrl = NULL;
		stp->sd_nciputctrl = 0;
	}
	ASSERT(stp->sd_qhead == NULL);
	ASSERT(stp->sd_qtail == NULL);
	ASSERT(stp->sd_nqueues == 0);
	kmem_cache_free(stream_head_cache, stp);
}
/*
 * Allocate a pair of queues and a syncq for the pair
 */
queue_t *
allocq(void)
{
	queinfo_t *qip;
	queue_t *qp, *wqp;
	syncq_t	*sq;

	qip = kmem_cache_alloc(queue_cache, KM_SLEEP);

	qp = &qip->qu_rqueue;
	wqp = &qip->qu_wqueue;
	sq = &qip->qu_syncq;

	qp->q_last	= NULL;
	qp->q_next	= NULL;
	qp->q_ptr	= NULL;
	qp->q_flag	= QUSE | QREADR;
	qp->q_bandp	= NULL;
	qp->q_stream	= NULL;
	qp->q_syncq	= sq;
	qp->q_nband	= 0;
	qp->q_nfsrv	= NULL;
	qp->q_draining	= 0;
	qp->q_syncqmsgs	= 0;
	qp->q_spri	= 0;
	qp->q_qtstamp	= 0;
	qp->q_sqtstamp	= 0;
	qp->q_fp	= NULL;

	wqp->q_last	= NULL;
	wqp->q_next	= NULL;
	wqp->q_ptr	= NULL;
	wqp->q_flag	= QUSE;
	wqp->q_bandp	= NULL;
	wqp->q_stream	= NULL;
	wqp->q_syncq	= sq;
	wqp->q_nband	= 0;
	wqp->q_nfsrv	= NULL;
	wqp->q_draining	= 0;
	wqp->q_syncqmsgs = 0;
	wqp->q_qtstamp	= 0;
	wqp->q_sqtstamp	= 0;
	wqp->q_spri	= 0;

	sq->sq_count	= 0;
	sq->sq_rmqcount	= 0;
	sq->sq_flags	= 0;
	sq->sq_type	= 0;
	sq->sq_callbflags = 0;
	sq->sq_cancelid	= 0;
	sq->sq_ciputctrl = NULL;
	sq->sq_nciputctrl = 0;
	sq->sq_needexcl = 0;
	sq->sq_svcflags = 0;
	sq->sq_servid	= NULL;
	sq->sq_nqueues	= 1;

	return (qp);
}
/*
 * Free a pair of queues and the "attached" syncq.
 * Discard any messages left on the syncq(s), remove the syncq(s) from the
 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
 */
void
freeq(queue_t *qp)
{
	qband_t *qbp, *nqbp;
	syncq_t *sq, *outer;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * If a previously dispatched taskq job is scheduled to run
	 * sync_service() or a service routine is scheduled for the
	 * queues about to be freed, wait here until all service is
	 * done on the queue and all associated queues and syncqs.
	 */
	wait_svc(qp);

	(void) flush_syncq(qp->q_syncq, qp);
	(void) flush_syncq(wqp->q_syncq, wqp);
	ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);

	/*
	 * Flush the queues before q_next is set to NULL This is needed
	 * in order to backenable any downstream queue before we go away.
	 * Note: we are already removed from the stream so that the
	 * backenabling will not cause any messages to be delivered to our
	 * put procedures.
	 */
	flushq(qp, FLUSHALL);
	flushq(wqp, FLUSHALL);

	/* Tidy up - removeq only does a half-remove from stream */
	qp->q_next = wqp->q_next = NULL;
	ASSERT(!(qp->q_flag & QENAB));
	ASSERT(!(wqp->q_flag & QENAB));

	outer = qp->q_syncq->sq_outer;
	if (outer != NULL) {
		outer_remove(outer, qp->q_syncq);
		if (wqp->q_syncq != qp->q_syncq)
			outer_remove(outer, wqp->q_syncq);
	}
	/*
	 * Free any syncqs that are outside what allocq returned.
	 */
	if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
		free_syncq(qp->q_syncq);
	if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
		free_syncq(wqp->q_syncq);

	ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
	ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
	sq = SQ(qp);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_needexcl == 0);

	if (sq->sq_ciputctrl != NULL) {
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
		    sq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
		sq->sq_ciputctrl = NULL;
		sq->sq_nciputctrl = 0;
	}

	ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
	ASSERT(qp->q_count == 0 && wqp->q_count == 0);
	ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);

	qp->q_flag &= ~QUSE;
	wqp->q_flag &= ~QUSE;

	/* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
	/* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */

	qbp = qp->q_bandp;
	while (qbp) {
		nqbp = qbp->qb_next;
		freeband(qbp);
		qbp = nqbp;
	}
	qbp = wqp->q_bandp;
	while (qbp) {
		nqbp = qbp->qb_next;
		freeband(qbp);
		qbp = nqbp;
	}
	kmem_cache_free(queue_cache, qp);
}
/*
 * Allocate a qband structure.
 */
qband_t *
allocband(void)
{
	qband_t *qbp;

	qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
	if (qbp == NULL)
		return (NULL);

	qbp->qb_next	= NULL;
	qbp->qb_count	= 0;
	qbp->qb_mblkcnt	= 0;
	qbp->qb_first	= NULL;
	qbp->qb_last	= NULL;
	qbp->qb_flag	= 0;

	return (qbp);
}
/*
 * Free a qband structure.
 */
void
freeband(qband_t *qbp)
{
	kmem_cache_free(qband_cache, qbp);
}
/*
 * Just like putnextctl(9F), except that allocb_wait() is used.
 *
 * Consolidation Private, and of course only callable from the stream head or
 * routines that may block.
 */
int
putnextctl_wait(queue_t *q, int type)
{
	mblk_t *bp;
	int error;

	if ((datamsg(type) && (type != M_DELAY)) ||
	    (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
		return (0);

	bp->b_datap->db_type = (unsigned char)type;
	putnext(q, bp);
	return (1);
}
/*
 * Run any possible bufcalls.
 */
void
runbufcalls(void)
{
	strbufcall_t *bcp;

	mutex_enter(&bcall_monitor);
	mutex_enter(&strbcall_lock);

	if (strbcalls.bc_head) {
		size_t count;
		int nevent;

		/*
		 * count how many events are on the list
		 * now so we can check to avoid looping
		 * in low memory situations
		 */
		nevent = 0;
		for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
			nevent++;

		/*
		 * get estimate of available memory from kmem_avail().
		 * awake all bufcall functions waiting for
		 * memory whose request could be satisfied
		 * by 'count' memory and let 'em fight for it.
		 */
		count = kmem_avail();
		while ((bcp = strbcalls.bc_head) != NULL && nevent) {
			STRSTAT(bufcalls);
			--nevent;
			if (bcp->bc_size <= count) {
				bcp->bc_executor = curthread;
				mutex_exit(&strbcall_lock);
				(*bcp->bc_func)(bcp->bc_arg);
				mutex_enter(&strbcall_lock);
				bcp->bc_executor = NULL;
				cv_broadcast(&bcall_cv);
				strbcalls.bc_head = bcp->bc_next;
				kmem_free(bcp, sizeof (strbufcall_t));
			} else {
				/*
				 * too big, try again later - note
				 * that nevent was decremented above
				 * so we won't retry this one on this
				 * iteration of the loop
				 */
				if (bcp->bc_next != NULL) {
					strbcalls.bc_head = bcp->bc_next;
					bcp->bc_next = NULL;
					strbcalls.bc_tail->bc_next = bcp;
					strbcalls.bc_tail = bcp;
				}
			}
		}
		if (strbcalls.bc_head == NULL)
			strbcalls.bc_tail = NULL;
	}

	mutex_exit(&strbcall_lock);
	mutex_exit(&bcall_monitor);
}
/*
 * Actually run queue's service routine.
 */
static void
runservice(queue_t *q)
{
	qband_t *qbp;

	ASSERT(q->q_qinfo->qi_srvp);
again:
	entersq(q->q_syncq, SQ_SVC);
	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
	    "runservice starts:%p", q);

	if (!(q->q_flag & QWCLOSE))
		(*q->q_qinfo->qi_srvp)(q);

	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
	    "runservice ends:(%p)", q);

	leavesq(q->q_syncq, SQ_SVC);

	mutex_enter(QLOCK(q));
	if (q->q_flag & QENAB) {
		q->q_flag &= ~QENAB;
		mutex_exit(QLOCK(q));
		goto again;
	}
	q->q_flag &= ~QINSERVICE;
	q->q_flag &= ~QBACK;
	for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
		qbp->qb_flag &= ~QB_BACK;
	/*
	 * Wakeup thread waiting for the service procedure
	 * to be run (strclose and qdetach).
	 */
	cv_broadcast(&q->q_wait);

	mutex_exit(QLOCK(q));
}
/*
 * Background processing of bufcalls.
 */
static void
streams_bufcall_service(void)
{
	callb_cpr_t	cprinfo;

	CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
	    "streams_bufcall_service");

	mutex_enter(&strbcall_lock);

	for (;;) {
		if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
			mutex_exit(&strbcall_lock);
			runbufcalls();
			mutex_enter(&strbcall_lock);
		}
		if (strbcalls.bc_head != NULL) {
			STRSTAT(bcwaits);
			/* Wait for memory to become available */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			(void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
			    SEC_TO_TICK(60), TR_CLOCK_TICK);
			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
		}

		/* Wait for new work to arrive */
		if (strbcalls.bc_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&strbcall_cv, &strbcall_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
		}
	}
}
/*
 * Background processing of streams background tasks which failed
 * taskq_dispatch.
 */
static void
streams_qbkgrnd_service(void)
{
	callb_cpr_t cprinfo;
	queue_t *q;

	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
	    "streams_bkgrnd_service");

	mutex_enter(&service_queue);

	for (;;) {
		/*
		 * Wait for work to arrive.
		 */
		while ((freebs_list == NULL) && (qhead == NULL)) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&services_to_run, &service_queue);
			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
		}
		/*
		 * Handle all pending freebs requests to free memory.
		 */
		while (freebs_list != NULL) {
			mblk_t *mp = freebs_list;
			freebs_list = mp->b_next;
			mutex_exit(&service_queue);
			mblk_free(mp);
			mutex_enter(&service_queue);
		}
		/*
		 * Run pending queues.
		 */
		while (qhead != NULL) {
			DQ(q, qhead, qtail, q_link);
			ASSERT(q != NULL);
			mutex_exit(&service_queue);
			queue_service(q);
			mutex_enter(&service_queue);
		}
		ASSERT(qhead == NULL && qtail == NULL);
	}
}
/*
 * Background processing of streams background tasks which failed
 * taskq_dispatch.
 */
static void
streams_sqbkgrnd_service(void)
{
	callb_cpr_t cprinfo;
	syncq_t *sq;

	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
	    "streams_sqbkgrnd_service");

	mutex_enter(&service_queue);

	for (;;) {
		/*
		 * Wait for work to arrive.
		 */
		while (sqhead == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&syncqs_to_run, &service_queue);
			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
		}

		/*
		 * Run pending syncqs.
		 */
		while (sqhead != NULL) {
			DQ(sq, sqhead, sqtail, sq_next);
			ASSERT(sq != NULL);
			ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
			mutex_exit(&service_queue);
			syncq_service(sq);
			mutex_enter(&service_queue);
		}
	}
}
/*
 * Disable the syncq and wait for background syncq processing to complete.
 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
 * list.
 */
void
wait_sq_svc(syncq_t *sq)
{
	mutex_enter(SQLOCK(sq));
	sq->sq_svcflags |= SQ_DISABLED;
	if (sq->sq_svcflags & SQ_BGTHREAD) {
		syncq_t *sq_chase;
		syncq_t *sq_curr;
		int removed;

		ASSERT(sq->sq_servcount == 1);
		mutex_enter(&service_queue);
		RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			sq->sq_svcflags &= ~SQ_BGTHREAD;
			sq->sq_servcount = 0;
			STRSTAT(sqremoved);
			goto done;
		}
	}
	while (sq->sq_servcount != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
	}
done:
	mutex_exit(SQLOCK(sq));
}
/*
 * Put a syncq on the list of syncq's to be serviced by the sqthread.
 * Add the argument to the end of the sqhead list and set the flag
 * indicating this syncq has been enabled. If it has already been
 * enabled, don't do anything.
 * This routine assumes that SQLOCK is held.
 * NOTE that the lock order is to have the SQLOCK first,
 * so if the service_syncq lock is held, we need to release it
 * before acquiring the SQLOCK (mostly relevant for the background
 * thread, and this seems to be common among the STREAMS global locks).
 * Note that the sq_svcflags are protected by the SQLOCK.
 */
void
sqenable(syncq_t *sq)
{
	/*
	 * This is probably not important except for where I believe it
	 * is being called. At that point, it should be held (and it
	 * is a pain to release it just for this routine, so don't do
	 * it).
	 */
	ASSERT(MUTEX_HELD(SQLOCK(sq)));

	IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
	IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);

	/*
	 * Do not put on list if background thread is scheduled or
	 * syncq is disabled.
	 */
	if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
		return;

	/*
	 * Check whether we should enable sq at all.
	 * Non PERMOD syncqs may be drained by at most one thread.
	 * PERMOD syncqs may be drained by several threads but we limit the
	 * total amount to the lesser of
	 *	Number of queues on the syncq and
	 *	Number of CPUs.
	 */
	if (sq->sq_servcount != 0) {
		if (((sq->sq_type & SQ_PERMOD) == 0) ||
		    (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
			STRSTAT(sqtoomany);
			return;
		}
	}

	sq->sq_tstamp = ddi_get_lbolt();
	STRSTAT(sqenables);

	/* Attempt a taskq dispatch */
	sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
	    (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
	if (sq->sq_servid != NULL) {
		sq->sq_servcount++;
		return;
	}

	/*
	 * This taskq dispatch failed, but a previous one may have succeeded.
	 * Don't try to schedule on the background thread whilst there is
	 * outstanding taskq processing.
	 */
	if (sq->sq_servcount != 0)
		return;

	/*
	 * System is low on resources and can't perform a non-sleeping
	 * dispatch. Schedule the syncq for a background thread and mark the
	 * syncq to avoid any further taskq dispatch attempts.
	 */
	mutex_enter(&service_queue);
	STRSTAT(taskqfails);
	ENQUEUE(sq, sqhead, sqtail, sq_next);
	sq->sq_svcflags |= SQ_BGTHREAD;
	sq->sq_servcount = 1;
	cv_signal(&syncqs_to_run);
	mutex_exit(&service_queue);
}
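
/*
 * Worked example of the concurrency limit above: a PERMOD syncq shared
 * by 8 queue pairs on a 4-CPU system allows at most MIN(8, 4) == 4
 * concurrent drains; a fifth sqenable() just bumps the sqtoomany kstat
 * and returns without dispatching.
 */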
/*
 * Note: fifo_close() depends on the mblk_t on the queue being freed
 * asynchronously. The asynchronous freeing of messages breaks the
 * recursive call chain of fifo_close() while there are I_SENDFD type of
 * messages referring to other file pointers on the queue. Then when
 * closing pipes it can avoid stack overflow in case of daisy-chained
 * pipes, and also avoid deadlock in case of fifonode_t pairs (which
 * share the same fifolock_t).
 *
 * No need to kpreempt_disable to access cpu_seqid. If we migrate and
 * the esb queue does not match the new CPU, that is OK.
 */
void
freebs_enqueue(mblk_t *mp, dblk_t *dbp)
{
	int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
	esb_queue_t *eqp;

	ASSERT(dbp->db_mblk == mp);
	ASSERT(qindex < esbq_nelem);

	eqp = system_esbq_array;
	if (eqp != NULL) {
		eqp += qindex;
	} else {
		mutex_enter(&esbq_lock);
		if (kmem_ready && system_esbq_array == NULL)
			system_esbq_array = (esb_queue_t *)kmem_zalloc(
			    esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP);
		mutex_exit(&esbq_lock);
		eqp = system_esbq_array;
		if (eqp != NULL)
			eqp += qindex;
		else
			eqp = &system_esbq;
	}

	/*
	 * Check data sanity. The dblock should have non-empty free function.
	 * It is better to panic here than later when the dblock is freed
	 * asynchronously when the context is lost.
	 */
	if (dbp->db_frtnp->free_func == NULL) {
		panic("freebs_enqueue: dblock %p has a NULL free callback",
		    (void *)dbp);
	}

	mutex_enter(&eqp->eq_lock);
	/* queue the new mblk on the esballoc queue */
	if (eqp->eq_head == NULL) {
		eqp->eq_head = eqp->eq_tail = mp;
	} else {
		eqp->eq_tail->b_next = mp;
		eqp->eq_tail = mp;
	}
	eqp->eq_len++;

	/* If we're the first thread to reach the threshold, process */
	if (eqp->eq_len >= esbq_max_qlen &&
	    !(eqp->eq_flags & ESBQ_PROCESSING))
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}
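
/*
 * Worked example of the queue-index arithmetic above: with
 * esbq_log2_cpus_per_q == 2 (CPUs grouped four to a queue), a thread on
 * cpu_seqid 9 uses qindex 9 >> 2 == 2, so CPUs 8 through 11 all share
 * the third entry of system_esbq_array.
 */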
static void
esballoc_process_queue(esb_queue_t *eqp)
{
	mblk_t	*mp;

	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	eqp->eq_flags |= ESBQ_PROCESSING;

	do {
		/*
		 * Detach the message chain for processing.
		 */
		mp = eqp->eq_head;
		eqp->eq_tail->b_next = NULL;
		eqp->eq_head = eqp->eq_tail = NULL;
		eqp->eq_len = 0;
		mutex_exit(&eqp->eq_lock);

		/*
		 * Process the message chain.
		 */
		esballoc_enqueue_mblk(mp);
		mutex_enter(&eqp->eq_lock);
	} while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));

	eqp->eq_flags &= ~ESBQ_PROCESSING;
}
/*
 * taskq callback routine to free esballoced mblk's
 */
static void
esballoc_mblk_free(mblk_t *mp)
{
	mblk_t	*nextmp;

	for (; mp != NULL; mp = nextmp) {
		nextmp = mp->b_next;
		mp->b_next = NULL;
		mblk_free(mp);
	}
}
static void
esballoc_enqueue_mblk(mblk_t *mp)
{
	if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
	    TQ_NOSLEEP) == (uintptr_t)NULL) {
		mblk_t *first_mp = mp;
		/*
		 * System is low on resources and can't perform a non-sleeping
		 * dispatch. Schedule for a background thread.
		 */
		mutex_enter(&service_queue);
		STRSTAT(taskqfails);

		while (mp->b_next != NULL)
			mp = mp->b_next;

		mp->b_next = freebs_list;
		freebs_list = first_mp;
		cv_signal(&services_to_run);
		mutex_exit(&service_queue);
	}
}
static void
esballoc_timer(void *arg)
{
	esb_queue_t *eqp = arg;

	mutex_enter(&eqp->eq_lock);
	eqp->eq_flags &= ~ESBQ_TIMER;

	if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
	    eqp->eq_len > 0)
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}
static void
esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
{
	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
		(void) timeout(esballoc_timer, eqp, eq_timeout);
		eqp->eq_flags |= ESBQ_TIMER;
	}
}
/*
 * Setup esbq array length based upon NCPU scaled by CPUs per
 * queue. Use static system_esbq until kmem_ready and we can
 * create an array in freebs_enqueue().
 */
void
esballoc_queue_init(void)
{
	esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1);
	esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q;
	esbq_nelem = howmany(NCPU, esbq_cpus_per_q);
	system_esbq.eq_len = 0;
	system_esbq.eq_head = system_esbq.eq_tail = NULL;
	system_esbq.eq_flags = 0;
}
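
/*
 * Worked example (assuming esbq_cpus_per_q is tuned to 4): highbit(3)
 * == 2, so esbq_log2_cpus_per_q becomes 2 and esbq_cpus_per_q is rounded
 * to 1 << 2 == 4; with NCPU == 64 this yields esbq_nelem ==
 * howmany(64, 4) == 16 queues.
 */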
/*
 * Set the QBACK or QB_BACK flag in the given queue for
 * the given priority band.
 */
void
setqback(queue_t *q, unsigned char pri)
{
	int i;
	qband_t *qbp;
	qband_t **qbpp;

	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (pri != 0) {
		if (pri > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (pri > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					cmn_err(CE_WARN,
					    "setqback: can't allocate qband\n");
					return;
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = pri;
		while (--i)
			qbp = qbp->qb_next;
		qbp->qb_flag |= QB_BACK;
	} else {
		q->q_flag |= QBACK;
	}
}
int
strcopyin(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		ASSERT((copyflag & K_TO_K) == 0);
		if (copyin(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}

int
strcopyout(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		if (copyout(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}
/*
 * strsignal_nolock() posts a signal to the process(es) at the stream head.
 * It assumes that the stream head lock is already held, whereas strsignal()
 * acquires the lock first. This routine was created because a few callers
 * release the stream head lock before calling only to re-acquire it after
 * it returns.
 */
void
strsignal_nolock(stdata_t *stp, int sig, uchar_t band)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, band, 0);
		break;
	default:
		if (stp->sd_pgidp)
			pgsignal(stp->sd_pgidp, sig);
		break;
	}
}
void
strsignal(stdata_t *stp, int sig, int32_t band)
{
	TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
	    "strsignal:%p, %X, %X", stp, sig, band);

	mutex_enter(&stp->sd_lock);
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
		break;
	default:
		if (stp->sd_pgidp) {
			pgsignal(stp->sd_pgidp, sig);
		}
		break;
	}
	mutex_exit(&stp->sd_lock);
}
void
strhup(stdata_t *stp)
{
	ASSERT(mutex_owned(&stp->sd_lock));
	pollwakeup(&stp->sd_pollist, POLLHUP);
	if (stp->sd_sigflags & S_HANGUP)
		strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
}
/*
 * Backenable the first queue upstream from `q' with a service procedure.
 */
void
backenable(queue_t *q, uchar_t pri)
{
	queue_t	*nq;

	/*
	 * Our presence might not prevent other modules in our own
	 * stream from popping/pushing since the caller of getq might not
	 * have a claim on the queue (some drivers do a getq on somebody
	 * else's queue - they know that the queue itself is not going away
	 * but the framework has to guarantee q_next in that stream).
	 */
	claimstr(q);

	/* Find nearest back queue with service proc */
	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
	}

	if (nq) {
		kthread_t *freezer;
		/*
		 * backenable can be called either with no locks held
		 * or with the stream frozen (the latter occurs when a module
		 * calls rmvq with the stream frozen). If the stream is frozen
		 * by the caller the caller will hold all qlocks in the stream.
		 * Note that a frozen stream doesn't freeze a mated stream,
		 * so we explicitly check for that.
		 */
		freezer = STREAM(q)->sd_freezer;
		if (freezer != curthread || STREAM(q) != STREAM(nq)) {
			mutex_enter(QLOCK(nq));
		}
#ifdef DEBUG
		else {
			ASSERT(frozenstr(q));
			ASSERT(MUTEX_HELD(QLOCK(q)));
			ASSERT(MUTEX_HELD(QLOCK(nq)));
		}
#endif
		setqback(nq, pri);
		qenable_locked(nq);
		if (freezer != curthread || STREAM(q) != STREAM(nq))
			mutex_exit(QLOCK(nq));
	}
	releasestr(q);
}
/*
 * Return the appropriate errno when one of flags_to_check is set
 * in sd_flags. Uses the exported error routines if they are set.
 * Will return 0 if no error is set (or if the exported error routines
 * do not return an error).
 *
 * If there is both a read and write error to check, we prefer the read error.
 * Also, give preference to recorded errno's over the error functions.
 * The flags that are handled are:
 *	STPLEX		return EINVAL
 *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
 *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
 *	STRHUP		return sd_werror
 *
 * If the caller indicates that the operation is a peek, a nonpersistent error
 * is not cleared.
 */
int
strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
{
	int32_t sd_flag = stp->sd_flag & flags_to_check;
	int error = 0;

	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
	if (sd_flag & STPLEX)
		error = EINVAL;
	else if (sd_flag & STRDERR) {
		error = stp->sd_rerror;
		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
			/*
			 * Read errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller,
			 */
			stp->sd_rerror = 0;
			stp->sd_flag &= ~STRDERR;
		}
		if (error == 0 && stp->sd_rderrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STRDERR;
				stp->sd_rderrfunc = NULL;
			}
		}
	} else if (sd_flag & STWRERR) {
		error = stp->sd_werror;
		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
			/*
			 * Write errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller,
			 */
			stp->sd_werror = 0;
			stp->sd_flag &= ~STWRERR;
		}
		if (error == 0 && stp->sd_wrerrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STWRERR;
				stp->sd_wrerrfunc = NULL;
			}
		}
	} else if (sd_flag & STRHUP) {
		/* sd_werror set when STRHUP */
		error = stp->sd_werror;
	}
	return (error);
}
/*
 * Single-thread open/close/push/pop
 * for twisted streams also
 */
int
strstartplumb(stdata_t *stp, int flag, int cmd)
{
	int waited = 1;
	int error = 0;

	if (STRMATED(stp)) {
		struct stdata *stmatep = stp->sd_mate;

		STRLOCKMATES(stp);
		while (waited) {
			waited = 0;
			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stp->sd_lock);
				if (!cv_wait_sig(&stmatep->sd_monitor,
				    &stmatep->sd_lock)) {
					mutex_exit(&stmatep->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stmatep->sd_lock);
				STRLOCKMATES(stp);
			}
			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stmatep->sd_lock);
				if (!cv_wait_sig(&stp->sd_monitor,
				    &stp->sd_lock)) {
					mutex_exit(&stp->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stp->sd_lock);
				STRLOCKMATES(stp);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					STRUNLOCKMATES(stp);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		STRUNLOCKMATES(stp);
	} else {
		mutex_enter(&stp->sd_lock);
		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
			    (flag & (FNDELAY|FNONBLOCK))) {
				mutex_exit(&stp->sd_lock);
				return (EAGAIN);
			}
			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
				mutex_exit(&stp->sd_lock);
				return (EINTR);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					mutex_exit(&stp->sd_lock);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		mutex_exit(&stp->sd_lock);
	}
	return (0);
}
/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT(stp->sd_flag & STRPLUMB);
	stp->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stp->sd_monitor);
}
/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, both open
 * and close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 *  - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *    associated with the queues. For timeout and bufcall callbacks the
 *    module/driver also has to ensure (or wait for) any callbacks that
 *    are in progress.
 *  - If the module/driver is using esballoc it has to ensure that any
 *    esballoc free functions do not refer to a queue that has closed.
 *    (Note that in general the close routine can not wait for the esballoc'ed
 *    messages to be freed since that can cause a deadlock.)
 *  - Cancelling any interrupts that refer to the closing queues and
 *    also ensuring that there are no interrupts in progress that will
 *    refer to the closing queues once the close routine returns.
 *  - For multiplexors removing any driver global state that refers to
 *    the closing queue and also ensuring that there are no threads in
 *    the multiplexor that have picked up a queue pointer but not yet
 *    finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream. Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine. Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases when the driver/module can not access the q_next
 * field it must use the *next* versions e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 * Assuming that the driver/module conforms to the above constraints
 * the STREAMS framework has to avoid stale references to q_next for all
 * the framework internal cases which include (but are not limited to):
 *  - Threads in canput/canputnext/backenable and elsewhere that are
 *    racing with qprocsoff.
 *  - Messages on a syncq that have a reference to the queue through b_queue.
 *  - Messages on an outer perimeter (syncq) that have a reference to the
 *    queue through b_queue.
 *  - Threads that use q_nfsrv (e.g. canput) to find a queue.
 *    Note that only canput and bcanput use q_nfsrv without any locking.
 *
 * The STREAMS framework providing the qprocsoff(9F) guarantees means that
 * after qprocsoff returns, the framework has to ensure that no threads can
 * enter the put or service routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 *  - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *    entering the service procedures.
 *  - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *    canputnext, backenable etc from dereferencing the q_next that will
 *    soon change.
 *  - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *    or other q_next walker that uses claimstr/releasestr to finish.
 *  - optionally for every syncq in the stream strlock acquires all the
 *    sq_lock's and waits for all sq_counts to drop to a value that indicates
 *    that no thread executes in the put or service procedures and that no
 *    thread is draining into the module/driver. This ensures that no
 *    open, close, put, service, or qtimeout/qbufcall callback procedure is
 *    currently executing hence no such thread can end up with the old stale
 *    q_next value and no canput/backenable can have the old stale
 *    q_nfsrv/q_next.
 *  - qdetach (wait_svc) makes sure that any scheduled or running threads
 *    have either finished or observed the QWCLOSE flag and gone away.
 */
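
/*
 * As a hedged sketch of the claimstr/releasestr convention mentioned
 * above, a framework-internal q_next walker keeps the stream from
 * switching q_next underneath it like this (the real canputnext is
 * more involved):
 *
 *	claimstr(q);
 *	nq = q->q_next;
 *	... examine nq, but do not cache it past releasestr ...
 *	releasestr(q);
 */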
/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream. If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated. Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on different ends of the pipe will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depends on a few factors.
 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
 *   and held on exit and all sq_count are at an acceptable level.
 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *   sd_refcnt being zero.
 */
static void
strlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql, *sql2;
retry:
	/*
	 * Wait for any claimstr to go away.
	 */
	if (STRMATED(stp)) {
		struct stdata *stp1, *stp2;

		STRLOCKMATES(stp);
		/*
		 * Note that the selection of locking order is not
		 * important, just that they are always acquired in
		 * the same order. To assure this, we choose this
		 * order based on the value of the pointer, and since
		 * the pointer will not change for the life of this
		 * pair, we will always grab the locks in the same
		 * order (and hence, prevent deadlocks).
		 */
		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
			stp1 = stp;
			stp2 = stp->sd_mate;
		} else {
			stp2 = stp;
			stp1 = stp->sd_mate;
		}
		mutex_enter(&stp1->sd_reflock);
		if (stp1->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
			mutex_exit(&stp1->sd_reflock);
			goto retry;
		}
		mutex_enter(&stp2->sd_reflock);
		if (stp2->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			mutex_exit(&stp1->sd_reflock);
			cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
			mutex_exit(&stp2->sd_reflock);
			goto retry;
		}
		STREAM_PUTLOCKS_ENTER(stp1);
		STREAM_PUTLOCKS_ENTER(stp2);
	} else {
		mutex_enter(&stp->sd_lock);
		mutex_enter(&stp->sd_reflock);
		while (stp->sd_refcnt > 0) {
			mutex_exit(&stp->sd_lock);
			cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
			if (mutex_tryenter(&stp->sd_lock) == 0) {
				mutex_exit(&stp->sd_reflock);
				mutex_enter(&stp->sd_lock);
				mutex_enter(&stp->sd_reflock);
			}
		}
		STREAM_PUTLOCKS_ENTER(stp);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		syncq_t *sq = sql->sql_sq;
		uint16_t count;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		ASSERT(sq->sq_rmqcount <= count);
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count == sq->sq_rmqcount)
			continue;

		/* Failed - drop all locks that we have acquired so far */
		if (STRMATED(stp)) {
			STREAM_PUTLOCKS_EXIT(stp);
			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
			STRUNLOCKMATES(stp);
			mutex_exit(&stp->sd_reflock);
			mutex_exit(&stp->sd_mate->sd_reflock);
		} else {
			STREAM_PUTLOCKS_EXIT(stp);
			mutex_exit(&stp->sd_lock);
			mutex_exit(&stp->sd_reflock);
		}
		for (sql2 = sqlist->sqlist_head; sql2 != sql;
		    sql2 = sql2->sql_next) {
			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
			mutex_exit(SQLOCK(sql2->sql_sq));
		}

		/*
		 * The wait loop below may starve when there are many threads
		 * claiming the syncq. This is especially a problem with permod
		 * syncqs (IP). To lessen the impact of the problem we increment
		 * sq_needexcl and clear fastbits so that putnexts will slow
		 * down and call sqenable instead of draining right away.
		 */
		sq->sq_needexcl++;
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		while (count > sq->sq_rmqcount) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			SQ_PUTLOCKS_EXIT(sq);
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0)
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		SQ_PUTLOCKS_EXIT(sq);
		ASSERT(count == sq->sq_rmqcount);
		mutex_exit(SQLOCK(sq));
		goto retry;
	}
}
/*
 * Drop all the locks that strlock acquired.
 */
static void
strunlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql;

	if (STRMATED(stp)) {
		STREAM_PUTLOCKS_EXIT(stp);
		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
		STRUNLOCKMATES(stp);
		mutex_exit(&stp->sd_reflock);
		mutex_exit(&stp->sd_mate->sd_reflock);
	} else {
		STREAM_PUTLOCKS_EXIT(stp);
		mutex_exit(&stp->sd_lock);
		mutex_exit(&stp->sd_reflock);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		SQ_PUTLOCKS_EXIT(sql->sql_sq);
		mutex_exit(SQLOCK(sql->sql_sq));
	}
}
/*
 * When the module has a service procedure, we need to check whether the next
 * module with a service procedure is in flow control, in order to trigger
 * the backenable.
 */
static void
backenable_insertedq(queue_t *q)
{
	qband_t *qbp;

	claimstr(q);
	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
		if (q->q_next->q_nfsrv->q_flag & QWANTW)
			backenable(q, 0);

		qbp = q->q_next->q_nfsrv->q_bandp;
		for (; qbp != NULL; qbp = qbp->qb_next)
			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
				backenable(q, qbp->qb_first->b_band);
	}
	releasestr(q);
}
/*
 * Given two read queues, insert a new single one after another.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
 * It depends on the stream head ensuring that there are no concurrent
 * insertq or removeq on the same stream. The stream head ensures this
 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
 *
 * Note that no syncq locks are held during the q_next change. This is
 * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
 */
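/*
 * Illustrative sketch (editorial, not from the original source): pushing a
 * module M between the stream head and driver D rewires q_next as
 *
 *	before:	head-wq -> D-wq			D-rq -> head-rq
 *	after:	head-wq -> M-wq -> D-wq		D-rq -> M-rq -> head-rq
 *
 * i.e. the upstream write-side neighbour is redirected to the new write
 * queue, and the downstream read-side neighbour to the new read queue.
 */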
void
insertq(struct stdata *stp, queue_t *new)
{
	queue_t	*after;
	queue_t	*wafter;
	queue_t	*wnew = _WR(new);
	boolean_t have_fifo = B_FALSE;

	if (new->q_flag & _QINSERTING) {
		ASSERT(stp->sd_vnode->v_type != VFIFO);
		after = new->q_next;
		wafter = _WR(new->q_next);
	} else {
		after = _RD(stp->sd_wrq);
		wafter = stp->sd_wrq;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
	    "insertq:%p, %p", after, new);
	ASSERT(after->q_flag & QREADR);
	ASSERT(new->q_flag & QREADR);

	strlock(stp, NULL);

	/* Do we have a FIFO? */
	if (wafter->q_next == after) {
		have_fifo = B_TRUE;
		wnew->q_next = new;
	} else {
		wnew->q_next = wafter->q_next;
	}
	new->q_next = after;

	set_nfsrv_ptr(new, wnew, after, wafter);
	/*
	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
	 * so only reset this flag after calling it.
	 */
	new->q_flag &= ~_QINSERTING;

	if (have_fifo) {
		wafter->q_next = wnew;
	} else {
		if (wafter->q_next)
			_OTHERQ(wafter->q_next)->q_next = new;
		wafter->q_next = wnew;
	}

	set_qend(new);
	/* The QEND flag might have to be updated for the upstream guy */
	set_qend(after);

	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));

	/*
	 * If this was a module insertion, bump the push count.
	 */
	if (!(new->q_flag & QISDRV))
		stp->sd_pushcnt++;

	strunlock(stp, NULL);

	/* check if the write Q needs backenable */
	backenable_insertedq(wnew);

	/* check if the read Q needs backenable */
	backenable_insertedq(new);
}
/*
 * Given a read queue, unlink it from any neighbors.
 *
 * This routine acquires all the necessary locks in order to
 * change q_next and related pointers and also guard against
 * stale references (e.g. through q_next) to the queue that
 * is being removed. It also plays part of the role in ensuring
 * that the module's/driver's put procedure doesn't get called
 * after qprocsoff returns.
 *
 * Removeq depends on the stream head ensuring that there are
 * no concurrent insertq or removeq on the same stream. The
 * stream head ensures this using the flags STWOPEN, STRCLOSE and
 * STRPLUMB.
 *
 * The set of locks needed to remove the queue is different in
 * different cases:
 *
 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
 * waiting for the syncq reference count to drop to 0 indicating that no
 * non-close threads are present anywhere in the stream. This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
 *
 * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that there are either no threads executing inside the
 * perimeter or only a thread calling qprocsoff().
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
 * up any threads waiting in strlock() when sq_rmqcount increases.
 */
void
removeq(queue_t *qp)
{
	queue_t *wqp = _WR(qp);
	struct stdata *stp = STREAM(qp);
	sqlist_t *sqlist = NULL;
	boolean_t isdriver;
	int moved;
	syncq_t *sq = qp->q_syncq;
	syncq_t *wsq = wqp->q_syncq;

	ASSERT(stp);

	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
	    "removeq:%p %p", qp, wqp);
	ASSERT(qp->q_flag&QREADR);

	/*
	 * For queues using Synchronous streams, we must wait for all threads in
	 * rwnext() to drain out before proceeding.
	 */
	if (qp->q_flag & QSYNCSTR) {
		/* First, we need wakeup any threads blocked in rwnext() */
		mutex_enter(SQLOCK(sq));
		if (sq->sq_flags & SQ_WANTWAKEUP) {
			sq->sq_flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		mutex_exit(SQLOCK(sq));

		if (wsq != sq) {
			mutex_enter(SQLOCK(wsq));
			if (wsq->sq_flags & SQ_WANTWAKEUP) {
				wsq->sq_flags &= ~SQ_WANTWAKEUP;
				cv_broadcast(&wsq->sq_wait);
			}
			mutex_exit(SQLOCK(wsq));
		}

		mutex_enter(QLOCK(qp));
		while (qp->q_rwcnt > 0) {
			qp->q_flag |= QWANTRMQSYNC;
			cv_wait(&qp->q_wait, QLOCK(qp));
		}
		mutex_exit(QLOCK(qp));

		mutex_enter(QLOCK(wqp));
		while (wqp->q_rwcnt > 0) {
			wqp->q_flag |= QWANTRMQSYNC;
			cv_wait(&wqp->q_wait, QLOCK(wqp));
		}
		mutex_exit(QLOCK(wqp));
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount++;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	mutex_exit(SQLOCK(sq));

	isdriver = (qp->q_flag & QISDRV);

	sqlist = sqlist_build(qp, stp, STRMATED(stp));
	strlock(stp, sqlist);

	reset_nfsrv_ptr(qp, wqp);

	ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
	ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
	/* Do we have a FIFO? */
	if (wqp->q_next == qp) {
		stp->sd_wrq->q_next = _RD(stp->sd_wrq);
	} else {
		if (wqp->q_next)
			backq(qp)->q_next = qp->q_next;
		if (qp->q_next)
			backq(wqp)->q_next = wqp->q_next;
	}

	/* The QEND flag might have to be updated for the upstream guy */
	if (qp->q_next)
		set_qend(qp->q_next);

	ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
	ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));

	/*
	 * Move any messages destined for the put procedures to the next
	 * syncq in line. Otherwise free them.
	 */
	moved = 0;
	/*
	 * Quick check to see whether there are any messages or events.
	 */
	if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(qp);
	if (wqp->q_syncqmsgs != 0 ||
	    (wqp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(wqp);

	/*
	 * If this was a module removal, decrement the push count.
	 */
	if (!isdriver)
		stp->sd_pushcnt--;

	strunlock(stp, sqlist);
	sqlist_free(sqlist);

	/*
	 * Make sure any messages that were propagated are drained.
	 * Also clear any QFULL bit caused by messages that were propagated.
	 */

	if (qp->q_next != NULL) {
		clr_qfull(qp);
		/*
		 * For the driver calling qprocsoff, propagate_syncq
		 * frees all the messages instead of putting it in
		 * the stream head
		 */
		if (!isdriver && (moved > 0))
			emptysq(qp->q_next->q_syncq);
	}
	if (wqp->q_next != NULL) {
		clr_qfull(wqp);
		/*
		 * We come here for any pop of a module except for the
		 * case of driver being removed. We don't call emptysq
		 * if we did not move any messages. This will avoid holding
		 * PERMOD syncq locks in emptysq.
		 */
		if (moved > 0)
			emptysq(wqp->q_next->q_syncq);
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount--;
	mutex_exit(SQLOCK(sq));
}
/*
 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
 * SQ_WRITER) on a syncq.
 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the
 * sync queue and waits until sq_count reaches maxcnt.
 *
 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
 * does not care about putnext threads that are in the middle of calling put
 * entry points.
 *
 * This routine is used for both inner and outer syncqs.
 */
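/*
 * Illustrative callers (sketch; both appear later in this file):
 *
 *	blocksq(sq, SQ_FROZEN, -1);	-- strblock(): freeze, ignore claims
 *	blocksq(outer, SQ_BLOCKED, 0);	-- outer_insert(): wait for zero claims
 */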
static void
blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
{
	uint16_t count = 0;

	mutex_enter(SQLOCK(sq));
	/*
	 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
	 * SQ_FROZEN will be set if there is a frozen stream that has a
	 * queue which also refers to this "shared" syncq.
	 * SQ_BLOCKED will be set if there is "off" queue which also
	 * refers to this "shared" syncq.
	 */
	if (maxcnt != -1) {
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	sq->sq_needexcl++;
	ASSERT(sq->sq_needexcl != 0);	/* wraparound */

	while ((sq->sq_flags & flag) ||
	    (maxcnt != -1 && count > (unsigned)maxcnt)) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		if (maxcnt != -1) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (maxcnt != -1) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
	}
	sq->sq_needexcl--;
	sq->sq_flags |= flag;
	ASSERT(maxcnt == -1 || count == maxcnt);
	if (maxcnt != -1) {
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
		SQ_PUTLOCKS_EXIT(sq);
	} else if (sq->sq_needexcl == 0) {
		SQ_PUTCOUNT_SETFAST(sq);
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Reset a flag that was set with blocksq.
 *
 * Cannot be used to reset SQ_WRITER.
 *
 * If "isouter" is set then the syncq is assumed to be an outer perimeter
 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
 * to handle the queued qwriter operations.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static void
unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(resetflag != SQ_WRITER);
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	sq->sq_flags = flags;
	if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			if (!isouter) {
				/* drain_syncq drops SQLOCK */
				drain_syncq(sq);
				return;
			}
		}
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Reset a flag that was set with blocksq.
 * Does not drain the syncq. Use emptysq() for that.
 * Returns 1 if SQ_QUEUED is set. Otherwise 0.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static int
dropsq(syncq_t *sq, uint16_t resetflag)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	sq->sq_flags = flags;
	mutex_exit(SQLOCK(sq));
	if (flags & SQ_QUEUED)
		return (1);
	return (0);
}
/*
 * Empty all the messages on a syncq.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static void
emptysq(syncq_t *sq)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
		/*
		 * To prevent potential recursive invocation of drain_syncq we
		 * do not call drain_syncq if count is non-zero.
		 */
		if (sq->sq_count == 0) {
			/* drain_syncq() drops SQLOCK */
			drain_syncq(sq);
			return;
		}
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Ordered insert while removing duplicates.
 */
static void
sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
{
	syncql_t *sqlp, **prev_sqlpp, *new_sqlp;

	prev_sqlpp = &sqlist->sqlist_head;
	while ((sqlp = *prev_sqlpp) != NULL) {
		if (sqlp->sql_sq >= sqp) {
			if (sqlp->sql_sq == sqp)	/* duplicate */
				return;
			break;
		}
		prev_sqlpp = &sqlp->sql_next;
	}
	new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
	ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
	new_sqlp->sql_next = sqlp;
	new_sqlp->sql_sq = sqp;
	*prev_sqlpp = new_sqlp;
}
/*
 * Walk the write side queues until we hit either the driver
 * or a twist in the stream (_SAMESTR will return false in both
 * these cases) then turn around and walk the read side queues
 * back up to the stream head.
 */
static void
sqlist_insertall(sqlist_t *sqlist, queue_t *q)
{
	sqlist_insert(sqlist, q->q_syncq);

	if (_SAMESTR(q))
		sqlist_insertall(sqlist, q->q_next);
	else if (!(q->q_flag & QREADR))
		sqlist_insertall(sqlist, _RD(q));
}
/*
 * Allocate and build a list of all syncqs in a stream and the syncq(s)
 * associated with the "q" parameter. The resulting list is sorted in a
 * canonical order and is free of duplicates.
 * Assumes the passed queue is a _RD(q).
 */
static sqlist_t *
sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
{
	sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);

	/*
	 * start with the current queue/qpair
	 */
	ASSERT(q->q_flag & QREADR);

	sqlist_insert(sqlist, q->q_syncq);
	sqlist_insert(sqlist, _WR(q)->q_syncq);

	sqlist_insertall(sqlist, stp->sd_wrq);
	if (do_twist)
		sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);

	return (sqlist);
}
static sqlist_t *
sqlist_alloc(struct stdata *stp, int kmflag)
{
	size_t sqlist_size;
	sqlist_t *sqlist;

	/*
	 * Allocate 2 syncql_t's for each pushed module. Note that
	 * the sqlist_t structure already has 4 syncql_t's built in:
	 * 2 for the stream head, and 2 for the driver/other stream head.
	 */
	sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
	    sizeof (sqlist_t);
	if (STRMATED(stp))
		sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
	sqlist = kmem_alloc(sqlist_size, kmflag);

	sqlist->sqlist_head = NULL;
	sqlist->sqlist_size = sqlist_size;
	sqlist->sqlist_index = 0;

	return (sqlist);
}

/*
 * Free the list created by sqlist_alloc()
 */
static void
sqlist_free(sqlist_t *sqlist)
{
	kmem_free(sqlist, sqlist->sqlist_size);
}
/*
 * Prevent any new entries into any syncq in this stream.
 * Used by freezestr.
 */
void
strblock(queue_t *q)
{
	struct stdata	*stp;
	syncql_t	*sql;
	sqlist_t	*sqlist;

	q = _RD(q);

	stp = STREAM(q);
	ASSERT(stp != NULL);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 */
	sqlist = sqlist_build(q, stp, B_FALSE);
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		blocksq(sql->sql_sq, SQ_FROZEN, -1);
	sqlist_free(sqlist);
}
/*
 * Release the block on new entries into this stream.
 */
void
strunblock(queue_t *q)
{
	struct stdata	*stp;
	syncql_t	*sql;
	sqlist_t	*sqlist;
	int		drain_needed;

	q = _RD(q);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 * Have to drop the SQ_FROZEN flag on all the syncqs before
	 * starting to drain them; otherwise the draining might
	 * cause a freezestr in some module on the stream (which
	 * would deadlock).
	 */
	stp = STREAM(q);
	ASSERT(stp != NULL);
	sqlist = sqlist_build(q, stp, B_FALSE);
	drain_needed = 0;
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
	if (drain_needed) {
		for (sql = sqlist->sqlist_head; sql != NULL;
		    sql = sql->sql_next)
			emptysq(sql->sql_sq);
	}
	sqlist_free(sqlist);
}
#ifdef DEBUG
static int
qprocsareon(queue_t *rq)
{
	if (rq->q_next == NULL)
		return (0);
	return (_WR(rq->q_next)->q_next == _WR(rq));
}

int
qclaimed(queue_t *q)
{
	uint_t count;

	count = q->q_syncq->sq_count;
	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
	return (count != 0);
}

/*
 * Check if anyone has frozen this stream with freezestr
 */
int
frozenstr(queue_t *q)
{
	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
}
#endif /* DEBUG */
/*
 * Obsoleted interface. Should not be used.
 */
void
enterq(queue_t *q)
{
	entersq(q->q_syncq, SQ_CALLBACK);
}

void
leaveq(queue_t *q)
{
	leavesq(q->q_syncq, SQ_CALLBACK);
}
/*
 * Enter a perimeter. c_inner and c_outer specify which concurrency bits
 * to check.
 *
 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
 * calls and the running of open, close and service procedures.
 *
 * If the c_inner bit is set there is no need to grab sq_putlocks since we
 * don't care if other threads have entered or are entering put entry points.
 *
 * If the c_inner bit is set it might have been possible to use
 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize the
 * open/close path for IP) but since the count may need to be decremented in
 * qwait() we wouldn't know which counter to decrement. Currently the counter
 * is selected by current cpu_seqid and the current CPU can change at any
 * moment. XXX in the future we might use curthread id bits to select the
 * counter and this would stay constant across routine calls.
 */
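/*
 * Illustrative bracket (sketch, not from this file): open/close paths
 * enter and leave the perimeter around the module's entry point, e.g.
 *
 *	entersq(qp->q_syncq, SQ_OPENCLOSE);
 *	error = (*qp->q_qinfo->qi_qopen)(qp, devp, flag, sflag, crp);
 *	leavesq(qp->q_syncq, SQ_OPENCLOSE);
 *
 * SQ_OPENCLOSE selects the concurrency bits checked for the open/close
 * entry point; enterq()/leaveq() above are the obsolete SQ_CALLBACK form.
 */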
void
entersq(syncq_t *sq, int entrypoint)
{
	uint16_t	count = 0;
	uint16_t	flags;
	uint16_t	waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
	uint16_t	type;
	uint_t		c_inner = entrypoint & SQ_CI;
	uint_t		c_outer = entrypoint & SQ_CO;

	/*
	 * Increment ref count to keep closes out of this queue.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (!(type & c_inner)) {
		/* Make sure all putcounts now use slowlock. */
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		sq->sq_needexcl++;
		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
		waitflags |= SQ_MESSAGES;
	}
	/*
	 * Wait until we can enter the inner perimeter.
	 * If we want exclusive access we wait until sq_count is 0.
	 * We have to do this before entering the outer perimeter in order
	 * to preserve put/close message ordering.
	 */
	while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
		sq->sq_flags = flags | SQ_WANTWAKEUP;
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (!(type & c_inner)) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		flags = sq->sq_flags;
	}

	if (!(type & c_inner)) {
		ASSERT(sq->sq_needexcl > 0);
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
	}

	/* Check if we need to enter the outer perimeter */
	if (!(type & c_outer)) {
		/*
		 * We have to enter the outer perimeter exclusively before
		 * we can increment sq_count to avoid deadlock. This implies
		 * that we have to re-check sq_flags and sq_count.
		 *
		 * is it possible to have c_inner set when c_outer is not set?
		 */
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		mutex_exit(SQLOCK(sq));
		outer_enter(sq->sq_outer, SQ_GOAWAY);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * there should be no need to recheck sq_putcounts
		 * because outer_enter() has already waited for them to clear
		 * after setting SQ_WRITER.
		 */
		count = sq->sq_count;
#ifdef DEBUG
		/*
		 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
		 * of doing an ASSERT internally. Others should do
		 * something like
		 *	 ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
		 * without the need to #ifdef DEBUG it.
		 */
		SUMCHECK_SQ_PUTCOUNTS(sq, 0);
#endif
		while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
		    (!(type & c_inner) && count != 0)) {
			sq->sq_flags = flags | SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			flags = sq->sq_flags;
		}
	}

	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	if (!(type & c_inner)) {
		/* Exclusive entry */
		ASSERT(sq->sq_count == 1);
		sq->sq_flags |= SQ_EXCL;
		if (type & c_outer) {
			SQ_PUTLOCKS_EXIT(sq);
		}
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Leave a syncq. Announce to framework that closes may proceed.
 * c_inner and c_outer specify which concurrency bits to check.
 *
 * Must never be called from driver or module put entry point.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
leavesq(syncq_t *sq, int entrypoint)
{
	uint16_t	flags;
	uint16_t	type;
	uint_t		c_outer = entrypoint & SQ_CO;
#ifdef DEBUG
	uint_t		c_inner = entrypoint & SQ_CI;
#endif

	/*
	 * Decrement ref count, drain the syncq if possible, and wake up
	 * any waiting close.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {

		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}

		if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
			/*
			 * The syncq needs to be drained. "Exit" the syncq
			 * before calling drain_syncq.
			 */
			ASSERT(sq->sq_count != 0);
			sq->sq_count--;
			ASSERT((flags & SQ_EXCL) || (type & c_inner));
			sq->sq_flags = flags & ~SQ_EXCL;
			drain_syncq(sq);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
			/* Check if we need to exit the outer perimeter */
			/* XXX will this ever be true? */
			if (!(type & c_outer))
				outer_exit(sq->sq_outer);
			return;
		}
	}
	ASSERT(sq->sq_count != 0);
	sq->sq_count--;
	ASSERT((flags & SQ_EXCL) || (type & c_inner));
	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));

	/* Check if we need to exit the outer perimeter */
	if (!(sq->sq_type & c_outer))
		outer_exit(sq->sq_outer);
}
/*
 * Prevent q_next from changing in this stream by incrementing sq_count.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
claimq(queue_t *qp)
{
	syncq_t	*sq = qp->q_syncq;

	mutex_enter(SQLOCK(sq));
	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	mutex_exit(SQLOCK(sq));
}
/*
 * Undo claimq.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
releaseq(queue_t *qp)
{
	syncq_t	*sq = qp->q_syncq;
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	flags = sq->sq_flags;
	if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			/*
			 * To prevent potential recursive invocation of
			 * drain_syncq we do not call drain_syncq if count is
			 * non-zero.
			 */
			if (sq->sq_count == 0) {
				/* drain_syncq() drops SQLOCK */
				drain_syncq(sq);
				return;
			}
		}
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
 */
void
claimstr(queue_t *qp)
{
	struct stdata *stp = STREAM(qp);

	mutex_enter(&stp->sd_reflock);
	stp->sd_refcnt++;
	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
	mutex_exit(&stp->sd_reflock);
}

/*
 * Undo claimstr.
 */
void
releasestr(queue_t *qp)
{
	struct stdata *stp = STREAM(qp);

	mutex_enter(&stp->sd_reflock);
	ASSERT(stp->sd_refcnt != 0);
	if (--stp->sd_refcnt == 0)
		cv_broadcast(&stp->sd_refmonitor);
	mutex_exit(&stp->sd_reflock);
}
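/*
 * Illustrative bracket (sketch): backenable_insertedq() earlier in this
 * file uses this pair to keep q_next stable while it walks downstream:
 *
 *	claimstr(q);
 *	... dereference q->q_next ...
 *	releasestr(q);
 */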
static syncq_t *
new_syncq(void)
{
	return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
}

static void
free_syncq(syncq_t *sq)
{
	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
	    (sq->sq_onext == sq && sq->sq_oprev == sq));

	if (sq->sq_ciputctrl != NULL) {
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
		    sq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
	}

	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_ciputctrl = NULL;
	sq->sq_nciputctrl = 0;
	sq->sq_count = 0;
	sq->sq_rmqcount = 0;
	sq->sq_callbflags = 0;
	sq->sq_cancelid = 0;
	sq->sq_next = NULL;
	sq->sq_needexcl = 0;
	sq->sq_svcflags = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_flags = 0;
	sq->sq_type = 0;
	sq->sq_servcount = 0;

	kmem_cache_free(syncq_cache, sq);
}
/* Outer perimeter code */

/*
 * The outer syncq uses the fields and flags in the syncq slightly
 * differently from the inner syncqs.
 *	sq_count	Incremented when there are pending or running
 *			writers at the outer perimeter to prevent the set of
 *			inner syncqs that belong to the outer perimeter from
 *			changing.
 *	sq_head/tail	List of deferred qwriter(OUTER) operations.
 *
 *	SQ_BLOCKED	Set to prevent traversing of sq_next,sq_prev while
 *			inner syncqs are added to or removed from the
 *			outer perimeter.
 *	SQ_QUEUED	sq_head/tail has messages or events queued.
 *
 *	SQ_WRITER	A thread is currently traversing all the inner syncqs
 *			setting the SQ_WRITER flag.
 */
/*
 * Get write access at the outer perimeter.
 * Note that read access is done by entersq, putnext, and put by simply
 * incrementing sq_count in the inner syncq.
 *
 * Waits until "flags" is no longer set in the outer to prevent multiple
 * threads from having write access at the same time. SQ_WRITER has to be part
 * of "flags".
 *
 * Increases sq_count on the outer syncq to keep away outer_insert/remove
 * until the outer_exit is finished.
 *
 * outer_enter is vulnerable to starvation since it does not prevent new
 * threads from entering the inner syncqs while it is waiting for sq_count to
 * go to zero.
 */
static void
outer_enter(syncq_t *outer, uint16_t flags)
{
	syncq_t	*sq;
	int	wait_needed;
	uint16_t count;

	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(flags & SQ_WRITER);

retry:
	mutex_enter(SQLOCK(outer));
	while (outer->sq_flags & flags) {
		outer->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&outer->sq_wait, SQLOCK(outer));
	}

	ASSERT(!(outer->sq_flags & SQ_WRITER));
	outer->sq_flags |= SQ_WRITER;
	outer->sq_count++;
	ASSERT(outer->sq_count != 0);	/* wraparound */
	wait_needed = 0;
	/*
	 * Set SQ_WRITER on all the inner syncqs while holding
	 * the SQLOCK on the outer syncq. This ensures that the changing
	 * of SQ_WRITER is atomic under the outer SQLOCK.
	 */
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		sq->sq_flags |= SQ_WRITER;
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count != 0)
			wait_needed = 1;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
	}
	mutex_exit(SQLOCK(outer));

	/*
	 * Get everybody out of the syncqs sequentially.
	 * Note that we don't actually need to acquire the PUTLOCKS, since
	 * we have already cleared the fastbit, and set QWRITER. By
	 * definition, the count can not increase since putnext will
	 * take the slowlock path (and the purpose of acquiring the
	 * putlocks was to make sure it didn't increase while we were
	 * waiting).
	 *
	 * Note that we still acquire the PUTLOCKS to be safe.
	 */
	if (wait_needed) {
		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
			mutex_enter(SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
			while (count != 0) {
				sq->sq_flags |= SQ_WANTWAKEUP;
				SQ_PUTLOCKS_EXIT(sq);
				cv_wait(&sq->sq_wait, SQLOCK(sq));
				count = sq->sq_count;
				SQ_PUTLOCKS_ENTER(sq);
				SUM_SQ_PUTCOUNTS(sq, count);
			}
			SQ_PUTLOCKS_EXIT(sq);
			mutex_exit(SQLOCK(sq));
		}
		/*
		 * Verify that none of the flags got set while we
		 * were waiting for the sq_counts to drop.
		 * If this happens we exit and retry entering the
		 * outer perimeter.
		 */
		mutex_enter(SQLOCK(outer));
		if (outer->sq_flags & (flags & ~SQ_WRITER)) {
			mutex_exit(SQLOCK(outer));
			outer_exit(outer);
			goto retry;
		}
		mutex_exit(SQLOCK(outer));
	}
}
/*
 * Drop the write access at the outer perimeter.
 * Read access is dropped implicitly (by putnext, put, and leavesq) by
 * decrementing sq_count.
 */
static void
outer_exit(syncq_t *outer)
{
	syncq_t	*sq;
	int	drain_needed;
	uint16_t flags;

	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));

	/*
	 * Atomically (from the perspective of threads calling become_writer)
	 * drop the write access at the outer perimeter by holding
	 * SQLOCK(outer) across all the dropsq calls and the resetting of
	 * sq_flags.
	 * This defines a locking order between the outer perimeter
	 * SQLOCK and the inner perimeter SQLOCKs.
	 */
	mutex_enter(SQLOCK(outer));
	flags = outer->sq_flags;
	ASSERT(outer->sq_flags & SQ_WRITER);
	if (flags & SQ_QUEUED) {
		write_now(outer);
		flags = outer->sq_flags;
	}

	/*
	 * sq_onext is stable since sq_count has not yet been decreased.
	 * Reset the SQ_WRITER flags in all syncqs.
	 * After dropping SQ_WRITER on the outer syncq we empty all the
	 * inner syncqs.
	 */
	drain_needed = 0;
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
		drain_needed += dropsq(sq, SQ_WRITER);
	ASSERT(!(outer->sq_flags & SQ_QUEUED));
	flags &= ~SQ_WRITER;
	if (drain_needed) {
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
			emptysq(sq);
		mutex_enter(SQLOCK(outer));
		flags = outer->sq_flags;
	}

	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&outer->sq_wait);
	}
	outer->sq_flags = flags;
	ASSERT(outer->sq_count > 0);
	outer->sq_count--;
	mutex_exit(SQLOCK(outer));
}
/*
 * Add another syncq to an outer perimeter.
 * Block out all other access to the outer perimeter while it is being
 * changed using blocksq.
 * Assumes that the caller has *not* done an outer_enter.
 *
 * Vulnerable to starvation in blocksq.
 */
static void
outer_insert(syncq_t *outer, syncq_t *sq)
{
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL);	/* Can't be in an outer perimeter */

	/* Get exclusive access to the outer perimeter list */
	blocksq(outer, SQ_BLOCKED, 0);
	ASSERT(outer->sq_flags & SQ_BLOCKED);
	ASSERT(!(outer->sq_flags & SQ_WRITER));

	mutex_enter(SQLOCK(sq));
	sq->sq_outer = outer;
	outer->sq_onext->sq_oprev = sq;
	sq->sq_onext = outer->sq_onext;
	outer->sq_onext = sq;
	sq->sq_oprev = outer;
	mutex_exit(SQLOCK(sq));
	unblocksq(outer, SQ_BLOCKED, 1);
}
/*
 * Remove a syncq from an outer perimeter.
 * Block out all other access to the outer perimeter while it is being
 * changed using blocksq.
 * Assumes that the caller has *not* done an outer_enter.
 *
 * Vulnerable to starvation in blocksq.
 */
static void
outer_remove(syncq_t *outer, syncq_t *sq)
{
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(sq->sq_outer == outer);

	/* Get exclusive access to the outer perimeter list */
	blocksq(outer, SQ_BLOCKED, 0);
	ASSERT(outer->sq_flags & SQ_BLOCKED);
	ASSERT(!(outer->sq_flags & SQ_WRITER));

	mutex_enter(SQLOCK(sq));
	sq->sq_outer = NULL;
	sq->sq_onext->sq_oprev = sq->sq_oprev;
	sq->sq_oprev->sq_onext = sq->sq_onext;
	sq->sq_oprev = sq->sq_onext = NULL;
	mutex_exit(SQLOCK(sq));
	unblocksq(outer, SQ_BLOCKED, 1);
}
/*
 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
 * If this is the first callback for this outer perimeter then add
 * this outer perimeter to the list of outer perimeters that
 * the qwriter_outer_thread will process.
 *
 * Increments sq_count in the outer syncq to prevent the membership
 * of the outer perimeter (in terms of inner syncqs) from changing while
 * the callback is pending.
 */
static void
queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
{
	ASSERT(MUTEX_HELD(SQLOCK(outer)));

	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	outer->sq_count++;	/* Decremented when dequeued */
	ASSERT(outer->sq_count != 0);	/* Wraparound */
	if (outer->sq_evhead == NULL) {
		/* First message. */
		outer->sq_evhead = outer->sq_evtail = mp;
		outer->sq_flags |= SQ_EVENTS;
		mutex_exit(SQLOCK(outer));
		STRSTAT(qwr_outer);
		(void) taskq_dispatch(streams_taskq,
		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
	} else {
		ASSERT(outer->sq_flags & SQ_EVENTS);
		outer->sq_evtail->b_next = mp;
		outer->sq_evtail = mp;
		mutex_exit(SQLOCK(outer));
	}
}
/*
 * Try and upgrade to write access at the outer perimeter. If this can
 * not be done without blocking then queue the callback to be done
 * by the qwriter_outer_thread.
 *
 * This routine can only be called from put or service procedures plus
 * asynchronous callback routines that have properly entered the queue (with
 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
 * associated with q.
 */
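/*
 * Illustrative caller (sketch, assuming the standard qwriter(9F) entry
 * point): a put procedure that needs exclusive access to the outer
 * perimeter defers the work with
 *
 *	qwriter(q, mp, my_outer_func, PERIM_OUTER);
 *
 * where my_outer_func is a hypothetical callback; the PERIM_OUTER case
 * is what lands in qwriter_outer() below.
 */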
void
qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
{
	syncq_t	*osq, *sq, *outer;
	int	failed;
	uint16_t flags;

	osq = q->q_syncq;
	outer = osq->sq_outer;
	if (outer == NULL)
		panic("qwriter(PERIM_OUTER): no outer perimeter");
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);

	mutex_enter(SQLOCK(outer));
	flags = outer->sq_flags;
	/*
	 * If some thread is traversing sq_next, or if we are blocked by
	 * outer_insert or outer_remove, or if we already have queued
	 * callbacks, then queue this callback for later processing.
	 *
	 * Also queue the qwriter for an interrupt thread in order
	 * to reduce the time spent running at high IPL.
	 */
	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
		/*
		 * Queue the become_writer request.
		 * The queueing is atomic under SQLOCK(outer) in order
		 * to synchronize with outer_exit.
		 * queue_writer will drop the outer SQLOCK
		 */
		if (flags & SQ_BLOCKED) {
			/* Must set SQ_WRITER on inner perimeter */
			mutex_enter(SQLOCK(osq));
			osq->sq_flags |= SQ_WRITER;
			mutex_exit(SQLOCK(osq));
		} else {
			if (!(flags & SQ_WRITER)) {
				/*
				 * The outer could have been SQ_BLOCKED thus
				 * SQ_WRITER might not be set on the inner.
				 */
				mutex_enter(SQLOCK(osq));
				osq->sq_flags |= SQ_WRITER;
				mutex_exit(SQLOCK(osq));
			}
			ASSERT(osq->sq_flags & SQ_WRITER);
		}
		queue_writer(outer, func, q, mp);
		return;
	}
	/*
	 * We are half-way to exclusive access to the outer perimeter.
	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
	 * while the inner syncqs are traversed.
	 */
	outer->sq_count++;
	ASSERT(outer->sq_count != 0);	/* wraparound */
	flags |= SQ_WRITER;
	/*
	 * Check if we can run the function immediately. Mark all
	 * syncqs with the writer flag to prevent new entries into
	 * put and service procedures.
	 *
	 * Set SQ_WRITER on all the inner syncqs while holding
	 * the SQLOCK on the outer syncq. This ensures that the changing
	 * of SQ_WRITER is atomic under the outer SQLOCK.
	 */
	failed = 0;
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
		uint16_t count;
		uint_t	maxcnt = (sq == osq) ? 1 : 0;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (sq->sq_count > maxcnt)
			failed = 1;
		sq->sq_flags |= SQ_WRITER;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
	}
	if (failed) {
		/*
		 * Some other thread has a read claim on the outer perimeter.
		 * Queue the callback for deferred processing.
		 *
		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
		 * so that other qwriter(OUTER) calls will queue their
		 * callbacks as well. queue_writer increments sq_count so we
		 * decrement to compensate for our increment.
		 *
		 * Dropping SQ_WRITER enables the writer thread to work
		 * on this outer perimeter.
		 */
		outer->sq_flags = flags;
		queue_writer(outer, func, q, mp);
		/* queue_writer dropped the lock */
		mutex_enter(SQLOCK(outer));
		ASSERT(outer->sq_count > 0);
		outer->sq_count--;
		ASSERT(outer->sq_flags & SQ_WRITER);
		flags = outer->sq_flags;
		flags &= ~SQ_WRITER;
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&outer->sq_wait);
		}
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
		return;
	} else {
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
	}

	/* Can run it immediately */
	(*func)(q, mp);

	outer_exit(outer);
}
/*
 * Dequeue all writer callbacks from the outer perimeter and run them.
 */
static void
write_now(syncq_t *outer)
{
	mblk_t		*mp;
	queue_t		*q;
	void	(*func)();

	ASSERT(MUTEX_HELD(SQLOCK(outer)));
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	while ((mp = outer->sq_evhead) != NULL) {
		/*
		 * queues cannot be placed on the queuelist on the outer
		 * perimeter.
		 */
		ASSERT(!(outer->sq_flags & SQ_MESSAGES));
		ASSERT((outer->sq_flags & SQ_EVENTS));

		outer->sq_evhead = mp->b_next;
		if (outer->sq_evhead == NULL) {
			outer->sq_evtail = NULL;
			outer->sq_flags &= ~SQ_EVENTS;
		}
		ASSERT(outer->sq_count != 0);
		outer->sq_count--;	/* Incremented when enqueued. */
		mutex_exit(SQLOCK(outer));
		/*
		 * Drop the message if the queue is closing.
		 * Make sure that the queue is "claimed" when the callback
		 * is run in order to satisfy various ASSERTs.
		 */
		q = mp->b_queue;
		func = (void (*)())mp->b_prev;
		ASSERT(func != NULL);
		mp->b_next = mp->b_prev = NULL;
		if (q->q_flag & QWCLOSE) {
			freemsg(mp);
		} else {
			claimq(q);
			(*func)(q, mp);
			releaseq(q);
		}
		mutex_enter(SQLOCK(outer));
	}
	ASSERT(MUTEX_HELD(SQLOCK(outer)));
}
/*
 * The list of messages on the inner syncq is effectively hashed
 * by destination queue. These destination queues are doubly
 * linked lists (hopefully) in priority order. Messages are then
 * put on the queue referenced by the q_sqhead/q_sqtail elements.
 * Additional messages are linked together by the b_next/b_prev
 * elements in the mblk, with (similar to putq()) the first message
 * having a NULL b_prev and the last message having a NULL b_next.
 *
 * Events, such as qwriter callbacks, are put onto a list in FIFO
 * order referenced by sq_evhead, and sq_evtail. This is a singly
 * linked list, and messages here MUST be processed in the order queued.
 */
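/*
 * Illustrative layout (editorial sketch of the structure described above):
 *
 *	sq_head -> q1 <-> q2 <-> q3		(q_sqnext/q_sqprev, priority order)
 *	           |
 *	           q_sqhead -> m1 -> m2		(b_next chain per queue)
 *
 *	sq_evhead -> e1 -> e2 -> NULL		(FIFO event list, b_next)
 */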
/*
 * Run the events on the syncq event list (sq_evhead).
 * Assumes there is only one claim on the syncq, it is
 * already exclusive (SQ_EXCL set), and the SQLOCK held.
 * Messages here are processed in order, with the SQ_EXCL bit
 * held all the way through till the last message is processed.
 */
static void
sq_run_events(syncq_t *sq)
{
	mblk_t		*bp;
	queue_t		*qp;
	uint16_t	flags = sq->sq_flags;
	void		(*func)();

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	ASSERT(flags & SQ_EXCL);
	ASSERT(sq->sq_count == 1);

	/*
	 * We need to process all of the events on this list. It
	 * is possible that new events will be added while we are
	 * away processing a callback, so on every loop, we start
	 * back at the beginning of the list.
	 */
	/*
	 * We have to reaccess sq_evhead since there is a
	 * possibility of a new entry while we were running
	 * the callback.
	 */
	for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
		ASSERT(bp->b_queue->q_syncq == sq);
		ASSERT(sq->sq_flags & SQ_EVENTS);

		qp = bp->b_queue;
		func = (void (*)())bp->b_prev;
		ASSERT(func != NULL);

		/*
		 * Messages from the event queue must be taken off in
		 * FIFO order.
		 */
		ASSERT(sq->sq_evhead == bp);
		sq->sq_evhead = bp->b_next;

		if (bp->b_next == NULL) {
			/* Deleting last */
			ASSERT(sq->sq_evtail == bp);
			sq->sq_evtail = NULL;
			sq->sq_flags &= ~SQ_EVENTS;
		}
		bp->b_prev = bp->b_next = NULL;
		ASSERT(bp->b_datap->db_ref != 0);

		mutex_exit(SQLOCK(sq));

		(*func)(qp, bp);

		mutex_enter(SQLOCK(sq));
		/*
		 * re-read the flags, since they could have changed.
		 */
		flags = sq->sq_flags;
		ASSERT(flags & SQ_EXCL);
	}
	ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
	ASSERT(!(sq->sq_flags & SQ_EVENTS));

	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	if (flags & SQ_WANTEXWAKEUP) {
		flags &= ~SQ_WANTEXWAKEUP;
		cv_broadcast(&sq->sq_exitwait);
	}
	sq->sq_flags = flags;
}
/*
 * Put messages on the event list.
 * If we can go exclusive now, do so and process the event list, otherwise
 * let the last claim service this list (or wake the sqthread).
 * This procedure assumes SQLOCK is held. To run the event list, it
 * must be called with no claims.
 */
static void
sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
{
	uint16_t count;

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT(func != NULL);

	/*
	 * This is a callback. Add it to the list of callbacks
	 * and see about upgrading.
	 */
	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	if (sq->sq_evhead == NULL) {
		sq->sq_evhead = sq->sq_evtail = mp;
		sq->sq_flags |= SQ_EVENTS;
	} else {
		ASSERT(sq->sq_evtail != NULL);
		ASSERT(sq->sq_evtail->b_next == NULL);
		ASSERT(sq->sq_flags & SQ_EVENTS);
		sq->sq_evtail->b_next = mp;
		sq->sq_evtail = mp;
	}
	/*
	 * We have set SQ_EVENTS, so threads will have to
	 * unwind out of the perimeter, and new entries will
	 * not grab a putlock. But we still need to know
	 * how many threads have already made a claim to the
	 * syncq, so grab the putlocks, and sum the counts.
	 * If there are no claims on the syncq, we can upgrade
	 * to exclusive, and run the event list.
	 * NOTE: We hold the SQLOCK, so we can just grab the
	 * putlocks.
	 */
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	/*
	 * We have no claim, so we need to check if there
	 * are no others, then we can upgrade.
	 */
	/*
	 * There are currently no claims on
	 * the syncq by this thread (at least on this entry). The thread who has
	 * the claim should drain syncq.
	 */
	if (count > 0) {
		/*
		 * Can't upgrade - other threads inside.
		 */
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
		return;
	}
	/*
	 * Need to set SQ_EXCL and make a claim on the syncq.
	 */
	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
	sq->sq_flags |= SQ_EXCL;
	ASSERT(sq->sq_count == 0);
	sq->sq_count++;
	SQ_PUTLOCKS_EXIT(sq);

	/* Process the events list */
	sq_run_events(sq);

	/*
	 * Release our claim...
	 */
	sq->sq_count--;

	/*
	 * And release SQ_EXCL.
	 * We don't need to acquire the putlocks to release
	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
	 */
	sq->sq_flags &= ~SQ_EXCL;

	/*
	 * sq_run_events should have released SQ_EXCL
	 */
	ASSERT(!(sq->sq_flags & SQ_EXCL));

	/*
	 * If anything happened while we were running the
	 * events (or was there before), we need to process
	 * them now. We shouldn't be exclusive since we
	 * released the perimeter above (plus, we asserted
	 * for it).
	 */
	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
		drain_syncq(sq);
	else
		mutex_exit(SQLOCK(sq));
}
/*
 * Perform delayed processing. The caller has to make sure that it is safe
 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
 * set).
 *
 * Assume that the caller has NO claims on the syncq. However, a claim
 * on the syncq does not indicate that a thread is draining the syncq.
 * There may be more claims on the syncq than there are threads draining
 * (i.e. #_threads_draining <= sq_count).
 *
 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
 * in order to preserve qwriter(OUTER) ordering constraints.
 *
 * sq_putcount only needs to be checked when dispatching the queued
 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
 */
void
drain_syncq(syncq_t *sq)
{
	queue_t		*qp;
	uint16_t	count;
	uint16_t	type = sq->sq_type;
	uint16_t	flags = sq->sq_flags;
	boolean_t	bg_service = sq->sq_svcflags & SQ_SERVICE;

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
	    "drain_syncq start:%p", sq);
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	/*
	 * Drop SQ_SERVICE flag.
	 */
	if (bg_service)
		sq->sq_svcflags &= ~SQ_SERVICE;

	/*
	 * If SQ_EXCL is set, someone else is processing this syncq - let them
	 * finish the job.
	 */
	if (flags & SQ_EXCL) {
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * This routine can be called by a background thread if
	 * it was scheduled by a hi-priority thread. So, if there are
	 * no messages queued, return (remember, we have the SQLOCK,
	 * and it cannot change until we release it). Also wake up any waiters.
	 */
	if (!(flags & SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}
		sq->sq_flags = flags;
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * If this is not a concurrent put perimeter, we need to
	 * become exclusive to drain. Also, if not CIPUT, we would
	 * not have acquired a putlock, so we don't need to check
	 * the putcounts. If not entering with a claim, we test
	 * for sq_count == 0.
	 */
	if (!(type & SQ_CIPUT)) {
		if (sq->sq_count > 1) {
			if (bg_service) {
				ASSERT(sq->sq_servcount != 0);
				sq->sq_servcount--;
			}
			mutex_exit(SQLOCK(sq));
			return;
		}
		sq->sq_flags |= SQ_EXCL;
	}

	/*
	 * This is where we make a claim to the syncq.
	 * This can either be done by incrementing a putlock, or
	 * the sq_count. But since we already have the SQLOCK
	 * here, we just bump the sq_count.
	 *
	 * Note that after we make a claim, we need to let the code
	 * fall through to the end of this routine to clean itself
	 * up. A return in the while loop will put the syncq in a
	 * very bad state.
	 */
	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* wraparound */

	while ((flags = sq->sq_flags) & SQ_QUEUED) {
		/*
		 * If we are told to stayaway or went exclusive,
		 * we are done.
		 */
		if (flags & (SQ_STAYAWAY)) {
			break;
		}

		/*
		 * If there are events to run, do so.
		 * We have one claim to the syncq, so if there are
		 * more than one, other threads are running.
		 */
		if (sq->sq_evhead != NULL) {
			ASSERT(sq->sq_flags & SQ_EVENTS);

			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
			if (count > 1) {
				SQ_PUTLOCKS_EXIT(sq);
				/* Can't upgrade - other threads inside */
				break;
			}
			ASSERT((flags & SQ_EXCL) == 0);
			sq->sq_flags = flags | SQ_EXCL;
			SQ_PUTLOCKS_EXIT(sq);
			/*
			 * we have the only claim, run the events,
			 * sq_run_events will clear the SQ_EXCL flag.
			 */
			sq_run_events(sq);

			/*
			 * If this is a CIPUT perimeter, we need
			 * to drop the SQ_EXCL flag so we can properly
			 * continue draining the syncq.
			 */
			if (type & SQ_CIPUT) {
				ASSERT(sq->sq_flags & SQ_EXCL);
				sq->sq_flags &= ~SQ_EXCL;
			}

			/*
			 * And go back to the beginning just in case
			 * anything changed while we were away.
			 */
			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
			continue;
		}

		ASSERT(sq->sq_evhead == NULL);
		ASSERT(!(sq->sq_flags & SQ_EVENTS));

		/*
		 * Find the queue that is not draining.
		 *
		 * q_draining is protected by QLOCK which we do not hold.
		 * But if it was set, then a thread was draining, and if it gets
		 * cleared, then it was because the thread has successfully
		 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
		 * state to happen, a thread needs the SQLOCK which we hold, and
		 * if there was such a flag, we would have already seen it.
		 */
		for (qp = sq->sq_head;
		    qp != NULL && (qp->q_draining ||
		    (qp->q_sqflags & Q_SQDRAINING));
		    qp = qp->q_sqnext)
			;

		if (qp == NULL)
			break;

		/*
		 * We have a queue to work on, and we hold the
		 * SQLOCK and one claim, call qdrain_syncq.
		 * This means we need to release the SQLOCK and
		 * acquire the QLOCK (OK since we have a claim).
		 * Note that qdrain_syncq will actually dequeue
		 * this queue from the sq_head list when it is
		 * convinced all the work is done and release
		 * the QLOCK before returning.
		 */
		qp->q_sqflags |= Q_SQDRAINING;
		mutex_exit(SQLOCK(sq));
		mutex_enter(QLOCK(qp));
		qdrain_syncq(sq, qp);
		mutex_enter(SQLOCK(sq));

		/* The queue is drained */
		ASSERT(qp->q_sqflags & Q_SQDRAINING);
		qp->q_sqflags &= ~Q_SQDRAINING;
		/*
		 * NOTE: After this point qp should not be used since it may be
		 * closed.
		 */
	}

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	flags = sq->sq_flags;

	/*
	 * sq->sq_head cannot change because we hold the
	 * sqlock. However, a thread CAN decide that it is no longer
	 * going to drain that queue. However, this should be due to
	 * a GOAWAY state, and we should see that here.
	 *
	 * This loop is not very efficient. One solution may be adding a second
	 * pointer to the "draining" queue, but it is difficult to do when
	 * queues are inserted in the middle due to priority ordering. Another
	 * possibility is to yank the queue out of the sq list and put it onto
	 * the "draining list" and then put it back if it can't be drained.
	 */
	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
	    (type & SQ_CI) || sq->sq_head->q_draining);

	/* Drop SQ_EXCL for non-CIPUT perimeters */
	if (!(type & SQ_CIPUT))
		flags &= ~SQ_EXCL;
	ASSERT((flags & SQ_EXCL) == 0);

	/* Wake up any waiters. */
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	if (flags & SQ_WANTEXWAKEUP) {
		flags &= ~SQ_WANTEXWAKEUP;
		cv_broadcast(&sq->sq_exitwait);
	}
	sq->sq_flags = flags;

	ASSERT(sq->sq_count != 0);
	/* Release our claim. */
	sq->sq_count--;

	if (bg_service) {
		ASSERT(sq->sq_servcount != 0);
		sq->sq_servcount--;
	}

	mutex_exit(SQLOCK(sq));

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
	    "drain_syncq end:%p", sq);
}
6617 * qdrain_syncq can be called (currently) from only one of two places:
6619 * putnext (or some variation of it).
6623 * If called from drain_syncq, we found it in the list of queues needing
6624 * service, so there is work to be done (or it wouldn't be in the list).
6626 * If called from some putnext variation, it was because the
6627 * perimeter is open, but messages are blocking a putnext and
6628 * there is not a thread working on it. Now a thread could start
6629 * working on it while we are getting ready to do so ourself, but
6630 * the thread would set the q_draining flag, and we can spin out.
6632 * As for qwait(_sig), I think I shall let it continue to call
6633 * drain_syncq directly (after all, it will get here eventually).
6635 * qdrain_syncq has to terminate when:
6636 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6637 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6643 * Will release QLOCK before returning
6646 qdrain_syncq(syncq_t
*sq
, queue_t
*q
)
6653 TRACE_1(TR_FAC_STREAMS_FR
, TR_DRAIN_SYNCQ_START
,
6654 "drain_syncq start:%p", sq
);
6655 ASSERT(q
->q_syncq
== sq
);
6656 ASSERT(MUTEX_HELD(QLOCK(q
)));
6657 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq
)));
6659 * For non-CIPUT perimeters, we should be called with the exclusive bit
6660 * set already. For CIPUT perimeters, we will be doing a concurrent
6661 * drain, so it better not be set.
6663 ASSERT((sq
->sq_flags
& (SQ_EXCL
|SQ_CIPUT
)));
6664 ASSERT(!((sq
->sq_type
& SQ_CIPUT
) && (sq
->sq_flags
& SQ_EXCL
)));
6665 ASSERT((sq
->sq_type
& SQ_CIPUT
) || (sq
->sq_flags
& SQ_EXCL
));
6667 * All outer pointers are set, or none of them are
6669 ASSERT((sq
->sq_outer
== NULL
&& sq
->sq_onext
== NULL
&&
6670 sq
->sq_oprev
== NULL
) ||
6671 (sq
->sq_outer
!= NULL
&& sq
->sq_onext
!= NULL
&&
6672 sq
->sq_oprev
!= NULL
));
6674 count
= sq
->sq_count
;
6676 * This is OK without the putlocks, because we have one
6677 * claim either from the sq_count, or a putcount. We could
6678 * get an erroneous value from other counts, but ours won't
6679 * change, so one way or another, we will have at least a
6682 SUM_SQ_PUTCOUNTS(sq
, count
);
6687 * The first thing to do is find out if a thread is already draining
6688 * this queue. If so, we are done, just return.
6690 if (q
->q_draining
) {
6691 mutex_exit(QLOCK(q
));
6696 * If the perimeter is exclusive, there is nothing we can do right now,
 * go away. Note that there is nothing to prevent this case from
 * changing right after this check, but the spin-out will catch it.
 */

	/* Tell other threads that we are draining this queue */
	q->q_draining = 1;	/* Protected by QLOCK */

	/*
	 * If there is nothing to do, clear QFULL as necessary. This caters for
	 * the case where an empty queue was enqueued onto the syncq.
	 */
	if (q->q_sqhead == NULL) {
		ASSERT(q->q_syncqmsgs == 0);
		mutex_exit(QLOCK(q));
		clr_qfull(q);
		mutex_enter(QLOCK(q));
	}

	/*
	 * Note that q_sqhead must be re-checked here in case another message
	 * was enqueued whilst QLOCK was dropped during the call to clr_qfull.
	 */
	for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
		/*
		 * Because we can enter this routine just because a putnext is
		 * blocked, we need to spin out if the perimeter wants to go
		 * exclusive as well as just blocked. We need to spin out also
		 * if events are queued on the syncq.
		 * Don't check for SQ_EXCL, because non-CIPUT perimeters would
		 * set it, and it can't become exclusive while we hold a claim.
		 */
		if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))
			break;

#ifdef DEBUG
		/*
		 * Since we are in qdrain_syncq, we already know the queue,
		 * but for sanity, we want to check this against the qp that
		 * was passed in by bp->b_queue.
		 */
		ASSERT(bp->b_queue == q);
		ASSERT(bp->b_queue->q_syncq == sq);
		bp->b_queue = NULL;

		/*
		 * We would have the following check in the DEBUG code:
		 *
		 * if (bp->b_prev != NULL) {
		 *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
		 * }
		 *
		 * This can't be done, however, since IP modifies the qinfo
		 * structure at run-time (switching between IPv4 qinfo and IPv6
		 * qinfo), invalidating the check.
		 * So the assignment to func is left here, but the ASSERT
		 * itself is removed until the whole issue is resolved.
		 */
#endif
		ASSERT(q->q_sqhead == bp);
		q->q_sqhead = bp->b_next;
		bp->b_prev = bp->b_next = NULL;
		ASSERT(q->q_syncqmsgs > 0);
		mutex_exit(QLOCK(q));

		ASSERT(bp->b_datap->db_ref != 0);

		(void) (*q->q_qinfo->qi_putp)(q, bp);

		mutex_enter(QLOCK(q));

		/*
		 * q_syncqmsgs should only be decremented after executing the
		 * put procedure to avoid message re-ordering. This is due to an
		 * optimisation in putnext() which can call the put procedure
		 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED
		 * being set).
		 *
		 * We also need to clear QFULL in the next service procedure
		 * queue if this is the last message destined for that queue.
		 *
		 * It would make better sense to have some sort of tunable for
		 * the low water mark, but these semantics are not yet defined.
		 * So, alas, we use a constant.
		 */
		if (--q->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(q));
			clr_qfull(q);
			mutex_enter(QLOCK(q));
		}

		/*
		 * Always clear SQ_EXCL when CIPUT in order to handle
		 * qwriter(INNER). The putp() can call qwriter and get exclusive
		 * access IFF this is the only claim. So, we need to test for
		 * this possibility, acquire the mutex and clear the bit.
		 */
		if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
			mutex_enter(SQLOCK(sq));
			sq->sq_flags &= ~SQ_EXCL;
			mutex_exit(SQLOCK(sq));
		}
	}

	/*
	 * We should either have no messages on this queue, or we were told to
	 * goaway by a waiter (which we will wake up at the end of this
	 * function).
	 */
	ASSERT((q->q_sqhead == NULL) ||
	    (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
	ASSERT(MUTEX_HELD(QLOCK(q)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/* Remove the q from the syncq list if all the messages are drained. */
	if (q->q_sqhead == NULL) {
		ASSERT(q->q_syncqmsgs == 0);
		mutex_enter(SQLOCK(sq));
		if (q->q_sqflags & Q_SQQUEUED)
			SQRM_Q(sq, q);
		mutex_exit(SQLOCK(sq));
		/*
		 * Since the queue is removed from the list, reset its priority.
		 */
		q->q_spri = 0;
	}

	/*
	 * Remember, the q_draining flag is used to let another thread know
	 * that there is a thread currently draining the messages for a queue.
	 * Since we are now done with this queue (even if there may be messages
	 * still there), we need to clear this flag so some thread will work on
	 * it if needed.
	 */
	ASSERT(q->q_draining);
	q->q_draining = 0;

	/* Called with a claim, so OK to drop all locks. */
	mutex_exit(QLOCK(q));

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
	    "drain_syncq end:%p", sq);
}
/* END OF QDRAIN_SYNCQ */
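
/*
 * Illustrative sketch (not in the original source): why q_syncqmsgs is
 * only decremented after the put procedure has run.  Assume one message
 * queued (q_syncqmsgs == 1) and a concurrent putnext():
 *
 *	drainer				concurrent putnext()
 *	-------				--------------------
 *	dequeue bp, q_syncqmsgs == 1
 *	(*qi_putp)(q, bp)		sees q_syncqmsgs != 0, so it
 *					enqueues behind bp (no reorder)
 *	--q_syncqmsgs == 0
 *
 * If the count were decremented before calling putp, the concurrent
 * putnext() could see zero and invoke the put procedure directly,
 * delivering its message ahead of bp.
 */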
/*
 * This is the mate to qdrain_syncq, except that it is putting the message onto
 * the queue instead of draining. Since the message is destined for the queue
 * that is selected, there is no need to identify the function because the
 * message is intended for the put routine for the queue. For debug kernels,
 * this routine will do it anyway just in case.
 *
 * After the message is enqueued on the syncq, it calls putnext_tail()
 * which will schedule a background thread to actually process the message.
 *
 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
 * SQLOCK(sq) and QLOCK(q) are not held.
 */
void
qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
{
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
	ASSERT(sq->sq_count > 0);
	ASSERT(q->q_syncq == sq);
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	mutex_enter(QLOCK(q));

#ifdef DEBUG
	/*
	 * This is used for debug in the qfill_syncq/qdrain_syncq case
	 * to trace the queue that the message is intended for. Note
	 * that the original use was to identify the queue and function
	 * to call on the drain. In the new syncq, we have the context
	 * of the queue that we are draining, so call its putproc and
	 * don't rely on the saved values. But for debug this is still
	 * useful information.
	 */
	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
	mp->b_queue = q;
#endif
	ASSERT(q->q_syncq == sq);
	/*
	 * Enqueue the message on the list.
	 * SQPUT_MP() accesses q_syncqmsgs.  We are already holding QLOCK to
	 * protect it.  So it's ok to acquire SQLOCK after SQPUT_MP().
	 */
	SQPUT_MP(q, mp);
	mutex_enter(SQLOCK(sq));

	/*
	 * And queue on syncq for scheduling, if not already queued.
	 * Note that we need the SQLOCK for this, and for testing flags
	 * at the end to see if we will drain.  So grab it now, and
	 * release it before we call qdrain_syncq or return.
	 */
	if (!(q->q_sqflags & Q_SQQUEUED)) {
		q->q_spri = curthread->t_pri;
		SQPUT_Q(sq, q);
	}
#ifdef DEBUG
	else {
		/*
		 * All of these conditions MUST be true!
		 */
		ASSERT(sq->sq_tail != NULL);
		if (sq->sq_tail == sq->sq_head) {
			ASSERT((q->q_sqprev == NULL) &&
			    (q->q_sqnext == NULL));
		} else {
			ASSERT((q->q_sqprev != NULL) ||
			    (q->q_sqnext != NULL));
		}
		ASSERT(sq->sq_flags & SQ_QUEUED);
		ASSERT(q->q_syncqmsgs != 0);
		ASSERT(q->q_sqflags & Q_SQQUEUED);
	}
#endif
	mutex_exit(QLOCK(q));
	/*
	 * SQLOCK is still held, so sq_count can be safely decremented.
	 */
	sq->sq_count--;

	putnext_tail(sq, q, 0);
	/* Should not reference sq or q after this point. */
}
/*  End of qfill_syncq  */
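
/*
 * Illustrative call sequence (not in the original source): a producer
 * that cannot enter the perimeter defers the message, and a background
 * thread completes the delivery later:
 *
 *	qfill_syncq(sq, q, mp);		enqueue mp, SQPUT_Q(sq, q)
 *	  putnext_tail(sq, q, 0);	schedule background service
 *	...
 *	qdrain_syncq(sq, q);		background: (*qi_putp)(q, mp)
 */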
/*
 * Remove all messages from a syncq (if qp is NULL) or remove all messages
 * that would be put into qp by drain_syncq.
 * Used when deleting the syncq (qp == NULL) or when detaching
 * a queue (qp != NULL).
 * Return non-zero if one or more messages were freed.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains
 * when sq_putlocks are used.
 *
 * NOTE: This function assumes that it is called from the close() context and
 * that all the queues in the syncq are going away. For this reason it doesn't
 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
 * currently valid, but it is useful to rethink this function to behave
 * properly in other cases.
 */
int
flush_syncq(syncq_t *sq, queue_t *qp)
{
	mblk_t		*bp, *mp_head, *mp_next, *mp_prev;
	queue_t		*q;
	int		ret = 0;

	mutex_enter(SQLOCK(sq));

	/*
	 * Before we leave, we need to make sure there are no
	 * events listed for this queue.  All events for this queue
	 * will just be freed.
	 */
	if (qp != NULL && sq->sq_evhead != NULL) {
		ASSERT(sq->sq_flags & SQ_EVENTS);

		mp_prev = NULL;
		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
			mp_next = bp->b_next;
			if (bp->b_queue == qp) {
				/* Delete this message */
				if (mp_prev != NULL) {
					mp_prev->b_next = mp_next;
					/*
					 * Update sq_evtail if the last element
					 * is removed.
					 */
					if (bp == sq->sq_evtail) {
						ASSERT(mp_next == NULL);
						sq->sq_evtail = mp_prev;
					}
				} else
					sq->sq_evhead = mp_next;
				if (sq->sq_evhead == NULL)
					sq->sq_flags &= ~SQ_EVENTS;
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				ret++;
			} else {
				mp_prev = bp;
			}
		}
	}

	/*
	 * Walk sq_head and:
	 *	- match qp if qp is set, remove its messages
	 *	- all if qp is not set
	 */
	q = sq->sq_head;
	while (q != NULL) {
		ASSERT(q->q_syncq == sq);
		if ((qp == NULL) || (qp == q)) {
			/*
			 * Yank the messages as a list off the queue
			 */
			mp_head = q->q_sqhead;
			/*
			 * We do not have QLOCK(q) here (which is safe due to
			 * assumptions mentioned above). To obtain the lock we
			 * need to release SQLOCK which may allow lots of things
			 * to change upon us. This place requires more analysis.
			 */
			q->q_sqhead = q->q_sqtail = NULL;
			ASSERT(mp_head->b_queue &&
			    mp_head->b_queue->q_syncq == sq);

			/*
			 * Free each of the messages.
			 */
			for (bp = mp_head; bp != NULL; bp = mp_next) {
				mp_next = bp->b_next;
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				ret++;
			}
			/*
			 * Now remove the queue from the syncq.
			 */
			ASSERT(q->q_sqflags & Q_SQQUEUED);
			SQRM_Q(sq, q);
			q->q_spri = 0;
			q->q_syncqmsgs = 0;

			/*
			 * If qp was specified, we are done with it and are
			 * going to drop SQLOCK(sq) and return. We wakeup syncq
			 * waiters while we still have the SQLOCK.
			 */
			if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
				sq->sq_flags &= ~SQ_WANTWAKEUP;
				cv_broadcast(&sq->sq_wait);
			}
			/* Drop SQLOCK across clr_qfull */
			mutex_exit(SQLOCK(sq));

			/*
			 * We avoid doing the test that drain_syncq does and
			 * unconditionally clear qfull for every flushed
			 * message. Since flush_syncq is only called during
			 * close this should not be a problem.
			 */
			clr_qfull(q);
			if (qp != NULL) {
				return (ret);
			} else {
				mutex_enter(SQLOCK(sq));
				/*
				 * The head was removed by SQRM_Q above.
				 * reread the new head and flush it.
				 */
				q = sq->sq_head;
			}
		} else {
			q = q->q_sqnext;
		}
		ASSERT(MUTEX_HELD(SQLOCK(sq)));
	}

	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}

	mutex_exit(SQLOCK(sq));
	return (ret);
}
/*
 * Propagate all messages from a syncq to the next syncq that are associated
 * with the specified queue. If the queue is attached to a driver or if the
 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
 *
 * Assumes that the stream is strlock()'ed. We don't come here if there
 * are no messages to propagate.
 *
 * NOTE : If the queue is attached to a driver, all the messages are freed
 * as there is no point in propagating the messages from the driver syncq
 * to the closing stream head which will in turn get freed later.
 */
static int
propagate_syncq(queue_t *qp)
{
	mblk_t		*bp, *head, *tail, *prev, *next;
	syncq_t		*sq;
	queue_t		*nqp;
	syncq_t		*nsq;
	boolean_t	isdriver;
	int		moved = 0;
	uint16_t	flags;
	pri_t		priority = curthread->t_pri;
#ifdef DEBUG
	void		(*func)();
#endif

	sq = qp->q_syncq;
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	/* debug macro */
	SQ_PUTLOCKS_HELD(sq);
	/*
	 * As entersq() does not increment the sq_count for
	 * the write side, check sq_count for non-QPERQ
	 * perimeters alone.
	 */
	ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));

	/*
	 * propagate_syncq() can be called because of either messages on the
	 * queue syncq or because of events on the queue syncq. Do actual
	 * message propagations if there are any messages.
	 */
	if (qp->q_syncqmsgs) {
		isdriver = (qp->q_flag & QISDRV);

		if (!isdriver) {
			nqp = qp->q_next;
			nsq = nqp->q_syncq;
			ASSERT(MUTEX_HELD(SQLOCK(nsq)));
			/* debug macro */
			SQ_PUTLOCKS_HELD(nsq);
#ifdef DEBUG
			func = (void (*)())nqp->q_qinfo->qi_putp;
#endif
		}

		SQRM_Q(sq, qp);
		priority = MAX(qp->q_spri, priority);
		qp->q_spri = 0;
		head = qp->q_sqhead;
		tail = qp->q_sqtail;
		qp->q_sqhead = qp->q_sqtail = NULL;
		qp->q_syncqmsgs = 0;

		/*
		 * Walk the list of messages, and free them if this is a driver,
		 * otherwise reset the b_prev and b_queue value to the new putp.
		 * Afterward, we will just add the head to the end of the next
		 * syncq, and point the tail to the end of this one.
		 */

		for (bp = head; bp != NULL; bp = next) {
			next = bp->b_next;
			if (isdriver) {
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
				continue;
			}
			/* Change the q values for this message */
			bp->b_queue = nqp;
#ifdef DEBUG
			bp->b_prev = (mblk_t *)func;
#endif
			moved++;
		}
		/*
		 * Attach list of messages to the end of the new queue (if there
		 * is a list of messages).
		 */

		if (!isdriver && head != NULL) {
			ASSERT(tail != NULL);
			if (nqp->q_sqhead == NULL) {
				nqp->q_sqhead = head;
			} else {
				ASSERT(nqp->q_sqtail != NULL);
				nqp->q_sqtail->b_next = head;
			}
			nqp->q_sqtail = tail;
			/*
			 * When messages are moved from high priority queue to
			 * another queue, the destination queue priority is
			 * upgraded.
			 */

			if (priority > nqp->q_spri)
				nqp->q_spri = priority;

			SQPUT_Q(nsq, nqp);

			nqp->q_syncqmsgs += moved;
			ASSERT(nqp->q_syncqmsgs != 0);
		}
	}

	/*
	 * Before we leave, we need to make sure there are no
	 * events listed for this queue.  All events for this queue
	 * will just be freed.
	 */
	if (sq->sq_evhead != NULL) {
		ASSERT(sq->sq_flags & SQ_EVENTS);
		prev = NULL;
		for (bp = sq->sq_evhead; bp != NULL; bp = next) {
			next = bp->b_next;
			if (bp->b_queue == qp) {
				/* Delete this message */
				if (prev != NULL) {
					prev->b_next = next;
					/*
					 * Update sq_evtail if the last element
					 * is removed.
					 */
					if (bp == sq->sq_evtail) {
						ASSERT(next == NULL);
						sq->sq_evtail = prev;
					}
				} else
					sq->sq_evhead = next;
				if (sq->sq_evhead == NULL)
					sq->sq_flags &= ~SQ_EVENTS;
				bp->b_prev = bp->b_next = NULL;
				freemsg(bp);
			} else {
				prev = bp;
			}
		}
	}

	flags = sq->sq_flags;

	/* Wake up any waiter before leaving. */
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	sq->sq_flags = flags;

	return (moved);
}
/*
 * Try and upgrade to exclusive access at the inner perimeter. If this can
 * not be done without blocking then the request will be queued on the syncq
 * and drain_syncq will run it later.
 *
 * This routine can only be called from put or service procedures plus
 * asynchronous callback routines that have properly entered the queue (with
 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq
 * associated with q.
 */
void
qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
{
	syncq_t	*sq = q->q_syncq;
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	ASSERT(count >= 1);
	ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));

	if (count == 1) {
		/*
		 * Can upgrade. This case also handles nested qwriter calls
		 * (when the qwriter callback function calls qwriter). In that
		 * case SQ_EXCL is already set.
		 */
		sq->sq_flags |= SQ_EXCL;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
		(*func)(q, mp);
		/*
		 * Assumes that leavesq, putnext, and drain_syncq will reset
		 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
		 * until putnext, leavesq, or drain_syncq drops it.
		 * That way we handle nested qwriter(INNER) without dropping
		 * SQ_EXCL until the outermost qwriter callback routine is
		 * done.
		 */
		return;
	}
	SQ_PUTLOCKS_EXIT(sq);
	sqfill_events(sq, q, mp, func);
}
/*
 * Synchronous callback support functions
 */

/*
 * Allocate a callback parameter structure.
 * Assumes that caller initializes the flags and the id.
 * Acquires SQLOCK(sq) if non-NULL is returned.
 */
callbparams_t *
callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
{
	callbparams_t *cbp;
	size_t size = sizeof (callbparams_t);

	cbp = kmem_alloc(size, kmflags & ~KM_PANIC);

	/*
	 * Only try tryhard allocation if the caller is ready to panic.
	 * Otherwise just fail.
	 */
	if (cbp == NULL) {
		if (kmflags & KM_PANIC)
			cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
			    &size, kmflags);
		else
			return (NULL);
	}

	ASSERT(size >= sizeof (callbparams_t));
	cbp->cbp_size = size;
	cbp->cbp_sq = sq;
	cbp->cbp_func = func;
	cbp->cbp_arg = arg;
	mutex_enter(SQLOCK(sq));
	cbp->cbp_next = sq->sq_callbpend;
	sq->sq_callbpend = cbp;
	return (cbp);
}

void
callbparams_free(syncq_t *sq, callbparams_t *cbp)
{
	callbparams_t **pp, *p;

	ASSERT(MUTEX_HELD(SQLOCK(sq)));

	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
		if (p == cbp) {
			*pp = p->cbp_next;
			kmem_free(p, p->cbp_size);
			return;
		}
	}
	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
	    "callbparams_free: not found\n"));
}

void
callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
{
	callbparams_t **pp, *p;

	ASSERT(MUTEX_HELD(SQLOCK(sq)));

	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
		if (p->cbp_id == id && p->cbp_flags == flag) {
			*pp = p->cbp_next;
			kmem_free(p, p->cbp_size);
			return;
		}
	}
	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
	    "callbparams_free_id: not found\n"));
}
/*
 * Callback wrapper function used by once-only callbacks that can be
 * cancelled (qtimeout and qbufcall)
 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be
 * cancelled by the qun* functions.
 */
void
qcallbwrapper(void *arg)
{
	callbparams_t *cbp = arg;
	syncq_t	*sq;
	uint16_t count = 0;
	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
	uint16_t type;

	sq = cbp->cbp_sq;
	mutex_enter(SQLOCK(sq));
	type = sq->sq_type;
	if (!(type & SQ_CICB)) {
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		sq->sq_needexcl++;
		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
		waitflags |= SQ_MESSAGES;
	}
	/* Can not handle exclusive entry at outer perimeter */
	ASSERT(type & SQ_COCB);

	while ((sq->sq_flags & waitflags) ||
	    (!(type & SQ_CICB) && count != 0)) {
		if ((sq->sq_callbflags & cbp->cbp_flags) &&
		    (sq->sq_cancelid == cbp->cbp_id)) {
			/* timeout has been cancelled */
			sq->sq_callbflags |= SQ_CALLB_BYPASSED;
			callbparams_free(sq, cbp);
			if (!(type & SQ_CICB)) {
				ASSERT(sq->sq_needexcl > 0);
				sq->sq_needexcl--;
				if (sq->sq_needexcl == 0) {
					SQ_PUTCOUNT_SETFAST_LOCKED(sq);
				}
				SQ_PUTLOCKS_EXIT(sq);
			}
			mutex_exit(SQLOCK(sq));
			return;
		}
		sq->sq_flags |= SQ_WANTWAKEUP;
		if (!(type & SQ_CICB)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (!(type & SQ_CICB)) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
	}

	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	if (!(type & SQ_CICB)) {
		ASSERT(count == 0);
		sq->sq_flags |= SQ_EXCL;
		ASSERT(sq->sq_needexcl > 0);
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
		SQ_PUTLOCKS_EXIT(sq);
	}

	mutex_exit(SQLOCK(sq));

	cbp->cbp_func(cbp->cbp_arg);

	/*
	 * We drop the lock only for leavesq to re-acquire it.
	 * Possible optimization is inline of leavesq.
	 */
	mutex_enter(SQLOCK(sq));
	callbparams_free(sq, cbp);
	mutex_exit(SQLOCK(sq));
	leavesq(sq, SQ_CALLBACK);
}
/*
 * No need to grab sq_putlocks here. See comment in strsubr.h that
 * explains when sq_putlocks are used.
 *
 * sq_count (or one of the sq_putcounts) has already been
 * decremented by the caller, and if SQ_QUEUED, we need to call
 * drain_syncq (the global syncq drain).
 * If putnext_tail is called with the SQ_EXCL bit set, we are in
 * one of two states, non-CIPUT perimeter, and we need to clear
 * it, or we went exclusive in the put procedure.  In any case,
 * we want to clear the bit now, and it is probably easier to do
 * this at the beginning of this function (remember, we hold
 * the SQLOCK).  Lastly, if there are other messages queued
 * on the syncq (and not for our destination), enable the syncq
 * for background work.
 */
void
putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
{
	uint16_t	flags = sq->sq_flags;

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));

	/* Clear SQ_EXCL if set in passflags */
	if (passflags & SQ_EXCL) {
		flags &= ~SQ_EXCL;
	}
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	if (flags & SQ_WANTEXWAKEUP) {
		flags &= ~SQ_WANTEXWAKEUP;
		cv_broadcast(&sq->sq_exitwait);
	}
	sq->sq_flags = flags;

	/*
	 * We have cleared SQ_EXCL if we were asked to, and started
	 * the wakeup process for waiters.  If there are no writers
	 * then we need to drain the syncq if we were told to, or
	 * enable the background thread to do it.
	 */
	if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
		if ((passflags & SQ_QUEUED) ||
		    (sq->sq_svcflags & SQ_DISABLED)) {
			/* drain_syncq will take care of events in the list */
			drain_syncq(sq);
			return;
		} else if (flags & SQ_QUEUED) {
			sqenable(sq);
		}
	}
	/* Drop the SQLOCK on exit */
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
}
void
set_qend(queue_t *q)
{
	mutex_enter(QLOCK(q));
	if (!O_SAMESTR(q))
		q->q_flag |= QEND;
	else
		q->q_flag &= ~QEND;
	mutex_exit(QLOCK(q));
	q = _OTHERQ(q);
	mutex_enter(QLOCK(q));
	if (!O_SAMESTR(q))
		q->q_flag |= QEND;
	else
		q->q_flag &= ~QEND;
	mutex_exit(QLOCK(q));
}
/*
 * Set QFULL in next service procedure queue (that cares) if not already
 * set and if there are already more messages on the syncq than
 * sq_max_size.  If sq_max_size is 0, no flow control will be asserted on
 * any syncq.
 *
 * The fq here is the next queue with a service procedure.  This is where
 * we would fail canputnext, so this is where we need to set QFULL.
 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
 *
 * We already have QLOCK at this point. To avoid cross-locks with
 * freezestr() which grabs all QLOCKs and with strlock() which grabs both
 * SQLOCK and sd_reflock, we need to drop respective locks first.
 */
void
set_qfull(queue_t *q)
{
	queue_t		*fq = NULL;

	ASSERT(MUTEX_HELD(QLOCK(q)));
	if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
	    (q->q_syncqmsgs > sq_max_size)) {
		if ((fq = q->q_nfsrv) == q) {
			fq->q_flag |= QFULL;
		} else {
			mutex_exit(QLOCK(q));
			mutex_enter(QLOCK(fq));
			fq->q_flag |= QFULL;
			mutex_exit(QLOCK(fq));
			mutex_enter(QLOCK(q));
		}
	}
}
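
/*
 * Illustrative effect (not in the original source): once QFULL is set on
 * the next queue with a service procedure, upstream flow control engages.
 * A hypothetical service procedure would see:
 *
 *	if (!canputnext(q)) {		fails against the QFULL q_nfsrv
 *		(void) putbq(q, mp);	hold the message and back off
 *		return (0);
 *	}
 *
 * clr_qfull() below is the matching release side.
 */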
void
clr_qfull(queue_t *q)
{
	queue_t	*oq = q;

	q = q->q_nfsrv;
	/* Fast check if there is any work to do before getting the lock. */
	if ((q->q_flag & (QFULL|QWANTW)) == 0) {
		return;
	}

	/*
	 * Do not reset QFULL (and backenable) if the q_count is the reason
	 * for QFULL being set.
	 */
	mutex_enter(QLOCK(q));
	/*
	 * If queue is empty i.e q_mblkcnt is zero, queue can not be full.
	 * Hence clear the QFULL.
	 * If both q_count and q_mblkcnt are less than the hiwat mark,
	 * clear the QFULL.
	 */
	if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
	    (q->q_mblkcnt < q->q_hiwat))) {
		q->q_flag &= ~QFULL;
		/*
		 * A little more confusing, how about this way:
		 * if someone wants to write,
		 * AND
		 *    both counts are less than the lowat mark
		 *    OR
		 *    the lowat mark is zero
		 * THEN
		 * backenable
		 */
		if ((q->q_flag & QWANTW) &&
		    (((q->q_count < q->q_lowat) &&
		    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
			q->q_flag &= ~QWANTW;
			mutex_exit(QLOCK(q));
			backenable(oq, 0);
		} else
			mutex_exit(QLOCK(q));
	} else
		mutex_exit(QLOCK(q));
}
/*
 * Set the forward service procedure pointer.
 *
 * Called at insert-time to cache a queue's next forward service procedure in
 * q_nfsrv; used by canput() and canputnext().  If the queue to be inserted
 * has a service procedure then q_nfsrv points to itself.  If the queue to be
 * inserted does not have a service procedure, then q_nfsrv points to the next
 * queue forward that has a service procedure.  If the queue is at the logical
 * end of the stream (driver for write side, stream head for the read side)
 * and does not have a service procedure, then q_nfsrv also points to itself.
 */
static void
set_nfsrv_ptr(
	queue_t  *rnew,		/* read queue pointer to new module */
	queue_t  *wnew,		/* write queue pointer to new module */
	queue_t  *prev_rq,	/* read queue pointer to the module above */
	queue_t  *prev_wq)	/* write queue pointer to the module above */
{
	queue_t *qp;

	if (prev_wq->q_next == NULL) {
		/*
		 * Insert the driver, initialize the driver and stream head.
		 * In this case, prev_rq/prev_wq should be the stream head.
		 * _I_INSERT does not allow inserting a driver.  Make sure
		 * that it is not an insertion.
		 */
		ASSERT(!(rnew->q_flag & _QINSERTING));
		wnew->q_nfsrv = wnew;
		if (rnew->q_qinfo->qi_srvp)
			rnew->q_nfsrv = rnew;
		else
			rnew->q_nfsrv = prev_rq;
		prev_rq->q_nfsrv = prev_rq;
		prev_wq->q_nfsrv = prev_wq;
	} else {
		/*
		 * set up read side q_nfsrv pointer. This MUST be done
		 * before setting the write side, because the setting of
		 * the write side for a fifo may depend on it.
		 *
		 * Suppose we have a fifo that only has pipemod pushed.
		 * pipemod has no read or write service procedures, so
		 * nfsrv for both pipemod queues points to prev_rq (the
		 * stream read head). Now push bufmod (which has only a
		 * read service procedure). Doing the write side first,
		 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
		 * is WRONG; the next queue forward from wnew with a
		 * service procedure will be rnew, not the stream read head.
		 * Since the downstream queue (which in the case of a fifo
		 * is the read queue rnew) can affect upstream queues, it
		 * needs to be done first. Setting up the read side first
		 * sets nfsrv for both pipemod queues to rnew and then
		 * when the write side is set up, wnew->q_nfsrv will also
		 * point to rnew.
		 */
		if (rnew->q_qinfo->qi_srvp) {
			/*
			 * use _OTHERQ() because, if this is a pipe, next
			 * module may have been pushed from other end and
			 * q_next could be a read queue.
			 */
			qp = _OTHERQ(prev_wq->q_next);
			while (qp && qp->q_nfsrv != qp) {
				qp->q_nfsrv = rnew;
				qp = backq(qp);
			}
			rnew->q_nfsrv = rnew;
		} else
			rnew->q_nfsrv = prev_rq->q_nfsrv;

		/* set up write side q_nfsrv pointer */
		if (wnew->q_qinfo->qi_srvp) {
			wnew->q_nfsrv = wnew;

			/*
			 * For insertion, need to update nfsrv of the modules
			 * above which do not have a service routine.
			 */
			if (rnew->q_flag & _QINSERTING) {
				for (qp = prev_wq;
				    qp != NULL && qp->q_nfsrv != qp;
				    qp = backq(qp)) {
					qp->q_nfsrv = wnew->q_nfsrv;
				}
			}
		} else {
			if (prev_wq->q_next == prev_rq)
				/*
				 * Since prev_wq/prev_rq are the middle of a
				 * fifo, wnew/rnew will also be the middle of
				 * a fifo and wnew's nfsrv is same as rnew's.
				 */
				wnew->q_nfsrv = rnew->q_nfsrv;
			else
				wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
		}
	}
}
/*
 * Reset the forward service procedure pointer; called at remove-time.
 */
static void
reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
{
	queue_t *tmp_qp;

	/* Reset the write side q_nfsrv pointer for _I_REMOVE */
	if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
		for (tmp_qp = backq(wqp);
		    tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
		    tmp_qp = backq(tmp_qp)) {
			tmp_qp->q_nfsrv = wqp->q_nfsrv;
		}
	}

	/* reset the read side q_nfsrv pointer */
	if (rqp->q_qinfo->qi_srvp) {
		if (wqp->q_next) {	/* non-driver case */
			tmp_qp = _OTHERQ(wqp->q_next);
			while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
				/* Note that rqp->q_next cannot be NULL */
				ASSERT(rqp->q_next != NULL);
				tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
				tmp_qp = backq(tmp_qp);
			}
		}
	}
}
/*
 * This routine should be called after all stream geometry changes to update
 * the stream head cached struio() rd/wr queue pointers. Note must be called
 * with the streamlock()ed.
 *
 * Note: only enables Synchronous STREAMS for a side of a Stream which has
 *	 an explicit synchronous barrier module queue. That is, a queue that
 *	 has specified a struio() type.
 */
void
strsetuio(stdata_t *stp)
{
	queue_t *wrq;

	if (stp->sd_flag & STPLEX) {
		/*
		 * Not streamhead, but a mux, so no Synchronous STREAMS.
		 */
		stp->sd_struiowrq = NULL;
		stp->sd_struiordq = NULL;
		return;
	}
	/*
	 * Scan the write queue(s) while synchronous
	 * until we find a qinfo uio type specified.
	 */
	wrq = stp->sd_wrq->q_next;
	while (wrq) {
		if (wrq->q_struiot == STRUIOT_NONE) {
			wrq = 0;
			break;
		}
		if (wrq->q_struiot != STRUIOT_DONTCARE)
			break;
		if (! _SAMESTR(wrq)) {
			wrq = 0;
			break;
		}
		wrq = wrq->q_next;
	}
	stp->sd_struiowrq = wrq;
	/*
	 * Scan the read queue(s) while synchronous
	 * until we find a qinfo uio type specified.
	 */
	wrq = stp->sd_wrq->q_next;
	while (wrq) {
		if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
			wrq = 0;
			break;
		}
		if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
			break;
		if (! _SAMESTR(wrq)) {
			wrq = 0;
			break;
		}
		wrq = wrq->q_next;
	}
	stp->sd_struiordq = wrq ? _RD(wrq) : 0;
}
/*
 * pass_wput, unblocks the passthru queues, so that
 * messages can arrive at muxs lower read queue, before
 * I_LINK/I_UNLINK is acked/nacked.
 */
static int
pass_wput(queue_t *q, mblk_t *mp)
{
	syncq_t *sq;

	sq = _RD(q)->q_syncq;
	if (sq->sq_flags & SQ_BLOCKED)
		unblocksq(sq, SQ_BLOCKED, 0);
	putnext(q, mp);
	return (0);
}
/*
 * Set up queues for the link/unlink.
 * Create a new queue and block it and then insert it
 * below the stream head on the lower stream.
 * This prevents any messages from arriving during the setq
 * as well as while the mux is processing the LINK/I_UNLINK.
 * The blocked passq is unblocked once the LINK/I_UNLINK has
 * been acked or nacked or if a message is generated and sent
 * down muxs write put procedure (see pass_wput above).
 *
 * After the new queue is inserted, all messages coming from below are
 * blocked. The call to strlock will ensure that all activity in the
 * stream head read queue syncq is stopped (sq_count drops to zero).
 */
static queue_t *
link_addpassthru(stdata_t *stpdown)
{
	queue_t *passq;
	sqlist_t sqlist;

	passq = allocq();
	STREAM(passq) = STREAM(_WR(passq)) = stpdown;
	/* setq might sleep in allocator - avoid holding locks. */
	setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
	    SQ_CI|SQ_CO, B_FALSE);
	claimq(passq);
	blocksq(passq->q_syncq, SQ_BLOCKED, 1);
	insertq(STREAM(passq), passq);

	/*
	 * Use strlock() to wait for the stream head sq_count to drop to zero
	 * since we are going to change q_ptr in the stream head.  Note that
	 * insertq() doesn't wait for any syncq counts to drop to zero.
	 */
	sqlist.sqlist_head = NULL;
	sqlist.sqlist_index = 0;
	sqlist.sqlist_size = sizeof (sqlist_t);
	sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
	strlock(stpdown, &sqlist);
	strunlock(stpdown, &sqlist);

	releaseq(passq);
	return (passq);
}
/*
 * Let messages flow up into the mux by removing
 * the passq.
 */
static void
link_rempassthru(queue_t *passq)
{
	claimq(passq);
	removeq(passq);
	releaseq(passq);
	freeq(passq);
}
/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first.  If `tim'
 * is negative, then there is no time limit.  If `nosigs' is non-zero,
 * then the wait will be non-interruptible.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
	clock_t ret;

	if (tim < 0) {
		if (nosigs) {
			cv_wait(cvp, mp);
			ret = 1;
		} else {
			ret = cv_wait_sig(cvp, mp);
		}
	} else if (tim > 0) {
		/*
		 * convert milliseconds to clock ticks
		 */
		if (nosigs) {
			ret = cv_reltimedwait(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		} else {
			ret = cv_reltimedwait_sig(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		}
	} else {
		ret = -1;
	}
	return (ret);
}
/*
 * Wait until the stream head can determine if it is at the mark but
 * don't wait forever to prevent a race condition between the "mark" state
 * in the stream head and any mark state in the caller/user of this routine.
 *
 * This is used by sockets and for a socket it would be incorrect
 * to return a failure for SIOCATMARK when there is no data in the receive
 * queue and the marked urgent data is traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of these
 * three events:
 *	The stream head read queue becoming non-empty (including an EOF).
 *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *	The STRNOTATMARK flag being set (which indicates that the transport
 *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *	the mark).
 *
 * The routine returns 1 if the stream is at the mark; 0 if it can
 * be determined that the stream is not at the mark.
 * If the wait times out and it can't determine
 * whether or not the stream might be at the mark the routine will return -1.
 *
 * Note: This routine should only be used when a mark is pending i.e.,
 * in the socket case the SIGURG has been posted.
 * Note2: This can not wakeup just because synchronous streams indicate
 * that data is available since it is not possible to use the synchronous
 * streams interfaces to determine the b_flag value for the data queued below
 * the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
	struct stdata *stp = vp->v_stream;
	queue_t *rq = _RD(stp->sd_wrq);
	int mark;

	mutex_enter(&stp->sd_lock);
	while (rq->q_first == NULL &&
	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
		stp->sd_flag |= RSLEEP;

		/* Wait for 100 milliseconds for any state change. */
		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
			mutex_exit(&stp->sd_lock);
			return (-1);
		}
	}
	if (stp->sd_flag & STRATMARK)
		mark = 1;
	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
		mark = 1;
	else
		mark = 0;

	mutex_exit(&stp->sd_lock);
	return (mark);
}
/*
 * Set a read side error. If persist is set change the socket error
 * to persistent. If errfunc is set install the function as the exported
 * error handler.
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_rerror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STRDERR;
	else
		stp->sd_flag |= STRDERR;
	if (persist) {
		stp->sd_flag &= ~STRDERRNONPERSIST;
	} else {
		stp->sd_flag |= STRDERRNONPERSIST;
	}
	stp->sd_rderrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}
/*
 * Set a write side error. If persist is set change the socket error
 * to persistent.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_werror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STWRERR;
	else
		stp->sd_flag |= STWRERR;
	if (persist) {
		stp->sd_flag &= ~STWRERRNONPERSIST;
	} else {
		stp->sd_flag |= STWRERRNONPERSIST;
	}
	stp->sd_wrerrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}
/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}
void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}
void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}
void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}
void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}
/* Used within framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * mark queue enabled and place on run list if it is not already being
	 * serviced. If it is serviced, the runservice() function will detect
	 * that QENAB is set and call service procedure before clearing
	 * QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = ddi_get_lbolt();

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing if it is not already scheduled or if the stream head
	 * does not intend to process it in the foreground later by setting
	 * the STRS_WILLSERVICE flag.
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If something is already on the list, stp flags should show
	 * intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream we are the first producer and
	 * need to schedule it for background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * Task queue failed so fail over to the backup
			 * servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear STRS_SCHEDULED flag because it
			 * was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by service_queue
			 * lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wakeup background queue processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}
static void
queue_service(queue_t *q)
{
	/*
	 * The queue in the list should have
	 * QENAB flag set and should not have
	 * QINSERVICE flag set. QINSERVICE is
	 * set when the queue is dequeued and
	 * qenable_locked doesn't enqueue a
	 * queue with QINSERVICE set.
	 */

	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}
static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* if we came here from the background thread, clear the flag */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* let drain_syncq know that it's being called in the background */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}
static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter
	 * to signal that a qwriter(OUTER) is either investigating
	 * running or that it is actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set
	 * to block entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since
	 * outer_exit does it for us.
	 */
	outer_exit(outer);
}
static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}
/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}
/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked will
	 * not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help backup background thread to drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}
void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}
/*
 * Replace the cred currently in the mblk with a different one.
 * Also update db_cpid.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *ocr = dbp->db_credp;

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}
/*
 * If the src message has a cred, then replace the cred currently in the mblk
 * with it.
 * Also update db_cpid.
 */
void
mblk_copycred(mblk_t *mp, const mblk_t *src)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *cr, *ocr;
	pid_t cpid;

	cr = msg_getcred(src, &cpid);
	if (cr == NULL)
		return;

	ocr = dbp->db_credp;
	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}
int
hcksum_assoc(mblk_t *mp,  multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE,
		    km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}
void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT((flags & ~HW_LSO_FLAGS) == 0);

	/* Set the flags */
	DB_LSOFLAGS(mp) |= flags;
	DB_LSOMSS(mp) = mss;
}

void
lso_info_cleanup(mblk_t *mp)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	/* Clear the flags */
	DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
	DB_LSOMSS(mp) = 0;
}
/*
 * Checksum buffer *bp for len bytes with psum partial checksum,
 * or 0 if none, and return the 16 bit partial checksum.
 */
unsigned int
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * Bp is 16 bit aligned and len is multiple of 16 bit word.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * Bp isn't 16 bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += (tsum << 8) & 0xffff | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * Bp is 16 bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}
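
/*
 * Worked example of the final fold (not in the original source): with the
 * maximum pre-fold value psum == 0x3FDFE,
 *
 *	(0x3FDFE >> 16) + (0x3FDFE & 0xFFFF) == 0x3 + 0xFDFE == 0xFE01
 *
 * which fits in 16 bits, so a single fold suffices here.
 */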
int
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	int	rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}
void
freemsgchain(mblk_t *mp)
{
	mblk_t	*next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}
mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t	*nmp = NULL;
	mblk_t	**nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}
/* NOTE: Do not add code after this point. */

#undef QLOCK

/*
 * Replacement for QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */

#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}
/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t	*ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * set up autopush
	 */
	sad_initspace(ss);

	/*
	 * set up mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}
/*
 * Note: run at zone shutdown and not destroy so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge		*ep;
		ldi_handle_t		lh;
		ldi_ident_t		li;
		int			ret;
		int			rval;
		dev_t			rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Close layered handles */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}
/*
 * Free the structure; str_stack_shutdown did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t	*ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}