/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/thread.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/class.h>
#include <sys/cmn_err.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>
int smb_iod_send_echo(smb_vc_t *);
/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;
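/*
 * Editorial sketch, not from the original source: the callback vector
 * used below has roughly this shape. Only the field names referenced
 * in this file are shown, and the signatures are assumed from the way
 * the callbacks are invoked (each is handed an smb_share_t pointer);
 * see the netsmb headers for the real definition.
 *
 *	typedef struct smb_fscb {
 *		void (*fscb_disconn)(smb_share_t *);
 *		void (*fscb_connect)(smb_share_t *);
 *		void (*fscb_down)(smb_share_t *);
 *		void (*fscb_up)(smb_share_t *);
 *	} smb_fscb_t;
 */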
smb_fscb_set(smb_fscb_t *cb)
smb_iod_share_disconnected(smb_share_t *ssp)

        smb_share_invalidate(ssp);

        if (fscb && fscb->fscb_disconn) {
                fscb->fscb_disconn(ssp);
        }
/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
smb_iod_newstate(struct smb_vc *vcp, int state)

        vcp->vc_state = state;
/* Lock Held version of the next function. */
smb_iod_rqprocessed_LH(struct smb_rq *rqp, int error, int flags)

        rqp->sr_flags |= flags;
        rqp->sr_lerror = error;
        rqp->sr_state = SMBRQ_NOTIFIED;
        cv_broadcast(&rqp->sr_cond);

/* The locking wrapper, smb_iod_rqprocessed(), takes the request lock and calls: */
        smb_iod_rqprocessed_LH(rqp, error, flags);
smb_iod_invrq(struct smb_vc *vcp)

        /*
         * Invalidate all outstanding requests for this connection
         */
        rw_enter(&vcp->iod_rqlock, RW_READER);
        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
                smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
        }
        rw_exit(&vcp->iod_rqlock);
/*
 * Called by smb_vc_rele, smb_vc_kill, and by the driver
 * close entry point if the IOD closes its dev handle.
 *
 * Forcibly kill the connection and IOD.
 */
smb_iod_disconnect(struct smb_vc *vcp)

        /*
         * Inform everyone of the state change.
         */
        if (vcp->vc_state != SMBIOD_ST_DEAD) {
                smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
                cv_broadcast(&vcp->vc_statechg);
        }

        /*
         * Let's be safe here and avoid doing any
         * call across the network while trying to
         * shut things down. If we just disconnect,
         * the server will take care of the logoff.
         */
        SMB_TRAN_DISCONNECT(vcp);
/*
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 */
smb_iod_sendrq(struct smb_rq *rqp)

        struct smb_vc *vcp = rqp->sr_vc;

        ASSERT(SEMA_HELD(&vcp->vc_sendlock));
        ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

        /*
         * Note: Anything special for SMBR_INTERNAL here?
         */
        if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
                SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);

        /*
         * On the first send, set the MID and (maybe)
         * the signing sequence numbers. The increments
         * here are serialized by vc_sendlock.
         */
        if (rqp->sr_sendcnt == 0) {

                rqp->sr_mid = vcp->vc_next_mid++;

                if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
                        /*
                         * We're signing requests and verifying
                         * signatures on responses. Set the
                         * sequence numbers of the request and
                         * response here, used in smb_rq_verify.
                         */
                        rqp->sr_seqno = vcp->vc_next_seq++;
                        rqp->sr_rseqno = vcp->vc_next_seq++;
                }

                /* Fill in UID, TID, MID, etc. */

                /*
                 * Sign the message now that we're finally done
                 * filling in the SMB header fields, etc.
                 */
                if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {

        if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
                smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
                /*
                 * If all attempts to send a request failed, then
                 * something is seriously hosed.
                 */

        /*
         * Replaced m_copym() with Solaris copymsg() which does the same
         * work when we want to do a M_COPYALL.
         * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
         */
        m = copymsg(rqp->sr_rq.mb_top);

        DTRACE_PROBE2(smb_iod_sendrq,
            (smb_rq_t *), rqp, (mblk_t *), m);

        SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);

        error = SMB_TRAN_SEND(vcp, m);
        m = 0; /* consumed by SEND */

        rqp->sr_lerror = error;
                rqp->sr_flags |= SMBR_SENT;
                rqp->sr_state = SMBRQ_SENT;
                if (rqp->sr_flags & SMBR_SENDWAIT)
                        cv_broadcast(&rqp->sr_cond);

        /*
         * Check for fatal errors
         */
        if (SMB_TRAN_FATAL(vcp, error)) {
                /*
                 * No further attempts should be made
                 */
                SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);

        SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

        /* If proc waiting on rqp was signaled... */
        if (smb_rq_intr(rqp))
                smb_iod_rqprocessed(rqp, EINTR, 0);
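/*
 * Editorial worked example of the signing sequence numbers set above
 * (values assumed for illustration): if vc_next_seq is 6 when a signed
 * request is first sent, the request gets sr_seqno = 6 and its reply
 * is verified against sr_rseqno = 7, because vc_next_seq is advanced
 * once for the request and once for the expected response.
 */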
smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)

        error = SMB_TRAN_RECV(vcp, &m);

        m = m_pullup(m, SMB_HDRLEN);

        /*
         * Check the SMB header
         */
        hp = mtod(m, uchar_t *);
        if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE. The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Any non-zero error means the IOD should terminate.
 */
smb_iod_recvall(struct smb_vc *vcp)

        int etime_count = 0; /* for "server not responding", etc. */

                /*
                 * Check whether someone "killed" this VC,
                 * or is asking the IOD to terminate.
                 */
                if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
                        SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);

                if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
                        SMBIODEBUG("SHUTDOWN set\n");
                        /* This IOD thread will terminate. */
                        smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
                        cv_broadcast(&vcp->vc_statechg);

                error = smb_iod_recv1(vcp, &m);

                if (error == ETIME &&
                    vcp->iod_rqlist.tqh_first != NULL) {
                        /*
                         * Nothing received for 15 seconds and
                         * we have requests in the queue.
                         */

                        /*
                         * Once, at 15 sec. notify callbacks
                         * and print the warning message.
                         */
                        if (etime_count == 1) {
                                /* Was: smb_iod_notify_down(vcp); */
                                if (fscb && fscb->fscb_down)
                                        smb_vc_walkshares(vcp,
                                            fscb->fscb_down);
                                zprintf(vcp->vc_zoneid,
                                    "SMB server %s not responding\n",
                                    vcp->vc_srvname);
                        }

                        /*
                         * At 30 sec. try sending an echo, and then
                         * once a minute thereafter.
                         */
                        if ((etime_count & 3) == 2) {
                                (void) smb_iod_send_echo(vcp);
                        }

                } /* ETIME && requests in queue */

                if (error == ETIME) {
                        /*
                         * If the IOD thread holds the last reference
                         * to this VC, let the IOD thread terminate.
                         */
                        if (vcp->vc_co.co_usecount > 1)

                        if (vcp->vc_co.co_usecount == 1) {
                                smb_iod_newstate(vcp, SMBIOD_ST_DEAD);

                } /* error == ETIME */

                        /*
                         * The recv. above returned some error
                         * we can't continue from i.e. ENOTCONN.
                         * It's dangerous to continue here.
                         * (possible infinite loop!)
                         *
                         * If we have requests enqueued, next
                         * state is reconnecting, else idle.
                         */
                        state = (vcp->iod_rqlist.tqh_first != NULL) ?
                            SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
                        smb_iod_newstate(vcp, state);
                        cv_broadcast(&vcp->vc_statechg);

                /*
                 * Received something. Yea!
                 */
                        zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
                            vcp->vc_srvname);

                        /* Was: smb_iod_notify_up(vcp); */
                        if (fscb && fscb->fscb_up)
                                smb_vc_walkshares(vcp, fscb->fscb_up);

                /*
                 * Have an SMB packet. The SMB header was
                 * checked in smb_iod_recv1().
                 * Find the request...
                 */
                hp = mtod(m, uchar_t *);
                mid = letohs(SMB_HDRMID(hp));
                SMBIODEBUG("mid %04x\n", (uint_t)mid);

                rw_enter(&vcp->iod_rqlock, RW_READER);
                TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

                        if (rqp->sr_mid != mid)

                        DTRACE_PROBE2(smb_iod_recvrq,
                            (smb_rq_t *), rqp, (mblk_t *), m);

                        if (rqp->sr_rp.md_top == NULL) {
                                md_initm(&rqp->sr_rp, m);
                        } else {
                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
                                        md_append_record(&rqp->sr_rp, m);
                                } else {
                                        SMBSDEBUG("duplicate response %d "
                                            "(ignored)\n", mid);

                        smb_iod_rqprocessed_LH(rqp, 0, 0);

                        int cmd = SMB_HDRCMD(hp);

                        if (cmd != SMB_COM_ECHO)
                                SMBSDEBUG("drop resp: mid %d, cmd %d\n",
                                    (uint_t)mid, cmd);
                        /* smb_printrqlist(vcp); */

                rw_exit(&vcp->iod_rqlock);
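/*
 * Editorial summary of the reader loop above: ETIME with requests
 * queued means warn, maybe send an echo, and keep reading; ETIME with
 * no other holders of the VC lets the IOD die; any other receive error
 * moves the state to RECONNECT (requests queued) or IDLE; otherwise
 * the response MID is matched against iod_rqlist and the waiter is
 * notified via smb_iod_rqprocessed_LH().
 */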
/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while. Try to
 * send an SMB echo request. It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
smb_iod_send_echo(smb_vc_t *vcp)

        smb_credinit(&scred, NULL);
        err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
        smb_credrele(&scred);
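/*
 * Editorial note: because the echo is sent with SMBNOREPLYWAIT, its
 * reply is presumably not waited for and arrives after the request has
 * left iod_rqlist, so the reader treats it as an unmatched response.
 * That is why the "drop resp" message in smb_iod_recvall() stays quiet
 * for SMB_COM_ECHO.
 */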
/*
 * The IOD thread is now just a "reader",
 * so no more smb_iod_request(). Yea!
 */

/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 */
smb_iod_addrq(struct smb_rq *rqp)

        struct smb_vc *vcp = rqp->sr_vc;
        int error, save_newrq;

        ASSERT(rqp->sr_cred);

        /*
         * State should be correct after the check in
         * smb_rq_enqueue(), but we dropped locks...
         */
        if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
                SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);

        /*
         * Requests from the IOD itself are marked _INTERNAL,
         * and get some special treatment to avoid blocking
         * the reader thread (so we don't deadlock).
         * The request is not yet on the queue, so we can
         * modify its state here without locks.
         * Only thing using this now is ECHO.
         */
        rqp->sr_owner = curthread;
        if (rqp->sr_owner == vcp->iod_thr) {
                rqp->sr_flags |= SMBR_INTERNAL;

                /*
                 * This is a request from the IOD thread.
                 * Always send directly from this thread.
                 * Note lock order: iod_rqlist, vc_sendlock
                 */
                rw_enter(&vcp->iod_rqlock, RW_WRITER);
                TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
                rw_downgrade(&vcp->iod_rqlock);

                /*
                 * Note: iod_sendrq expects vc_sendlock,
                 * so take that here, but carefully:
                 * Never block the IOD thread here.
                 */
                if (sema_tryp(&vcp->vc_sendlock) == 0) {
                        SMBIODEBUG("sendlock busy\n");
                } else {
                        /* Have vc_sendlock */
                        error = smb_iod_sendrq(rqp);
                        sema_v(&vcp->vc_sendlock);
                }

                rw_exit(&vcp->iod_rqlock);

                /*
                 * In the non-error case, _removerq
                 * is done by either smb_rq_reply
                 */
                        smb_iod_removerq(rqp);

        rw_enter(&vcp->iod_rqlock, RW_WRITER);

        TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
        /* iod_rqlock/WRITER protects iod_newrq */
        save_newrq = vcp->iod_newrq;

        rw_exit(&vcp->iod_rqlock);

        /*
         * Now send any requests that need to be sent,
         * including the one we just put on the list.
         * Only the thread that found iod_newrq==0
         * needs to run the send loop.
         */
                smb_iod_sendall(vcp);
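/*
 * Editorial sketch of a hypothetical caller (not from this file): a
 * normal request path pairs this function with smb_iod_waitrq(),
 * roughly:
 *
 *	error = smb_iod_addrq(rqp);
 *	if (error == 0)
 *		error = smb_iod_waitrq(rqp);
 *
 * with smb_iod_removerq() happening later, per the comment in the
 * _INTERNAL branch above, from smb_rq_reply() or the request teardown.
 */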
/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send. Similar to the
 * "normal" part of smb_iod_addrq.
 */
smb_iod_multirq(struct smb_rq *rqp)

        struct smb_vc *vcp = rqp->sr_vc;

        ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

        if (rqp->sr_flags & SMBR_INTERNAL)

        if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
                SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);

        rw_enter(&vcp->iod_rqlock, RW_WRITER);

        /* Already on iod_rqlist, just reset state. */
        rqp->sr_state = SMBRQ_NOTSENT;

        /* iod_rqlock/WRITER protects iod_newrq */
        save_newrq = vcp->iod_newrq;

        rw_exit(&vcp->iod_rqlock);

        /*
         * Now send any requests that need to be sent,
         * including the one we just marked NOTSENT.
         * Only the thread that found iod_newrq==0
         * needs to run the send loop.
         */
                smb_iod_sendall(vcp);
smb_iod_removerq(struct smb_rq *rqp)

        struct smb_vc *vcp = rqp->sr_vc;

        rw_enter(&vcp->iod_rqlock, RW_WRITER);

        /*
         * Make sure we have not already removed it.
         * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
         * XXX: Don't like the constant 1 here...
         */
        ASSERT(rqp->sr_link.tqe_next != (void *)1L);

        TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
        rw_exit(&vcp->iod_rqlock);
/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * iod_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 */
smb_iod_waitrq(struct smb_rq *rqp)

        struct smb_vc *vcp = rqp->sr_vc;
        clock_t tr, tmo1, tmo2;

        if (rqp->sr_flags & SMBR_INTERNAL) {
                ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
                smb_iod_removerq(rqp);

        /*
         * Make sure this is NOT the IOD thread,
         * or the wait below will stop the reader.
         */
        ASSERT(curthread != vcp->iod_thr);

        /*
         * First, wait for the request to be sent. Normally the send
         * has already happened by the time we get here. However, if
         * we have more than maxmux entries in the request list, our
         * request may not be sent until other requests complete.
         * The wait in this case is due to local I/O demands, so
         * we don't want the server response timeout to apply.
         *
         * If a request is allowed to interrupt this wait, then the
         * request is cancelled and never sent OTW. Some kinds of
         * requests should never be cancelled (i.e. close) and those
         * are marked SMBR_NOINTR_SEND so they either go eventually,
         * or a connection close will terminate them with ENOTCONN.
         */
        while (rqp->sr_state == SMBRQ_NOTSENT) {
                rqp->sr_flags |= SMBR_SENDWAIT;
                if (rqp->sr_flags & SMBR_NOINTR_SEND) {
                        cv_wait(&rqp->sr_cond, &rqp->sr_lock);
                } else
                        rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
                rqp->sr_flags &= ~SMBR_SENDWAIT;
                        SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);

        /*
         * The request has been sent. Now wait for the response,
         * with the timeout specified for this request.
         * Compute all the deadlines now, so we effectively
         * start the timer(s) after the request is sent.
         */
        if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
                tmo1 = SEC_TO_TICK(smb_timo_notice);

        tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);

        /*
         * As above, we don't want to allow interrupt for some
         * requests like open, because we could miss a successful
         * response and therefore "leak" a FID. Such requests
         * are marked SMBR_NOINTR_RECV to prevent that.
         *
         * If "slow server" warnings are enabled, wait first
         * for the "notice" timeout, and warn if expired.
         */
        if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
                if (rqp->sr_flags & SMBR_NOINTR_RECV)
                        tr = cv_reltimedwait(&rqp->sr_cond,
                            &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
                else
                        tr = cv_reltimedwait_sig(&rqp->sr_cond,
                            &rqp->sr_lock, tmo1, TR_CLOCK_TICK);

                DTRACE_PROBE1(smb_iod_waitrq1,
                    (smb_rq_t *), rqp);

                /* Want this to go ONLY to the user. */
                uprintf("SMB server %s has not responded"
                    " to request %d after %d seconds..."
                    " (still waiting).\n", vcp->vc_srvname,
                    rqp->sr_mid, smb_timo_notice);

        /*
         * Keep waiting until tmo2 is expired.
         */
        while (rqp->sr_rpgen == rqp->sr_rplast) {
                if (rqp->sr_flags & SMBR_NOINTR_RECV)
                        tr = cv_timedwait(&rqp->sr_cond,
                            &rqp->sr_lock, tmo2);
                else
                        tr = cv_timedwait_sig(&rqp->sr_cond,
                            &rqp->sr_lock, tmo2);

                        DTRACE_PROBE1(smb_iod_waitrq2,
                            (smb_rq_t *), rqp);

                        /* Want this to go ONLY to the user. */
                        uprintf("SMB server %s has not responded"
                            " to request %d after %d seconds..."
                            " (giving up).\n", vcp->vc_srvname,
                            rqp->sr_mid, rqp->sr_timo);

        error = rqp->sr_lerror;

        /*
         * MULTIPACKET requests must stay in the list.
         * They may need additional responses.
         */
        if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
                smb_iod_removerq(rqp);

        /*
         * Some request has been completed.
         * If we reached the mux limit,
         * re-run the send loop...
         */
        if (vcp->iod_muxfull)
                smb_iod_sendall(vcp);
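/*
 * Editorial worked example of the two-stage wait above (values assumed
 * for illustration): with smb_timo_notice = 15 and rqp->sr_timo = 45,
 * tmo1 is a 15-second relative wait after which, if there is still no
 * reply, the "still waiting" warning prints; tmo2 is an absolute
 * deadline 45 seconds after the send, and only when it expires does
 * the "giving up" path run.
 */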
/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share. (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
smb_iod_shutdown_share(struct smb_share *ssp)

        struct smb_vc *vcp = SSTOVC(ssp);

        /*
         * Loop through the list of requests and shutdown the ones
         * that are for the specified share.
         */
        rw_enter(&vcp->iod_rqlock, RW_READER);
        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
                if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
                        smb_iod_rqprocessed(rqp, EIO, 0);
        }
        rw_exit(&vcp->iod_rqlock);
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
smb_iod_sendall(smb_vc_t *vcp)

        /*
         * Clear "newrq" to make sure threads adding
         * new requests will run this function again.
         */
        rw_enter(&vcp->iod_rqlock, RW_WRITER);

        /*
         * We only read iod_rqlist, so downgrade rwlock.
         * This allows the IOD to handle responses while
         * some requesting thread may be blocked in send.
         */
        rw_downgrade(&vcp->iod_rqlock);

        /*
         * Serialize to prevent multiple senders.
         * Note lock order: iod_rqlock, vc_sendlock
         */
        sema_p(&vcp->vc_sendlock);

        /*
         * Walk the list of requests and send when possible.
         * We avoid having more than vc_maxmux requests
         * outstanding to the server by traversing only
         * vc_maxmux entries into this list. Simple!
         */
        ASSERT(vcp->vc_maxmux > 0);

        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

                if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
                        error = ENOTCONN; /* stop everything! */

                if (rqp->sr_state == SMBRQ_NOTSENT) {
                        error = smb_iod_sendrq(rqp);

                if (++muxcnt == vcp->vc_maxmux) {
                        SMBIODEBUG("muxcnt == vc_maxmux\n");

        /*
         * If we have vc_maxmux requests outstanding,
         * arrange for _waitrq to call _sendall as
         * requests are completed.
         */
        vcp->iod_muxfull =
            (muxcnt < vcp->vc_maxmux) ? 0 : 1;

        sema_v(&vcp->vc_sendlock);
        rw_exit(&vcp->iod_rqlock);
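/*
 * Editorial example of the mux accounting above (vc_maxmux value
 * assumed for illustration): with vc_maxmux = 50, the walk above stops
 * after 50 entries; if that limit was reached, iod_muxfull is set so
 * that smb_iod_waitrq() re-runs this function as responses come back.
 */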
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)

        struct file *fp = NULL;

        /*
         * This is called by the one-and-only
         * IOD thread for this VC.
         */
        ASSERT(vcp->iod_thr == curthread);

        /*
         * Get the network transport file pointer,
         * and "loan" it to our transport module.
         */
        if ((fp = getf(vcp->vc_tran_fd)) == NULL) {

        if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)

        /*
         * In case of reconnect, tell any enqueued requests
         */
        vcp->vc_genid++; /* possibly new connection */
        smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
        cv_broadcast(&vcp->vc_statechg);

        /*
         * The above cv_broadcast should be sufficient to
         * get requests going again.
         *
         * If we have a callback function, run it.
         * Was: smb_iod_notify_connected()
         */
        if (fscb && fscb->fscb_connect)
                smb_vc_walkshares(vcp, fscb->fscb_connect);

        /*
         * Run the "reader" loop.
         */
        err = smb_iod_recvall(vcp);

        /*
         * The reader loop returned, so we must have a
         * new state. (disconnected or reconnecting)
         *
         * Notify shares of the disconnect.
         * Was: smb_iod_notify_disconnect()
         */
        smb_vc_walkshares(vcp, smb_iod_share_disconnected);

        /*
         * The reader loop function returns only when
         * there's been an error on the connection, or
         * this VC has no more references. It also
         * updates the state before it returns.
         *
         * Tell any requests to give up or restart.
         */

        /* Recall the file descriptor loan. */
        (void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);

        releasef(vcp->vc_tran_fd);
/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, then
 * wait only a minute or so, then drop it.
 */
smb_iod_vc_idle(struct smb_vc *vcp)

        clock_t tr, delta = SEC_TO_TICK(15);

        /*
         * This is called by the one-and-only
         * IOD thread for this VC.
         */
        ASSERT(vcp->iod_thr == curthread);

        while (vcp->vc_state == SMBIOD_ST_IDLE) {
                tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
                    delta, TR_CLOCK_TICK);

                if (vcp->vc_co.co_usecount == 1) {
                        /* Let this IOD terminate. */
                        smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
                        /* nobody to cv_broadcast */
/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 */
smb_iod_vc_rcfail(struct smb_vc *vcp)

        /*
         * This is called by the one-and-only
         * IOD thread for this VC.
         */
        ASSERT(vcp->iod_thr == curthread);

        if (vcp->vc_state != SMBIOD_ST_RECONNECT)

        smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
        cv_broadcast(&vcp->vc_statechg);

        /*
         * Short wait here for two reasons:
         * (1) Give requests a chance to error out.
         * (2) Prevent immediate retry.
         */
        tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
            SEC_TO_TICK(5), TR_CLOCK_TICK);

        smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
        cv_broadcast(&vcp->vc_statechg);
/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 */
smb_iod_reconnect(struct smb_vc *vcp)

        switch (vcp->vc_state) {

        case SMBIOD_ST_IDLE:
                smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
                cv_signal(&vcp->iod_idle);

        case SMBIOD_ST_RECONNECT:
                rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);

        case SMBIOD_ST_VCACTIVE:
                err = 0; /* success! */

        case SMBIOD_ST_RCFAILED:
        case SMBIOD_ST_DEAD: