Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / fs / smbclnt / netsmb / smb_iod.c
blobdb82fa0958950b5ec4b5c06fc52409f891ce73d7
1 /*
2 * Copyright (c) 2000-2001 Boris Popov
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
37 * Use is subject to license terms.
40 #ifdef DEBUG
41 /* See sys/queue.h */
42 #define QUEUEDEBUG 1
43 #endif
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
48 #include <sys/proc.h>
49 #include <sys/thread.h>
50 #include <sys/file.h>
51 #include <sys/kmem.h>
52 #include <sys/unistd.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/types.h>
56 #include <sys/ddi.h>
57 #include <sys/sunddi.h>
58 #include <sys/stream.h>
59 #include <sys/strsun.h>
60 #include <sys/time.h>
61 #include <sys/class.h>
62 #include <sys/disp.h>
63 #include <sys/cmn_err.h>
64 #include <sys/zone.h>
65 #include <sys/sdt.h>
67 #include <netsmb/smb_osdep.h>
69 #include <netsmb/smb.h>
70 #include <netsmb/smb_conn.h>
71 #include <netsmb/smb_rq.h>
72 #include <netsmb/smb_subr.h>
73 #include <netsmb/smb_tran.h>
74 #include <netsmb/smb_trantcp.h>
76 int smb_iod_send_echo(smb_vc_t *);
79 * This is set/cleared when smbfs loads/unloads
80 * No locks should be necessary, because smbfs
81 * can't unload until all the mounts are gone.
83 static smb_fscb_t *fscb;
84 void
85 smb_fscb_set(smb_fscb_t *cb)
87 fscb = cb;
90 static void
91 smb_iod_share_disconnected(smb_share_t *ssp)
94 smb_share_invalidate(ssp);
96 /* smbfs_dead() */
97 if (fscb && fscb->fscb_disconn) {
98 fscb->fscb_disconn(ssp);
103 * State changes are important and infrequent.
104 * Make them easily observable via dtrace.
106 void
107 smb_iod_newstate(struct smb_vc *vcp, int state)
109 vcp->vc_state = state;
112 /* Lock Held version of the next function. */
113 static inline void
114 smb_iod_rqprocessed_LH(
115 struct smb_rq *rqp,
116 int error,
117 int flags)
119 rqp->sr_flags |= flags;
120 rqp->sr_lerror = error;
121 rqp->sr_rpgen++;
122 rqp->sr_state = SMBRQ_NOTIFIED;
123 cv_broadcast(&rqp->sr_cond);
/*
 * Mark a request as processed.  Takes and drops the request
 * lock around the lock-held worker above.
 */
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
138 static void
139 smb_iod_invrq(struct smb_vc *vcp)
141 struct smb_rq *rqp;
144 * Invalidate all outstanding requests for this connection
146 rw_enter(&vcp->iod_rqlock, RW_READER);
147 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
148 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
150 rw_exit(&vcp->iod_rqlock);
154 * Called by smb_vc_rele, smb_vc_kill, and by the driver
155 * close entry point if the IOD closes its dev handle.
157 * Forcibly kill the connection and IOD.
159 void
160 smb_iod_disconnect(struct smb_vc *vcp)
164 * Inform everyone of the state change.
166 SMB_VC_LOCK(vcp);
167 if (vcp->vc_state != SMBIOD_ST_DEAD) {
168 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
169 cv_broadcast(&vcp->vc_statechg);
171 SMB_VC_UNLOCK(vcp);
174 * Let's be safe here and avoid doing any
175 * call across the network while trying to
176 * shut things down. If we just disconnect,
177 * the server will take care of the logoff.
179 SMB_TRAN_DISCONNECT(vcp);
183 * Send one request.
185 * Called by _addrq (for internal requests)
186 * and _sendall (via _addrq, _multirq, _waitrq)
188 static int
189 smb_iod_sendrq(struct smb_rq *rqp)
191 struct smb_vc *vcp = rqp->sr_vc;
192 mblk_t *m;
193 int error;
195 ASSERT(vcp);
196 ASSERT(SEMA_HELD(&vcp->vc_sendlock));
197 ASSERT(RW_READ_HELD(&vcp->iod_rqlock));
200 * Note: Anything special for SMBR_INTERNAL here?
202 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
203 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
204 return (ENOTCONN);
209 * On the first send, set the MID and (maybe)
210 * the signing sequence numbers. The increments
211 * here are serialized by vc_sendlock
213 if (rqp->sr_sendcnt == 0) {
215 rqp->sr_mid = vcp->vc_next_mid++;
217 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
219 * We're signing requests and verifying
220 * signatures on responses. Set the
221 * sequence numbers of the request and
222 * response here, used in smb_rq_verify.
224 rqp->sr_seqno = vcp->vc_next_seq++;
225 rqp->sr_rseqno = vcp->vc_next_seq++;
228 /* Fill in UID, TID, MID, etc. */
229 smb_rq_fillhdr(rqp);
232 * Sign the message now that we're finally done
233 * filling in the SMB header fields, etc.
235 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
236 smb_rq_sign(rqp);
239 if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
240 smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
242 * If all attempts to send a request failed, then
243 * something is seriously hosed.
245 return (ENOTCONN);
249 * Replaced m_copym() with Solaris copymsg() which does the same
250 * work when we want to do a M_COPYALL.
251 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
253 m = copymsg(rqp->sr_rq.mb_top);
255 #ifdef DTRACE_PROBE
256 DTRACE_PROBE2(smb_iod_sendrq,
257 (smb_rq_t *), rqp, (mblk_t *), m);
258 #else
259 SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
260 #endif
261 m_dumpm(m);
263 if (m != NULL) {
264 error = SMB_TRAN_SEND(vcp, m);
265 m = 0; /* consumed by SEND */
266 } else
267 error = ENOBUFS;
269 rqp->sr_lerror = error;
270 if (error == 0) {
271 SMBRQ_LOCK(rqp);
272 rqp->sr_flags |= SMBR_SENT;
273 rqp->sr_state = SMBRQ_SENT;
274 if (rqp->sr_flags & SMBR_SENDWAIT)
275 cv_broadcast(&rqp->sr_cond);
276 SMBRQ_UNLOCK(rqp);
277 return (0);
280 * Check for fatal errors
282 if (SMB_TRAN_FATAL(vcp, error)) {
284 * No further attempts should be made
286 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
287 return (ENOTCONN);
289 if (error)
290 SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);
292 #ifdef APPLE
293 /* If proc waiting on rqp was signaled... */
294 if (smb_rq_intr(rqp))
295 smb_iod_rqprocessed(rqp, EINTR, 0);
296 #endif
298 return (0);
301 static int
302 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
304 mblk_t *m;
305 uchar_t *hp;
306 int error;
308 top:
309 m = NULL;
310 error = SMB_TRAN_RECV(vcp, &m);
311 if (error == EAGAIN)
312 goto top;
313 if (error)
314 return (error);
315 ASSERT(m);
317 m = m_pullup(m, SMB_HDRLEN);
318 if (m == NULL) {
319 return (ENOSR);
323 * Check the SMB header
325 hp = mtod(m, uchar_t *);
326 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
327 m_freem(m);
328 return (EPROTO);
331 *mpp = m;
332 return (0);
336 * Process incoming packets
338 * This is the "reader" loop, run by the IOD thread
339 * while in state SMBIOD_ST_VCACTIVE. The loop now
340 * simply blocks in the socket recv until either a
341 * message arrives, or a disconnect.
343 * Any non-zero error means the IOD should terminate.
346 smb_iod_recvall(struct smb_vc *vcp)
348 struct smb_rq *rqp;
349 mblk_t *m;
350 uchar_t *hp;
351 ushort_t mid;
352 int error = 0;
353 int etime_count = 0; /* for "server not responding", etc. */
355 for (;;) {
357 * Check whether someone "killed" this VC,
358 * or is asking the IOD to terminate.
361 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
362 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
363 error = 0;
364 break;
367 if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
368 SMBIODEBUG("SHUTDOWN set\n");
369 /* This IOD thread will terminate. */
370 SMB_VC_LOCK(vcp);
371 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
372 cv_broadcast(&vcp->vc_statechg);
373 SMB_VC_UNLOCK(vcp);
374 error = EINTR;
375 break;
378 m = NULL;
379 error = smb_iod_recv1(vcp, &m);
381 if (error == ETIME &&
382 vcp->iod_rqlist.tqh_first != NULL) {
384 * Nothing received for 15 seconds and
385 * we have requests in the queue.
387 etime_count++;
390 * Once, at 15 sec. notify callbacks
391 * and print the warning message.
393 if (etime_count == 1) {
394 /* Was: smb_iod_notify_down(vcp); */
395 if (fscb && fscb->fscb_down)
396 smb_vc_walkshares(vcp,
397 fscb->fscb_down);
398 zprintf(vcp->vc_zoneid,
399 "SMB server %s not responding\n",
400 vcp->vc_srvname);
404 * At 30 sec. try sending an echo, and then
405 * once a minute thereafter.
407 if ((etime_count & 3) == 2) {
408 (void) smb_iod_send_echo(vcp);
411 continue;
412 } /* ETIME && requests in queue */
414 if (error == ETIME) {
416 * If the IOD thread holds the last reference
417 * to this VC, let the IOD thread terminate.
419 if (vcp->vc_co.co_usecount > 1)
420 continue;
421 SMB_VC_LOCK(vcp);
422 if (vcp->vc_co.co_usecount == 1) {
423 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
424 SMB_VC_UNLOCK(vcp);
425 error = 0;
426 break;
428 SMB_VC_UNLOCK(vcp);
429 continue;
430 } /* error == ETIME */
432 if (error) {
434 * The recv. above returned some error
435 * we can't continue from i.e. ENOTCONN.
436 * It's dangerous to continue here.
437 * (possible infinite loop!)
439 * If we have requests enqueued, next
440 * state is reconnecting, else idle.
442 int state;
443 SMB_VC_LOCK(vcp);
444 state = (vcp->iod_rqlist.tqh_first != NULL) ?
445 SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
446 smb_iod_newstate(vcp, state);
447 cv_broadcast(&vcp->vc_statechg);
448 SMB_VC_UNLOCK(vcp);
449 error = 0;
450 break;
454 * Received something. Yea!
456 if (etime_count) {
457 etime_count = 0;
459 zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
460 vcp->vc_srvname);
462 /* Was: smb_iod_notify_up(vcp); */
463 if (fscb && fscb->fscb_up)
464 smb_vc_walkshares(vcp, fscb->fscb_up);
468 * Have an SMB packet. The SMB header was
469 * checked in smb_iod_recv1().
470 * Find the request...
472 hp = mtod(m, uchar_t *);
473 /*LINTED*/
474 mid = letohs(SMB_HDRMID(hp));
475 SMBIODEBUG("mid %04x\n", (uint_t)mid);
477 rw_enter(&vcp->iod_rqlock, RW_READER);
478 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
480 if (rqp->sr_mid != mid)
481 continue;
483 DTRACE_PROBE2(smb_iod_recvrq,
484 (smb_rq_t *), rqp, (mblk_t *), m);
485 m_dumpm(m);
487 SMBRQ_LOCK(rqp);
488 if (rqp->sr_rp.md_top == NULL) {
489 md_initm(&rqp->sr_rp, m);
490 } else {
491 if (rqp->sr_flags & SMBR_MULTIPACKET) {
492 md_append_record(&rqp->sr_rp, m);
493 } else {
494 SMBRQ_UNLOCK(rqp);
495 SMBSDEBUG("duplicate response %d "
496 "(ignored)\n", mid);
497 break;
500 smb_iod_rqprocessed_LH(rqp, 0, 0);
501 SMBRQ_UNLOCK(rqp);
502 break;
505 if (rqp == NULL) {
506 int cmd = SMB_HDRCMD(hp);
508 if (cmd != SMB_COM_ECHO)
509 SMBSDEBUG("drop resp: mid %d, cmd %d\n",
510 (uint_t)mid, cmd);
511 /* smb_printrqlist(vcp); */
512 m_freem(m);
514 rw_exit(&vcp->iod_rqlock);
518 return (error);
522 * The IOD receiver thread has requests pending and
523 * has not received anything in a while. Try to
524 * send an SMB echo request. It's tricky to do a
525 * send from the IOD thread because we can't block.
527 * Using tmo=SMBNOREPLYWAIT in the request
528 * so smb_rq_reply will skip smb_iod_waitrq.
529 * The smb_smb_echo call uses SMBR_INTERNAL
530 * to avoid calling smb_iod_sendall().
533 smb_iod_send_echo(smb_vc_t *vcp)
535 smb_cred_t scred;
536 int err;
538 smb_credinit(&scred, NULL);
539 err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
540 smb_credrele(&scred);
541 return (err);
545 * The IOD thread is now just a "reader",
546 * so no more smb_iod_request(). Yea!
550 * Place request in the queue, and send it now if possible.
551 * Called with no locks held.
554 smb_iod_addrq(struct smb_rq *rqp)
556 struct smb_vc *vcp = rqp->sr_vc;
557 int error, save_newrq;
559 ASSERT(rqp->sr_cred);
562 * State should be correct after the check in
563 * smb_rq_enqueue(), but we dropped locks...
565 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
566 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
567 return (ENOTCONN);
571 * Requests from the IOD itself are marked _INTERNAL,
572 * and get some special treatment to avoid blocking
573 * the reader thread (so we don't deadlock).
574 * The request is not yet on the queue, so we can
575 * modify it's state here without locks.
576 * Only thing using this now is ECHO.
578 rqp->sr_owner = curthread;
579 if (rqp->sr_owner == vcp->iod_thr) {
580 rqp->sr_flags |= SMBR_INTERNAL;
583 * This is a request from the IOD thread.
584 * Always send directly from this thread.
585 * Note lock order: iod_rqlist, vc_sendlock
587 rw_enter(&vcp->iod_rqlock, RW_WRITER);
588 TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
589 rw_downgrade(&vcp->iod_rqlock);
592 * Note: iod_sendrq expects vc_sendlock,
593 * so take that here, but carefully:
594 * Never block the IOD thread here.
596 if (sema_tryp(&vcp->vc_sendlock) == 0) {
597 SMBIODEBUG("sendlock busy\n");
598 error = EAGAIN;
599 } else {
600 /* Have vc_sendlock */
601 error = smb_iod_sendrq(rqp);
602 sema_v(&vcp->vc_sendlock);
605 rw_exit(&vcp->iod_rqlock);
608 * In the non-error case, _removerq
609 * is done by either smb_rq_reply
610 * or smb_iod_waitrq.
612 if (error)
613 smb_iod_removerq(rqp);
615 return (error);
618 rw_enter(&vcp->iod_rqlock, RW_WRITER);
620 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
621 /* iod_rqlock/WRITER protects iod_newrq */
622 save_newrq = vcp->iod_newrq;
623 vcp->iod_newrq++;
625 rw_exit(&vcp->iod_rqlock);
628 * Now send any requests that need to be sent,
629 * including the one we just put on the list.
630 * Only the thread that found iod_newrq==0
631 * needs to run the send loop.
633 if (save_newrq == 0)
634 smb_iod_sendall(vcp);
636 return (0);
640 * Mark an SMBR_MULTIPACKET request as
641 * needing another send. Similar to the
642 * "normal" part of smb_iod_addrq.
645 smb_iod_multirq(struct smb_rq *rqp)
647 struct smb_vc *vcp = rqp->sr_vc;
648 int save_newrq;
650 ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
652 if (rqp->sr_flags & SMBR_INTERNAL)
653 return (EINVAL);
655 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
656 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
657 return (ENOTCONN);
660 rw_enter(&vcp->iod_rqlock, RW_WRITER);
662 /* Already on iod_rqlist, just reset state. */
663 rqp->sr_state = SMBRQ_NOTSENT;
665 /* iod_rqlock/WRITER protects iod_newrq */
666 save_newrq = vcp->iod_newrq;
667 vcp->iod_newrq++;
669 rw_exit(&vcp->iod_rqlock);
672 * Now send any requests that need to be sent,
673 * including the one we just marked NOTSENT.
674 * Only the thread that found iod_newrq==0
675 * needs to run the send loop.
677 if (save_newrq == 0)
678 smb_iod_sendall(vcp);
680 return (0);
684 void
685 smb_iod_removerq(struct smb_rq *rqp)
687 struct smb_vc *vcp = rqp->sr_vc;
689 rw_enter(&vcp->iod_rqlock, RW_WRITER);
690 #ifdef QUEUEDEBUG
692 * Make sure we have not already removed it.
693 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
694 * XXX: Don't like the constant 1 here...
696 ASSERT(rqp->sr_link.tqe_next != (void *)1L);
697 #endif
698 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
699 rw_exit(&vcp->iod_rqlock);
705 * Wait for a request to complete.
707 * For normal requests, we need to deal with
708 * ioc_muxcnt dropping below vc_maxmux by
709 * making arrangements to send more...
712 smb_iod_waitrq(struct smb_rq *rqp)
714 struct smb_vc *vcp = rqp->sr_vc;
715 clock_t tr, tmo1, tmo2;
716 int error, rc;
718 if (rqp->sr_flags & SMBR_INTERNAL) {
719 ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
720 smb_iod_removerq(rqp);
721 return (EAGAIN);
725 * Make sure this is NOT the IOD thread,
726 * or the wait below will stop the reader.
728 ASSERT(curthread != vcp->iod_thr);
730 SMBRQ_LOCK(rqp);
733 * First, wait for the request to be sent. Normally the send
734 * has already happened by the time we get here. However, if
735 * we have more than maxmux entries in the request list, our
736 * request may not be sent until other requests complete.
737 * The wait in this case is due to local I/O demands, so
738 * we don't want the server response timeout to apply.
740 * If a request is allowed to interrupt this wait, then the
741 * request is cancelled and never sent OTW. Some kinds of
742 * requests should never be cancelled (i.e. close) and those
743 * are marked SMBR_NOINTR_SEND so they either go eventually,
744 * or a connection close will terminate them with ENOTCONN.
746 while (rqp->sr_state == SMBRQ_NOTSENT) {
747 rqp->sr_flags |= SMBR_SENDWAIT;
748 if (rqp->sr_flags & SMBR_NOINTR_SEND) {
749 cv_wait(&rqp->sr_cond, &rqp->sr_lock);
750 rc = 1;
751 } else
752 rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
753 rqp->sr_flags &= ~SMBR_SENDWAIT;
754 if (rc == 0) {
755 SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
756 error = EINTR;
757 goto out;
762 * The request has been sent. Now wait for the response,
763 * with the timeout specified for this request.
764 * Compute all the deadlines now, so we effectively
765 * start the timer(s) after the request is sent.
767 if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
768 tmo1 = SEC_TO_TICK(smb_timo_notice);
769 else
770 tmo1 = 0;
771 tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
774 * As above, we don't want to allow interrupt for some
775 * requests like open, because we could miss a succesful
776 * response and therefore "leak" a FID. Such requests
777 * are marked SMBR_NOINTR_RECV to prevent that.
779 * If "slow server" warnings are enabled, wait first
780 * for the "notice" timeout, and warn if expired.
782 if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
783 if (rqp->sr_flags & SMBR_NOINTR_RECV)
784 tr = cv_reltimedwait(&rqp->sr_cond,
785 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
786 else
787 tr = cv_reltimedwait_sig(&rqp->sr_cond,
788 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
789 if (tr == 0) {
790 error = EINTR;
791 goto out;
793 if (tr < 0) {
794 #ifdef DTRACE_PROBE
795 DTRACE_PROBE1(smb_iod_waitrq1,
796 (smb_rq_t *), rqp);
797 #endif
798 #ifdef NOT_YET
799 /* Want this to go ONLY to the user. */
800 uprintf("SMB server %s has not responded"
801 " to request %d after %d seconds..."
802 " (still waiting).\n", vcp->vc_srvname,
803 rqp->sr_mid, smb_timo_notice);
804 #endif
809 * Keep waiting until tmo2 is expired.
811 while (rqp->sr_rpgen == rqp->sr_rplast) {
812 if (rqp->sr_flags & SMBR_NOINTR_RECV)
813 tr = cv_timedwait(&rqp->sr_cond,
814 &rqp->sr_lock, tmo2);
815 else
816 tr = cv_timedwait_sig(&rqp->sr_cond,
817 &rqp->sr_lock, tmo2);
818 if (tr == 0) {
819 error = EINTR;
820 goto out;
822 if (tr < 0) {
823 #ifdef DTRACE_PROBE
824 DTRACE_PROBE1(smb_iod_waitrq2,
825 (smb_rq_t *), rqp);
826 #endif
827 #ifdef NOT_YET
828 /* Want this to go ONLY to the user. */
829 uprintf("SMB server %s has not responded"
830 " to request %d after %d seconds..."
831 " (giving up).\n", vcp->vc_srvname,
832 rqp->sr_mid, rqp->sr_timo);
833 #endif
834 error = ETIME;
835 goto out;
837 /* got wakeup */
839 error = rqp->sr_lerror;
840 rqp->sr_rplast++;
842 out:
843 SMBRQ_UNLOCK(rqp);
846 * MULTIPACKET request must stay in the list.
847 * They may need additional responses.
849 if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
850 smb_iod_removerq(rqp);
853 * Some request has been completed.
854 * If we reached the mux limit,
855 * re-run the send loop...
857 if (vcp->iod_muxfull)
858 smb_iod_sendall(vcp);
860 return (error);
864 * Shutdown all outstanding I/O requests on the specified share with
865 * ENXIO; used when unmounting a share. (There shouldn't be any for a
866 * non-forced unmount; if this is a forced unmount, we have to shutdown
867 * the requests as part of the unmount process.)
869 void
870 smb_iod_shutdown_share(struct smb_share *ssp)
872 struct smb_vc *vcp = SSTOVC(ssp);
873 struct smb_rq *rqp;
876 * Loop through the list of requests and shutdown the ones
877 * that are for the specified share.
879 rw_enter(&vcp->iod_rqlock, RW_READER);
880 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
881 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
882 smb_iod_rqprocessed(rqp, EIO, 0);
884 rw_exit(&vcp->iod_rqlock);
888 * Send all requests that need sending.
889 * Called from _addrq, _multirq, _waitrq
891 void
892 smb_iod_sendall(smb_vc_t *vcp)
894 struct smb_rq *rqp;
895 int error, muxcnt;
898 * Clear "newrq" to make sure threads adding
899 * new requests will run this function again.
901 rw_enter(&vcp->iod_rqlock, RW_WRITER);
902 vcp->iod_newrq = 0;
905 * We only read iod_rqlist, so downgrade rwlock.
906 * This allows the IOD to handle responses while
907 * some requesting thread may be blocked in send.
909 rw_downgrade(&vcp->iod_rqlock);
912 * Serialize to prevent multiple senders.
913 * Note lock order: iod_rqlock, vc_sendlock
915 sema_p(&vcp->vc_sendlock);
918 * Walk the list of requests and send when possible.
919 * We avoid having more than vc_maxmux requests
920 * outstanding to the server by traversing only
921 * vc_maxmux entries into this list. Simple!
923 ASSERT(vcp->vc_maxmux > 0);
924 error = muxcnt = 0;
925 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
927 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
928 error = ENOTCONN; /* stop everything! */
929 break;
932 if (rqp->sr_state == SMBRQ_NOTSENT) {
933 error = smb_iod_sendrq(rqp);
934 if (error)
935 break;
938 if (++muxcnt == vcp->vc_maxmux) {
939 SMBIODEBUG("muxcnt == vc_maxmux\n");
940 break;
946 * If we have vc_maxmux requests outstanding,
947 * arrange for _waitrq to call _sendall as
948 * requests are completed.
950 vcp->iod_muxfull =
951 (muxcnt < vcp->vc_maxmux) ? 0 : 1;
953 sema_v(&vcp->vc_sendlock);
954 rw_exit(&vcp->iod_rqlock);
958 smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
960 struct file *fp = NULL;
961 int err = 0;
964 * This is called by the one-and-only
965 * IOD thread for this VC.
967 ASSERT(vcp->iod_thr == curthread);
970 * Get the network transport file pointer,
971 * and "loan" it to our transport module.
973 if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
974 err = EBADF;
975 goto out;
977 if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
978 goto out;
981 * In case of reconnect, tell any enqueued requests
982 * then can GO!
984 SMB_VC_LOCK(vcp);
985 vcp->vc_genid++; /* possibly new connection */
986 smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
987 cv_broadcast(&vcp->vc_statechg);
988 SMB_VC_UNLOCK(vcp);
991 * The above cv_broadcast should be sufficient to
992 * get requests going again.
994 * If we have a callback function, run it.
995 * Was: smb_iod_notify_connected()
997 if (fscb && fscb->fscb_connect)
998 smb_vc_walkshares(vcp, fscb->fscb_connect);
1001 * Run the "reader" loop.
1003 err = smb_iod_recvall(vcp);
1006 * The reader loop returned, so we must have a
1007 * new state. (disconnected or reconnecting)
1009 * Notify shares of the disconnect.
1010 * Was: smb_iod_notify_disconnect()
1012 smb_vc_walkshares(vcp, smb_iod_share_disconnected);
1015 * The reader loop function returns only when
1016 * there's been an error on the connection, or
1017 * this VC has no more references. It also
1018 * updates the state before it returns.
1020 * Tell any requests to give up or restart.
1022 smb_iod_invrq(vcp);
1024 out:
1025 /* Recall the file descriptor loan. */
1026 (void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
1027 if (fp != NULL) {
1028 releasef(vcp->vc_tran_fd);
1031 return (err);
1035 * Wait around for someone to ask to use this VC.
1036 * If the VC has only the IOD reference, then
1037 * wait only a minute or so, then drop it.
1040 smb_iod_vc_idle(struct smb_vc *vcp)
1042 clock_t tr, delta = SEC_TO_TICK(15);
1043 int err = 0;
1046 * This is called by the one-and-only
1047 * IOD thread for this VC.
1049 ASSERT(vcp->iod_thr == curthread);
1051 SMB_VC_LOCK(vcp);
1052 while (vcp->vc_state == SMBIOD_ST_IDLE) {
1053 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1054 delta, TR_CLOCK_TICK);
1055 if (tr == 0) {
1056 err = EINTR;
1057 break;
1059 if (tr < 0) {
1060 /* timeout */
1061 if (vcp->vc_co.co_usecount == 1) {
1062 /* Let this IOD terminate. */
1063 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
1064 /* nobody to cv_broadcast */
1065 break;
1069 SMB_VC_UNLOCK(vcp);
1071 return (err);
1075 * After a failed reconnect attempt, smbiod will
1076 * call this to make current requests error out.
1079 smb_iod_vc_rcfail(struct smb_vc *vcp)
1081 clock_t tr;
1082 int err = 0;
1085 * This is called by the one-and-only
1086 * IOD thread for this VC.
1088 ASSERT(vcp->iod_thr == curthread);
1090 if (vcp->vc_state != SMBIOD_ST_RECONNECT)
1091 return (EINVAL);
1093 SMB_VC_LOCK(vcp);
1095 smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1096 cv_broadcast(&vcp->vc_statechg);
1099 * Short wait here for two reasons:
1100 * (1) Give requests a chance to error out.
1101 * (2) Prevent immediate retry.
1103 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1104 SEC_TO_TICK(5), TR_CLOCK_TICK);
1105 if (tr == 0)
1106 err = EINTR;
1108 smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1109 cv_broadcast(&vcp->vc_statechg);
1111 SMB_VC_UNLOCK(vcp);
1113 return (err);
1117 * Ask the IOD to reconnect (if not already underway)
1118 * then wait for the reconnect to finish.
1121 smb_iod_reconnect(struct smb_vc *vcp)
1123 int err = 0, rv;
1125 SMB_VC_LOCK(vcp);
1126 again:
1127 switch (vcp->vc_state) {
1129 case SMBIOD_ST_IDLE:
1130 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1131 cv_signal(&vcp->iod_idle);
1132 /* FALLTHROUGH */
1134 case SMBIOD_ST_RECONNECT:
1135 rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1136 if (rv == 0) {
1137 err = EINTR;
1138 break;
1140 goto again;
1142 case SMBIOD_ST_VCACTIVE:
1143 err = 0; /* success! */
1144 break;
1146 case SMBIOD_ST_RCFAILED:
1147 case SMBIOD_ST_DEAD:
1148 default:
1149 err = ENOTCONN;
1150 break;
1153 SMB_VC_UNLOCK(vcp);
1154 return (err);