/*	$NetBSD: smb_iod.c,v 1.34 2009/09/04 16:16:52 pooka Exp $	*/

/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/netsmb/smb_iod.c,v 1.4 2001/12/09 17:48:08 arr Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smb_iod.c,v 1.34 2009/09/04 16:16:52 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

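/*
 * Locking conventions: iod_evlock protects the event list (iod_evlist),
 * and iod_rqlock protects the request list (iod_rqlist) together with
 * the iod_muxcnt/iod_muxwant accounting.  The iod thread idles in
 * tsleep() on iod_flags, so smb_iod_wakeup() is how producers of new
 * events and requests rouse it.
 */
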
static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static bool smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

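/*
 * iod_state walks NOTCONN -> RECONNECT -> TRANACTIVE -> VCACTIVE while a
 * connection is (re)established (see smb_iod_connect() below); a fatal
 * transport error sends it to DEAD via smb_iod_dead(), and
 * smb_iod_disconnect() returns it to NOTCONN.
 */
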
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{

	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);

	callout_stop(&rqp->sr_timo_ch);
	if (rqp->sr_recvcallback)
		(*rqp->sr_recvcallback)(rqp->sr_recvarg);
}

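/*
 * smb_iod_rqprocessed() is the single completion point for a request:
 * the wakeup on &rqp->sr_state pairs with the mtsleep() in
 * smb_iod_waitrq(), and callout_stop() disarms the per-request timeout
 * armed by smb_iod_sendrq().
 */
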
static void
smb_iod_rqtimedout(void *arg)
{

	smb_iod_rqprocessed((struct smb_rq *)arg, ETIMEDOUT);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (rqp->sr_flags & SMBR_INTERNAL)
			continue;
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, l);
	SMB_TRAN_DONE(vcp, l);
	vcp->vc_tdata = NULL;
}

static void
smb_iod_dead(struct smbiod *iod)
{

	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	int error;

	SMBIODEBUG(("%d\n", iod->iod_state));
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBIODEBUG(("called for already opened connection\n"));
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}

#define ithrow(cmd) \
	if ((error = cmd)) \
		goto fail

	ithrow(SMB_TRAN_CREATE(vcp, l));
	SMBIODEBUG(("tcreate\n"));
	if (vcp->vc_laddr) {
		ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, l));
	}
	SMBIODEBUG(("tbind\n"));
	ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, l));
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG(("tconnect\n"));
/*	vcp->vc_mid = 0;*/
	ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
	SMBIODEBUG(("snegotiate\n"));
	ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
	iod->iod_state = SMBIOD_ST_VCACTIVE;

	SMBIODEBUG(("completed\n"));
	smb_iod_invrq(iod);

	return (0);

    fail:
	smb_iod_dead(iod);
	return (error);
}

static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG(("\n"));
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG(("tree reconnect\n"));
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

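/*
 * SMBS_RECONNECTING brackets the tree connect so that other threads can
 * see the share is being re-established; the wakeup on ss_vcgenid
 * unblocks waiters elsewhere in netsmb that sleep until the share is
 * valid for the current connection generation.
 */
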
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct lwp *l = iod->iod_l;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	struct timespec ts;
	int error;

	SMBIODEBUG(("iod_state = %d, rqmid %d\n", iod->iod_state, rqp->sr_mid));
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
		u_int16_t tid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
		u_int16_t rquid = vcp ? vcp->vc_smbuid : 0;
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		SMBRQ_PUTLE16(rqp->sr_rqtid, tid);
		SMBRQ_PUTLE16(rqp->sr_rquid, rquid);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0));
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
	error = rqp->sr_lerror = (m) ? SMB_TRAN_SEND(vcp, m, l) : ENOBUFS;
	if (error == 0) {
		if (rqp->sr_timo > 0)
			callout_reset(&rqp->sr_timo_ch, rqp->sr_timo,
			    smb_iod_rqtimedout, rqp);

		if (rqp->sr_flags & SMBR_NOWAIT) {
			/* caller doesn't want to wait, flag as processed */
			smb_iod_rqprocessed(rqp, 0);
			return 0;
		}

		getnanotime(&ts);
		iod->iod_lastrqsent = ts;

		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (vcp && SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

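/*
 * Retry policy in smb_iod_sendrq(): sr_sendcnt counts send attempts; on
 * the first attempt the TID and UID words are patched into the request
 * header, and after more than 5 failed sends the request is completed
 * with SMBR_RESTART set so the owner can reissue it on a fresh
 * connection.
 */
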
/*
 * Process incoming packets
 */
static void
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return;
	    default:
		break;
	}

	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, l);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;

		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * Now we got an entire and possibly invalid SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char *);
		if (memcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG(("mid %04x\n", (u_int)mid));
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBIODEBUG(("duplicate response %d (ignored)\n", mid));
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBIODEBUG(("drop resp with mid %d\n", (u_int)mid));
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_l)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
}

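/*
 * Responses are matched to requests purely by the 16-bit MID in the SMB
 * header: a response with no matching entry on iod_rqlist is dropped,
 * and a second response for a request without SMBR_MULTIPACKET is
 * ignored as a duplicate.
 */
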
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG(("\n"));
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	SIMPLEQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	mtsleep(evp, PWAIT | PNORELOCK, "smbevw", 0, SMB_IOD_EVLOCKPTR(iod));
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

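/*
 * A synchronous event blocks the caller until the iod thread has
 * processed it and filled in ev_error, e.g. (as smb_iod_destroy() does
 * below):
 *
 *	error = smb_iod_request(iod,
 *	    SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
 *
 * Without SMBIOD_EV_SYNC the event is merely queued and 0 is returned.
 */
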
/*
 * Place request in the queue.
 * Requests from smbiod have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_cred->scr_l == iod->iod_l) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbsndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		/*
		 * Return error to force the caller reissue the request
		 * using new connection state.
		 */
		return ERESTART;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0)
			panic("%s: vc maxmux == 0", __func__);
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		/* XXX use interruptible sleep? */
		mtsleep(&iod->iod_muxwant, PWAIT, "smbmux",
		    0, SMB_IOD_RQLOCKPTR(iod));
	}
	iod->iod_muxcnt++;
	SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

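/*
 * Flow control: at most vc_maxmux requests may be outstanding at once;
 * further callers sleep on iod_muxwant until smb_iod_removerq() drops
 * iod_muxcnt and wakes them.  Requests issued by the iod thread itself
 * (SMBR_INTERNAL) bypass this accounting and are sent synchronously
 * right here, since the thread cannot queue work for itself.
 */
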
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		mtsleep(rqp, PWAIT, "smbxrm", 0, SMB_IOD_RQLOCKPTR(iod));
	}
	SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbirq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast) {
		/* XXX interruptible sleep? */
		mtsleep(&rqp->sr_state, PWAIT, "smbwrq", 0,
		    SMBRQ_SLOCKPTR(rqp));
	}
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If request should stay in the list, then reinsert it
		 * at the end of queue so other waiters get a chance to proceed
		 */
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

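/*
 * For SMBR_MULTIPACKET exchanges each smb_iod_waitrq() call consumes one
 * response (sr_rplast catches up with sr_rpgen) and the request is moved
 * to the tail of the queue instead of being removed, so later responses
 * can still be matched and other waiters get their turn.
 */
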
static bool
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_rq *rqp;
	int herror;
	bool sentany = false;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (__predict_false(rqp->sr_state == SMBRQ_NOTSENT)) {
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}

			if (__predict_false(herror != 0))
				break;
			sentany = true;
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);

	return sentany;
}

595 * "main" function for smbiod daemon
598 smb_iod_main(struct smbiod
*iod
)
601 struct smb_vc
*vcp
= iod
->iod_vc
;
602 struct timespec tsnow
;
604 struct smbiod_event
*evp
;
609 * Check all interesting events
613 evp
= SIMPLEQ_FIRST(&iod
->iod_evlist
);
615 SMB_IOD_EVUNLOCK(iod
);
618 SIMPLEQ_REMOVE_HEAD(&iod
->iod_evlist
, ev_link
);
619 evp
->ev_type
|= SMBIOD_EV_PROCESSING
;
620 SMB_IOD_EVUNLOCK(iod
);
621 switch (evp
->ev_type
& SMBIOD_EV_MASK
) {
622 case SMBIOD_EV_CONNECT
:
623 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
624 evp
->ev_error
= smb_iod_connect(iod
);
626 case SMBIOD_EV_DISCONNECT
:
627 evp
->ev_error
= smb_iod_disconnect(iod
);
629 case SMBIOD_EV_TREECONNECT
:
630 evp
->ev_error
= smb_iod_treeconnect(iod
, evp
->ev_ident
);
632 case SMBIOD_EV_SHUTDOWN
:
633 iod
->iod_flags
|= SMBIOD_SHUTDOWN
;
635 case SMBIOD_EV_NEWRQ
:
638 if (evp
->ev_type
& SMBIOD_EV_SYNC
) {
641 SMB_IOD_EVUNLOCK(iod
);
646 if (iod
->iod_state
== SMBIOD_ST_VCACTIVE
) {
648 timespecsub(&tsnow
, &iod
->iod_pingtimo
);
649 if (timespeccmp(&tsnow
, &iod
->iod_lastrqsent
, >)) {
650 smb_smb_echo(vcp
, &iod
->iod_scred
);
656 * Do a send/receive cycle once and then as many times
657 * afterwards as we can send out new data. This is to make
658 * sure we got all data sent which might have ended up in the
659 * queue during the receive phase (which might block releasing
662 smb_iod_sendall(iod
);
663 smb_iod_recvall(iod
);
664 while (smb_iod_sendall(iod
)) {
665 smb_iod_recvall(iod
);
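/*
 * The ping check above keeps an otherwise idle VC alive: once nothing
 * has been sent for iod_pingtimo, an SMB echo is issued through
 * smb_smb_echo().
 */
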
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	/*
	 * Here we assume that the thread structure will be the same
	 * for an entire kthread (kproc, to be more precise) life.
	 */
	KASSERT(iod->iod_l == curlwp);
	smb_makescred(&iod->iod_scred, iod->iod_l, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		SMBIODEBUG(("going to sleep\n"));
		/*
		 * technically wakeup every hz is unnecessary, but keep
		 * this here until smb has been made mpsafe.
		 */
		tsleep(&iod->iod_flags, PSOCK, "smbidle", hz);
	}
	kthread_exit(0);
}

int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "smbrql");
	SIMPLEQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "smbevl");
	SIMPLEQ_INIT(&iod->iod_evlist);

#ifdef __NetBSD__
	error = kthread_create(PRI_NONE, 0, NULL, smb_iod_thread, iod,
	    &iod->iod_l, "smbiod%d", iod->iod_id);
#else
	error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
#endif
	if (error) {
		SMBIODEBUG(("can't start smbiod: %d", error));
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

int
smb_iod_destroy(struct smbiod *iod)
{

	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

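/*
 * Lifecycle: smb_iod_create() is called when a VC is set up and
 * smb_iod_destroy() when it is torn down.  The shutdown event is posted
 * with SMBIOD_EV_SYNC, so the iod thread has seen SMBIOD_SHUTDOWN
 * before the iod structure and its locks are freed.
 */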