/*	$NetBSD: smb_iod.c,v 1.34 2009/09/04 16:16:52 pooka Exp $	*/

/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/netsmb/smb_iod.c,v 1.4 2001/12/09 17:48:08 arr Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smb_iod.c,v 1.34 2009/09/04 16:16:52 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static bool smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

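/*
 * Mark a request as processed: record the error code, bump the response
 * generation, wake any thread sleeping in smb_iod_waitrq(), stop the
 * request timeout callout and run the receive callback, if any.
 */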
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	if (rqp->sr_timo > 0)
		callout_stop(&rqp->sr_timo_ch);
	if (rqp->sr_recvcallback)
		(*rqp->sr_recvcallback)(rqp->sr_recvarg);
	SMBRQ_SUNLOCK(rqp);
}

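/*
 * Callout handler: fail a request with ETIMEDOUT when its timeout expires.
 */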
static void
smb_iod_rqtimedout(void *arg)
{
	smb_iod_rqprocessed((struct smb_rq *)arg, ETIMEDOUT);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (rqp->sr_flags & SMBR_INTERNAL)
			SMBRQ_SUNLOCK(rqp);
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

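/*
 * Tear down the transport attached to the virtual circuit, if any.
 */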
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, l);
	SMB_TRAN_DONE(vcp, l);
	vcp->vc_tdata = NULL;
}

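/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */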
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

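/*
 * Establish the transport connection and run the SMB negotiate and
 * session setup exchanges.  On any failure the connection is marked dead.
 */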
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	int error;

	SMBIODEBUG(("%d\n", iod->iod_state));
	switch (iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBIODEBUG(("called for already opened connection\n"));
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;

#define ithrow(cmd)			\
	if ((error = cmd))		\
		goto fail

	ithrow(SMB_TRAN_CREATE(vcp, l));
	SMBIODEBUG(("tcreate\n"));
	if (vcp->vc_laddr) {
		ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, l));
	}
	SMBIODEBUG(("tbind\n"));
	ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, l));
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG(("tconnect\n"));
/*	vcp->vc_mid = 0;*/
	ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
	SMBIODEBUG(("snegotiate\n"));
	ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
	iod->iod_state = SMBIOD_ST_VCACTIVE;

#undef ithrow

	SMBIODEBUG(("completed\n"));
	smb_iod_invrq(iod);

	return (0);

    fail:
	smb_iod_dead(iod);
	return (error);
}

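/*
 * Close the SMB session (if active) and shut down the transport,
 * returning the iod to the not-connected state.
 */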
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG(("\n"));
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

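/*
 * (Re)connect to a share, reviving the virtual circuit first if it has
 * gone dead.  Waiters on ss_vcgenid are woken when the attempt is done.
 */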
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG(("tree reconnect\n"));
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

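/*
 * Try to send one request.  On the first attempt the TID and UID words
 * are patched into the header; after more than five failed attempts the
 * request is failed with its last error and ENOTCONN is returned.
 */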
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct lwp *l = iod->iod_l;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG(("iod_state = %d, rqmid %d\n", iod->iod_state, rqp->sr_mid));
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
		u_int16_t tid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
		u_int16_t rquid = vcp ? vcp->vc_smbuid : 0;
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		SMBRQ_PUTLE16(rqp->sr_rqtid, tid);
		SMBRQ_PUTLE16(rqp->sr_rquid, rquid);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0));
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
	error = rqp->sr_lerror = (m) ? SMB_TRAN_SEND(vcp, m, l) : ENOBUFS;
	if (error == 0) {
		if (rqp->sr_timo > 0)
			callout_reset(&rqp->sr_timo_ch, rqp->sr_timo,
			    smb_iod_rqtimedout, rqp);

		if (rqp->sr_flags & SMBR_NOWAIT) {
			/* caller doesn't want to wait, flag as processed */
			smb_iod_rqprocessed(rqp, 0);
			return (0);
		}

#if 0
		iod->iod_lastrqsent = ts;
#endif
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (vcp && SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static void
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return;
	    default:
		break;
	}

	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, l);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		KASSERT(m != NULL);

		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * Now we got an entire and possibly invalid SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char *);
		if (memcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG(("mid %04x\n", (u_int)mid));
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBIODEBUG(("duplicate response %d (ignored)\n", mid));
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBIODEBUG(("drop resp with mid %d\n", (u_int)mid));
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_l)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
}

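/*
 * Post an event to the iod thread.  For SMBIOD_EV_SYNC events the caller
 * sleeps until the event has been processed and gets the result back;
 * otherwise the event is queued and the call returns immediately.
 */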
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG(("\n"));
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	SIMPLEQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	mtsleep(evp, PWAIT | PNORELOCK, "smbevw", 0, SMB_IOD_EVLOCKPTR(iod));
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place request in the queue.
 * Requests from smbiod have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_cred->scr_l == iod->iod_l) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbsndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		/*
		 * Return error to force the caller to reissue the request
		 * using the new connection state.
		 */
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
#ifdef DIAGNOSTIC
		if (vcp->vc_maxmux == 0)
			panic("%s: vc maxmux == 0", __func__);
#endif
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		/* XXX use interruptible sleep? */
		mtsleep(&iod->iod_muxwant, PWAIT, "smbmux",
		    0, SMB_IOD_RQLOCKPTR(iod));
	}
	iod->iod_muxcnt++;
	SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

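/*
 * Remove a request from the queue, first waiting for a transmission in
 * progress (SMBR_XLOCK) to finish, and release its multiplex slot.
 */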
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		mtsleep(rqp, PWAIT, "smbxrm", 0, SMB_IOD_RQLOCKPTR(iod));
	}
	SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

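/*
 * Wait until a response for the request has arrived (or the request has
 * otherwise been processed) and return its error code.
 */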
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbirq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast) {
		/* XXX interruptible sleep? */
		mtsleep(&rqp->sr_state, PWAIT, "smbwrq", 0,
		    SMBRQ_SLOCKPTR(rqp));
	}
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it at
		 * the end of the queue so other waiters get a chance to
		 * proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

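/*
 * Walk the request queue and transmit every request still in the
 * SMBRQ_NOTSENT state.  Returns true if anything was sent.
 */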
static bool
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_rq *rqp;
	int herror;
	bool sentany = false;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (__predict_false(rqp->sr_state == SMBRQ_NOTSENT)) {
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}

			if (__predict_false(herror != 0))
				break;
			sentany = true;
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);

	return sentany;
}

/*
 * "main" function for the smbiod daemon
 */
static inline void
smb_iod_main(struct smbiod *iod)
{
#if 0
	struct smb_vc *vcp = iod->iod_vc;
	struct timespec tsnow;
#endif
	struct smbiod_event *evp;

	SMBIODEBUG(("\n"));

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = SIMPLEQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		SIMPLEQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif

	/*
	 * Do a send/receive cycle once, and then as many more times as
	 * we can send out new data.  This makes sure that any data which
	 * ended up in the queue during the receive phase (which can
	 * block, releasing the kernel lock) actually gets sent.
	 */
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	while (smb_iod_sendall(iod)) {
		smb_iod_recvall(iod);
	}
}

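/*
 * Body of the smbiod kernel thread: process events and run send/receive
 * cycles until told to shut down.
 */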
void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;
	int s;

	/*
	 * Here we assume that the thread structure will be the same
	 * for an entire kthread (kproc, to be more precise) life.
	 */
	KASSERT(iod->iod_l == curlwp);
	smb_makescred(&iod->iod_scred, iod->iod_l, NULL);
	s = splnet();
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		SMBIODEBUG(("going to sleep\n"));
		/*
		 * Technically, waking up every hz is unnecessary, but
		 * keep this here until smb has been made mpsafe.
		 */
		tsleep(&iod->iod_flags, PSOCK, "smbidle", hz);
	}
	splx(s);
	kthread_exit(0);
}

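/*
 * Allocate and initialize an iod for a virtual circuit and start its
 * kernel thread.
 */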
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
#if 0
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	microtime(&iod->iod_lastrqsent);
#endif
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "smbrql");
	SIMPLEQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "smbevl");
	SIMPLEQ_INIT(&iod->iod_evlist);
#ifdef __NetBSD__
	error = kthread_create(PRI_NONE, 0, NULL, smb_iod_thread, iod,
	    &iod->iod_l, "smbiod%d", iod->iod_id);
#else
	error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
#endif
	if (error) {
		SMBIODEBUG(("can't start smbiod: %d", error));
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

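/*
 * Shut down the iod thread synchronously and free its resources.
 */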
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

#ifndef __NetBSD__
int
smb_iod_done(void)
{
	return 0;
}
#endif