usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
1 /*
2 * Copyright (c) 2000-2001, Boris Popov
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
36 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/time.h>
42 #include <sys/kmem.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/sunddi.h>
48 #include <sys/cmn_err.h>
49 #include <sys/sdt.h>
51 #include <netsmb/smb_osdep.h>
53 #include <netsmb/smb.h>
54 #include <netsmb/smb_conn.h>
55 #include <netsmb/smb_subr.h>
56 #include <netsmb/smb_tran.h>
57 #include <netsmb/smb_rq.h>
60 * How long to wait before restarting a request (after reconnect)
62 #define SMB_RCNDELAY 2 /* seconds */
65 * leave this zero - we can't second-guess server-side effects of
66 * duplicate ops; this isn't NFS!
68 #define SMBMAXRESTARTS 0
71 static int smb_rq_reply(struct smb_rq *rqp);
72 static int smb_rq_enqueue(struct smb_rq *rqp);
73 static int smb_rq_getenv(struct smb_connobj *layer,
74 struct smb_vc **vcpp, struct smb_share **sspp);
75 static int smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
76 static int smb_t2_reply(struct smb_t2rq *t2p);
77 static int smb_nt_reply(struct smb_ntrq *ntp);
81 * Done with a request object. Free its contents.
82 * If it was allocated (SMBR_ALLOCED) free it too.
83 * Some of these are stack locals, not allocated.
85 * No locks here - this is the last ref.
87 void
88 smb_rq_done(struct smb_rq *rqp)
92 * No smb_vc_rele() here - see smb_rq_init()
94 mb_done(&rqp->sr_rq);
95 md_done(&rqp->sr_rp);
96 mutex_destroy(&rqp->sr_lock);
97 cv_destroy(&rqp->sr_cond);
98 if (rqp->sr_flags & SMBR_ALLOCED)
99 kmem_free(rqp, sizeof (*rqp));
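/*
 * Illustrative only (a sketch, not code from this file): callers
 * such as smbfs typically build and run a request roughly as
 *	smb_rq_alloc(co, SMB_COM_xxx, scred, &rqp);
 *	smb_rq_wstart(rqp); ...put parameter words... smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp); ...put data bytes...  smb_rq_bend(rqp);
 *	smb_rq_simple(rqp);
 *	...parse the reply in rqp->sr_rp...
 *	smb_rq_done(rqp);
 * where SMB_COM_xxx stands for whatever command is being sent.
 */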
103 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
104 struct smb_rq **rqpp)
106 struct smb_rq *rqp;
107 int error;
109 rqp = kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
110 if (rqp == NULL)
111 return (ENOMEM);
112 error = smb_rq_init(rqp, layer, cmd, scred);
113 if (error) {
114 smb_rq_done(rqp);
115 return (error);
117 rqp->sr_flags |= SMBR_ALLOCED;
118 *rqpp = rqp;
119 return (0);
123 smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
124 struct smb_cred *scred)
126 int error;
128 bzero(rqp, sizeof (*rqp));
129 mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
130 cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);
132 error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
133 if (error)
134 return (error);
137 * We copied a VC pointer (vcp) into rqp->sr_vc,
138 * but we do NOT do a smb_vc_hold here. Instead,
139 * the caller is responsible for the hold on the
140 * share or the VC as needed. For smbfs callers,
141 * the hold is on the share, via the smbfs mount.
142 * For nsmb ioctl callers, the hold is done when
143 * the driver handle gets VC or share references.
144 * This design avoids frequent hold/rele activity
145 * when creating and completing requests.
148 rqp->sr_rexmit = SMBMAXRESTARTS;
149 rqp->sr_cred = scred; /* Note: ref hold done by caller. */
150 rqp->sr_pid = (uint16_t)ddi_get_pid();
151 error = smb_rq_new(rqp, cmd);
153 return (error);
156 static int
157 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
159 struct mbchain *mbp = &rqp->sr_rq;
160 struct smb_vc *vcp = rqp->sr_vc;
161 int error;
163 ASSERT(rqp != NULL);
165 rqp->sr_sendcnt = 0;
166 rqp->sr_cmd = cmd;
168 mb_done(mbp);
169 md_done(&rqp->sr_rp);
170 error = mb_init(mbp);
171 if (error)
172 return (error);
175 * Is this the right place to save the flags?
177 rqp->sr_rqflags = vcp->vc_hflags;
178 rqp->sr_rqflags2 = vcp->vc_hflags2;
181 * The SMB header is filled in later by
182 * smb_rq_fillhdr (see below)
183 * Just reserve space here.
185 mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
187 return (0);
191 * Given a request with its body already composed,
192 * rewind to the start and fill in the SMB header.
193 * This is called after the request is enqueued,
194 * so we have the final MID, sequence number, etc.
196 void
197 smb_rq_fillhdr(struct smb_rq *rqp)
199 struct mbchain mbtmp, *mbp = &mbtmp;
200 mblk_t *m;
203 * Fill in the SMB header using a dup of the first mblk,
204 * which points at the same data but has its own wptr,
205 * so we can rewind without trashing the message.
207 m = dupb(rqp->sr_rq.mb_top);
208 m->b_wptr = m->b_rptr; /* rewind */
209 mb_initm(mbp, m);
211 mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
212 mb_put_uint8(mbp, rqp->sr_cmd);
213 mb_put_uint32le(mbp, 0); /* status */
214 mb_put_uint8(mbp, rqp->sr_rqflags);
215 mb_put_uint16le(mbp, rqp->sr_rqflags2);
216 mb_put_uint16le(mbp, 0); /* pid-high */
217 mb_put_mem(mbp, NULL, 8, MB_MZERO); /* MAC sig. (later) */
218 mb_put_uint16le(mbp, 0); /* reserved */
219 mb_put_uint16le(mbp, rqp->sr_rqtid);
220 mb_put_uint16le(mbp, rqp->sr_pid);
221 mb_put_uint16le(mbp, rqp->sr_rquid);
222 mb_put_uint16le(mbp, rqp->sr_mid);
224 /* This will free the mblk from dupb. */
225 mb_done(mbp);
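/*
 * Convenience wrapper: run the request/reply exchange with the
 * default timeout (see smb_rq_simple_timed below).
 */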
229 smb_rq_simple(struct smb_rq *rqp)
231 return (smb_rq_simple_timed(rqp, smb_timo_default));
235 * Simple request-reply exchange
238 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
240 int error = EINVAL;
242 for (; ; ) {
244 * Don't send any new requests if force unmount is underway.
245 * This check was moved into smb_rq_enqueue.
247 rqp->sr_flags &= ~SMBR_RESTART;
248 rqp->sr_timo = timeout; /* in seconds */
249 rqp->sr_state = SMBRQ_NOTSENT;
250 error = smb_rq_enqueue(rqp);
251 if (error) {
252 break;
254 error = smb_rq_reply(rqp);
255 if (!error)
256 break;
257 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
258 SMBR_RESTART)
259 break;
260 if (rqp->sr_rexmit <= 0)
261 break;
262 SMBRQ_LOCK(rqp);
263 if (rqp->sr_share) {
264 (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
265 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
267 } else {
268 ddi_sleep(SMB_RCNDELAY);
270 SMBRQ_UNLOCK(rqp);
271 rqp->sr_rexmit--;
273 return (error);
277 static int
278 smb_rq_enqueue(struct smb_rq *rqp)
280 struct smb_vc *vcp = rqp->sr_vc;
281 struct smb_share *ssp = rqp->sr_share;
282 int error = 0;
285 * Normal requests may initiate a reconnect,
286 * and/or wait for state changes to finish.
287 * Some requests set the NORECONNECT flag
288 * to avoid all that (i.e. tree discon)
290 if (rqp->sr_flags & SMBR_NORECONNECT) {
291 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
292 SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
293 return (ENOTCONN);
295 if (ssp != NULL &&
296 ((ssp->ss_flags & SMBS_CONNECTED) == 0))
297 return (ENOTCONN);
298 goto ok_out;
302 * If we're not connected, initiate a reconnect
303 * and/or wait for an existing one to finish.
305 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
306 error = smb_iod_reconnect(vcp);
307 if (error != 0)
308 return (error);
312 * If this request has a "share" object
313 * that needs a tree connect, do it now.
315 if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
316 error = smb_share_tcon(ssp, rqp->sr_cred);
317 if (error)
318 return (error);
322 * We now know what UID + TID to use.
323 * Store them in the request.
325 ok_out:
326 rqp->sr_rquid = vcp->vc_smbuid;
327 rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
328 error = smb_iod_addrq(rqp);
330 return (error);
334 * Mark location of the word count, which is filled in later by
335 * smb_rq_wend(). Also initialize the counter that it uses
336 * to figure out what value to fill in.
338 * Note that the word count happens to be 8-bit.
340 void
341 smb_rq_wstart(struct smb_rq *rqp)
343 rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
344 rqp->sr_rq.mb_count = 0;
347 void
348 smb_rq_wend(struct smb_rq *rqp)
350 uint_t wcnt;
352 if (rqp->sr_wcount == NULL) {
353 SMBSDEBUG("no wcount\n");
354 return;
356 wcnt = rqp->sr_rq.mb_count;
357 if (wcnt > 0x1ff)
358 SMBSDEBUG("word count too large (%d)\n", wcnt);
359 if (wcnt & 1)
360 SMBSDEBUG("odd word count\n");
361 /* Fill in the word count (8-bits) */
362 *rqp->sr_wcount = (wcnt >> 1);
366 * Mark location of the byte count, which is filled in later by
367 * smb_rq_bend(). Also initialize the counter that it uses
368 * to figure out what value to fill in.
370 * Note that the byte count happens to be 16-bit.
372 void
373 smb_rq_bstart(struct smb_rq *rqp)
375 rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
376 rqp->sr_rq.mb_count = 0;
379 void
380 smb_rq_bend(struct smb_rq *rqp)
382 uint_t bcnt;
384 if (rqp->sr_bcount == NULL) {
385 SMBSDEBUG("no bcount\n");
386 return;
388 bcnt = rqp->sr_rq.mb_count;
389 if (bcnt > 0xffff)
390 SMBSDEBUG("byte count too large (%d)\n", bcnt);
392 * Fill in the byte count (16-bits)
393 * The pointer is char * type due to
394 * typical off-by-one alignment.
396 rqp->sr_bcount[0] = bcnt & 0xFF;
397 rqp->sr_bcount[1] = (bcnt >> 8);
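/*
 * Return EINTR if this request has been marked as interrupted
 * (SMBR_INTR), otherwise zero.
 */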
401 smb_rq_intr(struct smb_rq *rqp)
403 if (rqp->sr_flags & SMBR_INTR)
404 return (EINTR);
406 return (0);
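/*
 * Given a connection object (share or VC), find the owning VC
 * and, when the object is a share, the share itself, failing if
 * either has been torn down (SMBO_GONE).
 */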
409 static int
410 smb_rq_getenv(struct smb_connobj *co,
411 struct smb_vc **vcpp, struct smb_share **sspp)
413 struct smb_vc *vcp = NULL;
414 struct smb_share *ssp = NULL;
415 int error = EINVAL;
417 if (co->co_flags & SMBO_GONE) {
418 SMBSDEBUG("zombie CO\n");
419 error = EINVAL;
420 goto out;
423 switch (co->co_level) {
424 case SMBL_SHARE:
425 ssp = CPTOSS(co);
426 if ((co->co_flags & SMBO_GONE) ||
427 co->co_parent == NULL) {
428 SMBSDEBUG("zombie share %s\n", ssp->ss_name);
429 break;
431 /* instead of recursion... */
432 co = co->co_parent;
433 /* FALLTHROUGH */
434 case SMBL_VC:
435 vcp = CPTOVC(co);
436 if ((co->co_flags & SMBO_GONE) ||
437 co->co_parent == NULL) {
438 SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
439 break;
441 error = 0;
442 break;
444 default:
445 SMBSDEBUG("invalid level %d passed\n", co->co_level);
448 out:
449 if (!error) {
450 if (vcpp)
451 *vcpp = vcp;
452 if (sspp)
453 *sspp = ssp;
456 return (error);
460 * Wait for the reply to this request.
462 static int
463 smb_rq_reply(struct smb_rq *rqp)
465 struct mdchain *mdp = &rqp->sr_rp;
466 u_int8_t tb;
467 int error, rperror = 0;
469 if (rqp->sr_timo == SMBNOREPLYWAIT) {
470 smb_iod_removerq(rqp);
471 return (0);
474 error = smb_iod_waitrq(rqp);
475 if (error)
476 return (error);
479 * If the request was signed, validate the
480 * signature on the response.
482 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
483 error = smb_rq_verify(rqp);
484 if (error)
485 return (error);
489 * Parse the SMB header
491 error = md_get_uint32le(mdp, NULL);
492 if (error)
493 return (error);
494 error = md_get_uint8(mdp, &tb);
495 error = md_get_uint32le(mdp, &rqp->sr_error);
496 error = md_get_uint8(mdp, &rqp->sr_rpflags);
497 error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
498 if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
500 * Do a special check for STATUS_BUFFER_OVERFLOW;
501 * it's not an error.
503 if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
505 * Don't report it as an error to our caller;
506 * they can look at rqp->sr_error if they
507 * need to know whether we got a
508 * STATUS_BUFFER_OVERFLOW.
509 * XXX - should we do that for all errors
510 * where (error & 0xC0000000) is 0x80000000,
511 * i.e. all warnings?
513 rperror = 0;
514 } else
515 rperror = smb_maperr32(rqp->sr_error);
516 } else {
517 rqp->sr_errclass = rqp->sr_error & 0xff;
518 rqp->sr_serror = rqp->sr_error >> 16;
519 rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
521 if (rperror == EMOREDATA) {
522 rperror = E2BIG;
523 rqp->sr_flags |= SMBR_MOREDATA;
524 } else
525 rqp->sr_flags &= ~SMBR_MOREDATA;
527 error = md_get_uint32le(mdp, NULL);
528 error = md_get_uint32le(mdp, NULL);
529 error = md_get_uint32le(mdp, NULL);
531 error = md_get_uint16le(mdp, &rqp->sr_rptid);
532 error = md_get_uint16le(mdp, &rqp->sr_rppid);
533 error = md_get_uint16le(mdp, &rqp->sr_rpuid);
534 error = md_get_uint16le(mdp, &rqp->sr_rpmid);
536 return ((error) ? error : rperror);
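/* Round a length up to the next multiple of four bytes. */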
540 #define ALIGN4(a) (((a) + 3) & ~3)
543 * TRANS2 request implementation
544 * TRANS implementation is in the "t2" routines
545 * NT_TRANSACTION implementation is the separate "nt" stuff
548 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
549 struct smb_t2rq **t2pp)
551 struct smb_t2rq *t2p;
552 int error;
554 t2p = kmem_alloc(sizeof (*t2p), KM_SLEEP);
555 if (t2p == NULL)
556 return (ENOMEM);
557 error = smb_t2_init(t2p, layer, &setup, 1, scred);
558 t2p->t2_flags |= SMBT2_ALLOCED;
559 if (error) {
560 smb_t2_done(t2p);
561 return (error);
563 *t2pp = t2p;
564 return (0);
568 smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
569 struct smb_ntrq **ntpp)
571 struct smb_ntrq *ntp;
572 int error;
574 ntp = kmem_alloc(sizeof (*ntp), KM_SLEEP);
575 if (ntp == NULL)
576 return (ENOMEM);
577 error = smb_nt_init(ntp, layer, fn, scred);
578 mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
579 cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
580 ntp->nt_flags |= SMBT2_ALLOCED;
581 if (error) {
582 smb_nt_done(ntp);
583 return (error);
585 *ntpp = ntp;
586 return (0);
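/*
 * Initialize a (possibly caller-provided) TRANS2 request:
 * copy the setup words and resolve the owning VC and share.
 */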
590 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
591 int setupcnt, struct smb_cred *scred)
593 int i;
594 int error;
596 bzero(t2p, sizeof (*t2p));
597 mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
598 cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
600 t2p->t2_source = source;
601 t2p->t2_setupcount = (u_int16_t)setupcnt;
602 t2p->t2_setupdata = t2p->t2_setup;
603 for (i = 0; i < setupcnt; i++)
604 t2p->t2_setup[i] = setup[i];
605 t2p->t2_fid = 0xffff;
606 t2p->t2_cred = scred;
607 t2p->t2_share = (source->co_level == SMBL_SHARE ?
608 CPTOSS(source) : NULL); /* for smb up/down */
609 error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
610 if (error)
611 return (error);
612 return (0);
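/*
 * Initialize a (possibly caller-provided) NT transaction request:
 * record the function code and resolve the owning VC and share.
 */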
616 smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
617 struct smb_cred *scred)
619 int error;
621 bzero(ntp, sizeof (*ntp));
622 ntp->nt_source = source;
623 ntp->nt_function = fn;
624 ntp->nt_cred = scred;
625 ntp->nt_share = (source->co_level == SMBL_SHARE ?
626 CPTOSS(source) : NULL); /* for smb up/down */
627 error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
628 if (error)
629 return (error);
630 return (0);
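/*
 * Done with a TRANS2 request.  Free its chains, and the request
 * itself if it was allocated by smb_t2_alloc().
 */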
633 void
634 smb_t2_done(struct smb_t2rq *t2p)
636 mb_done(&t2p->t2_tparam);
637 mb_done(&t2p->t2_tdata);
638 md_done(&t2p->t2_rparam);
639 md_done(&t2p->t2_rdata);
640 mutex_destroy(&t2p->t2_lock);
641 cv_destroy(&t2p->t2_cond);
642 if (t2p->t2_flags & SMBT2_ALLOCED)
643 kmem_free(t2p, sizeof (*t2p));
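/*
 * Done with an NT transaction request.  Free its chains, and the
 * request itself if it was allocated by smb_nt_alloc().
 */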
646 void
647 smb_nt_done(struct smb_ntrq *ntp)
649 mb_done(&ntp->nt_tsetup);
650 mb_done(&ntp->nt_tparam);
651 mb_done(&ntp->nt_tdata);
652 md_done(&ntp->nt_rparam);
653 md_done(&ntp->nt_rdata);
654 cv_destroy(&ntp->nt_cond);
655 mutex_destroy(&ntp->nt_lock);
656 if (ntp->nt_flags & SMBT2_ALLOCED)
657 kmem_free(ntp, sizeof (*ntp));
661 * Extract data [offset,count] from mtop and add to mdp.
663 static int
664 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
665 struct mdchain *mdp)
667 mblk_t *n;
669 n = m_copym(mtop, offset, count, M_WAITOK);
670 if (n == NULL)
671 return (EBADRPC);
673 if (mdp->md_top == NULL) {
674 md_initm(mdp, n);
675 } else
676 m_cat(mdp->md_top, n);
678 return (0);
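/*
 * Collect the (possibly multi-part) reply to a TRANS/TRANS2
 * request, appending the returned parameter and data bytes to
 * t2_rparam and t2_rdata.
 */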
681 static int
682 smb_t2_reply(struct smb_t2rq *t2p)
684 struct mdchain *mdp;
685 struct smb_rq *rqp = t2p->t2_rq;
686 int error, error2, totpgot, totdgot;
687 u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
688 u_int16_t tmp, bc, dcount;
689 u_int8_t wc;
691 t2p->t2_flags &= ~SMBT2_MOREDATA;
693 error = smb_rq_reply(rqp);
694 if (rqp->sr_flags & SMBR_MOREDATA)
695 t2p->t2_flags |= SMBT2_MOREDATA;
696 t2p->t2_sr_errclass = rqp->sr_errclass;
697 t2p->t2_sr_serror = rqp->sr_serror;
698 t2p->t2_sr_error = rqp->sr_error;
699 t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
700 if (error && !(rqp->sr_flags & SMBR_MOREDATA))
701 return (error);
703 * Now we have to get all subsequent responses, if any.
704 * The CIFS specification says that they can be misordered,
705 * which is weird.
706 * TODO: timo
708 totpgot = totdgot = 0;
709 totpcount = totdcount = 0xffff;
710 mdp = &rqp->sr_rp;
711 for (;;) {
712 DTRACE_PROBE2(smb_trans_reply,
713 (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
714 m_dumpm(mdp->md_top);
716 if ((error2 = md_get_uint8(mdp, &wc)) != 0)
717 break;
718 if (wc < 10) {
719 error2 = ENOENT;
720 break;
722 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
723 break;
724 if (totpcount > tmp)
725 totpcount = tmp;
726 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
727 break;
728 if (totdcount > tmp)
729 totdcount = tmp;
730 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
731 (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
732 (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
733 (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
734 break;
735 if (pcount != 0 && pdisp != totpgot) {
736 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
737 pdisp, totpgot);
738 error2 = EINVAL;
739 break;
741 if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
742 (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
743 (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
744 break;
745 if (dcount != 0 && ddisp != totdgot) {
746 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
747 dcount);
748 error2 = EINVAL;
749 break;
752 /* XXX: Skip setup words? We don't save them? */
753 md_get_uint8(mdp, &wc); /* SetupCount */
754 md_get_uint8(mdp, NULL); /* Reserved2 */
755 tmp = wc;
756 while (tmp--)
757 md_get_uint16le(mdp, NULL);
759 if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
760 break;
763 * There are pad bytes here, and the poff value
764 * indicates where the next data are found.
765 * No need to guess at the padding size.
767 if (pcount) {
768 error2 = smb_t2_placedata(mdp->md_top, poff,
769 pcount, &t2p->t2_rparam);
770 if (error2)
771 break;
773 totpgot += pcount;
775 if (dcount) {
776 error2 = smb_t2_placedata(mdp->md_top, doff,
777 dcount, &t2p->t2_rdata);
778 if (error2)
779 break;
781 totdgot += dcount;
783 if (totpgot >= totpcount && totdgot >= totdcount) {
784 error2 = 0;
785 t2p->t2_flags |= SMBT2_ALLRECV;
786 break;
789 * We're done with this reply, look for the next one.
791 SMBRQ_LOCK(rqp);
792 md_next_record(&rqp->sr_rp);
793 SMBRQ_UNLOCK(rqp);
794 error2 = smb_rq_reply(rqp);
795 if (rqp->sr_flags & SMBR_MOREDATA)
796 t2p->t2_flags |= SMBT2_MOREDATA;
797 if (!error2)
798 continue;
799 t2p->t2_sr_errclass = rqp->sr_errclass;
800 t2p->t2_sr_serror = rqp->sr_serror;
801 t2p->t2_sr_error = rqp->sr_error;
802 t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
803 error = error2;
804 if (!(rqp->sr_flags & SMBR_MOREDATA))
805 break;
807 return (error ? error : error2);
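/*
 * Collect the (possibly multi-part) reply to an NT_TRANSACT
 * request, appending the returned parameter and data bytes to
 * nt_rparam and nt_rdata.
 */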
810 static int
811 smb_nt_reply(struct smb_ntrq *ntp)
813 struct mdchain *mdp;
814 struct smb_rq *rqp = ntp->nt_rq;
815 int error, error2;
816 u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
817 u_int32_t tmp, dcount, totpgot, totdgot;
818 u_int16_t bc;
819 u_int8_t wc;
821 ntp->nt_flags &= ~SMBT2_MOREDATA;
823 error = smb_rq_reply(rqp);
824 if (rqp->sr_flags & SMBR_MOREDATA)
825 ntp->nt_flags |= SMBT2_MOREDATA;
826 ntp->nt_sr_error = rqp->sr_error;
827 ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
828 if (error && !(rqp->sr_flags & SMBR_MOREDATA))
829 return (error);
831 * Now we have to get all subsequent responses. The CIFS specification
832 * says that they can be misordered which is weird.
833 * TODO: timo
835 totpgot = totdgot = 0;
836 totpcount = totdcount = 0xffffffff;
837 mdp = &rqp->sr_rp;
838 for (;;) {
839 DTRACE_PROBE2(smb_trans_reply,
840 (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
841 m_dumpm(mdp->md_top);
843 if ((error2 = md_get_uint8(mdp, &wc)) != 0)
844 break;
845 if (wc < 18) {
846 error2 = ENOENT;
847 break;
849 md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
850 if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
851 break;
852 if (totpcount > tmp)
853 totpcount = tmp;
854 if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
855 break;
856 if (totdcount > tmp)
857 totdcount = tmp;
858 if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
859 (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
860 (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
861 break;
862 if (pcount != 0 && pdisp != totpgot) {
863 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
864 pdisp, totpgot);
865 error2 = EINVAL;
866 break;
868 if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
869 (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
870 (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
871 break;
872 if (dcount != 0 && ddisp != totdgot) {
873 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
874 dcount);
875 error2 = EINVAL;
876 break;
879 /* XXX: Skip setup words? We don't save them? */
880 md_get_uint8(mdp, &wc); /* SetupCount */
881 tmp = wc;
882 while (tmp--)
883 md_get_uint16le(mdp, NULL);
885 if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
886 break;
889 * There are pad bytes here, and the poff value
890 * indicates where the next data are found.
891 * No need to guess at the padding size.
893 if (pcount) {
894 error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
895 &ntp->nt_rparam);
896 if (error2)
897 break;
899 totpgot += pcount;
901 if (dcount) {
902 error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
903 &ntp->nt_rdata);
904 if (error2)
905 break;
907 totdgot += dcount;
909 if (totpgot >= totpcount && totdgot >= totdcount) {
910 error2 = 0;
911 ntp->nt_flags |= SMBT2_ALLRECV;
912 break;
915 * We're done with this reply, look for the next one.
917 SMBRQ_LOCK(rqp);
918 md_next_record(&rqp->sr_rp);
919 SMBRQ_UNLOCK(rqp);
920 error2 = smb_rq_reply(rqp);
921 if (rqp->sr_flags & SMBR_MOREDATA)
922 ntp->nt_flags |= SMBT2_MOREDATA;
923 if (!error2)
924 continue;
925 ntp->nt_sr_error = rqp->sr_error;
926 ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
927 error = error2;
928 if (!(rqp->sr_flags & SMBR_MOREDATA))
929 break;
931 return (error ? error : error2);
935 * Perform a full round of a TRANS2 request.
937 static int
938 smb_t2_request_int(struct smb_t2rq *t2p)
940 struct smb_vc *vcp = t2p->t2_vc;
941 struct smb_cred *scred = t2p->t2_cred;
942 struct mbchain *mbp;
943 struct mdchain *mdp, mbparam, mbdata;
944 mblk_t *m;
945 struct smb_rq *rqp;
946 int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
947 int error, doff, poff, txdcount, txpcount, nmlen, nmsize;
949 m = t2p->t2_tparam.mb_top;
950 if (m) {
951 md_initm(&mbparam, m); /* do not free it! */
952 totpcount = m_fixhdr(m);
953 if (totpcount > 0xffff) /* maxvalue for ushort_t */
954 return (EINVAL);
955 } else
956 totpcount = 0;
957 m = t2p->t2_tdata.mb_top;
958 if (m) {
959 md_initm(&mbdata, m); /* do not free it! */
960 totdcount = m_fixhdr(m);
961 if (totdcount > 0xffff)
962 return (EINVAL);
963 } else
964 totdcount = 0;
965 leftdcount = totdcount;
966 leftpcount = totpcount;
967 txmax = vcp->vc_txmax;
968 error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
969 SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
970 if (error)
971 return (error);
972 rqp->sr_timo = smb_timo_default;
973 rqp->sr_flags |= SMBR_MULTIPACKET;
974 t2p->t2_rq = rqp;
975 mbp = &rqp->sr_rq;
976 smb_rq_wstart(rqp);
977 mb_put_uint16le(mbp, totpcount);
978 mb_put_uint16le(mbp, totdcount);
979 mb_put_uint16le(mbp, t2p->t2_maxpcount);
980 mb_put_uint16le(mbp, t2p->t2_maxdcount);
981 mb_put_uint8(mbp, t2p->t2_maxscount);
982 mb_put_uint8(mbp, 0); /* reserved */
983 mb_put_uint16le(mbp, 0); /* flags */
984 mb_put_uint32le(mbp, 0); /* Timeout */
985 mb_put_uint16le(mbp, 0); /* reserved 2 */
986 len = mb_fixhdr(mbp);
989 * Now we know the size of the trans overhead stuff:
990 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
991 * where nmsize is the OTW size of the name, including
992 * the unicode null terminator and any alignment.
993 * Use this to decide which parts (and how much)
994 * can go into this request: params, data
996 nmlen = t2p->t_name ? t2p->t_name_len : 0;
997 nmsize = nmlen + 1; /* null term. */
998 if (SMB_UNICODE_STRINGS(vcp)) {
999 nmsize *= 2;
1000 /* we know put_dmem will need to align */
1001 nmsize += 1;
1003 len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
1004 if (len + leftpcount > txmax) {
1005 txpcount = min(leftpcount, txmax - len);
1006 poff = len;
1007 txdcount = 0;
1008 doff = 0;
1009 } else {
1010 txpcount = leftpcount;
1011 poff = txpcount ? len : 0;
1013 * Other client traffic seems to "ALIGN2" here. The extra
1014 * 2 byte pad we use has no observed downside and may be
1015 * required for some old servers(?)
1017 len = ALIGN4(len + txpcount);
1018 txdcount = min(leftdcount, txmax - len);
1019 doff = txdcount ? len : 0;
1021 leftpcount -= txpcount;
1022 leftdcount -= txdcount;
1023 mb_put_uint16le(mbp, txpcount);
1024 mb_put_uint16le(mbp, poff);
1025 mb_put_uint16le(mbp, txdcount);
1026 mb_put_uint16le(mbp, doff);
1027 mb_put_uint8(mbp, t2p->t2_setupcount);
1028 mb_put_uint8(mbp, 0);
1029 for (i = 0; i < t2p->t2_setupcount; i++) {
1030 mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
1032 smb_rq_wend(rqp);
1033 smb_rq_bstart(rqp);
1034 if (t2p->t_name) {
1035 /* Put the string and terminating null. */
1036 error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
1037 SMB_CS_NONE, NULL);
1038 } else {
1039 /* nmsize accounts for padding, char size. */
1040 error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
1042 if (error)
1043 goto freerq;
1044 len = mb_fixhdr(mbp);
1045 if (txpcount) {
1046 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1047 error = md_get_mbuf(&mbparam, txpcount, &m);
1048 SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1049 if (error)
1050 goto freerq;
1051 mb_put_mbuf(mbp, m);
1053 len = mb_fixhdr(mbp);
1054 if (txdcount) {
1055 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1056 error = md_get_mbuf(&mbdata, txdcount, &m);
1057 if (error)
1058 goto freerq;
1059 mb_put_mbuf(mbp, m);
1061 smb_rq_bend(rqp); /* incredible, but that's it... */
1062 error = smb_rq_enqueue(rqp);
1063 if (error)
1064 goto freerq;
1065 if (leftpcount || leftdcount) {
1066 error = smb_rq_reply(rqp);
1067 if (error)
1068 goto bad;
1070 * this is an interim response, ignore it.
1072 SMBRQ_LOCK(rqp);
1073 md_next_record(&rqp->sr_rp);
1074 SMBRQ_UNLOCK(rqp);
1076 while (leftpcount || leftdcount) {
1077 error = smb_rq_new(rqp, t2p->t_name ?
1078 SMB_COM_TRANSACTION_SECONDARY :
1079 SMB_COM_TRANSACTION2_SECONDARY);
1080 if (error)
1081 goto bad;
1082 mbp = &rqp->sr_rq;
1083 smb_rq_wstart(rqp);
1084 mb_put_uint16le(mbp, totpcount);
1085 mb_put_uint16le(mbp, totdcount);
1086 len = mb_fixhdr(mbp);
1088 * Now we know the fixed overhead of this packet:
1089 * ALIGN4(len + 7 * 2 + 2) for a TRANS2 secondary (2 bytes less for TRANS),
1090 * and can decide which parts (params, data) go into this request.
1092 len = ALIGN4(len + 6 * 2 + 2);
1093 if (t2p->t_name == NULL)
1094 len += 2;
1095 if (len + leftpcount > txmax) {
1096 txpcount = min(leftpcount, txmax - len);
1097 poff = len;
1098 txdcount = 0;
1099 doff = 0;
1100 } else {
1101 txpcount = leftpcount;
1102 poff = txpcount ? len : 0;
1103 len = ALIGN4(len + txpcount);
1104 txdcount = min(leftdcount, txmax - len);
1105 doff = txdcount ? len : 0;
1107 mb_put_uint16le(mbp, txpcount);
1108 mb_put_uint16le(mbp, poff);
1109 mb_put_uint16le(mbp, totpcount - leftpcount);
1110 mb_put_uint16le(mbp, txdcount);
1111 mb_put_uint16le(mbp, doff);
1112 mb_put_uint16le(mbp, totdcount - leftdcount);
1113 leftpcount -= txpcount;
1114 leftdcount -= txdcount;
1115 if (t2p->t_name == NULL)
1116 mb_put_uint16le(mbp, t2p->t2_fid);
1117 smb_rq_wend(rqp);
1118 smb_rq_bstart(rqp);
1119 mb_put_uint8(mbp, 0); /* name */
1120 len = mb_fixhdr(mbp);
1121 if (txpcount) {
1122 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1123 error = md_get_mbuf(&mbparam, txpcount, &m);
1124 if (error)
1125 goto bad;
1126 mb_put_mbuf(mbp, m);
1128 len = mb_fixhdr(mbp);
1129 if (txdcount) {
1130 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1131 error = md_get_mbuf(&mbdata, txdcount, &m);
1132 if (error)
1133 goto bad;
1134 mb_put_mbuf(mbp, m);
1136 smb_rq_bend(rqp);
1137 error = smb_iod_multirq(rqp);
1138 if (error)
1139 goto bad;
1140 } /* while left params or data */
1141 error = smb_t2_reply(t2p);
1142 if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1143 goto bad;
1144 mdp = &t2p->t2_rdata;
1145 if (mdp->md_top) {
1146 md_initm(mdp, mdp->md_top);
1148 mdp = &t2p->t2_rparam;
1149 if (mdp->md_top) {
1150 md_initm(mdp, mdp->md_top);
1152 bad:
1153 smb_iod_removerq(rqp);
1154 freerq:
1155 if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1156 if (rqp->sr_flags & SMBR_RESTART)
1157 t2p->t2_flags |= SMBT2_RESTART;
1158 md_done(&t2p->t2_rparam);
1159 md_done(&t2p->t2_rdata);
1161 smb_rq_done(rqp);
1162 return (error);
1167 * Perform a full round of an NT_TRANSACTION request.
1169 static int
1170 smb_nt_request_int(struct smb_ntrq *ntp)
1172 struct smb_vc *vcp = ntp->nt_vc;
1173 struct smb_cred *scred = ntp->nt_cred;
1174 struct mbchain *mbp;
1175 struct mdchain *mdp, mbsetup, mbparam, mbdata;
1176 mblk_t *m;
1177 struct smb_rq *rqp;
1178 int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
1179 int error, doff, poff, txdcount, txpcount;
1180 int totscount;
1182 m = ntp->nt_tsetup.mb_top;
1183 if (m) {
1184 md_initm(&mbsetup, m); /* do not free it! */
1185 totscount = m_fixhdr(m);
1186 if (totscount > 2 * 0xff)
1187 return (EINVAL);
1188 } else
1189 totscount = 0;
1190 m = ntp->nt_tparam.mb_top;
1191 if (m) {
1192 md_initm(&mbparam, m); /* do not free it! */
1193 totpcount = m_fixhdr(m);
1194 if (totpcount > 0x7fffffff)
1195 return (EINVAL);
1196 } else
1197 totpcount = 0;
1198 m = ntp->nt_tdata.mb_top;
1199 if (m) {
1200 md_initm(&mbdata, m); /* do not free it! */
1201 totdcount = m_fixhdr(m);
1202 if (totdcount > 0x7fffffff)
1203 return (EINVAL);
1204 } else
1205 totdcount = 0;
1206 leftdcount = totdcount;
1207 leftpcount = totpcount;
1208 txmax = vcp->vc_txmax;
1209 error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
1210 if (error)
1211 return (error);
1212 rqp->sr_timo = smb_timo_default;
1213 rqp->sr_flags |= SMBR_MULTIPACKET;
1214 ntp->nt_rq = rqp;
1215 mbp = &rqp->sr_rq;
1216 smb_rq_wstart(rqp);
1217 mb_put_uint8(mbp, ntp->nt_maxscount);
1218 mb_put_uint16le(mbp, 0); /* reserved (flags?) */
1219 mb_put_uint32le(mbp, totpcount);
1220 mb_put_uint32le(mbp, totdcount);
1221 mb_put_uint32le(mbp, ntp->nt_maxpcount);
1222 mb_put_uint32le(mbp, ntp->nt_maxdcount);
1223 len = mb_fixhdr(mbp);
1225 * Now we know the fixed overhead of this packet:
1226 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
1227 * and can decide which parts (params, data) go into the first request.
1229 len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
1230 if (len + leftpcount > txmax) {
1231 txpcount = min(leftpcount, txmax - len);
1232 poff = len;
1233 txdcount = 0;
1234 doff = 0;
1235 } else {
1236 txpcount = leftpcount;
1237 poff = txpcount ? len : 0;
1238 len = ALIGN4(len + txpcount);
1239 txdcount = min(leftdcount, txmax - len);
1240 doff = txdcount ? len : 0;
1242 leftpcount -= txpcount;
1243 leftdcount -= txdcount;
1244 mb_put_uint32le(mbp, txpcount);
1245 mb_put_uint32le(mbp, poff);
1246 mb_put_uint32le(mbp, txdcount);
1247 mb_put_uint32le(mbp, doff);
1248 mb_put_uint8(mbp, (totscount+1)/2);
1249 mb_put_uint16le(mbp, ntp->nt_function);
1250 if (totscount) {
1251 error = md_get_mbuf(&mbsetup, totscount, &m);
1252 SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
1253 if (error)
1254 goto freerq;
1255 mb_put_mbuf(mbp, m);
1256 if (totscount & 1)
1257 mb_put_uint8(mbp, 0); /* setup is in words */
1259 smb_rq_wend(rqp);
1260 smb_rq_bstart(rqp);
1261 len = mb_fixhdr(mbp);
1262 if (txpcount) {
1263 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1264 error = md_get_mbuf(&mbparam, txpcount, &m);
1265 SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1266 if (error)
1267 goto freerq;
1268 mb_put_mbuf(mbp, m);
1270 len = mb_fixhdr(mbp);
1271 if (txdcount) {
1272 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1273 error = md_get_mbuf(&mbdata, txdcount, &m);
1274 if (error)
1275 goto freerq;
1276 mb_put_mbuf(mbp, m);
1278 smb_rq_bend(rqp); /* incredible, but that's it... */
1279 error = smb_rq_enqueue(rqp);
1280 if (error)
1281 goto freerq;
1282 if (leftpcount || leftdcount) {
1283 error = smb_rq_reply(rqp);
1284 if (error)
1285 goto bad;
1287 * this is an interim response, ignore it.
1289 SMBRQ_LOCK(rqp);
1290 md_next_record(&rqp->sr_rp);
1291 SMBRQ_UNLOCK(rqp);
1293 while (leftpcount || leftdcount) {
1294 error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
1295 if (error)
1296 goto bad;
1297 mbp = &rqp->sr_rq;
1298 smb_rq_wstart(rqp);
1299 mb_put_mem(mbp, NULL, 3, MB_MZERO);
1300 mb_put_uint32le(mbp, totpcount);
1301 mb_put_uint32le(mbp, totdcount);
1302 len = mb_fixhdr(mbp);
1304 * Now we know the fixed overhead of this packet:
1305 * ALIGN4(len + 6 * 4 + 2),
1306 * and can decide which parts (params, data) go into this request.
1308 len = ALIGN4(len + 6 * 4 + 2);
1309 if (len + leftpcount > txmax) {
1310 txpcount = min(leftpcount, txmax - len);
1311 poff = len;
1312 txdcount = 0;
1313 doff = 0;
1314 } else {
1315 txpcount = leftpcount;
1316 poff = txpcount ? len : 0;
1317 len = ALIGN4(len + txpcount);
1318 txdcount = min(leftdcount, txmax - len);
1319 doff = txdcount ? len : 0;
1321 mb_put_uint32le(mbp, txpcount);
1322 mb_put_uint32le(mbp, poff);
1323 mb_put_uint32le(mbp, totpcount - leftpcount);
1324 mb_put_uint32le(mbp, txdcount);
1325 mb_put_uint32le(mbp, doff);
1326 mb_put_uint32le(mbp, totdcount - leftdcount);
1327 leftpcount -= txpcount;
1328 leftdcount -= txdcount;
1329 smb_rq_wend(rqp);
1330 smb_rq_bstart(rqp);
1331 len = mb_fixhdr(mbp);
1332 if (txpcount) {
1333 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1334 error = md_get_mbuf(&mbparam, txpcount, &m);
1335 if (error)
1336 goto bad;
1337 mb_put_mbuf(mbp, m);
1339 len = mb_fixhdr(mbp);
1340 if (txdcount) {
1341 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1342 error = md_get_mbuf(&mbdata, txdcount, &m);
1343 if (error)
1344 goto bad;
1345 mb_put_mbuf(mbp, m);
1347 smb_rq_bend(rqp);
1348 error = smb_iod_multirq(rqp);
1349 if (error)
1350 goto bad;
1351 } /* while left params or data */
1352 error = smb_nt_reply(ntp);
1353 if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1354 goto bad;
1355 mdp = &ntp->nt_rdata;
1356 if (mdp->md_top) {
1357 md_initm(mdp, mdp->md_top);
1359 mdp = &ntp->nt_rparam;
1360 if (mdp->md_top) {
1361 md_initm(mdp, mdp->md_top);
1363 bad:
1364 smb_iod_removerq(rqp);
1365 freerq:
1366 if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1367 if (rqp->sr_flags & SMBR_RESTART)
1368 ntp->nt_flags |= SMBT2_RESTART;
1369 md_done(&ntp->nt_rparam);
1370 md_done(&ntp->nt_rdata);
1372 smb_rq_done(rqp);
1373 return (error);
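/*
 * Run a TRANS/TRANS2 exchange, retrying after a reconnect-
 * triggered restart (SMBT2_RESTART) at most SMBMAXRESTARTS
 * times, waiting SMB_RCNDELAY seconds between attempts.
 */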
1377 smb_t2_request(struct smb_t2rq *t2p)
1379 int error = EINVAL, i;
1381 for (i = 0; ; ) {
1383 * Don't send any new requests if force unmount is underway.
1384 * This check was moved into smb_rq_enqueue, called by
1385 * smb_t2_request_int()
1387 t2p->t2_flags &= ~SMBT2_RESTART;
1388 error = smb_t2_request_int(t2p);
1389 if (!error)
1390 break;
1391 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1392 SMBT2_RESTART)
1393 break;
1394 if (++i > SMBMAXRESTARTS)
1395 break;
1396 mutex_enter(&(t2p)->t2_lock);
1397 if (t2p->t2_share) {
1398 (void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1399 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1400 } else {
1401 ddi_sleep(SMB_RCNDELAY);
1403 mutex_exit(&(t2p)->t2_lock);
1405 return (error);
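/*
 * Run an NT_TRANSACT exchange, retrying after a reconnect-
 * triggered restart (SMBT2_RESTART) at most SMBMAXRESTARTS
 * times, waiting SMB_RCNDELAY seconds between attempts.
 */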
1410 smb_nt_request(struct smb_ntrq *ntp)
1412 int error = EINVAL, i;
1414 for (i = 0; ; ) {
1416 * Don't send any new requests if force unmount is underway.
1417 * This check was moved into smb_rq_enqueue, called by
1418 * smb_nt_request_int()
1420 ntp->nt_flags &= ~SMBT2_RESTART;
1421 error = smb_nt_request_int(ntp);
1422 if (!error)
1423 break;
1424 if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1425 SMBT2_RESTART)
1426 break;
1427 if (++i > SMBMAXRESTARTS)
1428 break;
1429 mutex_enter(&(ntp)->nt_lock);
1430 if (ntp->nt_share) {
1431 (void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1432 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1434 } else {
1435 ddi_sleep(SMB_RCNDELAY);
1437 mutex_exit(&(ntp)->nt_lock);
1439 return (error);