2 * Copyright (c) 2000-2001, Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
36 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
39 #include <sys/param.h>
40 #include <sys/systm.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/sunddi.h>
48 #include <sys/cmn_err.h>
51 #include <netsmb/smb_osdep.h>
53 #include <netsmb/smb.h>
54 #include <netsmb/smb_conn.h>
55 #include <netsmb/smb_subr.h>
56 #include <netsmb/smb_tran.h>
57 #include <netsmb/smb_rq.h>
60 * How long to wait before restarting a request (after reconnect)
62 #define SMB_RCNDELAY 2 /* seconds */
65 * leave this zero - we can't second guess server side effects of
66 * duplicate ops, this isn't nfs!
68 #define SMBMAXRESTARTS 0
71 static int smb_rq_reply(struct smb_rq
*rqp
);
72 static int smb_rq_enqueue(struct smb_rq
*rqp
);
73 static int smb_rq_getenv(struct smb_connobj
*layer
,
74 struct smb_vc
**vcpp
, struct smb_share
**sspp
);
75 static int smb_rq_new(struct smb_rq
*rqp
, uchar_t cmd
);
76 static int smb_t2_reply(struct smb_t2rq
*t2p
);
77 static int smb_nt_reply(struct smb_ntrq
*ntp
);
81 * Done with a request object. Free its contents.
82 * If it was allocated (SMBR_ALLOCED) free it too.
83 * Some of these are stack locals, not allocated.
85 * No locks here - this is the last ref.
88 smb_rq_done(struct smb_rq
*rqp
)
92 * No smb_vc_rele() here - see smb_rq_init()
96 mutex_destroy(&rqp
->sr_lock
);
97 cv_destroy(&rqp
->sr_cond
);
98 if (rqp
->sr_flags
& SMBR_ALLOCED
)
99 kmem_free(rqp
, sizeof (*rqp
));
103 smb_rq_alloc(struct smb_connobj
*layer
, uchar_t cmd
, struct smb_cred
*scred
,
104 struct smb_rq
**rqpp
)
109 rqp
= kmem_alloc(sizeof (struct smb_rq
), KM_SLEEP
);
112 error
= smb_rq_init(rqp
, layer
, cmd
, scred
);
117 rqp
->sr_flags
|= SMBR_ALLOCED
;
123 smb_rq_init(struct smb_rq
*rqp
, struct smb_connobj
*co
, uchar_t cmd
,
124 struct smb_cred
*scred
)
128 bzero(rqp
, sizeof (*rqp
));
129 mutex_init(&rqp
->sr_lock
, NULL
, MUTEX_DRIVER
, NULL
);
130 cv_init(&rqp
->sr_cond
, NULL
, CV_DEFAULT
, NULL
);
132 error
= smb_rq_getenv(co
, &rqp
->sr_vc
, &rqp
->sr_share
);
137 * We copied a VC pointer (vcp) into rqp->sr_vc,
138 * but we do NOT do a smb_vc_hold here. Instead,
139 * the caller is responsible for the hold on the
140 * share or the VC as needed. For smbfs callers,
141 * the hold is on the share, via the smbfs mount.
142 * For nsmb ioctl callers, the hold is done when
143 * the driver handle gets VC or share references.
144 * This design avoids frequent hold/rele activity
145 * when creating and completing requests.
148 rqp
->sr_rexmit
= SMBMAXRESTARTS
;
149 rqp
->sr_cred
= scred
; /* Note: ref hold done by caller. */
150 rqp
->sr_pid
= (uint16_t)ddi_get_pid();
151 error
= smb_rq_new(rqp
, cmd
);
157 smb_rq_new(struct smb_rq
*rqp
, uchar_t cmd
)
159 struct mbchain
*mbp
= &rqp
->sr_rq
;
160 struct smb_vc
*vcp
= rqp
->sr_vc
;
169 md_done(&rqp
->sr_rp
);
170 error
= mb_init(mbp
);
175 * Is this the right place to save the flags?
177 rqp
->sr_rqflags
= vcp
->vc_hflags
;
178 rqp
->sr_rqflags2
= vcp
->vc_hflags2
;
181 * The SMB header is filled in later by
182 * smb_rq_fillhdr (see below)
183 * Just reserve space here.
185 mb_put_mem(mbp
, NULL
, SMB_HDRLEN
, MB_MZERO
);
191 * Given a request with it's body already composed,
192 * rewind to the start and fill in the SMB header.
193 * This is called after the request is enqueued,
194 * so we have the final MID, seq num. etc.
197 smb_rq_fillhdr(struct smb_rq
*rqp
)
199 struct mbchain mbtmp
, *mbp
= &mbtmp
;
203 * Fill in the SMB header using a dup of the first mblk,
204 * which points at the same data but has its own wptr,
205 * so we can rewind without trashing the message.
207 m
= dupb(rqp
->sr_rq
.mb_top
);
208 m
->b_wptr
= m
->b_rptr
; /* rewind */
211 mb_put_mem(mbp
, SMB_SIGNATURE
, 4, MB_MSYSTEM
);
212 mb_put_uint8(mbp
, rqp
->sr_cmd
);
213 mb_put_uint32le(mbp
, 0); /* status */
214 mb_put_uint8(mbp
, rqp
->sr_rqflags
);
215 mb_put_uint16le(mbp
, rqp
->sr_rqflags2
);
216 mb_put_uint16le(mbp
, 0); /* pid-high */
217 mb_put_mem(mbp
, NULL
, 8, MB_MZERO
); /* MAC sig. (later) */
218 mb_put_uint16le(mbp
, 0); /* reserved */
219 mb_put_uint16le(mbp
, rqp
->sr_rqtid
);
220 mb_put_uint16le(mbp
, rqp
->sr_pid
);
221 mb_put_uint16le(mbp
, rqp
->sr_rquid
);
222 mb_put_uint16le(mbp
, rqp
->sr_mid
);
224 /* This will free the mblk from dupb. */
229 smb_rq_simple(struct smb_rq
*rqp
)
231 return (smb_rq_simple_timed(rqp
, smb_timo_default
));
235 * Simple request-reply exchange
238 smb_rq_simple_timed(struct smb_rq
*rqp
, int timeout
)
244 * Don't send any new requests if force unmount is underway.
245 * This check was moved into smb_rq_enqueue.
247 rqp
->sr_flags
&= ~SMBR_RESTART
;
248 rqp
->sr_timo
= timeout
; /* in seconds */
249 rqp
->sr_state
= SMBRQ_NOTSENT
;
250 error
= smb_rq_enqueue(rqp
);
254 error
= smb_rq_reply(rqp
);
257 if ((rqp
->sr_flags
& (SMBR_RESTART
| SMBR_NORESTART
)) !=
260 if (rqp
->sr_rexmit
<= 0)
264 (void) cv_reltimedwait(&rqp
->sr_cond
, &(rqp
)->sr_lock
,
265 SEC_TO_TICK(SMB_RCNDELAY
), TR_CLOCK_TICK
);
268 ddi_sleep(SMB_RCNDELAY
);
278 smb_rq_enqueue(struct smb_rq
*rqp
)
280 struct smb_vc
*vcp
= rqp
->sr_vc
;
281 struct smb_share
*ssp
= rqp
->sr_share
;
285 * Normal requests may initiate a reconnect,
286 * and/or wait for state changes to finish.
287 * Some requests set the NORECONNECT flag
288 * to avoid all that (i.e. tree discon)
290 if (rqp
->sr_flags
& SMBR_NORECONNECT
) {
291 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
292 SMBSDEBUG("bad vc_state=%d\n", vcp
->vc_state
);
296 ((ssp
->ss_flags
& SMBS_CONNECTED
) == 0))
302 * If we're not connected, initiate a reconnect
303 * and/or wait for an existing one to finish.
305 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
306 error
= smb_iod_reconnect(vcp
);
312 * If this request has a "share" object
313 * that needs a tree connect, do it now.
315 if (ssp
!= NULL
&& (ssp
->ss_flags
& SMBS_CONNECTED
) == 0) {
316 error
= smb_share_tcon(ssp
, rqp
->sr_cred
);
322 * We now know what UID + TID to use.
323 * Store them in the request.
326 rqp
->sr_rquid
= vcp
->vc_smbuid
;
327 rqp
->sr_rqtid
= ssp
? ssp
->ss_tid
: SMB_TID_UNKNOWN
;
328 error
= smb_iod_addrq(rqp
);
334 * Mark location of the word count, which is filled in later by
335 * smb_rw_wend(). Also initialize the counter that it uses
336 * to figure out what value to fill in.
338 * Note that the word count happens to be 8-bit.
341 smb_rq_wstart(struct smb_rq
*rqp
)
343 rqp
->sr_wcount
= mb_reserve(&rqp
->sr_rq
, sizeof (uint8_t));
344 rqp
->sr_rq
.mb_count
= 0;
348 smb_rq_wend(struct smb_rq
*rqp
)
352 if (rqp
->sr_wcount
== NULL
) {
353 SMBSDEBUG("no wcount\n");
356 wcnt
= rqp
->sr_rq
.mb_count
;
358 SMBSDEBUG("word count too large (%d)\n", wcnt
);
360 SMBSDEBUG("odd word count\n");
361 /* Fill in the word count (8-bits) */
362 *rqp
->sr_wcount
= (wcnt
>> 1);
366 * Mark location of the byte count, which is filled in later by
367 * smb_rw_bend(). Also initialize the counter that it uses
368 * to figure out what value to fill in.
370 * Note that the byte count happens to be 16-bit.
373 smb_rq_bstart(struct smb_rq
*rqp
)
375 rqp
->sr_bcount
= mb_reserve(&rqp
->sr_rq
, sizeof (uint16_t));
376 rqp
->sr_rq
.mb_count
= 0;
380 smb_rq_bend(struct smb_rq
*rqp
)
384 if (rqp
->sr_bcount
== NULL
) {
385 SMBSDEBUG("no bcount\n");
388 bcnt
= rqp
->sr_rq
.mb_count
;
390 SMBSDEBUG("byte count too large (%d)\n", bcnt
);
392 * Fill in the byte count (16-bits)
393 * The pointer is char * type due to
394 * typical off-by-one alignment.
396 rqp
->sr_bcount
[0] = bcnt
& 0xFF;
397 rqp
->sr_bcount
[1] = (bcnt
>> 8);
401 smb_rq_intr(struct smb_rq
*rqp
)
403 if (rqp
->sr_flags
& SMBR_INTR
)
410 smb_rq_getenv(struct smb_connobj
*co
,
411 struct smb_vc
**vcpp
, struct smb_share
**sspp
)
413 struct smb_vc
*vcp
= NULL
;
414 struct smb_share
*ssp
= NULL
;
417 if (co
->co_flags
& SMBO_GONE
) {
418 SMBSDEBUG("zombie CO\n");
423 switch (co
->co_level
) {
426 if ((co
->co_flags
& SMBO_GONE
) ||
427 co
->co_parent
== NULL
) {
428 SMBSDEBUG("zombie share %s\n", ssp
->ss_name
);
431 /* instead of recursion... */
436 if ((co
->co_flags
& SMBO_GONE
) ||
437 co
->co_parent
== NULL
) {
438 SMBSDEBUG("zombie VC %s\n", vcp
->vc_srvname
);
445 SMBSDEBUG("invalid level %d passed\n", co
->co_level
);
460 * Wait for reply on the request
463 smb_rq_reply(struct smb_rq
*rqp
)
465 struct mdchain
*mdp
= &rqp
->sr_rp
;
467 int error
, rperror
= 0;
469 if (rqp
->sr_timo
== SMBNOREPLYWAIT
) {
470 smb_iod_removerq(rqp
);
474 error
= smb_iod_waitrq(rqp
);
479 * If the request was signed, validate the
480 * signature on the response.
482 if (rqp
->sr_rqflags2
& SMB_FLAGS2_SECURITY_SIGNATURE
) {
483 error
= smb_rq_verify(rqp
);
489 * Parse the SMB header
491 error
= md_get_uint32le(mdp
, NULL
);
494 error
= md_get_uint8(mdp
, &tb
);
495 error
= md_get_uint32le(mdp
, &rqp
->sr_error
);
496 error
= md_get_uint8(mdp
, &rqp
->sr_rpflags
);
497 error
= md_get_uint16le(mdp
, &rqp
->sr_rpflags2
);
498 if (rqp
->sr_rpflags2
& SMB_FLAGS2_ERR_STATUS
) {
500 * Do a special check for STATUS_BUFFER_OVERFLOW;
503 if (rqp
->sr_error
== NT_STATUS_BUFFER_OVERFLOW
) {
505 * Don't report it as an error to our caller;
506 * they can look at rqp->sr_error if they
507 * need to know whether we got a
508 * STATUS_BUFFER_OVERFLOW.
509 * XXX - should we do that for all errors
510 * where (error & 0xC0000000) is 0x80000000,
515 rperror
= smb_maperr32(rqp
->sr_error
);
517 rqp
->sr_errclass
= rqp
->sr_error
& 0xff;
518 rqp
->sr_serror
= rqp
->sr_error
>> 16;
519 rperror
= smb_maperror(rqp
->sr_errclass
, rqp
->sr_serror
);
521 if (rperror
== EMOREDATA
) {
523 rqp
->sr_flags
|= SMBR_MOREDATA
;
525 rqp
->sr_flags
&= ~SMBR_MOREDATA
;
527 error
= md_get_uint32le(mdp
, NULL
);
528 error
= md_get_uint32le(mdp
, NULL
);
529 error
= md_get_uint32le(mdp
, NULL
);
531 error
= md_get_uint16le(mdp
, &rqp
->sr_rptid
);
532 error
= md_get_uint16le(mdp
, &rqp
->sr_rppid
);
533 error
= md_get_uint16le(mdp
, &rqp
->sr_rpuid
);
534 error
= md_get_uint16le(mdp
, &rqp
->sr_rpmid
);
536 return ((error
) ? error
: rperror
);
540 #define ALIGN4(a) (((a) + 3) & ~3)
543 * TRANS2 request implementation
544 * TRANS implementation is in the "t2" routines
545 * NT_TRANSACTION implementation is the separate "nt" stuff
548 smb_t2_alloc(struct smb_connobj
*layer
, ushort_t setup
, struct smb_cred
*scred
,
549 struct smb_t2rq
**t2pp
)
551 struct smb_t2rq
*t2p
;
554 t2p
= kmem_alloc(sizeof (*t2p
), KM_SLEEP
);
557 error
= smb_t2_init(t2p
, layer
, &setup
, 1, scred
);
558 t2p
->t2_flags
|= SMBT2_ALLOCED
;
568 smb_nt_alloc(struct smb_connobj
*layer
, ushort_t fn
, struct smb_cred
*scred
,
569 struct smb_ntrq
**ntpp
)
571 struct smb_ntrq
*ntp
;
574 ntp
= kmem_alloc(sizeof (*ntp
), KM_SLEEP
);
577 error
= smb_nt_init(ntp
, layer
, fn
, scred
);
578 mutex_init(&ntp
->nt_lock
, NULL
, MUTEX_DRIVER
, NULL
);
579 cv_init(&ntp
->nt_cond
, NULL
, CV_DEFAULT
, NULL
);
580 ntp
->nt_flags
|= SMBT2_ALLOCED
;
590 smb_t2_init(struct smb_t2rq
*t2p
, struct smb_connobj
*source
, ushort_t
*setup
,
591 int setupcnt
, struct smb_cred
*scred
)
596 bzero(t2p
, sizeof (*t2p
));
597 mutex_init(&t2p
->t2_lock
, NULL
, MUTEX_DRIVER
, NULL
);
598 cv_init(&t2p
->t2_cond
, NULL
, CV_DEFAULT
, NULL
);
600 t2p
->t2_source
= source
;
601 t2p
->t2_setupcount
= (u_int16_t
)setupcnt
;
602 t2p
->t2_setupdata
= t2p
->t2_setup
;
603 for (i
= 0; i
< setupcnt
; i
++)
604 t2p
->t2_setup
[i
] = setup
[i
];
605 t2p
->t2_fid
= 0xffff;
606 t2p
->t2_cred
= scred
;
607 t2p
->t2_share
= (source
->co_level
== SMBL_SHARE
?
608 CPTOSS(source
) : NULL
); /* for smb up/down */
609 error
= smb_rq_getenv(source
, &t2p
->t2_vc
, NULL
);
616 smb_nt_init(struct smb_ntrq
*ntp
, struct smb_connobj
*source
, ushort_t fn
,
617 struct smb_cred
*scred
)
621 bzero(ntp
, sizeof (*ntp
));
622 ntp
->nt_source
= source
;
623 ntp
->nt_function
= fn
;
624 ntp
->nt_cred
= scred
;
625 ntp
->nt_share
= (source
->co_level
== SMBL_SHARE
?
626 CPTOSS(source
) : NULL
); /* for smb up/down */
627 error
= smb_rq_getenv(source
, &ntp
->nt_vc
, NULL
);
634 smb_t2_done(struct smb_t2rq
*t2p
)
636 mb_done(&t2p
->t2_tparam
);
637 mb_done(&t2p
->t2_tdata
);
638 md_done(&t2p
->t2_rparam
);
639 md_done(&t2p
->t2_rdata
);
640 mutex_destroy(&t2p
->t2_lock
);
641 cv_destroy(&t2p
->t2_cond
);
642 if (t2p
->t2_flags
& SMBT2_ALLOCED
)
643 kmem_free(t2p
, sizeof (*t2p
));
647 smb_nt_done(struct smb_ntrq
*ntp
)
649 mb_done(&ntp
->nt_tsetup
);
650 mb_done(&ntp
->nt_tparam
);
651 mb_done(&ntp
->nt_tdata
);
652 md_done(&ntp
->nt_rparam
);
653 md_done(&ntp
->nt_rdata
);
654 cv_destroy(&ntp
->nt_cond
);
655 mutex_destroy(&ntp
->nt_lock
);
656 if (ntp
->nt_flags
& SMBT2_ALLOCED
)
657 kmem_free(ntp
, sizeof (*ntp
));
661 * Extract data [offset,count] from mtop and add to mdp.
664 smb_t2_placedata(mblk_t
*mtop
, u_int16_t offset
, u_int16_t count
,
669 n
= m_copym(mtop
, offset
, count
, M_WAITOK
);
673 if (mdp
->md_top
== NULL
) {
676 m_cat(mdp
->md_top
, n
);
682 smb_t2_reply(struct smb_t2rq
*t2p
)
685 struct smb_rq
*rqp
= t2p
->t2_rq
;
686 int error
, error2
, totpgot
, totdgot
;
687 u_int16_t totpcount
, totdcount
, pcount
, poff
, doff
, pdisp
, ddisp
;
688 u_int16_t tmp
, bc
, dcount
;
691 t2p
->t2_flags
&= ~SMBT2_MOREDATA
;
693 error
= smb_rq_reply(rqp
);
694 if (rqp
->sr_flags
& SMBR_MOREDATA
)
695 t2p
->t2_flags
|= SMBT2_MOREDATA
;
696 t2p
->t2_sr_errclass
= rqp
->sr_errclass
;
697 t2p
->t2_sr_serror
= rqp
->sr_serror
;
698 t2p
->t2_sr_error
= rqp
->sr_error
;
699 t2p
->t2_sr_rpflags2
= rqp
->sr_rpflags2
;
700 if (error
&& !(rqp
->sr_flags
& SMBR_MOREDATA
))
703 * Now we have to get all subsequent responses, if any.
704 * The CIFS specification says that they can be misordered,
708 totpgot
= totdgot
= 0;
709 totpcount
= totdcount
= 0xffff;
712 DTRACE_PROBE2(smb_trans_reply
,
713 (smb_rq_t
*), rqp
, (mblk_t
*), mdp
->md_top
);
714 m_dumpm(mdp
->md_top
);
716 if ((error2
= md_get_uint8(mdp
, &wc
)) != 0)
722 if ((error2
= md_get_uint16le(mdp
, &tmp
)) != 0)
726 if ((error2
= md_get_uint16le(mdp
, &tmp
)) != 0)
730 if ((error2
= md_get_uint16le(mdp
, &tmp
)) != 0 || /* reserved */
731 (error2
= md_get_uint16le(mdp
, &pcount
)) != 0 ||
732 (error2
= md_get_uint16le(mdp
, &poff
)) != 0 ||
733 (error2
= md_get_uint16le(mdp
, &pdisp
)) != 0)
735 if (pcount
!= 0 && pdisp
!= totpgot
) {
736 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
741 if ((error2
= md_get_uint16le(mdp
, &dcount
)) != 0 ||
742 (error2
= md_get_uint16le(mdp
, &doff
)) != 0 ||
743 (error2
= md_get_uint16le(mdp
, &ddisp
)) != 0)
745 if (dcount
!= 0 && ddisp
!= totdgot
) {
746 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
752 /* XXX: Skip setup words? We don't save them? */
753 md_get_uint8(mdp
, &wc
); /* SetupCount */
754 md_get_uint8(mdp
, NULL
); /* Reserved2 */
757 md_get_uint16le(mdp
, NULL
);
759 if ((error2
= md_get_uint16le(mdp
, &bc
)) != 0)
763 * There are pad bytes here, and the poff value
764 * indicates where the next data are found.
765 * No need to guess at the padding size.
768 error2
= smb_t2_placedata(mdp
->md_top
, poff
,
769 pcount
, &t2p
->t2_rparam
);
776 error2
= smb_t2_placedata(mdp
->md_top
, doff
,
777 dcount
, &t2p
->t2_rdata
);
783 if (totpgot
>= totpcount
&& totdgot
>= totdcount
) {
785 t2p
->t2_flags
|= SMBT2_ALLRECV
;
789 * We're done with this reply, look for the next one.
792 md_next_record(&rqp
->sr_rp
);
794 error2
= smb_rq_reply(rqp
);
795 if (rqp
->sr_flags
& SMBR_MOREDATA
)
796 t2p
->t2_flags
|= SMBT2_MOREDATA
;
799 t2p
->t2_sr_errclass
= rqp
->sr_errclass
;
800 t2p
->t2_sr_serror
= rqp
->sr_serror
;
801 t2p
->t2_sr_error
= rqp
->sr_error
;
802 t2p
->t2_sr_rpflags2
= rqp
->sr_rpflags2
;
804 if (!(rqp
->sr_flags
& SMBR_MOREDATA
))
807 return (error
? error
: error2
);
811 smb_nt_reply(struct smb_ntrq
*ntp
)
814 struct smb_rq
*rqp
= ntp
->nt_rq
;
816 u_int32_t totpcount
, totdcount
, pcount
, poff
, doff
, pdisp
, ddisp
;
817 u_int32_t tmp
, dcount
, totpgot
, totdgot
;
821 ntp
->nt_flags
&= ~SMBT2_MOREDATA
;
823 error
= smb_rq_reply(rqp
);
824 if (rqp
->sr_flags
& SMBR_MOREDATA
)
825 ntp
->nt_flags
|= SMBT2_MOREDATA
;
826 ntp
->nt_sr_error
= rqp
->sr_error
;
827 ntp
->nt_sr_rpflags2
= rqp
->sr_rpflags2
;
828 if (error
&& !(rqp
->sr_flags
& SMBR_MOREDATA
))
831 * Now we have to get all subsequent responses. The CIFS specification
832 * says that they can be misordered which is weird.
835 totpgot
= totdgot
= 0;
836 totpcount
= totdcount
= 0xffffffff;
839 DTRACE_PROBE2(smb_trans_reply
,
840 (smb_rq_t
*), rqp
, (mblk_t
*), mdp
->md_top
);
841 m_dumpm(mdp
->md_top
);
843 if ((error2
= md_get_uint8(mdp
, &wc
)) != 0)
849 md_get_mem(mdp
, NULL
, 3, MB_MSYSTEM
); /* reserved */
850 if ((error2
= md_get_uint32le(mdp
, &tmp
)) != 0)
854 if ((error2
= md_get_uint32le(mdp
, &tmp
)) != 0)
858 if ((error2
= md_get_uint32le(mdp
, &pcount
)) != 0 ||
859 (error2
= md_get_uint32le(mdp
, &poff
)) != 0 ||
860 (error2
= md_get_uint32le(mdp
, &pdisp
)) != 0)
862 if (pcount
!= 0 && pdisp
!= totpgot
) {
863 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
868 if ((error2
= md_get_uint32le(mdp
, &dcount
)) != 0 ||
869 (error2
= md_get_uint32le(mdp
, &doff
)) != 0 ||
870 (error2
= md_get_uint32le(mdp
, &ddisp
)) != 0)
872 if (dcount
!= 0 && ddisp
!= totdgot
) {
873 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
879 /* XXX: Skip setup words? We don't save them? */
880 md_get_uint8(mdp
, &wc
); /* SetupCount */
883 md_get_uint16le(mdp
, NULL
);
885 if ((error2
= md_get_uint16le(mdp
, &bc
)) != 0)
889 * There are pad bytes here, and the poff value
890 * indicates where the next data are found.
891 * No need to guess at the padding size.
894 error2
= smb_t2_placedata(mdp
->md_top
, poff
, pcount
,
902 error2
= smb_t2_placedata(mdp
->md_top
, doff
, dcount
,
909 if (totpgot
>= totpcount
&& totdgot
>= totdcount
) {
911 ntp
->nt_flags
|= SMBT2_ALLRECV
;
915 * We're done with this reply, look for the next one.
918 md_next_record(&rqp
->sr_rp
);
920 error2
= smb_rq_reply(rqp
);
921 if (rqp
->sr_flags
& SMBR_MOREDATA
)
922 ntp
->nt_flags
|= SMBT2_MOREDATA
;
925 ntp
->nt_sr_error
= rqp
->sr_error
;
926 ntp
->nt_sr_rpflags2
= rqp
->sr_rpflags2
;
928 if (!(rqp
->sr_flags
& SMBR_MOREDATA
))
931 return (error
? error
: error2
);
935 * Perform a full round of TRANS2 request
938 smb_t2_request_int(struct smb_t2rq
*t2p
)
940 struct smb_vc
*vcp
= t2p
->t2_vc
;
941 struct smb_cred
*scred
= t2p
->t2_cred
;
943 struct mdchain
*mdp
, mbparam
, mbdata
;
946 int totpcount
, leftpcount
, totdcount
, leftdcount
, len
, txmax
, i
;
947 int error
, doff
, poff
, txdcount
, txpcount
, nmlen
, nmsize
;
949 m
= t2p
->t2_tparam
.mb_top
;
951 md_initm(&mbparam
, m
); /* do not free it! */
952 totpcount
= m_fixhdr(m
);
953 if (totpcount
> 0xffff) /* maxvalue for ushort_t */
957 m
= t2p
->t2_tdata
.mb_top
;
959 md_initm(&mbdata
, m
); /* do not free it! */
960 totdcount
= m_fixhdr(m
);
961 if (totdcount
> 0xffff)
965 leftdcount
= totdcount
;
966 leftpcount
= totpcount
;
967 txmax
= vcp
->vc_txmax
;
968 error
= smb_rq_alloc(t2p
->t2_source
, t2p
->t_name
?
969 SMB_COM_TRANSACTION
: SMB_COM_TRANSACTION2
, scred
, &rqp
);
972 rqp
->sr_timo
= smb_timo_default
;
973 rqp
->sr_flags
|= SMBR_MULTIPACKET
;
977 mb_put_uint16le(mbp
, totpcount
);
978 mb_put_uint16le(mbp
, totdcount
);
979 mb_put_uint16le(mbp
, t2p
->t2_maxpcount
);
980 mb_put_uint16le(mbp
, t2p
->t2_maxdcount
);
981 mb_put_uint8(mbp
, t2p
->t2_maxscount
);
982 mb_put_uint8(mbp
, 0); /* reserved */
983 mb_put_uint16le(mbp
, 0); /* flags */
984 mb_put_uint32le(mbp
, 0); /* Timeout */
985 mb_put_uint16le(mbp
, 0); /* reserved 2 */
986 len
= mb_fixhdr(mbp
);
989 * Now we know the size of the trans overhead stuff:
990 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
991 * where nmsize is the OTW size of the name, including
992 * the unicode null terminator and any alignment.
993 * Use this to decide which parts (and how much)
994 * can go into this request: params, data
996 nmlen
= t2p
->t_name
? t2p
->t_name_len
: 0;
997 nmsize
= nmlen
+ 1; /* null term. */
998 if (SMB_UNICODE_STRINGS(vcp
)) {
1000 /* we know put_dmem will need to align */
1003 len
= ALIGN4(len
+ 5 * 2 + t2p
->t2_setupcount
* 2 + 2 + nmsize
);
1004 if (len
+ leftpcount
> txmax
) {
1005 txpcount
= min(leftpcount
, txmax
- len
);
1010 txpcount
= leftpcount
;
1011 poff
= txpcount
? len
: 0;
1013 * Other client traffic seems to "ALIGN2" here. The extra
1014 * 2 byte pad we use has no observed downside and may be
1015 * required for some old servers(?)
1017 len
= ALIGN4(len
+ txpcount
);
1018 txdcount
= min(leftdcount
, txmax
- len
);
1019 doff
= txdcount
? len
: 0;
1021 leftpcount
-= txpcount
;
1022 leftdcount
-= txdcount
;
1023 mb_put_uint16le(mbp
, txpcount
);
1024 mb_put_uint16le(mbp
, poff
);
1025 mb_put_uint16le(mbp
, txdcount
);
1026 mb_put_uint16le(mbp
, doff
);
1027 mb_put_uint8(mbp
, t2p
->t2_setupcount
);
1028 mb_put_uint8(mbp
, 0);
1029 for (i
= 0; i
< t2p
->t2_setupcount
; i
++) {
1030 mb_put_uint16le(mbp
, t2p
->t2_setupdata
[i
]);
1035 /* Put the string and terminating null. */
1036 error
= smb_put_dmem(mbp
, vcp
, t2p
->t_name
, nmlen
+ 1,
1039 /* nmsize accounts for padding, char size. */
1040 error
= mb_put_mem(mbp
, NULL
, nmsize
, MB_MZERO
);
1044 len
= mb_fixhdr(mbp
);
1046 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1047 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
1048 SMBSDEBUG("%d:%d:%d\n", error
, txpcount
, txmax
);
1051 mb_put_mbuf(mbp
, m
);
1053 len
= mb_fixhdr(mbp
);
1055 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1056 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
1059 mb_put_mbuf(mbp
, m
);
1061 smb_rq_bend(rqp
); /* incredible, but thats it... */
1062 error
= smb_rq_enqueue(rqp
);
1065 if (leftpcount
|| leftdcount
) {
1066 error
= smb_rq_reply(rqp
);
1070 * this is an interim response, ignore it.
1073 md_next_record(&rqp
->sr_rp
);
1076 while (leftpcount
|| leftdcount
) {
1077 error
= smb_rq_new(rqp
, t2p
->t_name
?
1078 SMB_COM_TRANSACTION_SECONDARY
:
1079 SMB_COM_TRANSACTION2_SECONDARY
);
1084 mb_put_uint16le(mbp
, totpcount
);
1085 mb_put_uint16le(mbp
, totdcount
);
1086 len
= mb_fixhdr(mbp
);
1088 * now we have known packet size as
1089 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
1090 * and need to decide which parts should go into request
1092 len
= ALIGN4(len
+ 6 * 2 + 2);
1093 if (t2p
->t_name
== NULL
)
1095 if (len
+ leftpcount
> txmax
) {
1096 txpcount
= min(leftpcount
, txmax
- len
);
1101 txpcount
= leftpcount
;
1102 poff
= txpcount
? len
: 0;
1103 len
= ALIGN4(len
+ txpcount
);
1104 txdcount
= min(leftdcount
, txmax
- len
);
1105 doff
= txdcount
? len
: 0;
1107 mb_put_uint16le(mbp
, txpcount
);
1108 mb_put_uint16le(mbp
, poff
);
1109 mb_put_uint16le(mbp
, totpcount
- leftpcount
);
1110 mb_put_uint16le(mbp
, txdcount
);
1111 mb_put_uint16le(mbp
, doff
);
1112 mb_put_uint16le(mbp
, totdcount
- leftdcount
);
1113 leftpcount
-= txpcount
;
1114 leftdcount
-= txdcount
;
1115 if (t2p
->t_name
== NULL
)
1116 mb_put_uint16le(mbp
, t2p
->t2_fid
);
1119 mb_put_uint8(mbp
, 0); /* name */
1120 len
= mb_fixhdr(mbp
);
1122 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1123 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
1126 mb_put_mbuf(mbp
, m
);
1128 len
= mb_fixhdr(mbp
);
1130 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1131 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
1134 mb_put_mbuf(mbp
, m
);
1137 error
= smb_iod_multirq(rqp
);
1140 } /* while left params or data */
1141 error
= smb_t2_reply(t2p
);
1142 if (error
&& !(t2p
->t2_flags
& SMBT2_MOREDATA
))
1144 mdp
= &t2p
->t2_rdata
;
1146 md_initm(mdp
, mdp
->md_top
);
1148 mdp
= &t2p
->t2_rparam
;
1150 md_initm(mdp
, mdp
->md_top
);
1153 smb_iod_removerq(rqp
);
1155 if (error
&& !(t2p
->t2_flags
& SMBT2_MOREDATA
)) {
1156 if (rqp
->sr_flags
& SMBR_RESTART
)
1157 t2p
->t2_flags
|= SMBT2_RESTART
;
1158 md_done(&t2p
->t2_rparam
);
1159 md_done(&t2p
->t2_rdata
);
1167 * Perform a full round of NT_TRANSACTION request
1170 smb_nt_request_int(struct smb_ntrq
*ntp
)
1172 struct smb_vc
*vcp
= ntp
->nt_vc
;
1173 struct smb_cred
*scred
= ntp
->nt_cred
;
1174 struct mbchain
*mbp
;
1175 struct mdchain
*mdp
, mbsetup
, mbparam
, mbdata
;
1178 int totpcount
, leftpcount
, totdcount
, leftdcount
, len
, txmax
;
1179 int error
, doff
, poff
, txdcount
, txpcount
;
1182 m
= ntp
->nt_tsetup
.mb_top
;
1184 md_initm(&mbsetup
, m
); /* do not free it! */
1185 totscount
= m_fixhdr(m
);
1186 if (totscount
> 2 * 0xff)
1190 m
= ntp
->nt_tparam
.mb_top
;
1192 md_initm(&mbparam
, m
); /* do not free it! */
1193 totpcount
= m_fixhdr(m
);
1194 if (totpcount
> 0x7fffffff)
1198 m
= ntp
->nt_tdata
.mb_top
;
1200 md_initm(&mbdata
, m
); /* do not free it! */
1201 totdcount
= m_fixhdr(m
);
1202 if (totdcount
> 0x7fffffff)
1206 leftdcount
= totdcount
;
1207 leftpcount
= totpcount
;
1208 txmax
= vcp
->vc_txmax
;
1209 error
= smb_rq_alloc(ntp
->nt_source
, SMB_COM_NT_TRANSACT
, scred
, &rqp
);
1212 rqp
->sr_timo
= smb_timo_default
;
1213 rqp
->sr_flags
|= SMBR_MULTIPACKET
;
1217 mb_put_uint8(mbp
, ntp
->nt_maxscount
);
1218 mb_put_uint16le(mbp
, 0); /* reserved (flags?) */
1219 mb_put_uint32le(mbp
, totpcount
);
1220 mb_put_uint32le(mbp
, totdcount
);
1221 mb_put_uint32le(mbp
, ntp
->nt_maxpcount
);
1222 mb_put_uint32le(mbp
, ntp
->nt_maxdcount
);
1223 len
= mb_fixhdr(mbp
);
1225 * now we have known packet size as
1226 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
1227 * and need to decide which parts should go into the first request
1229 len
= ALIGN4(len
+ 4 * 4 + 1 + 2 + ((totscount
+1)&~1) + 2);
1230 if (len
+ leftpcount
> txmax
) {
1231 txpcount
= min(leftpcount
, txmax
- len
);
1236 txpcount
= leftpcount
;
1237 poff
= txpcount
? len
: 0;
1238 len
= ALIGN4(len
+ txpcount
);
1239 txdcount
= min(leftdcount
, txmax
- len
);
1240 doff
= txdcount
? len
: 0;
1242 leftpcount
-= txpcount
;
1243 leftdcount
-= txdcount
;
1244 mb_put_uint32le(mbp
, txpcount
);
1245 mb_put_uint32le(mbp
, poff
);
1246 mb_put_uint32le(mbp
, txdcount
);
1247 mb_put_uint32le(mbp
, doff
);
1248 mb_put_uint8(mbp
, (totscount
+1)/2);
1249 mb_put_uint16le(mbp
, ntp
->nt_function
);
1251 error
= md_get_mbuf(&mbsetup
, totscount
, &m
);
1252 SMBSDEBUG("%d:%d:%d\n", error
, totscount
, txmax
);
1255 mb_put_mbuf(mbp
, m
);
1257 mb_put_uint8(mbp
, 0); /* setup is in words */
1261 len
= mb_fixhdr(mbp
);
1263 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1264 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
1265 SMBSDEBUG("%d:%d:%d\n", error
, txpcount
, txmax
);
1268 mb_put_mbuf(mbp
, m
);
1270 len
= mb_fixhdr(mbp
);
1272 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1273 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
1276 mb_put_mbuf(mbp
, m
);
1278 smb_rq_bend(rqp
); /* incredible, but thats it... */
1279 error
= smb_rq_enqueue(rqp
);
1282 if (leftpcount
|| leftdcount
) {
1283 error
= smb_rq_reply(rqp
);
1287 * this is an interim response, ignore it.
1290 md_next_record(&rqp
->sr_rp
);
1293 while (leftpcount
|| leftdcount
) {
1294 error
= smb_rq_new(rqp
, SMB_COM_NT_TRANSACT_SECONDARY
);
1299 mb_put_mem(mbp
, NULL
, 3, MB_MZERO
);
1300 mb_put_uint32le(mbp
, totpcount
);
1301 mb_put_uint32le(mbp
, totdcount
);
1302 len
= mb_fixhdr(mbp
);
1304 * now we have known packet size as
1305 * ALIGN4(len + 6 * 4 + 2)
1306 * and need to decide which parts should go into request
1308 len
= ALIGN4(len
+ 6 * 4 + 2);
1309 if (len
+ leftpcount
> txmax
) {
1310 txpcount
= min(leftpcount
, txmax
- len
);
1315 txpcount
= leftpcount
;
1316 poff
= txpcount
? len
: 0;
1317 len
= ALIGN4(len
+ txpcount
);
1318 txdcount
= min(leftdcount
, txmax
- len
);
1319 doff
= txdcount
? len
: 0;
1321 mb_put_uint32le(mbp
, txpcount
);
1322 mb_put_uint32le(mbp
, poff
);
1323 mb_put_uint32le(mbp
, totpcount
- leftpcount
);
1324 mb_put_uint32le(mbp
, txdcount
);
1325 mb_put_uint32le(mbp
, doff
);
1326 mb_put_uint32le(mbp
, totdcount
- leftdcount
);
1327 leftpcount
-= txpcount
;
1328 leftdcount
-= txdcount
;
1331 len
= mb_fixhdr(mbp
);
1333 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1334 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
1337 mb_put_mbuf(mbp
, m
);
1339 len
= mb_fixhdr(mbp
);
1341 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
1342 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
1345 mb_put_mbuf(mbp
, m
);
1348 error
= smb_iod_multirq(rqp
);
1351 } /* while left params or data */
1352 error
= smb_nt_reply(ntp
);
1353 if (error
&& !(ntp
->nt_flags
& SMBT2_MOREDATA
))
1355 mdp
= &ntp
->nt_rdata
;
1357 md_initm(mdp
, mdp
->md_top
);
1359 mdp
= &ntp
->nt_rparam
;
1361 md_initm(mdp
, mdp
->md_top
);
1364 smb_iod_removerq(rqp
);
1366 if (error
&& !(ntp
->nt_flags
& SMBT2_MOREDATA
)) {
1367 if (rqp
->sr_flags
& SMBR_RESTART
)
1368 ntp
->nt_flags
|= SMBT2_RESTART
;
1369 md_done(&ntp
->nt_rparam
);
1370 md_done(&ntp
->nt_rdata
);
1377 smb_t2_request(struct smb_t2rq
*t2p
)
1379 int error
= EINVAL
, i
;
1383 * Don't send any new requests if force unmount is underway.
1384 * This check was moved into smb_rq_enqueue, called by
1385 * smb_t2_request_int()
1387 t2p
->t2_flags
&= ~SMBT2_RESTART
;
1388 error
= smb_t2_request_int(t2p
);
1391 if ((t2p
->t2_flags
& (SMBT2_RESTART
| SMBT2_NORESTART
)) !=
1394 if (++i
> SMBMAXRESTARTS
)
1396 mutex_enter(&(t2p
)->t2_lock
);
1397 if (t2p
->t2_share
) {
1398 (void) cv_reltimedwait(&t2p
->t2_cond
, &(t2p
)->t2_lock
,
1399 SEC_TO_TICK(SMB_RCNDELAY
), TR_CLOCK_TICK
);
1401 ddi_sleep(SMB_RCNDELAY
);
1403 mutex_exit(&(t2p
)->t2_lock
);
1410 smb_nt_request(struct smb_ntrq
*ntp
)
1412 int error
= EINVAL
, i
;
1416 * Don't send any new requests if force unmount is underway.
1417 * This check was moved into smb_rq_enqueue, called by
1418 * smb_nt_request_int()
1420 ntp
->nt_flags
&= ~SMBT2_RESTART
;
1421 error
= smb_nt_request_int(ntp
);
1424 if ((ntp
->nt_flags
& (SMBT2_RESTART
| SMBT2_NORESTART
)) !=
1427 if (++i
> SMBMAXRESTARTS
)
1429 mutex_enter(&(ntp
)->nt_lock
);
1430 if (ntp
->nt_share
) {
1431 (void) cv_reltimedwait(&ntp
->nt_cond
, &(ntp
)->nt_lock
,
1432 SEC_TO_TICK(SMB_RCNDELAY
), TR_CLOCK_TICK
);
1435 ddi_sleep(SMB_RCNDELAY
);
1437 mutex_exit(&(ntp
)->nt_lock
);