1 /* $NetBSD: smb_rq.c,v 1.32 2009/07/06 11:46:49 njoly Exp $ */
4 * Copyright (c) 2000-2001, Boris Popov
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * FreeBSD: src/sys/netsmb/smb_rq.c,v 1.4 2001/12/09 17:48:08 arr Exp
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: smb_rq.c,v 1.32 2009/07/06 11:46:49 njoly Exp $");
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
45 #include <sys/sysctl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
50 #include <netsmb/smb.h>
51 #include <netsmb/smb_conn.h>
52 #include <netsmb/smb_rq.h>
53 #include <netsmb/smb_subr.h>
54 #include <netsmb/smb_tran.h>
/*
 * Declare that the netsmb module depends on libmchain (the mbuf-chain
 * marshalling library used by mb_*/md_* below).
 * NOTE(review): this chunk is extraction-garbled — statements are split
 * across lines and the original file's line numbers are fused into the
 * text; restore from NetBSD smb_rq.c rev 1.32 before compiling.
 */
57 MODULE_DEPEND(netsmb
, libmchain
, 1, 1, 1);
/*
 * File-local forward declarations and the two memory pools from which
 * plain SMB requests (struct smb_rq) and TRANS/TRANS2 requests
 * (struct smb_t2rq) are allocated.
 * NOTE(review): several prototype lines are truncated by extraction
 * (trailing parameter lists missing) — do not trust these as complete
 * signatures.
 */
60 static int smb_rq_init(struct smb_rq
*, struct smb_connobj
*, u_char
,
62 static int smb_rq_getenv(struct smb_connobj
*layer
,
63 struct smb_vc
**vcpp
, struct smb_share
**sspp
);
64 static int smb_rq_new(struct smb_rq
*rqp
, u_char cmd
);
65 static int smb_t2_init(struct smb_t2rq
*, struct smb_connobj
*, u_short
,
67 static int smb_t2_reply(struct smb_t2rq
*t2p
);
69 static struct pool smbrq_pool
, smbt2rq_pool
;
/*
 * Initialize the smb_rq and smb_t2rq allocation pools.
 * NOTE(review): the enclosing function's signature is not visible in
 * this garbled chunk — presumably the module/subsystem init hook;
 * confirm against NetBSD smb_rq.c rev 1.32.
 */
75 pool_init(&smbrq_pool
, sizeof(struct smb_rq
), 0, 0, 0, "smbrqpl",
76 &pool_allocator_nointr
, IPL_NONE
);
77 pool_init(&smbt2rq_pool
, sizeof(struct smb_t2rq
), 0, 0, 0, "smbt2pl",
78 &pool_allocator_nointr
, IPL_NONE
);
/*
 * Tear down the two request pools created above.
 * NOTE(review): enclosing function signature missing from this garbled
 * chunk — presumably the matching fini/detach hook.
 */
85 pool_destroy(&smbrq_pool
);
86 pool_destroy(&smbt2rq_pool
);
/*
 * smb_rq_alloc: allocate a request from smbrq_pool, initialize it via
 * smb_rq_init(), mark it pool-allocated (SMBR_ALLOCED, so smb_rq_done()
 * returns it to the pool) and set up its timeout callout.
 * NOTE(review): garbled fragment — return type, local declarations,
 * error handling and the tail of the function are missing here.
 */
90 smb_rq_alloc(struct smb_connobj
*layer
, u_char cmd
, struct smb_cred
*scred
,
96 rqp
= pool_get(&smbrq_pool
, PR_WAITOK
);
97 error
= smb_rq_init(rqp
, layer
, cmd
, scred
);
98 rqp
->sr_flags
|= SMBR_ALLOCED
;
99 callout_init(&rqp
->sr_timo_ch
, 0);
/*
 * smb_rq_init: zero the request, create its lock, resolve the VC/share
 * from the connection-object layer, check EXEC access on the VC (and on
 * the share, when present), assign the next multiplex id, and derive the
 * request timeout in ticks from the transport's SMBTP_TIMEOUT parameter.
 * Finishes by building the SMB header through smb_rq_new().
 * NOTE(review): garbled fragment — error-check branches between the
 * numbered statements are missing from this extraction.
 */
109 smb_rq_init(struct smb_rq
*rqp
, struct smb_connobj
*layer
, u_char cmd
,
110 struct smb_cred
*scred
)
115 memset(rqp
, 0, sizeof(*rqp
));
116 smb_sl_init(&rqp
->sr_slock
, "srslock");
117 error
= smb_rq_getenv(layer
, &rqp
->sr_vc
, &rqp
->sr_share
);
120 error
= smb_vc_access(rqp
->sr_vc
, scred
, SMBM_EXEC
);
124 error
= smb_share_access(rqp
->sr_share
, scred
, SMBM_EXEC
);
128 rqp
->sr_cred
= scred
;
129 rqp
->sr_mid
= smb_vc_nextmid(rqp
->sr_vc
);
130 SMB_TRAN_GETPARAM(rqp
->sr_vc
, SMBTP_TIMEOUT
, &timo
);
/* timeout parameter is a timeval; convert seconds to kernel ticks */
131 rqp
->sr_timo
= timo
.tv_sec
* hz
;
132 return smb_rq_new(rqp
, cmd
);
/*
 * smb_rq_new: (re)build the 32-byte SMB message header in rqp->sr_rq:
 * signature ("\xffSMB"), command byte, zeroed status, header flags from
 * the VC, 12 reserved bytes, then TID/UID slots reserved for later
 * fill-in, a fixed PID of 1, and the request's multiplex id.
 * NOTE(review): garbled fragment — error handling and trailing
 * statements are missing from this extraction.
 */
136 smb_rq_new(struct smb_rq
*rqp
, u_char cmd
)
138 struct smb_vc
*vcp
= rqp
->sr_vc
;
139 struct mbchain
*mbp
= &rqp
->sr_rq
;
144 md_done(&rqp
->sr_rp
);
145 error
= mb_init(mbp
);
148 mb_put_mem(mbp
, SMB_SIGNATURE
, SMB_SIGLEN
, MB_MSYSTEM
);
149 mb_put_uint8(mbp
, cmd
);
150 mb_put_uint32le(mbp
, 0); /* DosError */
151 mb_put_uint8(mbp
, vcp
->vc_hflags
);
152 mb_put_uint16le(mbp
, vcp
->vc_hflags2
);
153 mb_put_mem(mbp
, NULL
, 12, MB_MZERO
);
/* TID not known yet: reserve its slot and remember the pointer */
154 rqp
->sr_rqtid
= mb_reserve(mbp
, sizeof(u_int16_t
));
156 * SMB packet PID is used for lock validation. Besides that,
157 * it's opaque for the server.
159 mb_put_uint16le(mbp
, 1 /*rqp->sr_cred->scr_p->p_pid & 0xffff*/);
/* UID likewise reserved for later fill-in */
160 rqp
->sr_rquid
= mb_reserve(mbp
, sizeof(u_int16_t
));
161 mb_put_uint16le(mbp
, rqp
->sr_mid
);
/*
 * smb_rq_done: release a request's resources — both mbuf chains, the
 * per-request lock, and (only if the request came from smbrq_pool, as
 * indicated by SMBR_ALLOCED) its callout and the structure itself.
 */
166 smb_rq_done(struct smb_rq
*rqp
)
168 mb_done(&rqp
->sr_rq
);
169 md_done(&rqp
->sr_rp
);
170 smb_sl_destroy(&rqp
->sr_slock
);
171 if (rqp
->sr_flags
& SMBR_ALLOCED
) {
172 callout_destroy(&rqp
->sr_timo_ch
);
173 pool_put(&smbrq_pool
, rqp
);
178 * Simple request-reply exchange
/*
 * smb_rq_simple: send the request and wait for the reply, retrying up
 * to SMB_MAXRCN times while the iod marks the request SMBR_RESTART
 * (reconnect happened) and SMBR_NORESTART is not set.
 * NOTE(review): garbled fragment — loop braces and error-exit lines
 * are missing from this extraction.
 */
181 smb_rq_simple(struct smb_rq
*rqp
)
185 for (i
= 0; i
< SMB_MAXRCN
; i
++) {
186 rqp
->sr_flags
&= ~SMBR_RESTART
;
187 rqp
->sr_state
= SMBRQ_NOTSENT
;
188 error
= smb_rq_enqueue(rqp
);
191 error
= smb_rq_reply(rqp
);
/* retry only when restart was requested and not forbidden */
194 if ((rqp
->sr_flags
& (SMBR_RESTART
| SMBR_NORESTART
)) != SMBR_RESTART
)
/*
 * smb_rq_enqueue: hand the request to the iod thread. Requests with no
 * share, or issued under the iod's own credential, bypass the share
 * checks. Otherwise: wait out an in-progress reconnection
 * (SMBS_RECONNECTING), and if the share is invalid or not connected,
 * ask the iod to (re)do the tree connect synchronously before queueing.
 * NOTE(review): garbled fragment — surrounding loop/branch structure
 * and error returns are missing from this extraction.
 */
201 smb_rq_enqueue(struct smb_rq
*rqp
)
203 struct smb_share
*ssp
= rqp
->sr_share
;
206 if (ssp
== NULL
|| rqp
->sr_cred
== &rqp
->sr_vc
->vc_iod
->iod_scred
) {
207 return smb_iod_addrq(rqp
);
211 if (ssp
->ss_flags
& SMBS_RECONNECTING
) {
/* sleep until the reconnect generation changes, or a signal/timeout */
213 error
= mtsleep(&ssp
->ss_vcgenid
,
214 PWAIT
| PCATCH
| PNORELOCK
,
215 "smbtrcn", hz
, SMBS_ST_LOCKPTR(ssp
));
216 if (error
&& error
!= EWOULDBLOCK
)
220 if (smb_share_valid(ssp
) || (ssp
->ss_flags
& SMBS_CONNECTED
) == 0) {
224 error
= smb_iod_request(rqp
->sr_vc
->vc_iod
,
225 SMBIOD_EV_TREECONNECT
| SMBIOD_EV_SYNC
, ssp
);
229 error
= smb_iod_addrq(rqp
);
/*
 * smb_rq_wstart: begin the word-count section of the request — reserve
 * one byte for the word count and reset the chain's byte counter so
 * smb_rq_wend() can compute it.
 */
237 smb_rq_wstart(struct smb_rq
*rqp
)
239 rqp
->sr_wcount
= mb_reserve(&rqp
->sr_rq
, sizeof(u_int8_t
));
240 rqp
->sr_rq
.mb_count
= 0;
/*
 * smb_rq_wend: close the word-count section — the bytes added since
 * smb_rq_wstart() must form whole 16-bit words; store their count (in
 * words) into the byte reserved earlier. Panics on misuse.
 */
244 smb_rq_wend(struct smb_rq
*rqp
)
247 if (rqp
->sr_wcount
== NULL
)
248 panic("smb_rq_wend: no wcount");
249 if (rqp
->sr_rq
.mb_count
& 1)
250 panic("smb_rq_wend: odd word count");
252 rqp
->sr_wcount
[0] = rqp
->sr_rq
.mb_count
/ 2;
/*
 * smb_rq_bstart: begin the byte-count section — reserve a 16-bit slot
 * for the count and reset the chain's byte counter.
 */
256 smb_rq_bstart(struct smb_rq
*rqp
)
258 rqp
->sr_bcount
= mb_reserve(&rqp
->sr_rq
, sizeof(u_int16_t
));
259 rqp
->sr_rq
.mb_count
= 0;
/*
 * smb_rq_bend: close the byte-count section — write the number of bytes
 * accumulated since smb_rq_bstart() (little-endian) into the reserved
 * slot. Panics if the section was never started or the count exceeds
 * the 16-bit field.
 */
263 smb_rq_bend(struct smb_rq
*rqp
)
265 u_int16_t bcnt
= rqp
->sr_rq
.mb_count
;
268 if (rqp
->sr_bcount
== NULL
)
269 panic("smb_rq_bend: no bcount");
270 if (rqp
->sr_rq
.mb_count
> 0xffff)
271 panic("smb_rq_bend: byte count too large (%d)", bcnt
);
273 SMBRQ_PUTLE16(rqp
->sr_bcount
, bcnt
);
/*
 * smb_rq_intr: for an interruptible request (SMBR_INTR) report whether
 * the issuing lwp has a pending signal via smb_proc_intr().
 * NOTE(review): garbled fragment — the non-SMBR_INTR return path is
 * missing from this extraction.
 */
277 smb_rq_intr(struct smb_rq
*rqp
)
279 struct lwp
*l
= rqp
->sr_cred
->scr_l
;
281 if (rqp
->sr_flags
& SMBR_INTR
)
283 return smb_proc_intr(l
);
/*
 * smb_rq_getrequest: accessor handing back the request mbchain.
 * NOTE(review): body missing from this garbled extraction — presumably
 * assigns &rqp->sr_rq through *mbpp; confirm against rev 1.32.
 */
287 smb_rq_getrequest(struct smb_rq
*rqp
, struct mbchain
**mbpp
)
/*
 * smb_rq_getreply: accessor handing back the reply mdchain.
 * NOTE(review): body missing from this garbled extraction — presumably
 * assigns &rqp->sr_rp through *mbpp; confirm against rev 1.32.
 */
294 smb_rq_getreply(struct smb_rq
*rqp
, struct mdchain
**mbpp
)
/*
 * smb_rq_getenv: walk up from a connection object to find its VC and
 * (when the layer is a share) the share itself, storing them through
 * vcpp/sspp. Dispatches on layer->co_level; for a share it recurses on
 * the parent to resolve the VC. Complains about "zombie" objects whose
 * parent link is gone and about unknown levels.
 * NOTE(review): garbled fragment — case labels, assignments to the out
 * parameters and the return are missing from this extraction.
 */
301 smb_rq_getenv(struct smb_connobj
*layer
,
302 struct smb_vc
**vcpp
, struct smb_share
**sspp
)
304 struct smb_vc
*vcp
= NULL
;
305 struct smb_share
*ssp
= NULL
;
306 struct smb_connobj
*cp
;
309 switch (layer
->co_level
) {
312 if (layer
->co_parent
== NULL
) {
313 SMBERROR(("zombie VC %s\n", vcp
->vc_srvname
));
320 cp
= layer
->co_parent
;
322 SMBERROR(("zombie share %s\n", ssp
->ss_name
));
326 error
= smb_rq_getenv(cp
, &vcp
, NULL
);
331 SMBERROR(("invalid layer %d passed\n", layer
->co_level
));
342 * Wait for reply on the request
/*
 * smb_rq_reply: wait (via the iod) for the server's response, then parse
 * the SMB reply header out of rqp->sr_rp: skip the signature, read the
 * status field — interpreted either as 32-bit NT status (when the VC
 * negotiated SMB_FLAGS2_ERR_STATUS) or as DOS error class + code mapped
 * through smb_maperror() — then the flags, three reserved dwords, and
 * the TID/PID/UID/MID echoed by the server.
 * NOTE(review): garbled fragment — error-exit branches and the final
 * return are missing from this extraction.
 */
345 smb_rq_reply(struct smb_rq
*rqp
)
347 struct mdchain
*mdp
= &rqp
->sr_rp
;
352 error
= smb_iod_waitrq(rqp
);
355 error
= md_get_uint32(mdp
, NULL
);
358 (void) md_get_uint8(mdp
, NULL
);
359 if (rqp
->sr_vc
->vc_hflags2
& SMB_FLAGS2_ERR_STATUS
) {
360 (void) md_get_uint32(mdp
, NULL
); /* XXX ignored? */
362 (void) md_get_uint8(mdp
, &errclass
);
363 (void) md_get_uint8(mdp
, NULL
);
364 error
= md_get_uint16le(mdp
, &serror
);
366 error
= smb_maperror(errclass
, serror
);
368 (void) md_get_uint8(mdp
, NULL
); /* rpflags */
369 (void) md_get_uint16(mdp
, NULL
); /* rpflags2 */
371 (void) md_get_uint32(mdp
, NULL
);
372 (void) md_get_uint32(mdp
, NULL
);
373 (void) md_get_uint32(mdp
, NULL
);
375 (void) md_get_uint16le(mdp
, &rqp
->sr_rptid
);
376 (void) md_get_uint16le(mdp
, &rqp
->sr_rppid
);
377 (void) md_get_uint16le(mdp
, &rqp
->sr_rpuid
);
378 (void) md_get_uint16le(mdp
, &rqp
->sr_rpmid
);
380 SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
381 rqp
->sr_rpmid
, rqp
->sr_rppid
, rqp
->sr_rpuid
, rqp
->sr_rptid
,
/*
 * smb_rq_setcallback: register a function (and its opaque argument) to
 * be invoked when the reply for this request is received.
 */
387 smb_rq_setcallback(struct smb_rq
*rqp
, void (*recvcallb
)(void *), void *arg
)
390 rqp
->sr_recvcallback
= recvcallb
;
391 rqp
->sr_recvarg
= arg
;
/* Round a up to the next multiple of 4 (TRANS2 sections are 4-aligned). */
395 #define ALIGN4(a) (((a) + 3) & ~3)
398 * TRANS2 request implementation
/*
 * smb_t2_alloc: allocate a TRANS2 request from smbt2rq_pool, initialize
 * it with the single setup word, and mark it SMBT2_ALLOCED so that
 * smb_t2_done() returns it to the pool.
 * NOTE(review): garbled fragment — error handling and the assignment
 * through *t2pp are missing from this extraction.
 */
401 smb_t2_alloc(struct smb_connobj
*layer
, u_short setup
, struct smb_cred
*scred
,
402 struct smb_t2rq
**t2pp
)
404 struct smb_t2rq
*t2p
;
407 t2p
= pool_get(&smbt2rq_pool
, PR_WAITOK
);
408 error
= smb_t2_init(t2p
, layer
, setup
, scred
);
409 t2p
->t2_flags
|= SMBT2_ALLOCED
;
/*
 * smb_t2_init: zero the TRANS2 request and fill in its defaults — one
 * setup word (stored in the inline t2_setup array), an invalid fid
 * (0xffff), the caller's credential — then resolve the owning VC from
 * the source connection object.
 * NOTE(review): garbled fragment — the error check / return after
 * smb_rq_getenv() is missing from this extraction.
 */
419 smb_t2_init(struct smb_t2rq
*t2p
, struct smb_connobj
*source
, u_short setup
,
420 struct smb_cred
*scred
)
424 memset(t2p
, 0, sizeof(*t2p
));
425 t2p
->t2_source
= source
;
426 t2p
->t2_setupcount
= 1;
427 t2p
->t2_setupdata
= t2p
->t2_setup
;
428 t2p
->t2_setup
[0] = setup
;
429 t2p
->t2_fid
= 0xffff;
430 t2p
->t2_cred
= scred
;
431 error
= smb_rq_getenv(source
, &t2p
->t2_vc
, NULL
);
/*
 * smb_t2_done: free a TRANS2 request — both transmit mbchains, both
 * receive mdchains, and the structure itself when it was taken from
 * smbt2rq_pool (SMBT2_ALLOCED).
 */
438 smb_t2_done(struct smb_t2rq
*t2p
)
440 mb_done(&t2p
->t2_tparam
);
441 mb_done(&t2p
->t2_tdata
);
442 md_done(&t2p
->t2_rparam
);
443 md_done(&t2p
->t2_rdata
);
444 if (t2p
->t2_flags
& SMBT2_ALLOCED
)
445 pool_put(&smbt2rq_pool
, t2p
);
/*
 * smb_t2_placedata: carve `count` bytes starting at `offset` out of a
 * reply mbuf chain (m_split), trim any trailing excess from the last
 * mbuf, and append the piece to the destination mdchain — starting a
 * new chain if it is still empty.
 * NOTE(review): garbled fragment — parts of the signature, the length
 * accumulation inside the for loop, and error handling are missing
 * from this extraction.
 */
449 smb_t2_placedata(struct mbuf
*mtop
, u_int16_t offset
, u_int16_t count
,
455 m0
= m_split(mtop
, offset
, M_WAIT
);
458 for(len
= 0, m
= m0
; m
->m_next
; m
= m
->m_next
)
461 m
->m_len
-= len
- count
;
462 if (mdp
->md_top
== NULL
) {
465 m_cat(mdp
->md_top
, m0
);
/*
 * smb_t2_reply: collect the (possibly multi-packet) response to a
 * TRANS/TRANS2 exchange. Skips the interim response sent before all
 * request fragments were transmitted (SMBT2_ALLSENT clear), then for
 * each response record parses total/this-fragment parameter and data
 * counts with their offsets and displacements, refuses out-of-order
 * fragments, and splices each fragment's parameter and data bytes into
 * t2p's receive chains via smb_t2_placedata(). Loops, advancing
 * md_next_record(), until the accumulated parameter and data byte
 * counts reach the totals, then sets SMBT2_ALLRECV.
 * NOTE(review): garbled fragment — loop braces, several local
 * declarations (mdp, wc, etc.) and error-exit lines are missing from
 * this extraction.
 */
470 smb_t2_reply(struct smb_t2rq
*t2p
)
473 struct smb_rq
*rqp
= t2p
->t2_rq
;
474 int error
, totpgot
, totdgot
;
475 u_int16_t totpcount
, totdcount
, pcount
, poff
, doff
, pdisp
, ddisp
;
476 u_int16_t tmp
, bc
, dcount
;
479 error
= smb_rq_reply(rqp
);
482 if ((t2p
->t2_flags
& SMBT2_ALLSENT
) == 0) {
484 * this is an interim response, ignore it.
487 md_next_record(&rqp
->sr_rp
);
492 * Now we have to get all subseqent responses. The CIFS specification
493 * says that they can be misordered which is weird.
496 totpgot
= totdgot
= 0;
497 totpcount
= totdcount
= 0xffff;
500 m_dumpm(mdp
->md_top
);
501 if ((error
= md_get_uint8(mdp
, &wc
)) != 0)
507 if ((error
= md_get_uint16le(mdp
, &tmp
)) != 0)
511 md_get_uint16le(mdp
, &tmp
);
514 if ((error
= md_get_uint16le(mdp
, &tmp
)) != 0 || /* reserved */
515 (error
= md_get_uint16le(mdp
, &pcount
)) != 0 ||
516 (error
= md_get_uint16le(mdp
, &poff
)) != 0 ||
517 (error
= md_get_uint16le(mdp
, &pdisp
)) != 0)
519 if (pcount
!= 0 && pdisp
!= totpgot
) {
520 SMBERROR(("Can't handle misordered parameters %d:%d\n",
525 if ((error
= md_get_uint16le(mdp
, &dcount
)) != 0 ||
526 (error
= md_get_uint16le(mdp
, &doff
)) != 0 ||
527 (error
= md_get_uint16le(mdp
, &ddisp
)) != 0)
529 if (dcount
!= 0 && ddisp
!= totdgot
) {
530 SMBERROR(("Can't handle misordered data\n"));
534 md_get_uint8(mdp
, &wc
);
535 md_get_uint8(mdp
, NULL
);
538 md_get_uint16(mdp
, NULL
);
539 if ((error
= md_get_uint16le(mdp
, &bc
)) != 0)
541 /* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
543 error
= smb_t2_placedata(mdp
->md_top
, doff
, dcount
,
549 error
= smb_t2_placedata(mdp
->md_top
, poff
, pcount
,
556 if (totpgot
>= totpcount
&& totdgot
>= totdcount
) {
558 t2p
->t2_flags
|= SMBT2_ALLRECV
;
562 * We're done with this reply, look for the next one.
565 md_next_record(&rqp
->sr_rp
);
567 error
= smb_rq_reply(rqp
);
575 * Perform a full round of TRANS2 request
/*
 * smb_t2_request_int: perform one full TRANS/TRANS2 round trip. Fixes
 * up the caller-built parameter and data mbchains (each limited to
 * 0xffff bytes), allocates an SMB_COM_TRANSACTION (named) or
 * SMB_COM_TRANSACTION2 (unnamed) request, and packs as much of the
 * parameter/data bytes as fit in the VC's transmit maximum (vc_txmax),
 * 4-byte-aligning each section with zero padding. Remaining bytes are
 * sent in SECONDARY requests from the while loop; once everything is
 * out (SMBT2_ALLSENT) the reply fragments gathered by smb_t2_reply()
 * are fixed up into t2_rparam/t2_rdata. On SMBR_RESTART the SMBT2
 * restart flag is propagated and the receive chains are dropped.
 * NOTE(review): heavily garbled fragment — braces, else-branches,
 * smb_rq_wstart/wend and bstart calls, error-exit paths and the final
 * return are missing from this extraction; restore from rev 1.32.
 */
578 smb_t2_request_int(struct smb_t2rq
*t2p
)
580 struct smb_vc
*vcp
= t2p
->t2_vc
;
581 struct smb_cred
*scred
= t2p
->t2_cred
;
583 struct mdchain
*mdp
, mbparam
, mbdata
;
586 int totpcount
, leftpcount
, totdcount
, leftdcount
, len
, txmax
, i
;
587 int error
, doff
, poff
, txdcount
, txpcount
, nmlen
;
589 m
= t2p
->t2_tparam
.mb_top
;
591 md_initm(&mbparam
, m
); /* do not free it! */
592 totpcount
= m_fixhdr(m
);
593 if (totpcount
> 0xffff) /* maxvalue for u_short */
597 m
= t2p
->t2_tdata
.mb_top
;
599 md_initm(&mbdata
, m
); /* do not free it! */
600 totdcount
= m_fixhdr(m
);
601 if (totdcount
> 0xffff)
605 leftdcount
= totdcount
;
606 leftpcount
= totpcount
;
607 txmax
= vcp
->vc_txmax
;
608 error
= smb_rq_alloc(t2p
->t2_source
, t2p
->t_name
?
609 SMB_COM_TRANSACTION
: SMB_COM_TRANSACTION2
, scred
, &rqp
);
612 rqp
->sr_flags
|= SMBR_MULTIPACKET
;
616 mb_put_uint16le(mbp
, totpcount
);
617 mb_put_uint16le(mbp
, totdcount
);
618 mb_put_uint16le(mbp
, t2p
->t2_maxpcount
);
619 mb_put_uint16le(mbp
, t2p
->t2_maxdcount
);
620 mb_put_uint8(mbp
, t2p
->t2_maxscount
);
621 mb_put_uint8(mbp
, 0); /* reserved */
622 mb_put_uint16le(mbp
, 0); /* flags */
623 mb_put_uint32le(mbp
, 0); /* Timeout */
624 mb_put_uint16le(mbp
, 0); /* reserved 2 */
625 len
= mb_fixhdr(mbp
);
627 * now we have known packet size as
628 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
629 * and need to decide which parts should go into the first request
631 nmlen
= t2p
->t_name
? strlen(t2p
->t_name
) : 0;
632 len
= ALIGN4(len
+ 5 * 2 + t2p
->t2_setupcount
* 2 + 2 + nmlen
+ 1);
633 if (len
+ leftpcount
> txmax
) {
634 txpcount
= min(leftpcount
, txmax
- len
);
639 txpcount
= leftpcount
;
640 poff
= txpcount
? len
: 0;
641 len
= ALIGN4(len
+ txpcount
);
642 txdcount
= min(leftdcount
, txmax
- len
);
643 doff
= txdcount
? len
: 0;
645 leftpcount
-= txpcount
;
646 leftdcount
-= txdcount
;
647 mb_put_uint16le(mbp
, txpcount
);
648 mb_put_uint16le(mbp
, poff
);
649 mb_put_uint16le(mbp
, txdcount
);
650 mb_put_uint16le(mbp
, doff
);
651 mb_put_uint8(mbp
, t2p
->t2_setupcount
);
652 mb_put_uint8(mbp
, 0);
653 for (i
= 0; i
< t2p
->t2_setupcount
; i
++)
654 mb_put_uint16le(mbp
, t2p
->t2_setupdata
[i
]);
659 mb_put_mem(mbp
, t2p
->t_name
, nmlen
, MB_MSYSTEM
);
660 mb_put_uint8(mbp
, 0); /* terminating zero */
661 len
= mb_fixhdr(mbp
);
663 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
664 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
665 SMBSDEBUG(("%d:%d:%d\n", error
, txpcount
, txmax
));
670 len
= mb_fixhdr(mbp
);
672 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
673 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
678 smb_rq_bend(rqp
); /* incredible, but thats it... */
679 error
= smb_rq_enqueue(rqp
);
682 if (leftpcount
== 0 && leftdcount
== 0)
683 t2p
->t2_flags
|= SMBT2_ALLSENT
;
684 error
= smb_t2_reply(t2p
);
687 while (leftpcount
|| leftdcount
) {
688 error
= smb_rq_new(rqp
, t2p
->t_name
?
689 SMB_COM_TRANSACTION_SECONDARY
: SMB_COM_TRANSACTION2_SECONDARY
);
694 mb_put_uint16le(mbp
, totpcount
);
695 mb_put_uint16le(mbp
, totdcount
);
696 len
= mb_fixhdr(mbp
);
698 * now we have known packet size as
699 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
700 * and need to decide which parts should go into request
702 len
= ALIGN4(len
+ 6 * 2 + 2);
703 if (t2p
->t_name
== NULL
)
705 if (len
+ leftpcount
> txmax
) {
706 txpcount
= min(leftpcount
, txmax
- len
);
711 txpcount
= leftpcount
;
712 poff
= txpcount
? len
: 0;
713 len
= ALIGN4(len
+ txpcount
);
714 txdcount
= min(leftdcount
, txmax
- len
);
715 doff
= txdcount
? len
: 0;
717 mb_put_uint16le(mbp
, txpcount
);
718 mb_put_uint16le(mbp
, poff
);
719 mb_put_uint16le(mbp
, totpcount
- leftpcount
);
720 mb_put_uint16le(mbp
, txdcount
);
721 mb_put_uint16le(mbp
, doff
);
722 mb_put_uint16le(mbp
, totdcount
- leftdcount
);
723 leftpcount
-= txpcount
;
724 leftdcount
-= txdcount
;
725 if (t2p
->t_name
== NULL
)
726 mb_put_uint16le(mbp
, t2p
->t2_fid
);
729 mb_put_uint8(mbp
, 0); /* name */
730 len
= mb_fixhdr(mbp
);
732 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
733 error
= md_get_mbuf(&mbparam
, txpcount
, &m
);
738 len
= mb_fixhdr(mbp
);
740 mb_put_mem(mbp
, NULL
, ALIGN4(len
) - len
, MB_MZERO
);
741 error
= md_get_mbuf(&mbdata
, txdcount
, &m
);
747 rqp
->sr_state
= SMBRQ_NOTSENT
;
748 error
= smb_iod_request(vcp
->vc_iod
, SMBIOD_EV_NEWRQ
, NULL
);
751 } /* while left params or data */
752 t2p
->t2_flags
|= SMBT2_ALLSENT
;
753 mdp
= &t2p
->t2_rdata
;
755 m_fixhdr(mdp
->md_top
);
756 md_initm(mdp
, mdp
->md_top
);
758 mdp
= &t2p
->t2_rparam
;
760 m_fixhdr(mdp
->md_top
);
761 md_initm(mdp
, mdp
->md_top
);
764 smb_iod_removerq(rqp
);
768 if (rqp
->sr_flags
& SMBR_RESTART
)
769 t2p
->t2_flags
|= SMBT2_RESTART
;
770 md_done(&t2p
->t2_rparam
);
771 md_done(&t2p
->t2_rdata
);
777 smb_t2_request(struct smb_t2rq
*t2p
)
779 int error
= EINVAL
, i
;
781 for (i
= 0; i
< SMB_MAXRCN
; i
++) {
782 t2p
->t2_flags
&= ~SMBT2_RESTART
;
783 error
= smb_t2_request_int(t2p
);
786 if ((t2p
->t2_flags
& (SMBT2_RESTART
| SMBT2_NORESTART
)) != SMBT2_RESTART
)