/*
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
extern mempool_t *cifs_mid_poolp;
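
/*
 * The default mid callback: synchronous callers sleep in wait_for_response()
 * below, so completing the mid just means waking that task back up.
 */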
static void
wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}
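
/*
 * Allocate a mid_q_entry from the mempool and initialize it from the request
 * header. The caller is responsible for linking it into pending_mid_q (see
 * allocate_mid() and cifs_call_async() below).
 */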
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cERROR(1, "Null TCP session in AllocMidQEntry");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        if (temp == NULL)
                return temp;

        memset(temp, 0, sizeof(struct mid_q_entry));
        temp->mid = smb_buffer->Mid;    /* always LE */
        temp->pid = current->pid;
        temp->command = smb_buffer->Command;
        cFYI(1, "For smb_command %d", temp->command);
        /* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->midState = MID_REQUEST_ALLOCATED;
        return temp;
}
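
/*
 * Release the response buffer attached to a mid and return the entry to the
 * mempool. With CONFIG_CIFS_STATS2, also log responses that took longer than
 * a second to complete.
 */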
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        unsigned long now;
#endif
        midEntry->midState = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->largeBuf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /* commands taking longer than one second are indications that
           something is wrong, unless it is quite a slow link or server */
        if ((now - midEntry->when_alloc) > HZ) {
                if ((cifsFYI & CIFS_TIMER) &&
                    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
                        printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
                               midEntry->command, midEntry->mid);
                        printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        mempool_free(midEntry, cifs_mid_poolp);
}
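
/*
 * Unlink the mid from the server's pending_mid_q under GlobalMid_Lock,
 * then free it.
 */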
static void
delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}
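
/*
 * Write an array of kvecs to the server socket, retrying short and blocked
 * sends until the whole frame is out or we give up and flag the session for
 * reconnect.
 */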
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
        int rc = 0;
        int i = 0;
        struct msghdr smb_msg;
        struct smb_hdr *smb_buffer = iov[0].iov_base;
        unsigned int len = iov[0].iov_len;
        unsigned int total_len;
        int first_vec = 0;
        unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
        struct socket *ssocket = server->ssocket;

        if (ssocket == NULL)
                return -ENOTSOCK; /* BB eventually add reconnect code here */

        smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg.msg_namelen = sizeof(struct sockaddr);
        smb_msg.msg_control = NULL;
        smb_msg.msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
                smb_msg.msg_flags = MSG_NOSIGNAL;

        total_len = 0;
        for (i = 0; i < n_vec; i++)
                total_len += iov[i].iov_len;

        cFYI(1, "Sending smb: total_len %d", total_len);
        dump_smb(smb_buffer, len);

        i = 0;
        while (total_len) {
                rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
                                    n_vec - first_vec, total_len);
                if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
                        i++;
                        /* if blocking send we try 3 times, since each can block
                           for 5 seconds. For nonblocking we have to try more
                           but wait increasing amounts of time allowing time for
                           socket to clear. The overall time we wait in either
                           case to send on the socket is about 15 seconds.
                           Similarly we wait for 15 seconds for
                           a response from the server in SendReceive[2]
                           for the server to send a response back for
                           most types of requests (except SMB Write
                           past end of file which can be slow, and
                           blocking lock operations). NFS waits slightly longer
                           than CIFS, but this can make it take longer for
                           nonresponsive servers to be detected and 15 seconds
                           is more than enough time for modern networks to
                           send a packet. In most cases if we fail to send
                           after the retries we will kill the socket and
                           reconnect which may clear the network problem.
                        */
                        if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
                                cERROR(1, "sends on sock %p stuck for 15 seconds",
                                       ssocket);
                                rc = -EAGAIN;
                                break;
                        }
                        msleep(1 << i);
                        continue;
                }
                if (rc < 0)
                        break;

                if (rc == total_len) {
                        total_len = 0;
                        break;
                } else if (rc > total_len) {
                        cERROR(1, "sent %d requested %d", rc, total_len);
                        break;
                }
                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cERROR(1, "tcp sent no data");
                        msleep(500);
                        continue;
                }
                total_len -= rc;
                /* the line below resets i */
                for (i = first_vec; i < n_vec; i++) {
                        if (iov[i].iov_len) {
                                if (rc > iov[i].iov_len) {
                                        rc -= iov[i].iov_len;
                                        iov[i].iov_len = 0;
                                } else {
                                        iov[i].iov_base += rc;
                                        iov[i].iov_len -= rc;
                                        first_vec = i;
                                        break;
                                }
                        }
                }
                i = 0; /* in case we get ENOSPC on the next send */
        }

        if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
                cFYI(1, "partial send (%d remaining), terminating session",
                        total_len);
                /* If we have only sent part of an SMB then the next SMB
                   could be taken as the remainder of this one. We need
                   to kill the socket so the server throws away the partial
                   SMB */
                server->tcpStatus = CifsNeedReconnect;
        }

        if (rc < 0 && rc != -EINTR)
                cERROR(1, "Error %d sending data on socket to server", rc);
        else
                rc = 0;

        /* Don't want to modify the buffer as a
           side effect of this call. */
        smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

        return rc;
}
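
/*
 * Convenience wrapper around smb_sendv() for a request that sits in a single
 * contiguous buffer (4-byte RFC1001 length field plus the SMB itself).
 */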
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov;

        iov.iov_base = smb_buffer;
        iov.iov_len = smb_buf_length + 4;

        return smb_sendv(server, &iov, 1);
}
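
/*
 * Throttle the number of simultaneous requests on the wire: async ops such as
 * oplock breaks go straight through, everything else sleeps on request_q
 * until inFlight drops below cifs_max_pending.
 */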
static int wait_for_free_request(struct TCP_Server_Info *server,
                                 const int long_op)
{
        if (long_op == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                atomic_inc(&server->inFlight);
                return 0;
        }

        spin_lock(&GlobalMid_Lock);
        while (1) {
                if (atomic_read(&server->inFlight) >= cifs_max_pending) {
                        spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
                        atomic_inc(&server->num_waiters);
#endif
                        wait_event(server->request_q,
                                   atomic_read(&server->inFlight)
                                     < cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
                        atomic_dec(&server->num_waiters);
#endif
                        spin_lock(&GlobalMid_Lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&GlobalMid_Lock);
                                return -ENOENT;
                        }

                        /* can not count locking commands against total
                           as they are allowed to block on server */

                        /* update # of requests on the wire to server */
                        if (long_op != CIFS_BLOCKING_OP)
                                atomic_inc(&server->inFlight);
                        spin_unlock(&GlobalMid_Lock);
                        break;
                }
        }
        return 0;
}
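
/*
 * Validate session state, allocate a mid for this request and queue it on
 * the server's pending_mid_q.
 */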
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cFYI(1, "tcp session dead - return to caller to retry");
                return -EAGAIN;
        }

        if (ses->status != CifsGood) {
                /* check if SMB session is bad because we are setting it up */
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }
        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}
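
/*
 * Sleep (killably) until the demultiplex thread moves this mid out of
 * MID_REQUEST_SUBMITTED state.
 */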
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_killable(server->response_q,
                                    midQ->midState != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
                unsigned int nvec, mid_callback_t *callback, void *cbdata,
                bool ignore_pend)
{
        int rc;
        struct mid_q_entry *mid;
        struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

        rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
        if (rc)
                return rc;

        /* enable signing if server requires it */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mutex_lock(&server->srv_mutex);
        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL) {
                mutex_unlock(&server->srv_mutex);
                atomic_dec(&server->inFlight);
                wake_up(&server->request_q);
                return -ENOMEM;
        }

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out_err;
        }

        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&server->inSend);
#endif
        rc = smb_sendv(server, iov, nvec);
#ifdef CONFIG_CIFS_STATS2
        atomic_dec(&server->inSend);
        mid->when_sent = jiffies;
#endif
        mutex_unlock(&server->srv_mutex);
        if (rc)
                goto out_err;

        return rc;
out_err:
        delete_mid(mid);
        atomic_dec(&server->inFlight);
        wake_up(&server->request_q);
        return rc;
}
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 struct smb_hdr *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        int resp_buf_type;

        iov[0].iov_base = (char *)in_buf;
        iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
        cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

        return rc;
}
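
/*
 * Convert the final state of a synchronous mid into a return code. A mid
 * that did not finish in MID_RESPONSE_RECEIVED state is torn down here; a
 * successful one is left for the caller to consume and delete.
 */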
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
                mid->mid, mid->midState);

        spin_lock(&GlobalMid_Lock);
        switch (mid->midState) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        default:
                list_del_init(&mid->qhead);
                cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
                        mid->mid, mid->midState);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}
/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
               struct mid_q_entry *mid)
{
        int rc = 0;

        /* -4 for RFC1001 length and +2 for BCC field */
        in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
        in_buf->Command = SMB_COM_NT_CANCEL;
        in_buf->WordCount = 0;
        put_bcc(0, in_buf);

        mutex_lock(&server->srv_mutex);
        rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }
        rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        mutex_unlock(&server->srv_mutex);

        cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
                in_buf->Mid, rc);

        return rc;
}
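
/*
 * Sanity check a received response: dump the first bytes for debugging,
 * verify the signature when signing is in use, and map the SMB status code
 * to a POSIX error.
 */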
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        dump_smb(mid->resp_buf,
                 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));

        /* convert the length into a more usable form */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                /* FIXME: add code to kill session */
                if (cifs_verify_signature(mid->resp_buf, server,
                                          mid->sequence_number + 1) != 0)
                        cERROR(1, "Unexpected SMB signature");
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}
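
/*
 * Synchronous send/receive for requests built as a kvec array over a small
 * (cifs_small_buf) header. On success iov[0] is rewritten to point at the
 * response buffer and *pRespBufType records which pool it came from.
 */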
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
             const int flags)
{
        int rc = 0;
        int long_op;
        struct mid_q_entry *midQ;
        struct smb_hdr *in_buf = iov[0].iov_base;

        long_op = flags & CIFS_TIMEOUT_MASK;

        *pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_small_buf_release(in_buf);
                cERROR(1, "Null session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting) {
                cifs_small_buf_release(in_buf);
                return -ENOENT;
        }

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        rc = wait_for_free_request(ses->server, long_op);
        if (rc) {
                cifs_small_buf_release(in_buf);
                return rc;
        }

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(in_buf);
                /* Update # of requests on wire to server */
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(in_buf);
                goto out;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&ses->server->inSend);
#endif
        rc = smb_sendv(ses->server, iov, n_vec);
#ifdef CONFIG_CIFS_STATS2
        atomic_dec(&ses->server->inSend);
        midQ->when_sent = jiffies;
#endif

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_small_buf_release(in_buf);
                goto out;
        }

        if (long_op == CIFS_ASYNC_OP) {
                cifs_small_buf_release(in_buf);
                goto out;
        }

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_nt_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->midState == MID_REQUEST_SUBMITTED) {
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        cifs_small_buf_release(in_buf);
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        cifs_small_buf_release(in_buf);

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cFYI(1, "Bad MID state?");
                goto out;
        }

        iov[0].iov_base = (char *)midQ->resp_buf;
        iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
        if (midQ->largeBuf)
                *pRespBufType = CIFS_LARGE_BUFFER;
        else
                *pRespBufType = CIFS_SMALL_BUFFER;

        rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

        /* mark it so buf will not be freed by delete_mid */
        if ((flags & CIFS_NO_RESP) == 0)
                midQ->resp_buf = NULL;
out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
        wake_up(&ses->server->request_q);

        return rc;
}
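
/*
 * Classic synchronous send/receive: the caller supplies both the request and
 * a buffer big enough for the response, which is copied out along with its
 * length in *pbytes_returned.
 */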
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int long_op)
{
        int rc = 0;
        struct mid_q_entry *midQ;

        if (ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                        be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, long_op);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&ses->server->inSend);
#endif
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
        atomic_dec(&ses->server->inSend);
        midQ->when_sent = jiffies;
#endif
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if (long_op == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_nt_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->midState == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
        wake_up(&ses->server->request_q);

        return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = GetNextMid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}
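
/*
 * Like SendReceive(), but for blocking lock requests: the wait for the
 * response is interruptible, and a signal triggers an NT_CANCEL or
 * LOCKINGX_CANCEL_LOCK so the server releases the blocked request.
 */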
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;

        if (tcon == NULL || tcon->ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                        be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&ses->server->inSend);
#endif
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
        atomic_dec(&ses->server->inSend);
        midQ->when_sent = jiffies;
#endif
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
            (midQ->midState == MID_REQUEST_SUBMITTED) &&
            ((ses->server->tcpStatus == CifsGood) ||
             (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_nt_cancel(ses->server, in_buf, midQ);
                        if (rc) {
                                delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_nt_cancel(ses->server, in_buf, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->midState == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}