/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;
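
/*
 * Default callback for a synchronous mid: wake up the task stored in
 * callback_data (the task that allocated the mid and is waiting for the
 * response).
 */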
static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
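
/*
 * Allocate and initialize a mid_q_entry for the given request header.
 * The entry defaults to a synchronous call, so the callback just wakes
 * the allocating task.  Returns NULL if server is NULL or the mempool
 * allocation fails.
 */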
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = smb_buffer->Mid;	/* always LE */
		temp->pid = current->pid;
		temp->command = smb_buffer->Command;
		cFYI(1, "For smb_command %d", temp->command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->midState = MID_REQUEST_ALLOCATED;
	return temp;
}
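
/*
 * Tear down a mid_q_entry: release its response buffer (large or small)
 * and return the entry to the mid mempool.  With CONFIG_CIFS_STATS2,
 * also log commands that took more than a second to complete.
 */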
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->midState = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->largeBuf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
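
/*
 * Unlink the mid from the server's pending queue under GlobalMid_Lock,
 * then free it.
 */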
static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
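
/*
 * Write the iovec array out to the server socket, retrying with an
 * increasing backoff on -ENOSPC/-EAGAIN.  If only part of an SMB could
 * be sent, mark the session for reconnect so the server discards the
 * partial frame.
 */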
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	struct smb_hdr *smb_buffer = iov[0].iov_base;
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb: total_len %d", total_len);
	dump_smb(smb_buffer, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/* if blocking send, we try 3 times, since each can block
			   for 5 seconds. For nonblocking we have to try more
			   but wait increasing amounts of time allowing time for
			   socket to clear.  The overall time we wait in either
			   case to send on the socket is about 15 seconds.
			   Similarly we wait for 15 seconds for
			   a response from the server in SendReceive[2]
			   for the server to send a response back for
			   most types of requests (except SMB Write
			   past end of file which can be slow, and
			   blocking lock operations). NFS waits slightly longer
			   than CIFS, but this can make it take longer for
			   nonresponsive servers to be detected and 15 seconds
			   is more than enough time for modern networks to
			   send a packet.  In most cases if we fail to send
			   after the retries we will kill the socket and
			   reconnect which may clear the network problem.
			*/
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				       ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one.  We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a
	   side effect of this call. */
	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

	return rc;
}
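
/*
 * Send a single SMB buffer (4-byte RFC1001 length header plus the SMB
 * itself) as a one-element iovec.
 */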
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}
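
/*
 * Wait until the number of requests in flight to this server drops below
 * cifs_max_pending before taking a request slot.  Async ops (such as
 * oplock breaks) are never held up, and blocking lock ops are not
 * counted against the in-flight total.
 */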
static int wait_for_free_request(struct TCP_Server_Info *server,
				 const int long_op)
{
	if (long_op == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		atomic_inc(&server->inFlight);
		return 0;
	}

	spin_lock(&GlobalMid_Lock);
	while (1) {
		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
			spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
			atomic_inc(&server->num_waiters);
#endif
			wait_event(server->request_q,
				   atomic_read(&server->inFlight)
				     < cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
			atomic_dec(&server->num_waiters);
#endif
			spin_lock(&GlobalMid_Lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&GlobalMid_Lock);
				return -ENOENT;
			}

			/* can not count locking commands against total
			   as they are allowed to block on server */

			/* update # of requests on the wire to server */
			if (long_op != CIFS_BLOCKING_OP)
				atomic_inc(&server->inFlight);
			spin_unlock(&GlobalMid_Lock);
			break;
		}
	}
	return 0;
}
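
/*
 * Allocate a mid for this request and queue it on the server's
 * pending_mid_q.  Fails if the tcp session is exiting or needs
 * reconnecting, or if the SMB session is not yet established (except
 * for the session setup and negotiate commands used to establish it).
 */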
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
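
/*
 * Sleep (killably) on the server's response queue until the mid leaves
 * MID_REQUEST_SUBMITTED state.  Returns -ERESTARTSYS if interrupted by
 * a fatal signal.
 */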
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_callback_t *callback, void *cbdata,
		bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mutex_lock(&server->srv_mutex);
	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL) {
		mutex_unlock(&server->srv_mutex);
		return -ENOMEM;
	}

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out_err;
	}

	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&server->inSend);
#endif
	rc = smb_sendv(server, iov, nvec);
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&server->inSend);
	mid->when_sent = jiffies;
#endif
	mutex_unlock(&server->srv_mutex);
	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait,
 * and whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		struct smb_hdr *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = (char *)in_buf;
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}
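
/*
 * Map the final state of a synchronous mid to an errno once the caller
 * has finished waiting.  On MID_RESPONSE_RECEIVED the entry is left for
 * the caller to consume; in every other case it is freed here (and
 * unlinked from the pending queue if it was found in an unexpected
 * state).
 */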
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
	     mid->mid, mid->midState);

	spin_lock(&GlobalMid_Lock);
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
		       mid->mid, mid->midState);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
		struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
		in_buf->Mid, rc);

	return rc;
}
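
/*
 * Sanity-check a received response: dump the start of the SMB, verify
 * the signature when signing is in use, and convert any error in the
 * SMB header to a POSIX error code.
 */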
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	dump_smb(mid->resp_buf,
		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));

	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(mid->resp_buf, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
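
/*
 * Send a request described by an iovec array and wait synchronously for
 * the response.  On success, iov[0] is repointed at the response buffer
 * and *pRespBufType records whether that buffer is small or large; the
 * caller owns the response buffer unless CIFS_NO_RESP was set in flags.
 * The request buffer (the original iov[0]) is always consumed.
 */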
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	struct smb_hdr *in_buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(in_buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(in_buf);
		return rc;
	}

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_sendv(ses->server, iov, n_vec);
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(in_buf);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(in_buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	iov[0].iov_base = (char *)midQ->resp_buf;
	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
	if (midQ->largeBuf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}
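
/*
 * Send a single SMB request buffer and wait synchronously for the
 * response, copying it into the caller-supplied out_buf and returning
 * the response length in *pbytes_returned.
 */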
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
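
/*
 * Send a blocking lock request and wait for the reply, allowing signals
 * to interrupt the wait.  If interrupted, try to cancel the pending lock
 * on the server (NT_CANCEL for POSIX locks sent via TRANSACTION2,
 * LOCKINGX_CANCEL_LOCK otherwise); once the response finally arrives,
 * an -EACCES result is turned into -ERESTARTSYS so the system call is
 * restarted.
 */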
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->midState == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->midState == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}