/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
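
/*
 * Completion callback used for synchronous requests: the mid's callback_data
 * holds the issuing task, so completion simply wakes that task.
 */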
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

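/*
 * Allocate a mid (multiplex id) entry from the mempool for the given request
 * and tie it to @server. The new mid defaults to synchronous completion (it
 * wakes the current task) and is returned in MID_REQUEST_ALLOCATED state; it
 * is not yet queued on pending_mid_q.
 */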
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

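/*
 * Release a mid entry: free the attached response buffer (large or small)
 * and return the entry to the mempool. With CONFIG_CIFS_STATS2, also log
 * responses that took more than a second, ignoring the blocking lock command
 * which is expected to be slow.
 */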
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if (time_after(now, midEntry->when_alloc + HZ)) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

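/*
 * Unlink a mid from the server's pending_mid_q under GlobalMid_Lock and
 * then free it.
 */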
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

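/*
 * Total number of bytes a request will put on the wire: the kvec array
 * plus any trailing page array (full pages plus the tail length).
 */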
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

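/*
 * Transmit a single request. For SMB Direct (RDMA) connections the request
 * is handed to smbd_send(); otherwise the kvecs and then the page array are
 * pushed through the TCP socket with TCP_CORK set around the whole frame.
 * The RFC1002 length in iov[0] must match the total request length, and a
 * partial send forces a reconnect so the server discards the partial SMB.
 */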
static int
__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server->smbd_conn, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	if (n_vec < 2)
		return -EIO;

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);
	dump_smb(iov[1].iov_base, iov[1].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

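/*
 * Wrapper around __smb_send_rqst(): when CIFS_TRANSFORM_REQ is set, the
 * request is first transformed (encrypted) via the server's
 * init_transform_rq hook and the transformed copy is freed after sending.
 */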
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
{
	struct smb_rqst cur_rqst;
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, rqst);

	if (!server->ops->init_transform_rq ||
	    !server->ops->free_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, &cur_rqst);
	server->ops->free_transform_rq(&cur_rqst);
	return rc;
}

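/*
 * Send a plain SMB buffer: iov[0] carries the 4-byte RFC1002 length field
 * and iov[1] the rest of the packet.
 */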
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, &rqst);
}

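/*
 * Flow control: block until a send credit is available. Async ops (e.g.
 * oplock breaks) are never held up, and blocking lock ops do not consume
 * a credit since they may legitimately block on the server.
 */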
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

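/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q, after
 * checking that the session/TCP state allows sending this command (only
 * session setup or negotiate on a new session, only logoff while the
 * session is going away).
 */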
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

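/*
 * Map the final mid state of a synchronous request to an errno (retry,
 * malformed response, server shutdown, ...) and free the mid. A mid in an
 * unexpected state is unlinked from pending_mid_q first.
 */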
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

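/*
 * Core synchronous send/receive path: take a credit, sign and send the
 * request under srv_mutex, wait for the response, then hand the response
 * buffer back to the caller via @resp_iov. With CIFS_NO_RESP the response
 * buffer is instead freed together with the mid.
 */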
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

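/*
 * Like cifs_send_recv() but takes a caller-built iov array whose first
 * vector begins with the 4-byte RFC1001 length field; the array is re-packed
 * so that new_iov[0] holds just that length field.
 */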
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
				  GFP_KERNEL);
		if (!new_iov)
			return -ENOMEM;
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
int
smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	       const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;
	int i;
	__u32 count;
	__be32 rfc1002_marker;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
				  GFP_KERNEL);
		if (!new_iov)
			return -ENOMEM;
	} else
		new_iov = s_iov;

	/* 1st iov is an RFC1002 Session Message length */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	count = 0;
	for (i = 1; i < n_vec + 1; i++)
		count += new_iov[i].iov_len;

	rfc1002_marker = cpu_to_be32(count);

	new_iov[0].iov_base = &rfc1002_marker;
	new_iov[0].iov_len = 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

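/*
 * Older SMB1 synchronous path used by callers that pass whole smb_hdr
 * request/response buffers rather than iovs.
 */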
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

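/*
 * Synchronous path for blocking lock requests: waits interruptibly and, if
 * a signal arrives while the lock is still outstanding, cancels it
 * (NT_CANCEL for POSIX/Transaction2 locks, LOCKING_ANDX_CANCEL_LOCK for
 * Windows locks) before restarting the system call.
 */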
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}