/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
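
/*
 * Note: mempool_alloc() with GFP_NOFS may sleep but can always fall back
 * to the preallocated pool, so it does not return NULL; that is why the
 * allocation below is not checked for failure.
 */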
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
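
/*
 * Unlike DeleteMidQEntry() above, which only drops a reference,
 * cifs_delete_mid() below also unlinks the mid from pending_mid_q (under
 * GlobalMid_Lock) so the demultiplex thread can no longer find it.
 */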
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
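
/*
 * Worked out from the retry logic above: a blocking send gives up after
 * three -EAGAIN results (each sock_sendmsg() may block for about five
 * seconds), while a nonblocking send sleeps 2, 4, ..., 8192 ms between
 * attempts, roughly 16 seconds in total before the 14-retry cap hits.
 */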

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
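
/*
 * Illustration of the page-array math above (made-up values): with
 * rq_npages = 3, rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 1024,
 * the page data spans 4096 * 2 - 512 + 1024 = 8704 bytes.
 */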

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
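
/*
 * smb_send_rqst() below either passes the chain straight through or, for
 * CIFS_TRANSFORM_REQ (encrypted) traffic, prepends a transform header as
 * cur_rqst[0] and lets ->init_transform_rq encrypt the chain before
 * handing it to __smb_send_rqst().
 */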
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
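
/*
 * Request throttling: the server grants the client credits (for SMB1 this
 * is derived from the maximum number of simultaneous requests) and every
 * request on the wire consumes at least one, so the senders below must
 * wait until enough credits are available.
 */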
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits = server->credits;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits, 0);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits,
						0);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				scredits = *credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->hostname, scredits, -(num_credits));
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
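
/*
 * A timeout of -1 below means wait indefinitely (MAX_JIFFY_OFFSET): a
 * single regular request blocks until one credit becomes available.
 */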
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, sin_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	sin_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->hostname, scredits, sin_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, sin_flight, num, scredits);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
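
/*
 * The default above performs no MTU credit accounting (credits->value is
 * 0) and allows the full @size; the SMB2 ops install their own
 * ->wait_mtu_credits implementation that does real accounting.
 */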
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
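
/*
 * Once on pending_mid_q the mid is completed by the demultiplex thread
 * when a response with a matching MID arrives; the waiter below simply
 * sleeps until mid_state leaves MID_REQUEST_SUBMITTED.
 */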
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
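
/*
 * The three callbacks above divide up compound completion work: every
 * part of the chain returns its granted credits via
 * cifs_compound_callback(), only the final part wakes the sending thread,
 * and a cancelled part frees its own mid since no one will wait for it.
 */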

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	if (!ses->binding) {
		/* round robin */
		if (ses->chan_count > 1) {
			index = (uint)atomic_inc_return(&ses->chan_seq);
			index %= ses->chan_count;
		}
		return ses->chans[index].server;
	} else {
		return cifs_ses_server(ses);
	}
}
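
/*
 * Example: with ses->chan_count == 3 the channel sequence counter above
 * makes successive calls cycle through all three channels in turn; with
 * a single channel the master connection is always returned.
 */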
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}