/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

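/*
 * Allocate a mid (multiplex id) entry for a request about to be sent on
 * the given server connection. The entry starts out synchronous: its
 * callback simply wakes the allocating task (cifs_wake_up_task above);
 * async callers overwrite the callback after allocation.
 */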
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

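/*
 * Compute the total on-the-wire length of a request: the kvec array
 * (skipping the leading 4-byte RFC1002 length iov on SMB2+, which has no
 * header preamble) plus whatever portion of the page array is in use.
 */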
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

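/*
 * Core transport send path. Sends one or more marshalled requests over
 * the TCP socket (or hands them to smbdirect when RDMA is enabled):
 * corks the socket, prepends the RFC1002 length marker for SMB2+, and
 * blocks signals for the duration so an interrupt cannot leave a partial
 * frame on the wire. If only part of a packet was sent anyway, the
 * session is marked CifsNeedReconnect so the server discards it.
 */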
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

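/*
 * Send a request, transforming (encrypting) it first when the caller set
 * CIFS_TRANSFORM_REQ. In that case a transform header is prepended as
 * iov[0] of a new compound and the server's init_transform_rq callback
 * builds the encrypted request before it goes out on the wire.
 */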
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

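/*
 * Flow control: block until 'num_credits' credits are available on this
 * connection (timeout is in milliseconds; a negative timeout means wait
 * indefinitely). Oplock break responses (CIFS_NON_BLOCKING) are never
 * held up, and echoes fail fast with -EAGAIN rather than wait.
 */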
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

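/*
 * Like wait_for_free_request() but for a compound of 'num' requests.
 * Fails fast with -ENOTSUPP when so few requests are in flight that
 * waiting would be unlikely to ever yield enough credits.
 */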
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

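/*
 * Validate the session state for this SMB1 command (only the handshake
 * commands may be sent while a session is still being set up, and only
 * LOGOFF while it is being torn down), then allocate a mid and queue it
 * on the server's pending_mid_q.
 */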
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

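/*
 * Build a mid for an async request: iov[0] must be the 4-byte RFC1002
 * length immediately followed by the SMB header in iov[1]. Signs the
 * request when the server requires signing.
 */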
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

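/*
 * After the waiter has been woken, translate the final mid state into an
 * errno: 0 for a received response, -EAGAIN when a retry is needed, -EIO
 * for a malformed response, -EHOSTDOWN on shutdown. Except in the
 * response-received case, the mid is dequeued if needed and released.
 */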
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

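/*
 * Demultiplex-thread callbacks for compound requests: every response in
 * the chain returns the credits the server granted it, but only the last
 * one wakes the sending thread; a mid cancelled while waiting frees
 * itself instead.
 */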
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

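/*
 * Central synchronous send/receive path: pick a channel, reserve credits
 * for every request in the compound, sign and send the chain under
 * srv_mutex (so signing order matches send order), then wait for and
 * validate each response, handing credits and response buffers back.
 */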
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (!ses->binding) {
		uint index = 0;

		if (ses->chan_count > 1) {
			index = (uint)atomic_inc_return(&ses->chan_seq);
			index %= ses->chan_count;
		}
		server = ses->chans[index].server;
	} else {
		server = cifs_ses_server(ses);
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

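/*
 * Legacy synchronous send/receive for a single SMB1 request that the
 * caller has already marshalled (including the RFC1002 length) in in_buf.
 * The response is copied into out_buf and its byte count is returned via
 * pbytes_returned.
 */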
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}