/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

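/*
 * Allocate a mid (multiplex id) entry from the mempool to track a request
 * about to be sent on @server. The entry starts out synchronous: its
 * default callback simply wakes the allocating task.
 */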
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

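/*
 * Mark a mid entry as free, release its response buffer and drop the
 * reference taken at allocation time. With CONFIG_CIFS_STATS2, also
 * count and log responses that took longer than a second.
 */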
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second are indications that
	 * something is wrong, unless it is quite a slow link or server.
	 */
	if (time_after(now, midEntry->when_alloc + HZ) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
		    (le16_to_cpu(midEntry->command) >= 0))
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

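/*
 * Unlink a mid entry from the pending queue under GlobalMid_Lock, then
 * free it via DeleteMidQEntry().
 */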
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * Should never happen; letting socket clear before
			 * retrying is our only obvious option here.
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

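/*
 * smb_rqst_len - compute the total on-the-wire length of an smb_rqst
 * @server: server the request is destined for (determines preamble handling)
 * @rqst: request to measure
 *
 * Sums the kvec array and any trailing page array. For SMB2+ (no preamble),
 * a leading 4-byte RFC1002 length vector is not counted.
 */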
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

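/*
 * Send one or more SMB requests on the socket (or via RDMA when SMB Direct
 * is in use). The socket is corked for the duration of the send, and the
 * session is marked for reconnect if only part of the data could be sent.
 */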
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

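/*
 * Top-level send routine. If CIFS_TRANSFORM_REQ is set, wrap the compound
 * chain in a transform (encryption) header via init_transform_rq before
 * handing it to __smb_send_rqst().
 */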
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

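/*
 * Wait until at least one credit is available, then consume it (except for
 * blocking lock operations, which are not counted against the total).
 * CIFS_ASYNC_OP requests such as oplock breaks are never held up.
 */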
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

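/*
 * Allocate a mid entry for @in_buf and queue it on the server's
 * pending_mid_q, after checking that the session state allows
 * sending the command.
 */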
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

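/*
 * Build a mid entry for an async request: validate the RFC1002 length
 * vector, enable signing if the server requires it, allocate the mid
 * and sign the request.
 */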
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

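/*
 * Collect the result of a synchronous request from its mid entry and
 * map the final mid state to an errno. The mid is freed here unless
 * the response was received successfully.
 */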
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

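/*
 * Build and sign a mid entry for a synchronous request; the counterpart
 * of cifs_setup_async_request() for requests queued via allocate_mid().
 */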
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

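/*
 * Send a compound chain of @num_rqst requests and wait for the responses.
 * Credits are obtained per request up front and returned on failure;
 * intermediate mids get a noop callback so only the last PDU wakes us.
 */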
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * It can be optimized further by waiting for all the credits
	 * at once but this can wait long enough if we don't have enough
	 * credits due to some heavy operations in progress or the server
	 * not granting us much, so a fallback to the current approach is
	 * needed anyway.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback compounds unless it is the last
		 * request.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				cancelled_mid[i] = true;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++)
		if (!cancelled_mid[i] && midQ[i]->resp_buf
		    && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
			credits[i] = ses->server->ops->get_credits(midQ[i]);

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
		add_credits(ses->server, credits[i], optype);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

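/*
 * Wrapper around cifs_send_recv() that splits off the first four bytes of
 * the caller's first iovec as the RFC1001 length vector, copying into a
 * heap-allocated array when n_vec would overflow the on-stack one.
 */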
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK | LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

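/*
 * Like SendReceive(), but for blocking lock requests: the wait for a
 * response is interruptible, and on a signal the lock is cancelled
 * (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows locks)
 * so the server releases the blocked request.
 */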
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}