// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "compress.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
	wake_up_process(mid->callback_data);
}
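
/*
 * Allocate a mid from the mempool and initialize it for the given request
 * header. The mid takes a reference on the allocating task and defaults to
 * the synchronous wake-up callback above; asynchronous callers override
 * ->callback after allocation.
 */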
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
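
/*
 * Final kref release for a mid: run the cancelled-mid handler if the wait
 * was cancelled after a response arrived, update the per-command statistics
 * (CONFIG_CIFS_STATS2), release the response buffer and the reference on
 * the creating task, then return the mid to the mempool.
 */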
void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
	     midEntry->mid_state == MID_RESPONSE_READY) &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above the recommended
	 * max value (32767, ie 9 hours), and it is generally harmless even if
	 * wrong since it only affects debug counters - so leave the calc as a
	 * simple comparison rather than doing multiple conversions and
	 * overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so it can not be negative
		 * below.
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
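
/*
 * Unlink the mid from the pending queue (unless already unlinked by the
 * demultiplex thread) and drop a reference; the mid is freed through
 * __release_mid() once the last reference goes away.
 */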
void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
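
/*
 * Number of bytes @rqst would occupy on the wire. For SMB2+ a leading
 * 4-byte kvec is skipped, since __smb_send_rqst() generates its own RFC1002
 * length marker; for SMB1 the caller-supplied length field is counted.
 */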
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);
	return buflen;
}
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	/*
	 * there's hardly any use for the layers above to know the
	 * actual error code here. All they should do at this point is
	 * to retry the connection and hope it goes away.
	 */
	if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
		rc = -ECONNABORTED;
		cifs_signal_cifsd_for_reconnect(server, false);
	} else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}
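
/*
 * Front end for __smb_send_rqst(): route the request through smb_compress()
 * when CIFS_COMPRESS_REQ is set, send it unmodified when no transform is
 * requested, or wrap it in a transform (encryption) header built by the
 * ->init_transform_rq callback before sending.
 */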
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst new_rqst[MAX_COMPOUND] = {};
	struct kvec iov = {
		.iov_base = &tr_hdr,
		.iov_len = sizeof(tr_hdr),
	};
	int rc;

	if (flags & CIFS_COMPRESS_REQ)
		return smb_compress(server, &rqst[0], __smb_send_rqst);

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
		return -EIO;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	new_rqst[0].rq_iov = &iov;
	new_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    new_rqst, rqst);
	if (!rc) {
		rc = __smb_send_rqst(server, num_rqst + 1, new_rqst);
		smb3_free_compound_rqst(num_rqst, &new_rqst[1]);
	}
	return rc;
}
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
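
/*
 * Wait until at least @num_credits credits are available on @server, or
 * until @timeout milliseconds elapse (a negative timeout waits forever).
 * On success the credits are consumed, in_flight is updated and *instance
 * is set to the current reconnect instance. CIFS_NON_BLOCKING requests
 * (oplock breaks) take their credit without waiting.
 */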
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
			 __func__, 1, scredits);

		return 0;
	}

	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				 __func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
				 __func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
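
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 * Regular requests are refused with -EAGAIN while the session is still
 * being set up (SES_NEW) or torn down (SES_EXITING).
 */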
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
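
/*
 * Sleep (killable and freezable) until the demultiplex thread moves the
 * mid out of the submitted/response-received states.
 */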
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
				 midQ->mid_state != MID_RESPONSE_RECEIVED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
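
/*
 * Build and sign a mid for an asynchronous request. The first kvec must be
 * the 4-byte RFC1002 length, immediately followed in memory by the SMB
 * header in the second kvec.
 */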
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
		goto sync_mid_done;
	}
	spin_unlock(&server->mid_lock);

sync_mid_done:
	release_mid(mid);
	return rc;
}
static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits = {
		.value = server->ops->get_credits(mid),
		.instance = server->reconnect_instance,
	};

	add_credits(server, &credits, mid->optype);

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server || server->terminate)
			continue;

		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}

	server = ses->chans[index].server;
	spin_unlock(&ses->chan_lock);

	return server;
}
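
/*
 * Send @num_rqst requests as a single compound chain and wait for all the
 * responses. Credits are taken up front (one per request), the mids are
 * set up and sent under the server mutex so that signing matches the send
 * order, and the per-mid callbacks return server-granted credits as
 * responses arrive. If the wait is interrupted, the remaining mids are
 * cancelled and left for the demultiplex thread to release.
 */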
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
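
/*
 * Like cifs_send_recv(), but takes a bare iov array and synthesizes the
 * leading 4-byte RFC1001 length kvec that the transport expects. For
 * example, SendReceiveNoRsp() above passes a single kvec covering the
 * whole frame, which is split here into length + payload.
 */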
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
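
/*
 * Variant of SendReceive() for blocking SMB1 lock requests: the wait for
 * the response is interruptible, and if a signal arrives while the lock is
 * still pending a cancel (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent so the
 * server unblocks the lock and the system call can be restarted.
 */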
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED ||
	     midQ->mid_state == MID_RESPONSE_RECEIVED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}
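
/*
 * Discard the rest of the frame, dequeue the mid (flagging it malformed if
 * @malformed is set) and hand it the small buffer as its response.
 */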
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_io_subrequest *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
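
/*
 * Receive handler for asynchronous reads: pull in the remainder of the
 * READ response header, validate the data offset and length, then read the
 * payload directly into the request's iterator - or, with smbdirect memory
 * registration, rely on the RDMA transfer that has already completed.
 */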
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_io_subrequest *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
		 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}