// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
	wake_up_process(mid->callback_data);
}
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
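
/*
 * Illustrative sketch (comment only, not driver code): the mid returned
 * by alloc_mid() is refcounted via temp->refcount, so a hypothetical
 * caller pairs the allocation with release_mid(), which kref_put()s
 * into __release_mid() below:
 *
 *	struct mid_q_entry *mid = alloc_mid(hdr, server);
 *
 *	if (!mid)
 *		return -ENOMEM;
 *	// ...queue the mid, send the request, wait for the answer...
 *	release_mid(mid);	// drops the last reference
 */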
void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
	     midEntry->mid_state == MID_RESPONSE_READY) &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}
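
/*
 * Usage note (a sketch, not compiled): the MID_DELETED flag above makes
 * delete_mid() idempotent with respect to the pending queue - a mid can
 * be dequeued either here or by the demultiplex thread, but only one of
 * them performs the list_del_init() before the reference is dropped via
 * release_mid().
 */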
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
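
/*
 * Worked example of the -EAGAIN backoff above (numbers follow from the
 * loop, not from a spec): with server->noblocksnd set, retry n sleeps
 * 1 << n ms, so by the time retries reaches 14 the thread has waited
 * roughly 2 + 4 + ... + 8192 ms ~= 16 seconds - the "about 15 seconds"
 * cited in the comment. Blocking sends instead rely on the socket
 * blocking for up to ~5 seconds per attempt and give up after 3 tries.
 */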
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);

	return buflen;
}
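
/*
 * Example (hypothetical buffers, for illustration only): an SMB2 request
 * built as
 *
 *	rq_iov[0] = { .iov_base = rfc1002_hdr, .iov_len = 4 };
 *	rq_iov[1] = { .iov_base = smb2_hdr,    .iov_len = 64 };
 *
 * with a 4096-byte payload in rq_iter makes smb_rqst_len() skip the
 * 4-byte RFC1002 slot (SMB2+ only) and return 64 + 4096 = 4160, the
 * value that ends up in the length marker built by __smb_send_rqst().
 */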
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	/*
	 * there's hardly any use for the layers above to know the
	 * actual error code here. All they should do at this point is
	 * to retry the connection and hope it goes away.
	 */
	if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
		rc = -ECONNABORTED;
		cifs_signal_cifsd_for_reconnect(server, false);
	} else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}
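
/*
 * Wire framing produced by __smb_send_rqst() (informational sketch):
 * for SMB2+ the function itself prepends the 4-byte big-endian RFC1002
 * style length marker, so the bytes on the socket are
 *
 *	[ be32 send_length ][ rq_iov[0..n] ][ rq_iter payload ] x num_rqst
 *
 * which is why send_length is bumped by 4 after the marker goes out and
 * total_len != send_length is the partial-send test used above.
 */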
struct send_req_vars {
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst rqst[MAX_COMPOUND];
	struct kvec iov;
};

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct send_req_vars *vars;
	struct smb_rqst *cur_rqst;
	struct kvec *iov;
	int rc;

	if (flags & CIFS_COMPRESS_REQ)
		return smb_compress(server, &rqst[0], __smb_send_rqst);

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
		return -EIO;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	vars = kzalloc(sizeof(*vars), GFP_NOFS);
	if (!vars)
		return -ENOMEM;
	cur_rqst = vars->rqst;
	iov = &vars->iov;

	iov->iov_base = &vars->tr_hdr;
	iov->iov_len = sizeof(vars->tr_hdr);
	cur_rqst[0].rq_iov = iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(vars);
	return rc;
}
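
/*
 * Dispatch summary for smb_send_rqst() (a sketch of the three paths):
 *
 *	CIFS_COMPRESS_REQ  -> smb_compress(server, &rqst[0], __smb_send_rqst)
 *	CIFS_TRANSFORM_REQ -> ops->init_transform_rq(), then send the
 *	                      encrypted chain via __smb_send_rqst()
 *	otherwise          -> __smb_send_rqst() directly
 *
 * The transform path reserves cur_rqst[0] for the smb2_transform_hdr,
 * which is why callers are limited to MAX_COMPOUND - 1 requests there.
 */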
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname,
						scredits, num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
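
/*
 * Worked example of the reservation rule above (sketch; assumes
 * MAX_COMPOUND == 5 as in cifsglob.h): a single-credit request
 * (num_credits == 1, optype == 0) arriving while in_flight > 10 and
 * *credits <= 5 blocks until more than MAX_COMPOUND credits are free,
 * so a queued 5-PDU compound can never be starved by a steady stream
 * of one-credit requests.
 */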
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
				 midQ->mid_state != MID_RESPONSE_RECEIVED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
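
/*
 * Hypothetical caller sketch (names illustrative, not from this file):
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_echo_callback,
 *			     NULL, my_ctx, CIFS_ECHO_OP, NULL);
 *
 * my_echo_callback() then runs in the demultiplex thread once the
 * response (or a reconnect) resolves the mid, and - like the driver's
 * real echo callbacks - must return credits and release_mid() itself.
 */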
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
		goto sync_mid_done;
	}
	spin_unlock(&server->mid_lock);

sync_mid_done:
	release_mid(mid);
	return rc;
}
static int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits = {
		.value = server->ops->get_credits(mid),
		.instance = server->reconnect_instance,
	};

	add_credits(server, &credits, mid->optype);

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server || server->terminate)
			continue;

		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}

	server = ses->chans[index].server;
	spin_unlock(&ses->chan_lock);

	return server;
}
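
/*
 * Selection example (sketch): with three usable channels reporting
 * in_flight = { 4, 1, 9 }, the loop above picks index 1, the least
 * loaded. Only when every channel reports the same load (min == max,
 * e.g. an idle session) does the atomic chan_seq round-robin kick in,
 * so bursts on an idle session still rotate across channels.
 */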
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
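
/*
 * Compound usage sketch (hypothetical two-PDU chain; the real callers
 * are the smb2_compound_op()-style helpers in smb2inode.c):
 *
 *	struct smb_rqst rqst[2];		// e.g. create + close
 *	int resp_buf_type[2];
 *	struct kvec resp_iov[2];
 *
 *	rc = compound_send_recv(xid, ses, server, 0, 2, rqst,
 *				resp_buf_type, resp_iov);
 *
 * Every mid except the last uses the no-wake cifs_compound_callback,
 * so this thread sleeps until the final PDU of the chain arrives.
 */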
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	int rc;
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
static int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_io_subrequest *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_io_subrequest *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
		 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
	{
		length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
						    data_len);
		if (length > 0)
			rdata->got_bytes += length;
	}
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}
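
/*
 * READ response layout handled above (sketch; offsets are the SMB2
 * values read via server->vals and server->ops):
 *
 *	[ preamble ][ READ_RSP header ][ optional pad ][ Data: data_len ]
 *	            |<- read_rsp_size  |<- data_offset points here
 *
 * cifs_readv_receive() pulls the header into smallbuf, validates
 * data_offset/data_len against pdu_size, swallows any pad bytes, then
 * copies the Data array straight into rdata->subreq.io_iter (or trusts
 * the RDMA MR when SMB Direct has already placed the bytes).
 */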