// SPDX-License-Identifier: LGPL-2.1
/*
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 */
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
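/*
 * Default mid callback for synchronous requests: mark the response ready
 * and wake the task that issued the request (stored in mid->callback_data).
 */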
static void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
	wake_up_process(mid->callback_data);
}
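/*
 * Allocate and initialize a mid_q_entry for an outgoing SMB1 request.
 * The mid defaults to synchronous use: its callback wakes the allocator.
 */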
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
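/*
 * Final kref release for a mid: update per-command statistics, note slow
 * responses, free any response buffer and return the entry to the mempool.
 */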
void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
	     midEntry->mid_state == MID_RESPONSE_READY) &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
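/*
 * Unlink a mid from the pending queue (if still queued) and drop the
 * reference the queue held on it.
 */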
static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}
/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
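/*
 * Total number of bytes a smb_rqst will put on the wire, not counting the
 * 4-byte RFC1002 length when it is carried in a separate leading iovec.
 */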
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);

	return buflen;
}
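/*
 * Send one or more requests on the socket, prefixed by an RFC1002 length
 * marker for SMB2+ (or via smbdirect when RDMA is enabled). Signals are
 * blocked during the send so a partial send cannot be caused by an
 * interrupt; a partial send for any other reason triggers a reconnect.
 */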
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL) {
		rc = -EAGAIN;
		goto out;
	}

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		rc = -ERESTARTSYS;
		goto out;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork the socket */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	/*
	 * there's hardly any use for the layers above to know the
	 * actual error code here. All they should do at this point is
	 * to retry the connection and hope it goes away.
	 */
	if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
		rc = -ECONNABORTED;
		cifs_signal_cifsd_for_reconnect(server, false);
	} else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}
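/*
 * Optionally compress or encrypt (transform) the request chain before
 * handing it to __smb_send_rqst().
 */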
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst new_rqst[MAX_COMPOUND] = {};
	struct kvec iov = {
		.iov_base = &tr_hdr,
		.iov_len = sizeof(tr_hdr),
	};
	int rc;

	if (flags & CIFS_COMPRESS_REQ)
		return smb_compress(server, &rqst[0], __smb_send_rqst);

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
		return -EIO;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	new_rqst[0].rq_iov = &iov;
	new_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    new_rqst, rqst);
	if (!rc) {
		rc = __smb_send_rqst(server, num_rqst + 1, new_rqst);
		smb3_free_compound_rqst(num_rqst, &new_rqst[1]);
	}

	return rc;
}
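/*
 * Legacy SMB1 send path: wrap an already-built smb_hdr in a two-vector
 * smb_rqst (RFC1002 length + body) and send it untransformed.
 */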
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
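/*
 * Wait until @num_credits credits are available on @server (or the timeout
 * expires), then reserve them and count the request as in flight.
 */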
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
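/* Reserve a single credit, waiting as long as necessary. */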
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
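/*
 * Like wait_for_free_credits() but for a compound chain of @num requests.
 * Fails immediately if the credits are short and nothing is in flight that
 * could bring more, since waiting would then be pointless.
 */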
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
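/*
 * SMB1 variant of the "MTU credits" op: there is no real credit
 * accounting, so just record the current reconnect instance.
 */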
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
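/*
 * Validate the session state and allocate a mid for an SMB1 request,
 * adding it to the server's pending_mid_q.
 */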
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
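/*
 * Sleep (killable and freezable) until the mid leaves the submitted /
 * response-received states, i.e. until its response is fully processed or
 * the wait is interrupted.
 */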
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
				 midQ->mid_state != MID_RESPONSE_RECEIVED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
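/*
 * Build and sign a mid for an asynchronous SMB1 request whose first iovec
 * must be the 4-byte RFC1002 length.
 */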
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
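/*
 * Translate the final mid state into an errno once the caller has finished
 * waiting; invalid states also unlink the mid from the pending queue.
 */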
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
		goto sync_mid_done;
	}
	spin_unlock(&server->mid_lock);

sync_mid_done:
	release_mid(mid);
	return rc;
}
static int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}
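/*
 * Verify the signature of a received SMB1 response (when signing is in
 * use) and map any error in it to a POSIX error code.
 */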
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
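/*
 * Allocate and sign a mid for a synchronous SMB1 request; the first iovec
 * must carry the 4-byte RFC1002 length.
 */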
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
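/*
 * Per-PDU callback for compound chains: collect the credits the server
 * granted for this response and mark it ready without waking the waiter.
 */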
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits = {
		.value = server->ops->get_credits(mid),
		.instance = server->reconnect_instance,
	};

	add_credits(server, &credits, mid->optype);

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server || server->terminate)
			continue;

		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function.
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}

	server = ses->chans[index].server;
	spin_unlock(&ses->chan_lock);

	return server;
}
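/*
 * Send a chain of up to MAX_COMPOUND requests and wait for all of the
 * responses, handling credits, signing order and cancellation.
 */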
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
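/* Convenience wrapper around compound_send_recv() for a single request. */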
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
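/*
 * Legacy entry point: prepend an iovec for the RFC1002 length to the
 * caller's vectors and hand the request to cifs_send_recv().
 */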
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
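/*
 * Synchronous SMB1 send/receive into caller-supplied buffers; the number
 * of bytes received is returned via @pbytes_returned.
 */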
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
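/*
 * Like SendReceive() but for blocking SMB1 lock requests: the wait is
 * interruptible and, on a signal, the lock is cancelled on the server
 * before giving up.
 */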
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED ||
	     midQ->mid_state == MID_RESPONSE_RECEIVED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
static int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}
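/*
 * Throw away the rest of a READ response, dequeue the mid and hand it the
 * small buffer so the callback can still inspect the header.
 */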
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_io_subrequest *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
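/*
 * Receive handler for READ responses: parse the header from smallbuf,
 * validate the data offset and length, then read the payload directly
 * into the subrequest's iterator (or rely on RDMA having placed it).
 */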
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_io_subrequest *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
		 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}