/*
 *  Copyright (C) 2001 by Urban Widmark
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */
9 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/net.h>
14 #include <linux/smb_fs.h>
15 #include <linux/smbno.h>
16 #include <linux/smb_mount.h>
18 #include "smb_debug.h"
22 /* #define SMB_SLAB_DEBUG (SLAB_RED_ZONE | SLAB_POISON) */
23 #define SMB_SLAB_DEBUG 0
25 #define ROUND_UP(x) (((x)+3) & ~3)
27 /* cache for request structures */
28 static kmem_cache_t
*req_cachep
;
30 static int smb_request_send_req(struct smb_request
*req
);
34 name, active, num, objsize, active_slabs, num_slaps, #pages
38 int smb_init_request_cache(void)
40 req_cachep
= kmem_cache_create("smb_request",
41 sizeof(struct smb_request
), 0,
42 SMB_SLAB_DEBUG
| SLAB_HWCACHE_ALIGN
,
44 if (req_cachep
== NULL
)
50 void smb_destroy_request_cache(void)
52 if (kmem_cache_destroy(req_cachep
))
53 printk(KERN_INFO
"smb_destroy_request_cache: not all structures were freed\n");
57 * Allocate and initialise a request structure
59 static struct smb_request
*smb_do_alloc_request(struct smb_sb_info
*server
,
62 struct smb_request
*req
;
63 unsigned char *buf
= NULL
;
65 req
= kmem_cache_alloc(req_cachep
, SLAB_KERNEL
);
66 VERBOSE("allocating request: %p\n", req
);
71 buf
= smb_kmalloc(bufsize
, GFP_NOFS
);
73 kmem_cache_free(req_cachep
, req
);
78 memset(req
, 0, sizeof(struct smb_request
));
80 req
->rq_bufsize
= bufsize
;
81 req
->rq_server
= server
;
82 init_waitqueue_head(&req
->rq_wait
);
83 INIT_LIST_HEAD(&req
->rq_queue
);
84 atomic_set(&req
->rq_count
, 1);
90 struct smb_request
*smb_alloc_request(struct smb_sb_info
*server
, int bufsize
)
92 struct smb_request
*req
= NULL
;
95 atomic_inc(&server
->nr_requests
);
96 if (atomic_read(&server
->nr_requests
) <= MAX_REQUEST_HARD
) {
97 req
= smb_do_alloc_request(server
, bufsize
);
104 * Try to free up at least one request in order to stay
105 * below the hard limit
107 if (nfs_try_to_free_pages(server
))
110 if (signalled() && (server
->flags
& NFS_MOUNT_INTR
))
111 return ERR_PTR(-ERESTARTSYS
);
112 current
->policy
= SCHED_YIELD
;
115 /* FIXME: we want something like nfs does above, but that
116 requires changes to all callers and can wait. */
123 static void smb_free_request(struct smb_request
*req
)
125 atomic_dec(&req
->rq_server
->nr_requests
);
126 if (req
->rq_buffer
&& !(req
->rq_flags
& SMB_REQ_STATIC
))
127 smb_kfree(req
->rq_buffer
);
128 if (req
->rq_trans2buffer
)
129 smb_kfree(req
->rq_trans2buffer
);
130 kmem_cache_free(req_cachep
, req
);
134 * What prevents a rget to race with a rput? The count must never drop to zero
135 * while it is in use. Only rput if it is ok that it is free'd.
137 static void smb_rget(struct smb_request
*req
)
139 atomic_inc(&req
->rq_count
);
141 void smb_rput(struct smb_request
*req
)
143 if (atomic_dec_and_test(&req
->rq_count
)) {
144 list_del_init(&req
->rq_queue
);
145 smb_free_request(req
);
149 /* setup to receive the data part of the SMB */
150 static int smb_setup_bcc(struct smb_request
*req
)
153 req
->rq_rlen
= smb_len(req
->rq_header
) + 4 - req
->rq_bytes_recvd
;
155 if (req
->rq_rlen
> req
->rq_bufsize
) {
156 PARANOIA("Packet too large %d > %d\n",
157 req
->rq_rlen
, req
->rq_bufsize
);
161 req
->rq_iov
[0].iov_base
= req
->rq_buffer
;
162 req
->rq_iov
[0].iov_len
= req
->rq_rlen
;
169 * Prepare a "normal" request structure.
171 static int smb_setup_request(struct smb_request
*req
)
173 int len
= smb_len(req
->rq_header
) + 4;
176 /* if we expect a data part in the reply we set the iov's to read it */
177 if (req
->rq_resp_bcc
)
178 req
->rq_setup_read
= smb_setup_bcc
;
180 /* This tries to support re-using the same request */
181 req
->rq_bytes_sent
= 0;
185 req
->rq_fragment
= 0;
186 if (req
->rq_trans2buffer
)
187 smb_kfree(req
->rq_trans2buffer
);
193 * Prepare a transaction2 request structure
195 static int smb_setup_trans2request(struct smb_request
*req
)
197 struct smb_sb_info
*server
= req
->rq_server
;
199 static unsigned char padding
[4];
201 /* I know the following is very ugly, but I want to build the
202 smb packet as efficiently as possible. */
204 const int smb_parameters
= 15;
205 const int header
= SMB_HEADER_LEN
+ 2 * smb_parameters
+ 2;
206 const int oparam
= ROUND_UP(header
+ 3);
207 const int odata
= ROUND_UP(oparam
+ req
->rq_lparm
);
208 const int bcc
= (req
->rq_data
? odata
+ req
->rq_ldata
:
209 oparam
+ req
->rq_lparm
) - header
;
211 if ((bcc
+ oparam
) > server
->opt
.max_xmit
)
213 smb_setup_header(req
, SMBtrans2
, smb_parameters
, bcc
);
216 * max parameters + max data + max setup == bufsize to make NT4 happy
217 * and not abort the transfer or split into multiple responses. It also
218 * makes smbfs happy as handling packets larger than the buffer size
221 * OS/2 is probably going to hate me for this ...
223 mparam
= SMB_TRANS2_MAX_PARAM
;
224 mdata
= req
->rq_bufsize
- mparam
;
226 mdata
= server
->opt
.max_xmit
- mparam
- 100;
233 /* NT/win2k has ~4k max_xmit, so with this we request more than it wants
234 to return as one SMB. Useful for testing the fragmented trans2
239 WSET(req
->rq_header
, smb_tpscnt
, req
->rq_lparm
);
240 WSET(req
->rq_header
, smb_tdscnt
, req
->rq_ldata
);
241 WSET(req
->rq_header
, smb_mprcnt
, mparam
);
242 WSET(req
->rq_header
, smb_mdrcnt
, mdata
);
243 WSET(req
->rq_header
, smb_msrcnt
, 0); /* max setup always 0 ? */
244 WSET(req
->rq_header
, smb_flags
, 0);
245 DSET(req
->rq_header
, smb_timeout
, 0);
246 WSET(req
->rq_header
, smb_pscnt
, req
->rq_lparm
);
247 WSET(req
->rq_header
, smb_psoff
, oparam
- 4);
248 WSET(req
->rq_header
, smb_dscnt
, req
->rq_ldata
);
249 WSET(req
->rq_header
, smb_dsoff
, req
->rq_data
? odata
- 4 : 0);
250 *(req
->rq_header
+ smb_suwcnt
) = 0x01; /* setup count */
251 *(req
->rq_header
+ smb_suwcnt
+ 1) = 0x00; /* reserved */
252 WSET(req
->rq_header
, smb_setup0
, req
->rq_trans2_command
);
255 req
->rq_iov
[0].iov_base
= (void *) req
->rq_header
;
256 req
->rq_iov
[0].iov_len
= oparam
;
257 req
->rq_iov
[1].iov_base
= (req
->rq_parm
==NULL
) ? padding
: req
->rq_parm
;
258 req
->rq_iov
[1].iov_len
= req
->rq_lparm
;
259 req
->rq_slen
= oparam
+ req
->rq_lparm
;
263 req
->rq_iov
[2].iov_base
= padding
;
264 req
->rq_iov
[2].iov_len
= odata
- oparam
- req
->rq_lparm
;
265 req
->rq_iov
[3].iov_base
= req
->rq_data
;
266 req
->rq_iov
[3].iov_len
= req
->rq_ldata
;
267 req
->rq_slen
= odata
+ req
->rq_ldata
;
270 /* always a data part for trans2 replies */
271 req
->rq_setup_read
= smb_setup_bcc
;
277 * Add a request and tell smbiod to process it
279 int smb_add_request(struct smb_request
*req
)
282 struct smb_sb_info
*server
= req
->rq_server
;
285 smb_setup_request(req
);
286 if (req
->rq_trans2_command
) {
287 if (req
->rq_buffer
== NULL
) {
288 PARANOIA("trans2 attempted without response buffer!\n");
291 result
= smb_setup_trans2request(req
);
296 #ifdef SMB_DEBUG_PACKET_SIZE
300 /* add 'req' to the queue of requests */
301 if (smb_lock_server_interruptible(server
))
305 * Try to send the request as the process. If that fails we queue the
306 * request and let smbiod send it later.
309 /* FIXME: each server has a number on the maximum number of parallel
310 requests. 10, 50 or so. We should not allow more requests to be
312 if (server
->mid
> 0xf000)
314 req
->rq_mid
= server
->mid
++;
315 WSET(req
->rq_header
, smb_mid
, req
->rq_mid
);
318 if (server
->state
== CONN_VALID
) {
319 if (list_empty(&server
->xmitq
))
320 result
= smb_request_send_req(req
);
322 /* Connection lost? */
323 server
->conn_error
= result
;
324 server
->state
= CONN_INVALID
;
328 list_add_tail(&req
->rq_queue
, &server
->xmitq
);
331 if (server
->state
!= CONN_VALID
)
332 smbiod_retry(server
);
334 smb_unlock_server(server
);
338 timeleft
= wait_event_interruptible_timeout(req
->rq_wait
,
339 req
->rq_flags
& SMB_REQ_RECEIVED
, 30*HZ
);
340 if (!timeleft
|| signal_pending(current
)) {
342 * On timeout or on interrupt we want to try and remove the
343 * request from the recvq/xmitq.
345 smb_lock_server(server
);
346 if (!(req
->rq_flags
& SMB_REQ_RECEIVED
)) {
347 list_del_init(&req
->rq_queue
);
350 smb_unlock_server(server
);
354 PARANOIA("request [%p, mid=%d] timed out!\n",
356 VERBOSE("smb_com: %02x\n", *(req
->rq_header
+ smb_com
));
357 VERBOSE("smb_rcls: %02x\n", *(req
->rq_header
+ smb_rcls
));
358 VERBOSE("smb_flg: %02x\n", *(req
->rq_header
+ smb_flg
));
359 VERBOSE("smb_tid: %04x\n", WVAL(req
->rq_header
, smb_tid
));
360 VERBOSE("smb_pid: %04x\n", WVAL(req
->rq_header
, smb_pid
));
361 VERBOSE("smb_uid: %04x\n", WVAL(req
->rq_header
, smb_uid
));
362 VERBOSE("smb_mid: %04x\n", WVAL(req
->rq_header
, smb_mid
));
363 VERBOSE("smb_wct: %02x\n", *(req
->rq_header
+ smb_wct
));
365 req
->rq_rcls
= ERRSRV
;
366 req
->rq_err
= ERRtimeout
;
368 /* Just in case it was "stuck" */
371 VERBOSE("woke up, rcls=%d\n", req
->rq_rcls
);
373 if (req
->rq_rcls
!= 0)
374 req
->rq_errno
= smb_errno(req
);
375 if (signal_pending(current
))
376 req
->rq_errno
= -ERESTARTSYS
;
377 return req
->rq_errno
;
381 * Send a request and place it on the recvq if successfully sent.
382 * Must be called with the server lock held.
384 static int smb_request_send_req(struct smb_request
*req
)
386 struct smb_sb_info
*server
= req
->rq_server
;
389 if (req
->rq_bytes_sent
== 0) {
390 WSET(req
->rq_header
, smb_tid
, server
->opt
.tid
);
391 WSET(req
->rq_header
, smb_pid
, 1);
392 WSET(req
->rq_header
, smb_uid
, server
->opt
.server_uid
);
395 result
= smb_send_request(req
);
396 if (result
< 0 && result
!= -EAGAIN
)
400 if (!(req
->rq_flags
& SMB_REQ_TRANSMITTED
))
403 list_del_init(&req
->rq_queue
);
404 list_add_tail(&req
->rq_queue
, &server
->recvq
);
411 * Sends one request for this server. (smbiod)
412 * Must be called with the server lock held.
413 * Returns: <0 on error
414 * 0 if no request could be completely sent
415 * 1 if all data for one request was sent
417 int smb_request_send_server(struct smb_sb_info
*server
)
419 struct list_head
*head
;
420 struct smb_request
*req
;
423 if (server
->state
!= CONN_VALID
)
426 /* dequeue first request, if any */
428 head
= server
->xmitq
.next
;
429 if (head
!= &server
->xmitq
) {
430 req
= list_entry(head
, struct smb_request
, rq_queue
);
435 result
= smb_request_send_req(req
);
437 server
->conn_error
= result
;
438 list_del_init(&req
->rq_queue
);
439 list_add(&req
->rq_queue
, &server
->xmitq
);
449 * Try to find a request matching this "mid". Typically the first entry will
450 * be the matching one.
452 static struct smb_request
*find_request(struct smb_sb_info
*server
, int mid
)
454 struct list_head
*tmp
;
455 struct smb_request
*req
= NULL
;
457 list_for_each(tmp
, &server
->recvq
) {
458 req
= list_entry(tmp
, struct smb_request
, rq_queue
);
459 if (req
->rq_mid
== mid
) {
466 VERBOSE("received reply with mid %d but no request!\n",
467 WVAL(server
->header
, smb_mid
));
468 server
->rstate
= SMB_RECV_DROP
;
475 * Called when we have read the smb header and believe this is a response.
477 static int smb_init_request(struct smb_sb_info
*server
, struct smb_request
*req
)
481 memcpy(req
->rq_header
, server
->header
, SMB_HEADER_LEN
);
483 wct
= *(req
->rq_header
+ smb_wct
);
485 PARANOIA("wct too large, %d > 20\n", wct
);
486 server
->rstate
= SMB_RECV_DROP
;
490 req
->rq_resp_wct
= wct
;
491 hdrlen
= SMB_HEADER_LEN
+ wct
*2 + 2;
492 VERBOSE("header length: %d smb_wct: %2d\n", hdrlen
, wct
);
494 req
->rq_bytes_recvd
= SMB_HEADER_LEN
;
495 req
->rq_rlen
= hdrlen
;
496 req
->rq_iov
[0].iov_base
= req
->rq_header
;
497 req
->rq_iov
[0].iov_len
= hdrlen
;
499 server
->rstate
= SMB_RECV_PARAM
;
501 #ifdef SMB_DEBUG_PACKET_SIZE
502 add_recv_stats(smb_len(server
->header
));
508 * Reads the SMB parameters
510 static int smb_recv_param(struct smb_sb_info
*server
, struct smb_request
*req
)
514 result
= smb_receive(server
, req
);
517 if (req
->rq_bytes_recvd
< req
->rq_rlen
)
520 VERBOSE("result: %d smb_bcc: %04x\n", result
,
521 WVAL(req
->rq_header
, SMB_HEADER_LEN
+
522 (*(req
->rq_header
+ smb_wct
) * 2)));
525 req
->rq_iov
[0].iov_base
= NULL
;
527 if (req
->rq_callback
)
528 req
->rq_callback(req
);
529 else if (req
->rq_setup_read
)
530 result
= req
->rq_setup_read(req
);
532 server
->rstate
= SMB_RECV_DROP
;
536 server
->rstate
= req
->rq_rlen
> 0 ? SMB_RECV_DATA
: SMB_RECV_END
;
538 req
->rq_bytes_recvd
= 0; // recvd out of the iov
540 VERBOSE("rlen: %d\n", req
->rq_rlen
);
541 if (req
->rq_rlen
< 0) {
542 PARANOIA("Parameters read beyond end of packet!\n");
543 server
->rstate
= SMB_RECV_END
;
552 static int smb_recv_data(struct smb_sb_info
*server
, struct smb_request
*req
)
556 result
= smb_receive(server
, req
);
559 if (req
->rq_bytes_recvd
< req
->rq_rlen
)
561 server
->rstate
= SMB_RECV_END
;
563 VERBOSE("result: %d\n", result
);
568 * Receive a transaction2 response
569 * Return: 0 if the response has been fully read
570 * 1 if there are further "fragments" to read
571 * <0 if there is an error
573 static int smb_recv_trans2(struct smb_sb_info
*server
, struct smb_request
*req
)
575 unsigned char *inbuf
;
576 unsigned int parm_disp
, parm_offset
, parm_count
, parm_tot
;
577 unsigned int data_disp
, data_offset
, data_count
, data_tot
;
578 int hdrlen
= SMB_HEADER_LEN
+ req
->rq_resp_wct
*2 - 2;
580 VERBOSE("handling trans2\n");
582 inbuf
= req
->rq_header
;
583 data_tot
= WVAL(inbuf
, smb_tdrcnt
);
584 parm_tot
= WVAL(inbuf
, smb_tprcnt
);
585 parm_disp
= WVAL(inbuf
, smb_prdisp
);
586 parm_offset
= WVAL(inbuf
, smb_proff
);
587 parm_count
= WVAL(inbuf
, smb_prcnt
);
588 data_disp
= WVAL(inbuf
, smb_drdisp
);
589 data_offset
= WVAL(inbuf
, smb_droff
);
590 data_count
= WVAL(inbuf
, smb_drcnt
);
592 /* Modify offset for the split header/buffer we use */
593 if (data_count
|| data_offset
) {
594 if (unlikely(data_offset
< hdrlen
))
597 data_offset
-= hdrlen
;
599 if (parm_count
|| parm_offset
) {
600 if (unlikely(parm_offset
< hdrlen
))
603 parm_offset
-= hdrlen
;
606 if (parm_count
== parm_tot
&& data_count
== data_tot
) {
608 * This packet has all the trans2 data.
610 * We setup the request so that this will be the common
611 * case. It may be a server error to not return a
612 * response that fits.
614 VERBOSE("single trans2 response "
615 "dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
616 data_count
, parm_count
,
617 data_offset
, parm_offset
);
618 req
->rq_ldata
= data_count
;
619 req
->rq_lparm
= parm_count
;
620 req
->rq_data
= req
->rq_buffer
+ data_offset
;
621 req
->rq_parm
= req
->rq_buffer
+ parm_offset
;
622 if (unlikely(parm_offset
+ parm_count
> req
->rq_rlen
))
624 if (unlikely(data_offset
+ data_count
> req
->rq_rlen
))
629 VERBOSE("multi trans2 response "
630 "frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
632 data_count
, parm_count
,
633 data_offset
, parm_offset
);
635 if (!req
->rq_fragment
) {
638 /* We got the first trans2 fragment */
639 req
->rq_fragment
= 1;
640 req
->rq_total_data
= data_tot
;
641 req
->rq_total_parm
= parm_tot
;
645 buf_len
= data_tot
+ parm_tot
;
646 if (buf_len
> SMB_MAX_PACKET_SIZE
)
649 req
->rq_trans2bufsize
= buf_len
;
650 req
->rq_trans2buffer
= smb_kmalloc(buf_len
, GFP_NOFS
);
651 if (!req
->rq_trans2buffer
)
653 memset(req
->rq_trans2buffer
, 0, buf_len
);
655 req
->rq_parm
= req
->rq_trans2buffer
;
656 req
->rq_data
= req
->rq_trans2buffer
+ parm_tot
;
657 } else if (unlikely(req
->rq_total_data
< data_tot
||
658 req
->rq_total_parm
< parm_tot
))
661 if (unlikely(parm_disp
+ parm_count
> req
->rq_total_parm
||
662 parm_offset
+ parm_count
> req
->rq_rlen
))
664 if (unlikely(data_disp
+ data_count
> req
->rq_total_data
||
665 data_offset
+ data_count
> req
->rq_rlen
))
668 inbuf
= req
->rq_buffer
;
669 memcpy(req
->rq_parm
+ parm_disp
, inbuf
+ parm_offset
, parm_count
);
670 memcpy(req
->rq_data
+ data_disp
, inbuf
+ data_offset
, data_count
);
672 req
->rq_ldata
+= data_count
;
673 req
->rq_lparm
+= parm_count
;
676 * Check whether we've received all of the data. Note that
677 * we use the packet totals -- total lengths might shrink!
679 if (req
->rq_ldata
>= data_tot
&& req
->rq_lparm
>= parm_tot
) {
680 req
->rq_ldata
= data_tot
;
681 req
->rq_lparm
= parm_tot
;
687 printk(KERN_ERR
"smb_trans2: data/param too long, data=%u, parm=%u\n",
691 printk(KERN_ERR
"smb_trans2: couldn't allocate data area of %d bytes\n",
692 req
->rq_trans2bufsize
);
693 req
->rq_errno
= -ENOMEM
;
696 printk(KERN_ERR
"smb_trans2: data/params grew!\n");
699 printk(KERN_ERR
"smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
700 parm_disp
, parm_count
, parm_tot
, parm_offset
);
703 printk(KERN_ERR
"smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
704 data_disp
, data_count
, data_tot
, data_offset
);
706 req
->rq_errno
= -EIO
;
708 return req
->rq_errno
;
712 * State machine for receiving responses. We handle the fact that we can't
713 * read the full response in one try by having states telling us how much we
716 * Must be called with the server lock held (only called from smbiod).
718 * Return: <0 on error
720 int smb_request_recv(struct smb_sb_info
*server
)
722 struct smb_request
*req
= NULL
;
725 if (smb_recv_available(server
) <= 0)
728 VERBOSE("state: %d\n", server
->rstate
);
729 switch (server
->rstate
) {
731 result
= smb_receive_drop(server
);
734 if (server
->rstate
== SMB_RECV_DROP
)
736 server
->rstate
= SMB_RECV_START
;
739 server
->smb_read
= 0;
740 server
->rstate
= SMB_RECV_HEADER
;
742 case SMB_RECV_HEADER
:
743 result
= smb_receive_header(server
);
746 if (server
->rstate
== SMB_RECV_HEADER
)
748 if (! (*(server
->header
+ smb_flg
) & SMB_FLAGS_REPLY
) ) {
749 server
->rstate
= SMB_RECV_REQUEST
;
752 if (server
->rstate
!= SMB_RECV_HCOMPLETE
)
755 case SMB_RECV_HCOMPLETE
:
756 req
= find_request(server
, WVAL(server
->header
, smb_mid
));
759 smb_init_request(server
, req
);
760 req
->rq_rcls
= *(req
->rq_header
+ smb_rcls
);
761 req
->rq_err
= WVAL(req
->rq_header
, smb_err
);
762 if (server
->rstate
!= SMB_RECV_PARAM
)
767 req
= find_request(server
,WVAL(server
->header
,smb_mid
));
770 result
= smb_recv_param(server
, req
);
773 if (server
->rstate
!= SMB_RECV_DATA
)
778 req
= find_request(server
,WVAL(server
->header
,smb_mid
));
781 result
= smb_recv_data(server
, req
);
786 /* We should never be called with any of these states */
788 case SMB_RECV_REQUEST
:
789 server
->rstate
= SMB_RECV_END
;
794 /* We saw an error */
798 if (server
->rstate
!= SMB_RECV_END
)
802 if (req
->rq_trans2_command
&& req
->rq_rcls
== SUCCESS
)
803 result
= smb_recv_trans2(server
, req
);
806 * Response completely read. Drop any extra bytes sent by the server.
807 * (Yes, servers sometimes add extra bytes to responses)
809 VERBOSE("smb_len: %d smb_read: %d\n",
810 server
->smb_len
, server
->smb_read
);
811 if (server
->smb_read
< server
->smb_len
)
812 smb_receive_drop(server
);
814 server
->rstate
= SMB_RECV_START
;
817 list_del_init(&req
->rq_queue
);
818 req
->rq_flags
|= SMB_REQ_RECEIVED
;
820 wake_up_interruptible(&req
->rq_wait
);