/*
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}
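
/*
 * Illustrative only (compiled out, not part of the original file): a vfs
 * entry point typically brackets its work with the get_xid()/free_xid()
 * wrappers around _get_xid()/_free_xid().  The function below and its
 * argument are hypothetical.
 */
#if 0
static int cifs_example_vfs_op(struct inode *inode)
{
	unsigned int xid = get_xid();
	int rc = 0;

	/* ... perform the actual operation, setting rc ... */

	free_xid(xid);
	return rc;
}
#endif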
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}
void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
#ifdef CONFIG_CIFS_DFS_UPCALL
	kfree(buf_to_free->dfs_path);
#endif
	kfree(buf_to_free);
}
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}

	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
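
/*
 * Illustrative only (compiled out, not part of the original file): request
 * and response buffers come from the two mempools above and must be returned
 * with the matching release helper, either directly or via free_rsp_buf()
 * once the buffer type is known.  The function below is hypothetical.
 */
#if 0
static void cifs_example_buf_cycle(void)
{
	struct smb_hdr *req = cifs_small_buf_get();
	void *rsp = NULL;
	int rsp_buftype = CIFS_NO_BUFFER;

	/* ... build the request, send it, receive rsp/rsp_buftype ... */

	cifs_small_buf_release(req);
	free_rsp_buf(rsp_buftype, rsp);
}
#endif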
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
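
/*
 * Illustrative only (compiled out, not part of the original file):
 * header_assemble() fills the fixed SMB1 header of a freshly allocated
 * request buffer; the real code paths reach it via the smb_init()/
 * small_smb_init() helpers.  The standalone snippet below is hypothetical.
 */
#if 0
static void cifs_example_build_hdr(struct cifs_tcon *tcon)
{
	struct smb_hdr *hdr = cifs_small_buf_get();

	/* tree disconnect carries zero setup words in its fixed section */
	header_assemble(hdr, SMB_COM_TREE_DISCONNECT, tcon, 0);

	/* ... append the byte count area and send the frame ... */
	cifs_small_buf_release(hdr);
}
#endif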
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			    TASK_KILLABLE);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}
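
/*
 * Illustrative only (compiled out, not part of the original file): write
 * paths take the writer reference around the actual write so that an
 * in-flight oplock break is fully serviced first.  The function below is
 * hypothetical.
 */
#if 0
static int cifs_example_write(struct cifsInodeInfo *cinode)
{
	int rc = cifs_get_writer(cinode);

	if (rc)
		return rc;

	/* ... issue the write ... */

	cifs_put_writer(cinode);
	return 0;
}
#endif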
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}
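
/*
 * Illustrative only (compiled out, not part of the original file): open
 * paths consult backup_cred() to decide whether to request a backup-intent
 * open for the backupuid/backupgid mount options.  The caller below is
 * hypothetical.
 */
#if 0
static void cifs_example_open_flags(struct cifs_sb_info *cifs_sb,
				    int *create_options)
{
	if (backup_cred(cifs_sb))
		*create_options |= CREATE_OPEN_BACKUP_INTENT;
}
#endif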
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}
void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 *
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
	}
	return rc;
}
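
/*
 * Illustrative only (compiled out, not part of the original file): a caller
 * of parse_dfs_referrals() owns the returned array and releases it with
 * free_dfs_info_array().  The response variables below are hypothetical.
 */
#if 0
static void cifs_example_parse_referrals(struct get_dfs_referral_rsp *rsp,
					 u32 rsp_size,
					 const struct nls_table *nls,
					 const char *search_name)
{
	struct dfs_info3_param *refs = NULL;
	unsigned int num_refs = 0;
	int rc;

	rc = parse_dfs_referrals(rsp, rsp_size, &num_refs, &refs, nls,
				 NO_MAP_UNI_RSVD, search_name, true);
	if (!rc) {
		/* ... use refs[0..num_refs-1] ... */
		free_dfs_info_array(refs, num_refs);
	}
}
#endif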
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken reference on pages.
	 */
	if (ctx->bv) {
		unsigned int i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}
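
/*
 * Illustrative only (compiled out, not part of the original file): the aio
 * context is reference counted; users drop their reference with kref_put()
 * and cifs_aio_ctx_release() runs when the last reference goes away.  The
 * function below is hypothetical.
 */
#if 0
static void cifs_example_aio_ctx(void)
{
	struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();

	if (!ctx)
		return;

	/* ... hand ctx to the read/write path, which takes its own refs ... */

	kref_put(&ctx->refcount, cifs_aio_ctx_release);
}
#endif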
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}
/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}
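
/*
 * Illustrative only (compiled out, not part of the original file): typical
 * use of the pair above to hash a buffer.  The algorithm name and data are
 * hypothetical.
 */
#if 0
static int cifs_example_hash(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *shash = NULL;
	struct sdesc *sdesc = NULL;
	int rc;

	rc = cifs_alloc_hash("md5", &shash, &sdesc);
	if (rc)
		return rc;

	rc = crypto_shash_init(&sdesc->shash);
	if (!rc)
		rc = crypto_shash_update(&sdesc->shash, data, len);
	if (!rc)
		rc = crypto_shash_final(&sdesc->shash, out);

	cifs_free_hash(&shash, &sdesc);
	return rc;
}
#endif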
/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
				unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
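
/*
 * Illustrative only (compiled out, not part of the original file): walking
 * every page of a request with the helper above, e.g. to build a scatter
 * list.  The loop body is hypothetical.
 */
#if 0
static void cifs_example_walk_rqst_pages(struct smb_rqst *rqst)
{
	unsigned int i, len, offset;

	for (i = 0; i < rqst->rq_npages; i++) {
		rqst_page_get_length(rqst, i, &len, &offset);
		/* ... map rqst->rq_pages[i] at offset for len bytes ... */
	}
}
#endif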
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	/* find the end of the host name */
	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
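
/*
 * Illustrative only: for a hypothetical unc of "\\server\share" the helper
 * above leaves *h pointing at "server\share" and sets *len to 6, i.e. it
 * identifies just the host portion without copying it.
 */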
/*
 * copy_path_name - copy src path to dst, possibly truncating
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
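
/*
 * Illustrative only (compiled out, not part of the original file): a typical
 * caller sizes the destination at PATH_MAX and uses the returned length,
 * which already includes the trailing nul, when computing the request size.
 * The snippet below is hypothetical.
 */
#if 0
static int cifs_example_copy_path(const char *path)
{
	char dst[PATH_MAX];
	int name_len = copy_path_name(dst, path);

	/* ... name_len bytes of dst (nul included) go into the request ... */
	return name_len;
}
#endif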