/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}
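/*
 * Illustrative usage sketch (not code from this file): callers are expected
 * to bracket each VFS operation with the get_xid()/free_xid() wrappers
 * (declared in cifsglob.h) rather than call _get_xid()/_free_xid() directly:
 *
 *	unsigned int xid = get_xid();
 *	rc = some_smb_op(xid, tcon, ...);	(hypothetical operation)
 *	free_xid(xid);
 */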
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}
void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
#ifdef CONFIG_CIFS_DFS_UPCALL
	kfree(buf_to_free->dfs_path);
#endif
	kfree(buf_to_free);
}
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
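/*
 * Pairing sketch (illustrative): a buffer obtained here must go back to the
 * pool via cifs_buf_release() once the caller is done with it:
 *
 *	struct smb_hdr *buf = cifs_buf_get();
 *	... assemble request / parse response ...
 *	cifs_buf_release(buf);
 *
 * Note that mempool_alloc() with GFP_NOFS may sleep but does not fail,
 * which is why no NULL check precedes the memset() above.
 */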
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
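/*
 * Caller pattern sketch (illustrative): response handlers record which pool
 * a response came from and let free_rsp_buf() pick the matching release
 * routine, e.g.:
 *
 *	int buf_type = CIFS_SMALL_BUFFER;
 *	...
 *	free_rsp_buf(buf_type, rsp_iov.iov_base);
 */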
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
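/*
 * Worked example of the smb_buf_length computation above (illustrative,
 * assuming the packed on-the-wire layout where sizeof(struct smb_hdr) is
 * 37 bytes including the 4-byte RFC 1001 length field): for word_count = 8,
 *
 *	(2 * 8) + 37 - 4 + 2 = 51
 *
 * i.e. 51 bytes follow the RFC 1001 length field, including the 2-byte
 * BCC field itself but no data area yet.
 */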
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}
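/*
 * Pairing sketch (illustrative): writers bracket their work so that a write
 * never races with servicing of an oplock break:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... perform the write ...
 *	cifs_put_writer(cinode);
 */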
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
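/*
 * Lifecycle sketch (illustrative): the demultiplex thread calls
 * cifs_queue_oplock_break(cfile); the queued handler downgrades the oplock
 * and is expected to end with
 *
 *	cifs_done_oplock_break(cinode);
 *	cifsFileInfo_put(cfile);	(matching the get done at queue time)
 *
 * which clears CIFS_INODE_PENDING_OPLOCK_BREAK and wakes any waiters.
 */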
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}
void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
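/*
 * Caller-side sketch (illustrative): on success the caller owns
 * *target_nodes and must release it with free_dfs_info_array():
 *
 *	struct dfs_info3_param *refs = NULL;
 *	unsigned int num_refs = 0;
 *
 *	rc = parse_dfs_referrals(rsp, rsp_size, &num_refs, &refs,
 *				 nls_codepage, remap, searchName, is_unicode);
 *	if (!rc) {
 *		... use refs[0 .. num_refs - 1] ...
 *		free_dfs_info_array(refs, num_refs);
 *	}
 */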
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken reference on pages.
	 */
	if (ctx->bv) {
		unsigned int i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
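/*
 * Sizing note (illustrative, assuming a 64-bit build where struct bio_vec
 * is 16 bytes): with CIFS_AIO_KMALLOC_LIMIT at 1MiB the bvec array is
 * kmalloc'ed for up to 1048576 / 16 = 65536 pages (256MiB of payload with
 * 4KiB pages); only larger I/Os fall back to vmalloc().
 */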
/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}
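/*
 * Usage sketch (illustrative, assuming the "md5" shash is available):
 *
 *	struct crypto_shash *md5 = NULL;
 *	struct sdesc *sdescmd5 = NULL;
 *
 *	rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
 *	if (!rc) {
 *		crypto_shash_init(&sdescmd5->shash);
 *		crypto_shash_update(&sdescmd5->shash, data, len);
 *		crypto_shash_final(&sdescmd5->shash, hash);
 *	}
 *	cifs_free_hash(&md5, &sdescmd5);
 */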
/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}
/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
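/*
 * Worked example (illustrative): for rq_npages = 3, rq_pagesz = 4096,
 * rq_offset = 100 and rq_tailsz = 500:
 *
 *	page 0: *offset = 100, *len = 4096 - 100 = 3996
 *	page 1: *offset = 0,   *len = 4096
 *	page 2: *offset = 0,   *len = 500 (the tail)
 */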
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	/* find next slash or end of string */
	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
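/*
 * Example (illustrative): for unc = "\\srv1\share\dir", *h ends up pointing
 * at the 's' of "srv1" and *len = 4, i.e. the hostname is exactly "srv1".
 */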
/**
 * copy_path_name - copy src path to dst, possibly truncating
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
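/*
 * Usage sketch (illustrative): since the copy is bounded by PATH_MAX, the
 * destination is typically a PATH_MAX-sized buffer:
 *
 *	char *path = kmalloc(PATH_MAX, GFP_KERNEL);
 *	if (path)
 *		name_len = copy_path_name(path, src);  (includes trailing nul)
 */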
struct super_cb_data {
	void *data;
	struct super_block *sb;
};
static void tcp_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct TCP_Server_Info *server = sd->data;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	if (tcon->ses->server == server)
		sd->sb = sb;
}
static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};

	iterate_supers_type(&cifs_fs_type, f, &sd);

	if (!sd.sb)
		return ERR_PTR(-EINVAL);
	/*
	 * Grab an active reference in order to prevent automounts (DFS links)
	 * from expiring and then freeing up our cifs superblock pointer while
	 * we're doing failover.
	 */
	cifs_sb_active(sd.sb);
	return sd.sb;
}
static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}
void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
#ifdef CONFIG_CIFS_DFS_UPCALL
static void tcon_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct cifs_tcon *tcon = sd->data;
	struct cifs_sb_info *cifs_sb;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	if (tcon->dfs_path && cifs_sb->origin_fullpath &&
	    !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
		sd->sb = sb;
}

static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
{
	return __cifs_get_super(tcon_super_cb, tcon);
}

static inline void cifs_put_tcon_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
#else
static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void cifs_put_tcon_super(struct super_block *sb)
{
}
#endif
int update_super_prepath(struct cifs_tcon *tcon, const char *prefix,
			 size_t prefix_len)
{
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb = cifs_get_tcon_super(tcon);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	cifs_sb = CIFS_SB(sb);

	kfree(cifs_sb->prepath);

	if (*prefix && prefix_len) {
		cifs_sb->prepath = kstrndup(prefix, prefix_len, GFP_ATOMIC);
		if (!cifs_sb->prepath) {
			rc = -ENOMEM;
			goto out;
		}

		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else
		cifs_sb->prepath = NULL;

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

out:
	cifs_put_tcon_super(sb);
	return rc;
}