// SPDX-License-Identifier: LGPL-2.1
/*
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2pdu.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
        unsigned int xid;

        spin_lock(&GlobalMid_Lock);
        GlobalTotalActiveXid++;

        /* keep high water mark for number of simultaneous ops in filesystem */
        if (GlobalTotalActiveXid > GlobalMaxActiveXid)
                GlobalMaxActiveXid = GlobalTotalActiveXid;
        if (GlobalTotalActiveXid > 65000)
                cifs_dbg(FYI, "warning: more than 65000 requests active\n");
        xid = GlobalCurrentXid++;
        spin_unlock(&GlobalMid_Lock);
        return xid;
}
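
/*
 * Illustrative sketch (not part of the original file): callers typically pair
 * xid allocation and release around a single vfs operation, via the
 * get_xid()/free_xid() wrappers that call into _get_xid()/_free_xid().
 * The example operation and helper below are assumptions, not real symbols.
 */
#if 0	/* example only, not built */
static int example_vfs_op(struct cifs_tcon *tcon)
{
        unsigned int xid;
        int rc;

        xid = get_xid();                        /* bumps GlobalTotalActiveXid */
        rc = example_send_smb(xid, tcon);       /* hypothetical helper */
        free_xid(xid);                          /* drops GlobalTotalActiveXid */
        return rc;
}
#endif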
void
_free_xid(unsigned int xid)
{
        spin_lock(&GlobalMid_Lock);
        /* if (GlobalTotalActiveXid == 0)
                BUG(); */
        GlobalTotalActiveXid--;
        spin_unlock(&GlobalMid_Lock);
}
struct cifs_ses *
sesInfoAlloc(void)
{
        struct cifs_ses *ret_buf;

        ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&sesInfoAllocCount);
                spin_lock_init(&ret_buf->ses_lock);
                ret_buf->ses_status = SES_NEW;
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
                spin_lock_init(&ret_buf->iface_lock);
                INIT_LIST_HEAD(&ret_buf->iface_list);
                spin_lock_init(&ret_buf->chan_lock);
        }
        return ret_buf;
}
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
        struct cifs_server_iface *iface = NULL, *niface = NULL;

        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
                return;
        }

        unload_nls(buf_to_free->local_nls);
        atomic_dec(&sesInfoAllocCount);
        kfree(buf_to_free->serverOS);
        kfree(buf_to_free->serverDomain);
        kfree(buf_to_free->serverNOS);
        kfree_sensitive(buf_to_free->password);
        kfree_sensitive(buf_to_free->password2);
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree_sensitive(buf_to_free->auth_key.response);
        spin_lock(&buf_to_free->iface_lock);
        list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
                                 iface_head)
                kref_put(&iface->refcount, release_iface);
        spin_unlock(&buf_to_free->iface_lock);
        kfree_sensitive(buf_to_free);
}
struct cifs_tcon *
tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
{
        struct cifs_tcon *ret_buf;
        static atomic_t tcon_debug_id;

        ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
        if (!ret_buf)
                return NULL;

        if (dir_leases_enabled == true) {
                ret_buf->cfids = init_cached_dirs();
                if (!ret_buf->cfids) {
                        kfree(ret_buf);
                        return NULL;
                }
        }
        /* else ret_buf->cfids is already set to NULL above */

        atomic_inc(&tconInfoAllocCount);
        ret_buf->status = TID_NEW;
        ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
        ret_buf->tc_count = 1;
        spin_lock_init(&ret_buf->tc_lock);
        INIT_LIST_HEAD(&ret_buf->openFileList);
        INIT_LIST_HEAD(&ret_buf->tcon_list);
        spin_lock_init(&ret_buf->open_file_lock);
        spin_lock_init(&ret_buf->stat_lock);
        atomic_set(&ret_buf->num_local_opens, 0);
        atomic_set(&ret_buf->num_remote_opens, 0);
        ret_buf->stats_from_time = ktime_get_real_seconds();
#ifdef CONFIG_CIFS_FSCACHE
        mutex_init(&ret_buf->fscache_lock);
#endif
        trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
#ifdef CONFIG_CIFS_DFS_UPCALL
        INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

        return ret_buf;
}
void
tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
        if (tcon == NULL) {
                cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
                return;
        }
        trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
        free_cached_dirs(tcon->cfids);
        atomic_dec(&tconInfoAllocCount);
        kfree(tcon->nativeFileSystem);
        kfree_sensitive(tcon->password);
        kfree(tcon->origin_fullpath);
        kfree(tcon);
}
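
/*
 * Illustrative sketch (not part of the original file): tcon_info_alloc() and
 * tconInfoFree() are paired, and both take an smb3_tcon_ref_trace value so the
 * tcon refcount tracepoint can attribute the get/put. The trace enumerators
 * below are placeholders, not real values from the headers.
 */
#if 0	/* example only, not built */
static void example_tcon_lifetime(void)
{
        struct cifs_tcon *tcon;

        tcon = tcon_info_alloc(true, example_trace_alloc /* placeholder */);
        if (!tcon)
                return;
        /* ... use tcon ... */
        tconInfoFree(tcon, example_trace_free /* placeholder */);
}
#endif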
struct smb_hdr *
cifs_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;
        /*
         * SMB2 header is bigger than CIFS one - no problems to clean some
         * more bytes for CIFS.
         */
        size_t buf_size = sizeof(struct smb2_hdr);

        /*
         * We could use negotiated size instead of max_msgsize -
         * but it may be more efficient to always alloc same size
         * albeit slightly larger than necessary and maxbuffersize
         * defaults to this and can not be bigger.
         */
        ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

        /* clear the first few header bytes */
        /* for most paths, more is cleared in header_assemble */
        memset(ret_buf, 0, buf_size + 3);
        atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

        return ret_buf;
}
void
cifs_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
                return;
        }
        mempool_free(buf_to_free, cifs_req_poolp);

        atomic_dec(&buf_alloc_count);
        return;
}
struct smb_hdr *
cifs_small_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;

        /* We could use negotiated size instead of max_msgsize -
           but it may be more efficient to always alloc same size
           albeit slightly larger than necessary and maxbuffersize
           defaults to this and can not be bigger */
        ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
        /* No need to clear memory here, cleared in header assemble */
        /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
        atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

        return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
                return;
        }
        mempool_free(buf_to_free, cifs_sm_req_poolp);

        atomic_dec(&small_buf_alloc_count);
        return;
}
void
free_rsp_buf(int resp_buftype, void *rsp)
{
        if (resp_buftype == CIFS_SMALL_BUFFER)
                cifs_small_buf_release(rsp);
        else if (resp_buftype == CIFS_LARGE_BUFFER)
                cifs_buf_release(rsp);
}
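
/*
 * Illustrative sketch (not part of the original file): request buffers come
 * from the two mempools above and must be returned to the matching pool,
 * either directly or via free_rsp_buf() with the right buffer type.
 */
#if 0	/* example only, not built */
static void example_buf_usage(void)
{
        struct smb_hdr *big = cifs_buf_get();          /* cifs_req_poolp */
        struct smb_hdr *small = cifs_small_buf_get();  /* cifs_sm_req_poolp */

        /* ... build and send requests ... */

        cifs_buf_release(big);
        free_rsp_buf(CIFS_SMALL_BUFFER, small); /* same as cifs_small_buf_release() */
}
#endif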
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units */)
{
        char *temp = (char *) buffer;

        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

        buffer->smb_buf_length = cpu_to_be32(
            (2 * word_count) + sizeof(struct smb_hdr) -
            4 /* RFC 1001 length field does not count */ +
            2 /* for bcc field itself */) ;

        buffer->Protocol[0] = 0xFF;
        buffer->Protocol[1] = 'S';
        buffer->Protocol[2] = 'M';
        buffer->Protocol[3] = 'B';
        buffer->Command = smb_command;
        buffer->Flags = 0x00;   /* case sensitive */
        buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
        buffer->Pid = cpu_to_le16((__u16)current->tgid);
        buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
        if (treeCon) {
                buffer->Tid = treeCon->tid;
                if (treeCon->ses) {
                        if (treeCon->ses->capabilities & CAP_UNICODE)
                                buffer->Flags2 |= SMBFLG2_UNICODE;
                        if (treeCon->ses->capabilities & CAP_STATUS32)
                                buffer->Flags2 |= SMBFLG2_ERR_STATUS;

                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        if (treeCon->ses->server)
                                buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
                if (treeCon->nocase)
                        buffer->Flags |= SMBFLG_CASELESS;
                if ((treeCon->ses) && (treeCon->ses->server))
                        if (treeCon->ses->server->sign)
                                buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }

        /* endian conversion of flags is now done just before sending */
        buffer->WordCount = (char) word_count;
        return;
}
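
/*
 * Illustrative sketch (not part of the original file): a caller typically
 * grabs a small buffer and lets header_assemble() fill the fixed SMB1 header.
 * For word_count == 0 the length field works out to
 * sizeof(struct smb_hdr) - 4 (RFC 1001 header) + 2 (the bcc field itself).
 * The command constant used below is only an example.
 */
#if 0	/* example only, not built */
static void example_build_header(struct cifs_tcon *tcon)
{
        struct smb_hdr *hdr = cifs_small_buf_get();

        header_assemble(hdr, SMB_COM_LOCKING_ANDX, tcon, 0 /* word_count */);
        /* ... append wct/bcc payload and send ... */
        cifs_small_buf_release(hdr);
}
#endif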
static int
check_smb_hdr(struct smb_hdr *smb)
{
        /* does it have the right SMB "signature" ? */
        if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
                cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
                         *(unsigned int *)smb->Protocol);
                return 1;
        }

        /* if it's a response then accept */
        if (smb->Flags & SMBFLG_RESPONSE)
                return 0;

        /* only one valid case where server sends us request */
        if (smb->Command == SMB_COM_LOCKING_ANDX)
                return 0;

        cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
                 get_mid(smb));
        return 1;
}
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
        struct smb_hdr *smb = (struct smb_hdr *)buf;
        __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
        __u32 clc_len;  /* calculated length */

        cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
                 total_read, rfclen);

        /* is this frame too small to even get to a BCC? */
        if (total_read < 2 + sizeof(struct smb_hdr)) {
                if ((total_read >= sizeof(struct smb_hdr) - 1)
                            && (smb->Status.CifsError != 0)) {
                        /* it's an error return */
                        smb->WordCount = 0;
                        /* some error cases do not return wct and bcc */
                        return 0;
                } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
                                (smb->WordCount == 0)) {
                        char *tmp = (char *)smb;
                        /* Need to work around a bug in two servers here */
                        /* First, check if the part of bcc they sent was zero */
                        if (tmp[sizeof(struct smb_hdr)] == 0) {
                                /* some servers return only half of bcc
                                 * on simple responses (wct, bcc both zero)
                                 * in particular have seen this on
                                 * ulogoffX and FindClose. This leaves
                                 * one byte of bcc potentially uninitialized
                                 */
                                /* zero rest of bcc */
                                tmp[sizeof(struct smb_hdr)+1] = 0;
                                return 0;
                        }
                        cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
                } else {
                        cifs_dbg(VFS, "Length less than smb header size\n");
                }
                return -EIO;
        } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
                cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
                         __func__, smb->WordCount);
                return -EIO;
        }

        /* otherwise, there is enough to get to the BCC */
        if (check_smb_hdr(smb))
                return -EIO;
        clc_len = smbCalcSize(smb);

        if (4 + rfclen != total_read) {
                cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
                         rfclen);
                return -EIO;
        }

        if (4 + rfclen != clc_len) {
                __u16 mid = get_mid(smb);
                /* check if bcc wrapped around for large read responses */
                if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
                        /* check if lengths match mod 64K */
                        if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
                                return 0; /* bcc wrapped */
                }
                cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
                         clc_len, 4 + rfclen, mid);

                if (4 + rfclen < clc_len) {
                        cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                } else if (rfclen > clc_len + 512) {
                        /*
                         * Some servers (Windows XP in particular) send more
                         * data than the lengths in the SMB packet would
                         * indicate on certain calls (byte range locks and
                         * trans2 find first calls in particular). While the
                         * client can handle such a frame by ignoring the
                         * trailing data, we choose to limit the amount of
                         * extra data to 512 bytes.
                         */
                        cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                }
        }
        return 0;
}
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
        struct smb_hdr *buf = (struct smb_hdr *)buffer;
        struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
        struct TCP_Server_Info *pserver;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct cifsFileInfo *netfile;

        cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
        if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
           (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
                struct smb_com_transaction_change_notify_rsp *pSMBr =
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
                size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);

                        if (data_offset >
                            len - sizeof(struct file_notify_information)) {
                                cifs_dbg(FYI, "Invalid data_offset %u\n",
                                         data_offset);
                                return true;
                        }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
                                 pnotify->FileName, pnotify->Action);
                        /*   cifs_dump_mem("Rcvd notify Data: ",buf,
                                sizeof(struct smb_hdr)+60); */
                        return true;
                }
                if (pSMBr->hdr.Status.CifsError) {
                        cifs_dbg(FYI, "notify err 0x%x\n",
                                 pSMBr->hdr.Status.CifsError);
                        return true;
                }
                return false;
        }
        if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
                return false;
        if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
                /* no sense logging error on invalid handle on oplock
                   break - harmless race between close request and oplock
                   break response is expected from time to time writing out
                   large dirty files cached on the client */
                if ((NT_STATUS_INVALID_HANDLE) ==
                   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
                        cifs_dbg(FYI, "Invalid handle on oplock break\n");
                        return true;
                } else if (ERRbadfid ==
                   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
                        return true;
                } else {
                        return false; /* on valid oplock brk we get "request" */
                }
        }
        if (pSMB->hdr.WordCount != 8)
                return false;

        cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
                 pSMB->LockType, pSMB->OplockLevel);
        if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
                return false;

        /* If server is a channel, select the primary channel */
        pserver = SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
                if (cifs_ses_exiting(ses))
                        continue;
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        if (tcon->tid != buf->Tid)
                                continue;

                        cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
                        spin_lock(&tcon->open_file_lock);
                        list_for_each_entry(netfile, &tcon->openFileList, tlist) {
                                if (pSMB->Fid != netfile->fid.netfid)
                                        continue;

                                cifs_dbg(FYI, "file id match, oplock break\n");
                                pCifsInode = CIFS_I(d_inode(netfile->dentry));

                                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
                                        &pCifsInode->flags);

                                netfile->oplock_epoch = 0;
                                netfile->oplock_level = pSMB->OplockLevel;
                                netfile->oplock_break_cancelled = false;
                                cifs_queue_oplock_break(netfile);

                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&tcon->open_file_lock);
                        spin_unlock(&cifs_tcp_ses_lock);
                        cifs_dbg(FYI, "No matching file for oplock break\n");
                        return true;
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
        return true;
}
void
dump_smb(void *buf, int smb_buf_length)
{
        if (traceSMB == 0)
                return;

        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
                       smb_buf_length, true);
}
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                struct cifs_tcon *tcon = NULL;

                if (cifs_sb->master_tlink)
                        tcon = cifs_sb_master_tcon(cifs_sb);

                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
                cifs_sb->mnt_cifs_serverino_autodisabled = true;
                cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
                         tcon ? tcon->tree_name : "new server");
                cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
                cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
        }
}
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
        oplock &= 0xF;

        if (oplock == OPLOCK_EXCLUSIVE) {
                cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
                         &cinode->netfs.inode);
        } else if (oplock == OPLOCK_READ) {
                cinode->oplock = CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
                         &cinode->netfs.inode);
        } else
                cinode->oplock = 0;
}
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
        int rc;

start:
        rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
                         TASK_KILLABLE);
        if (rc)
                return rc;

        spin_lock(&cinode->writers_lock);
        if (!cinode->writers)
                set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
        cinode->writers++;
        /* Check to see if we have started servicing an oplock break */
        if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
                cinode->writers--;
                if (cinode->writers == 0) {
                        clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
                }
                spin_unlock(&cinode->writers_lock);
                goto start;
        }
        spin_unlock(&cinode->writers_lock);
        return 0;
}
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
        spin_lock(&cinode->writers_lock);
        cinode->writers--;
        if (cinode->writers == 0) {
                clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
        }
        spin_unlock(&cinode->writers_lock);
}
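
/*
 * Illustrative sketch (not part of the original file): writers bracket their
 * work with cifs_get_writer()/cifs_put_writer() so that an oplock break
 * handler can wait for in-flight writes to drain before downgrading caching.
 */
#if 0	/* example only, not built */
static int example_write_path(struct cifsInodeInfo *cinode)
{
        int rc = cifs_get_writer(cinode);

        if (rc)
                return rc;      /* interrupted while waiting on a pending break */
        /* ... issue the write ... */
        cifs_put_writer(cinode);
        return 0;
}
#endif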
/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
        /*
         * Bump the handle refcount now while we hold the
         * open_file_lock to enforce the validity of it for the oplock
         * break handler. The matching put is done at the end of the
         * handler.
         */
        cifsFileInfo_get(cfile);

        queue_work(cifsoplockd_wq, &cfile->oplock_break);
}
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
                if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
                        return true;
        }
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
                if (in_group_p(cifs_sb->ctx->backupgid))
                        return true;
        }

        return false;
}
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
        spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
        list_del(&open->olist);
        spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
                             struct cifs_pending_open *open)
{
        memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
        open->oplock = CIFS_OPLOCK_NO_CHANGE;
        open->tlink = tlink;
        fid->pending_open = open;
        list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}
void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
                      struct cifs_pending_open *open)
{
        spin_lock(&tlink_tcon(tlink)->open_file_lock);
        cifs_add_pending_open_locked(fid, tlink, open);
        spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
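
/*
 * Illustrative sketch (not part of the original file): open paths register a
 * cifs_pending_open before the open goes on the wire and remove it once the
 * server handle is established (or the open fails), so lease breaks arriving
 * in between can be matched against it.
 */
#if 0	/* example only, not built */
static void example_pending_open(struct cifs_fid *fid, struct tcon_link *tlink)
{
        struct cifs_pending_open open;

        cifs_add_pending_open(fid, tlink, &open);
        /* ... send the create/open request ... */
        cifs_del_pending_open(&open);
}
#endif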
/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
        struct cifs_deferred_close *dclose;

        list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
                if ((dclose->netfid == cfile->fid.netfid) &&
                        (dclose->persistent_fid == cfile->fid.persistent_fid) &&
                        (dclose->volatile_fid == cfile->fid.volatile_fid)) {
                        *pdclose = dclose;
                        return true;
                }
        }
        return false;
}
/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
        bool is_deferred = false;
        struct cifs_deferred_close *pdclose;

        is_deferred = cifs_is_deferred_close(cfile, &pdclose);
        if (is_deferred) {
                kfree(dclose);
                return;
        }

        dclose->tlink = cfile->tlink;
        dclose->netfid = cfile->fid.netfid;
        dclose->persistent_fid = cfile->fid.persistent_fid;
        dclose->volatile_fid = cfile->fid.volatile_fid;
        list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}
/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
        bool is_deferred = false;
        struct cifs_deferred_close *dclose;

        is_deferred = cifs_is_deferred_close(cfile, &dclose);
        if (!is_deferred)
                return;
        list_del(&dclose->dlist);
        kfree(dclose);
}
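
/*
 * Illustrative sketch (not part of the original file): all three helpers above
 * assume the caller holds cinode->deferred_lock, so a typical close path that
 * wants to defer the on-the-wire close looks roughly like this.
 */
#if 0	/* example only, not built */
static void example_defer_close(struct cifsFileInfo *cfile,
                                struct cifs_deferred_close *dclose)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

        spin_lock(&cinode->deferred_lock);
        cifs_add_deferred_close(cfile, dclose);
        spin_unlock(&cinode->deferred_lock);
}
#endif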
void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *cfile = NULL;
        struct file_list *tmp_list, *tmp_next_list;
        LIST_HEAD(file_head);

        if (cifs_inode == NULL)
                return;

        spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
                if (delayed_work_pending(&cfile->deferred)) {
                        if (cancel_delayed_work(&cfile->deferred)) {
                                spin_lock(&cifs_inode->deferred_lock);
                                cifs_del_deferred_close(cfile);
                                spin_unlock(&cifs_inode->deferred_lock);

                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
                                        break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
                }
        }
        spin_unlock(&cifs_inode->open_file_lock);

        list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
                _cifsFileInfo_put(tmp_list->cfile, false, false);
                list_del(&tmp_list->list);
                kfree(tmp_list);
        }
}
void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *cfile;
        struct file_list *tmp_list, *tmp_next_list;
        LIST_HEAD(file_head);

        spin_lock(&tcon->open_file_lock);
        list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                if (delayed_work_pending(&cfile->deferred)) {
                        if (cancel_delayed_work(&cfile->deferred)) {
                                spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
                                cifs_del_deferred_close(cfile);
                                spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
                                        break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
                }
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
                _cifsFileInfo_put(tmp_list->cfile, true, false);
                list_del(&tmp_list->list);
                kfree(tmp_list);
        }
}
void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
        struct cifsFileInfo *cfile;
        struct file_list *tmp_list, *tmp_next_list;
        void *page;
        const char *full_path;
        LIST_HEAD(file_head);

        page = alloc_dentry_path();
        spin_lock(&tcon->open_file_lock);
        list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                full_path = build_path_from_dentry(cfile->dentry, page);
                if (strstr(full_path, path)) {
                        if (delayed_work_pending(&cfile->deferred)) {
                                if (cancel_delayed_work(&cfile->deferred)) {
                                        spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
                                        cifs_del_deferred_close(cfile);
                                        spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

                                        tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                        if (tmp_list == NULL)
                                                break;
                                        tmp_list->cfile = cfile;
                                        list_add_tail(&tmp_list->list, &file_head);
                                }
                        }
                }
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
                _cifsFileInfo_put(tmp_list->cfile, true, false);
                list_del(&tmp_list->list);
                kfree(tmp_list);
        }
        free_dentry_path(page);
}
/*
 * If a dentry has been deleted, all corresponding open handles should know that
 * so that we do not defer closing them.
 */
void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
                                             const char *path)
{
        struct cifsFileInfo *cfile;
        void *page;
        const char *full_path;
        struct cifsInodeInfo *cinode = CIFS_I(inode);

        page = alloc_dentry_path();
        spin_lock(&cinode->open_file_lock);

        /*
         * note: we need to construct path from dentry and compare only if the
         * inode has any hardlinks. When number of hardlinks is 1, we can just
         * mark all open handles since they are going to be from the same file.
         */
        if (inode->i_nlink > 1) {
                list_for_each_entry(cfile, &cinode->openFileList, flist) {
                        full_path = build_path_from_dentry(cfile->dentry, page);
                        if (!IS_ERR(full_path) && strcmp(full_path, path) == 0)
                                cfile->status_file_deleted = true;
                }
        } else {
                list_for_each_entry(cfile, &cinode->openFileList, flist)
                        cfile->status_file_deleted = true;
        }
        spin_unlock(&cinode->open_file_lock);
        free_dentry_path(page);
}
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
                    unsigned int *num_of_nodes,
                    struct dfs_info3_param **target_nodes,
                    const struct nls_table *nls_codepage, int remap,
                    const char *searchName, bool is_unicode)
{
        int i, rc = 0;
        char *data_end;
        struct dfs_referral_level_3 *ref;

        *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

        if (*num_of_nodes < 1) {
                cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
                         *num_of_nodes);
                rc = -EINVAL;
                goto parse_DFS_referrals_exit;
        }

        ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
        if (ref->VersionNumber != cpu_to_le16(3)) {
                cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
                         le16_to_cpu(ref->VersionNumber));
                rc = -EINVAL;
                goto parse_DFS_referrals_exit;
        }

        /* get the upper boundary of the resp buffer */
        data_end = (char *)rsp + rsp_size;

        cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
                 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

        *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
                                GFP_KERNEL);
        if (*target_nodes == NULL) {
                rc = -ENOMEM;
                goto parse_DFS_referrals_exit;
        }

        /* collect necessary data from referrals */
        for (i = 0; i < *num_of_nodes; i++) {
                char *temp;
                int max_len;
                struct dfs_info3_param *node = (*target_nodes)+i;

                node->flags = le32_to_cpu(rsp->DFSFlags);
                if (is_unicode) {
                        __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
                                                GFP_KERNEL);
                        if (tmp == NULL) {
                                rc = -ENOMEM;
                                goto parse_DFS_referrals_exit;
                        }
                        cifsConvertToUTF16((__le16 *) tmp, searchName,
                                           PATH_MAX, nls_codepage, remap);
                        node->path_consumed = cifs_utf16_bytes(tmp,
                                        le16_to_cpu(rsp->PathConsumed),
                                        nls_codepage);
                        kfree(tmp);
                } else
                        node->path_consumed = le16_to_cpu(rsp->PathConsumed);

                node->server_type = le16_to_cpu(ref->ServerType);
                node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

                /* copy DfsPath */
                temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
                max_len = data_end - temp;
                node->path_name = cifs_strndup_from_utf16(temp, max_len,
                                                is_unicode, nls_codepage);
                if (!node->path_name) {
                        rc = -ENOMEM;
                        goto parse_DFS_referrals_exit;
                }

                /* copy link target UNC */
                temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
                max_len = data_end - temp;
                node->node_name = cifs_strndup_from_utf16(temp, max_len,
                                                is_unicode, nls_codepage);
                if (!node->node_name) {
                        rc = -ENOMEM;
                        goto parse_DFS_referrals_exit;
                }

                node->ttl = le32_to_cpu(ref->TimeToLive);

                ref++;
        }

parse_DFS_referrals_exit:
        if (rc) {
                free_dfs_info_array(*target_nodes, *num_of_nodes);
                *target_nodes = NULL;
                *num_of_nodes = 0;
        }
        return rc;
}
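
/*
 * Illustrative sketch (not part of the original file): a caller hands the raw
 * referral response to parse_dfs_referrals() and owns the resulting array,
 * releasing it with free_dfs_info_array(). The UNC string and remap value are
 * examples only.
 */
#if 0	/* example only, not built */
static void example_parse_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
                                    const struct nls_table *nls)
{
        struct dfs_info3_param *refs = NULL;
        unsigned int nrefs = 0;

        if (!parse_dfs_referrals(rsp, rsp_size, &nrefs, &refs, nls,
                                 NO_MAP_UNI_RSVD, "\\server\\share", true)) {
                /* ... pick a target from refs[0..nrefs-1] ... */
        }
        free_dfs_info_array(refs, nrefs);
}
#endif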
/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
{
        struct crypto_shash *alg = NULL;

        if (*sdesc)
                return 0;

        alg = crypto_alloc_shash(name, 0, 0);
        if (IS_ERR(alg)) {
                cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
                *sdesc = NULL;
                return PTR_ERR(alg);
        }

        *sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
        if (*sdesc == NULL) {
                cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
                crypto_free_shash(alg);
                return -ENOMEM;
        }

        (*sdesc)->tfm = alg;
        return 0;
}
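
/*
 * Illustrative sketch (not part of the original file): a typical user
 * allocates the descriptor once, feeds data through the regular crypto shash
 * API, and releases it with cifs_free_hash(). "md5" is just an example
 * algorithm name.
 */
#if 0	/* example only, not built */
static int example_hash_buffer(const u8 *data, size_t len, u8 *out)
{
        struct shash_desc *desc = NULL;
        int rc;

        rc = cifs_alloc_hash("md5", &desc);
        if (rc)
                return rc;
        rc = crypto_shash_digest(desc, data, len, out);
        cifs_free_hash(&desc);
        return rc;
}
#endif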
/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
void
cifs_free_hash(struct shash_desc **sdesc)
{
        if (unlikely(!sdesc) || !*sdesc)
                return;

        if ((*sdesc)->tfm) {
                crypto_free_shash((*sdesc)->tfm);
                (*sdesc)->tfm = NULL;
        }

        kfree_sensitive(*sdesc);
        *sdesc = NULL;
}
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
        const char *end;

        /* skip initial slashes */
        while (*unc && (*unc == '\\' || *unc == '/'))
                unc++;

        end = unc;

        /* find next slash or end of string */
        while (*end && !(*end == '\\' || *end == '/'))
                end++;

        *h = unc;
        *len = end - unc;
}
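
/*
 * Illustrative sketch (not part of the original file): extract_unc_hostname()
 * returns a pointer/length pair into the original string rather than a copy,
 * e.g. for "\\srv1\share" it yields h pointing at "srv1\share" with len == 4.
 */
#if 0	/* example only, not built */
static void example_extract_host(void)
{
        const char *host;
        size_t len;

        extract_unc_hostname("\\\\srv1\\share", &host, &len);
        /* host points at "srv1\share", len == 4 */
}
#endif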
/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
        int name_len;

        /*
         * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
         * will truncate and strlen(dst) will be PATH_MAX-1
         */
        name_len = strscpy(dst, src, PATH_MAX);
        if (WARN_ON_ONCE(name_len < 0))
                name_len = PATH_MAX-1;

        /* we count the trailing nul */
        name_len++;
        return name_len;
}
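
/*
 * Illustrative sketch (not part of the original file): the destination buffer
 * is expected to be at least PATH_MAX bytes, and the return value counts the
 * trailing nul, which callers use when sizing wire requests.
 */
#if 0	/* example only, not built */
static void example_copy_path(const char *src)
{
        char dst[PATH_MAX];
        int name_len = copy_path_name(dst, src); /* strlen(dst) + 1, capped at PATH_MAX */

        (void)name_len;
}
#endif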
struct super_cb_data {
        void *data;
        struct super_block *sb;
};

static void tcon_super_cb(struct super_block *sb, void *arg)
{
        struct super_cb_data *sd = arg;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *t1 = sd->data, *t2;

        if (sd->sb)
                return;

        cifs_sb = CIFS_SB(sb);
        t2 = cifs_sb_master_tcon(cifs_sb);

        spin_lock(&t2->tc_lock);
        if ((t1->ses == t2->ses ||
             t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
            t1->ses->server == t2->ses->server &&
            t2->origin_fullpath &&
            dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
                sd->sb = sb;
        spin_unlock(&t2->tc_lock);
}
static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
                                            void *data)
{
        struct super_cb_data sd = {
                .data = data,
                .sb = NULL,
        };
        struct file_system_type **fs_type = (struct file_system_type *[]) {
                &cifs_fs_type, &smb3_fs_type, NULL,
        };

        for (; *fs_type; fs_type++) {
                iterate_supers_type(*fs_type, f, &sd);
                if (sd.sb) {
                        /*
                         * Grab an active reference in order to prevent automounts (DFS links)
                         * from expiring and then freeing up our cifs superblock pointer while
                         * we're doing failover.
                         */
                        cifs_sb_active(sd.sb);
                        return sd.sb;
                }
        }
        pr_warn_once("%s: could not find dfs superblock\n", __func__);
        return ERR_PTR(-EINVAL);
}
static void __cifs_put_super(struct super_block *sb)
{
        if (!IS_ERR_OR_NULL(sb))
                cifs_sb_deactive(sb);
}
struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
{
        spin_lock(&tcon->tc_lock);
        if (!tcon->origin_fullpath) {
                spin_unlock(&tcon->tc_lock);
                return ERR_PTR(-ENOENT);
        }
        spin_unlock(&tcon->tc_lock);
        return __cifs_get_super(tcon_super_cb, tcon);
}
void cifs_put_tcp_super(struct super_block *sb)
{
        __cifs_put_super(sb);
}
#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
                    const char *share, size_t share_len,
                    bool *result)
{
        int rc;
        char *target;
        struct sockaddr_storage ss;

        *result = false;

        target = kzalloc(share_len + 3, GFP_KERNEL);
        if (!target)
                return -ENOMEM;

        scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

        cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

        rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
        kfree(target);
        if (rc < 0)
                return rc;

        spin_lock(&server->srv_lock);
        *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
        spin_unlock(&server->srv_lock);
        cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
        return 0;
}
int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
        int rc;

        kfree(cifs_sb->prepath);
        cifs_sb->prepath = NULL;

        if (prefix && *prefix) {
                cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
                if (IS_ERR(cifs_sb->prepath)) {
                        rc = PTR_ERR(cifs_sb->prepath);
                        cifs_sb->prepath = NULL;
                        return rc;
                }
                if (cifs_sb->prepath)
                        convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
        }

        cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
        return 0;
}
#endif /* CONFIG_CIFS_DFS_UPCALL */
/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
                                   struct cifs_tcon *tcon,
                                   struct cifs_sb_info *cifs_sb,
                                   const char *full_path,
                                   bool *islink)
{
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_ses *ses = tcon->ses;
        size_t len;
        char *path;
        char *ref_path;

        *islink = false;

        /*
         * Fast path - skip check when @full_path doesn't have a prefix path to
         * look up or tcon is not DFS.
         */
        if (strlen(full_path) < 2 || !cifs_sb ||
            (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
            !is_tcon_dfs(tcon))
                return 0;

        spin_lock(&server->srv_lock);
        if (!server->leaf_fullpath) {
                spin_unlock(&server->srv_lock);
                return 0;
        }
        spin_unlock(&server->srv_lock);

        /*
         * Slow path - tcon is DFS and @full_path has prefix path, so attempt
         * to get a referral to figure out whether it is a DFS link.
         */
        len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
        path = kmalloc(len, GFP_KERNEL);
        if (!path)
                return -ENOMEM;

        scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
        ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
                                            cifs_remap(cifs_sb));
        kfree(path);

        if (IS_ERR(ref_path)) {
                if (PTR_ERR(ref_path) != -EINVAL)
                        return PTR_ERR(ref_path);
        } else {
                struct dfs_info3_param *refs = NULL;
                int num_refs = 0;

                /*
                 * XXX: we are not using dfs_cache_find() here because we might
                 * end up filling all the DFS cache and thus potentially
                 * removing cached DFS targets that the client would eventually
                 * need during failover.
                 */
                ses = CIFS_DFS_ROOT_SES(ses);
                if (ses->server->ops->get_dfs_refer &&
                    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
                                                     &num_refs, cifs_sb->local_nls,
                                                     cifs_remap(cifs_sb)))
                        *islink = refs[0].server_type == DFS_TYPE_LINK;
                free_dfs_info_array(refs, num_refs);
                kfree(ref_path);
        }
        return 0;
}
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
        int timeout = 10;
        int rc;

        spin_lock(&server->srv_lock);
        if (server->tcpStatus != CifsNeedReconnect) {
                spin_unlock(&server->srv_lock);
                return 0;
        }
        timeout *= server->nr_targets;
        spin_unlock(&server->srv_lock);

        /*
         * Give the demultiplex thread up to 10 seconds for each target available for
         * reconnect -- should be greater than cifs socket timeout which is 7
         * seconds.
         *
         * On "soft" mounts we wait once. Hard mounts keep retrying until
         * process is killed or server comes back on-line.
         */
        do {
                rc = wait_event_interruptible_timeout(server->response_q,
                                                      (server->tcpStatus != CifsNeedReconnect),
                                                      timeout * HZ);
                if (rc < 0) {
                        cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
                                 __func__);
                        return -ERESTARTSYS;
                }

                /* are we still trying to reconnect? */
                spin_lock(&server->srv_lock);
                if (server->tcpStatus != CifsNeedReconnect) {
                        spin_unlock(&server->srv_lock);
                        return 0;
                }
                spin_unlock(&server->srv_lock);
        } while (retry);

        cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
        return -EHOSTDOWN;
}