// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						     const char *path,
						     bool lookup_only,
						     __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	list_add(&cfid->entry, &cfids->entries);
	cfids->num_entries++;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

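/*
 * Walk @path component by component from the superblock root and return the
 * corresponding dentry, or an ERR_PTR on failure.  The returned dentry holds
 * a reference that the caller must drop with dput().
 */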
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

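/*
 * Strip the mount prefix path from @path, if one is configured, and return a
 * pointer into @path past the prefix (or an ERR_PTR if @path is shorter than
 * the prefix).
 */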
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

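/*
 * Rough lifetime of a cached directory handle from a caller's point of view
 * (sketch only; real callers also handle errors and reconnects):
 *
 *	struct cached_fid *cfid;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		... use cfid->fid for requests against the directory ...
 *		close_cached_dir(cfid);
 *	}
 *
 * open_cached_dir() returns a referenced cfid on success and the caller must
 * drop that reference with close_cached_dir(), as drop_cached_dir_by_name()
 * below does.
 */
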
/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	ses = tcon->ses;
	cfids = tcon->cfids;
	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only,
					 tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries.  Caller will put last reference if the latter.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	rc = -EINVAL;
	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

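/*
 * Drop any cached handle for @name, e.g. before the directory is removed on
 * the server.  Uses a lookup-only open_cached_dir() call to find the entry
 * and then releases its lease reference.
 */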
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

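/*
 * Drop the reference taken by open_cached_dir() or
 * open_cached_dir_by_dentry().
 */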
void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker. Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

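/*
 * Work item that drops the final reference to a cfid (and thus closes the
 * handle on the server) outside the context that queued it, then releases
 * the tcon reference taken when the work was queued.
 */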
static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
			struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop cached
 * directory itself (closing on server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}

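/*
 * Called when a lease break is received for @lease_key.  If it matches a
 * cached directory, drop the lease reference and queue work to release the
 * dentry and close the handle.  Returns true if the key matched a cached
 * directory.
 */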
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

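/*
 * Allocate and initialize a new cached_fid for @path.  Uses GFP_ATOMIC since
 * it is called with cfid_list_lock held.
 */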
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

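/*
 * Work item that frees the cfids moved to the ->dying list by
 * invalidate_all_cached_dirs(), dropping the reference taken there.
 */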
static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

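/*
 * Periodic worker that expires cached directory handles older than
 * dir_cache_timeout seconds, dropping their dentries and queueing the actual
 * close to a workqueue, then rearms itself.
 */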
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from the
				 * server. Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref (if there
			 * was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

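/*
 * Allocate the per-tcon cached_fids structure and kick off the laundromat
 * work.  Returns NULL on allocation failure.
 */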
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}