// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"

#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))
struct cache_dfs_tgt {
	char *name;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t smb_vol_lock;
	struct smb_vol smb_vol;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}
static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}
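
/*
 * Illustrative sketch (editorial, not part of the original file): callers
 * always pair the two helpers above, since get_normalized_path() may either
 * borrow the input buffer or allocate a converted copy:
 *
 *	char *npath;
 *
 *	if (!get_normalized_path("/srv/share/dir", &npath)) {
 *		// npath == "\srv\share\dir" here: a fresh allocation, because
 *		// the input used '/' and had to be rewritten with '\'
 *		free_normalized_path("/srv/share/dir", npath);
 *	}
 */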
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}
static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n", t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}
static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
		 "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}
static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
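
/*
 * Illustrative sketch (assumption, not from this file): module init/exit code
 * is expected to bracket the cache lifetime with the two entry points:
 *
 *	rc = dfs_cache_init();
 *	if (rc)
 *		return rc;
 *	...
 *	dfs_cache_destroy();
 */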
static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}
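
/*
 * Editorial note: the bucket mask above relies on CACHE_HTABLE_SIZE being a
 * power of two, so `h & (CACHE_HTABLE_SIZE - 1)` is equivalent to
 * `h % CACHE_HTABLE_SIZE` and always yields an index in [0, 31].
 */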
/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
	       !strncasecmp(s, "netlogon", strlen("netlogon"));
}
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&t->list);

	return t;
}
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}
/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}
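
/*
 * Editorial note with a worked example: the refresh worker is armed off the
 * smallest TTL seen so far. If the first cached entry has ttl=300, the worker
 * is queued to run in 300 * HZ jiffies; if a later entry arrives with ttl=60,
 * cache_ttl drops to min(300, 60) = 60 and the pending delayed work is
 * re-armed to fire in 60 * HZ jiffies instead.
 */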
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path,
					      unsigned int *hash)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);

	if (hash)
		*hash = h;

	return ce;
}
static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	cifs_cleanup_volume_info_contents(&vi->smb_vol);
	kfree(vi);
}
static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}
static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}
/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * update the existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
			 CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}
/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}

		/* The target hint goes first so callers try it first */
		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);

	return rc;
}
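
/*
 * Illustrative sketch of a hypothetical caller (editorial, not from this
 * file): mount/failover code would typically request both the referral and
 * the target list, then release them with the matching helpers:
 *
 *	struct dfs_info3_param ref = {0};
 *	struct dfs_cache_tgt_list tl;
 *	int rc;
 *
 *	rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    "\srv\dfsroot\link", &ref, &tl);
 *	if (!rc) {
 *		// ... use ref.node_name and/or iterate tl ...
 *		free_dfs_info_param(&ref);
 *		dfs_cache_free_tgts(&tl);
 *	}
 */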
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
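
/*
 * Illustrative sketch (editorial): this is the variant for contexts that must
 * not trigger network I/O, e.g. a quick "is this path already cached?" check:
 *
 *	if (!dfs_cache_noreq_find("\srv\dfsroot", NULL, NULL))
 *		; // entry exists (possibly expired); no referral was requested
 */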
/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}
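
/*
 * Illustrative failover sketch (editorial; assumes the
 * dfs_cache_get_tgt_iterator()/dfs_cache_get_next_tgt() helpers from
 * dfs_cache.h, and try_target() is hypothetical): after connecting to one
 * target from the list, record it as the new hint so future lookups try it
 * first:
 *
 *	tit = dfs_cache_get_tgt_iterator(&tl);
 *	while (tit) {
 *		if (!try_target(tit))	// returns 0 on success
 *			break;
 *		tit = dfs_cache_get_next_tgt(&tl, tit);
 *	}
 *	if (tit)
 *		dfs_cache_update_tgthint(xid, ses, nls, remap, path, tit);
 */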
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
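
/*
 * Illustrative sketch (editorial): once an iterator entry has been chosen,
 * expand it into a full referral, e.g. for composing new mount options:
 *
 *	struct dfs_info3_param ref = {0};
 *
 *	rc = dfs_cache_get_tgt_referral("\srv\dfsroot\link", tit, &ref);
 *	if (!rc) {
 *		// ... use ref.node_name, ref.path_consumed ...
 *		free_dfs_info_param(&ref);
 *	}
 */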
static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
{
	memcpy(new, vol, sizeof(*new));

	if (vol->username) {
		new->username = kstrndup(vol->username, strlen(vol->username),
					 GFP_KERNEL);
		if (!new->username)
			return -ENOMEM;
	}
	if (vol->password) {
		new->password = kstrndup(vol->password, strlen(vol->password),
					 GFP_KERNEL);
		if (!new->password)
			goto err_free_username;
	}

	cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
	new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
	if (!new->UNC)
		goto err_free_password;

	if (vol->domainname) {
		new->domainname = kstrndup(vol->domainname,
					   strlen(vol->domainname), GFP_KERNEL);
		if (!new->domainname)
			goto err_free_unc;
	}
	if (vol->iocharset) {
		new->iocharset = kstrndup(vol->iocharset,
					  strlen(vol->iocharset), GFP_KERNEL);
		if (!new->iocharset)
			goto err_free_domainname;
	}
	if (vol->prepath) {
		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
					GFP_KERNEL);
		if (!new->prepath)
			goto err_free_iocharset;
	}

	return 0;

err_free_iocharset:
	kfree(new->iocharset);
err_free_domainname:
	kfree(new->domainname);
err_free_unc:
	kfree(new->UNC);
err_free_password:
	kzfree(new->password);
err_free_username:
	kfree(new->username);
	return -ENOMEM;
}
/**
 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @vol: cifs volume.
 * @fullpath: origin full path.
 *
 * Return zero if volume was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!vol || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = dup_vol(vol, &vi->smb_vol);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->smb_vol_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}
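
/*
 * Editorial note: on success, ownership of @mntdata passes to the new volume
 * entry and is later freed by __vol_release(). A hypothetical mount path
 * (names are placeholders) would do:
 *
 *	rc = dfs_cache_add_vol(mntdata, vol, origin_fullpath);
 *	if (rc)
 *		kfree(mntdata);	// still the caller's on failure
 */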
/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->smb_vol_lock);
	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
	       sizeof(vi->smb_vol.dstaddr));
	spin_unlock(&vi->smb_vol_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}
/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @it: DFS target iterator.
 * @share: tree name.
 * @share_len: length of tree name.
 * @prefix: prefix path.
 * @prefix_len: length of prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it,
			    const char **share, size_t *share_len,
			    const char **prefix, size_t *prefix_len)
{
	char *s, sep;

	if (!it || !share || !share_len || !prefix || !prefix_len)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	s = strchrnul(s + 1, sep);

	*share = it->it_name;
	*share_len = s - it->it_name;
	*prefix = *s ? s + 1 : s;
	*prefix_len = &it->it_name[strlen(it->it_name)] - *prefix;

	return 0;
}
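
/*
 * Worked example (editorial): for a target name "\srv\share\dir1\dir2",
 * sep is '\', the first strchr() finds the separator before "share", and
 * strchrnul() stops at the one before "dir1". The outputs become:
 *
 *	*share  -> "\srv\share"	(share_len = 10)
 *	*prefix -> "dir1\dir2"	(prefix_len = 9)
 *
 * For "\srv\share" with no prefix, strchrnul() returns the terminating NUL,
 * so *prefix points at the empty string and prefix_len is 0.
 */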
/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}
static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}
static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}
static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(vol);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}
/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb_vol vol = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&vol, mdata, devname, false);
	kfree(devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&vol);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &vol);

out:
	cifs_cleanup_volume_info_contents(&vol);
	kfree(mdata);
	kfree(rpath);

	return ses;
}
1450 static int refresh_tcon(struct vol_info
*vi
, struct cifs_tcon
*tcon
)
1455 struct cache_entry
*ce
;
1456 struct cifs_ses
*root_ses
= NULL
, *ses
;
1457 struct dfs_info3_param
*refs
= NULL
;
1462 path
= tcon
->dfs_path
+ 1;
1464 rc
= get_normalized_path(path
, &npath
);
1468 down_read(&htable_rw_lock
);
1470 ce
= lookup_cache_entry(npath
, NULL
);
1473 up_read(&htable_rw_lock
);
1477 if (!cache_entry_expired(ce
)) {
1478 up_read(&htable_rw_lock
);
1482 up_read(&htable_rw_lock
);
1484 /* If it's a DFS Link, then use root SMB session for refreshing it */
1485 if (is_dfs_link(npath
)) {
1486 ses
= root_ses
= find_root_ses(vi
, tcon
, npath
);
1496 rc
= get_dfs_referral(xid
, ses
, cache_nlsc
, tcon
->remap
, npath
, &refs
,
1499 dump_refs(refs
, numrefs
);
1500 rc
= update_cache_entry(npath
, refs
, numrefs
);
1501 free_dfs_info_array(refs
, numrefs
);
1505 cifs_put_smb_ses(root_ses
);
1508 free_normalized_path(path
, npath
);
/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->smb_vol);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->smb_vol_lock);
		server = get_tcp_server(&vi->smb_vol);
		spin_unlock(&vi->smb_vol_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}