1 // SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "fs_context.h"

#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))
struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t ctx_lock;
	struct smb3_fs_context ctx;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;
/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);
static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}
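/*
 * Example: a caller passing "/srv/dfsroot/link" gets back a private
 * copy converted to "\srv\dfsroot\link", while a path that already
 * uses backslashes is returned as-is; free_normalized_path() below
 * relies on that to only free the duplicated case.
 */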
static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}
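/* The coarse clock is enough here: expiry only needs tick granularity. */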
static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}
static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}
static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
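/*
 * Note: the refresh workqueue is created with max_active == 1 above, so
 * cache refresh runs are serialized with one another.
 */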
static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}
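/*
 * The mask above only distributes entries correctly while
 * CACHE_HTABLE_SIZE stays a power of two.
 */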
/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
	       !strncasecmp(s, "netlogon", strlen("netlogon"));
}
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}
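/*
 * Note that "oldest" above means the entry with the earliest expiry
 * time, not the least recently used one: eviction prefers whichever
 * entry is closest to (or already past) expiration.
 */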
/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}
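/*
 * The delayed worker therefore always fires at the smallest TTL seen so
 * far: if the first entry carries a 300s TTL, the refresh is queued
 * 300s out; if a later entry arrives with a 60s TTL, the pending work
 * is pulled in to 60s.
 */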
static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}
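/*
 * Together with the prefix walk in lookup_cache_entry() below, this
 * gives a longest-prefix match over whole path components: a lookup for
 * "\dom\dfs\link\dir" first tries the full path, then "\dom\dfs\link",
 * and stops before trimming the path below its first three components.
 */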
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}
	/*
	 * Handle paths that have more than two path components and are a
	 * complete prefix of the DFS referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link
	 * Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;
		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}
static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	smb3_cleanup_fs_context_contents(&vi->ctx);
	kfree(vi);
}
static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}
static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}
/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * No entry was found, or it is expired.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * update an existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}
/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
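/*
 * Since the target hint is placed at the head of the list, callers
 * iterating the returned target list will always try the hinted target
 * first.
 */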
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);
	return rc;
}
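/*
 * Illustrative sketch of a typical caller (not a verbatim in-tree user;
 * try_target() is a made-up stand-in for the caller's connection
 * attempt, and xid/ses/cp/remap/path are assumed to be in scope). The
 * iterator helpers live in dfs_cache.h:
 *
 *	struct dfs_cache_tgt_list tl;
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cp, remap, path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			try_target(dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */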
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
/**
 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @ctx: cifs context.
 * @fullpath: origin full path.
 *
 * Return zero if context was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!ctx || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = smb3_fs_context_dup(&vi->ctx, ctx);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->ctx_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}
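/*
 * kref_init() above takes the initial reference on behalf of vol_list;
 * the matching kref_put() normally happens in dfs_cache_del_vol() at
 * umount time, which ends up in vol_release().
 */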
/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP server pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->ctx_lock);
	memcpy(&vi->ctx.dstaddr, &server->dstaddr,
	       sizeof(vi->ctx.dstaddr));
	spin_unlock(&vi->ctx_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}
/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **share, char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix ||
	    strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;
	/* point to prefix in DFS path */
	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* merge prefix paths from DFS path and target node */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s,
				  sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}
/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
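/*
 * Each regular tcon added to @head holds an extra tc_count reference;
 * the refresh worker drops it with cifs_put_tcon(), which is a no-op
 * for the IPC tcon since that one shares its session's lifetime.
 */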
static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}
static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}
static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}
static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(ctx);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}
/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb3_fs_context ctx = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&ctx);
	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&ctx);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &ctx);

out:
	smb3_cleanup_fs_context_contents(&ctx);
	kfree(mdata);
	kfree(rpath);

	return ses;
}
/* Refresh DFS cache entry from a given tcon */
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it's a DFS Link, then use root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}
/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->ctx);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->ctx_lock);
		server = get_tcp_server(&vi->ctx);
		spin_unlock(&vi->ctx_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}