// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"

#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 512
#define CACHE_MAX_ENTRIES 1024
#define CACHE_MIN_TTL 120 /* 2 minutes */
#define CACHE_DEFAULT_TTL 300 /* 5 minutes */
struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};
static struct kmem_cache *cache_slab __read_mostly;
struct workqueue_struct *dfscache_wq;

atomic_t dfs_cache_ttl;

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path if success, otherwise error.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}
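
/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * canonicalizer accepts either delimiter style and always yields a
 * backslash-delimited copy in @cache_cp's charset, e.g.:
 *
 *	char *npath = dfs_cache_canonical_path("//srv/dfsroot/link",
 *					       load_nls_default(),
 *					       NO_MAP_UNI_RSVD);
 *	if (!IS_ERR(npath)) {
 *		// npath == "\\srv\dfsroot\link"
 *		kfree(npath);
 *	}
 */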
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}
static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}
static inline void flush_cache_ent(struct cache_entry *ce)
{
	cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
				   ce->path_consumed, str_yes_no(cache_entry_expired(ce)));

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
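
/*
 * Editor's note (usage sketch, not in the original source): assuming this
 * entry is registered as /proc/fs/cifs/dfscache (done elsewhere in the cifs
 * debug code), the cache can be inspected and flushed from userspace:
 *
 *	# cat /proc/fs/cifs/dfscache		<- dump all entries and targets
 *	# echo 0 > /proc/fs/cifs/dfscache	<- clear the whole cache
 *
 * Any byte other than '0' written here is rejected by dfscache_proc_write()
 * with -EINVAL.
 */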
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
	}
}
static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
		 ce->path_consumed,
		 str_yes_no(cache_entry_expired(ce)));
	dump_tgts(ce);
}
static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
				      0);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return -EINVAL;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}
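
/*
 * Editor's note (illustrative): hashing is case-insensitive because each
 * character is upcased before being fed to jhash(), so lookups of
 * "\\SRV\Share" and "\\srv\share" land in the same bucket:
 *
 *	unsigned int h1, h2;
 *
 *	cache_entry_hash("\\SRV\\Share", strlen("\\SRV\\Share"), &h1);
 *	cache_entry_hash("\\srv\\share", strlen("\\srv\\share"), &h2);
 *	WARN_ON(h1 != h2);	// same bucket by construction
 */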
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);

	return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}
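
/*
 * Editor's note (illustrative sketch): the hint-preserving ordering above
 * (list_add() vs list_add_tail()) keeps a matched @tgthint at the head of
 * @ce->tlist, so list_first_entry_or_null() re-elects the previous hint on a
 * refresh and falls back to the server's first referral target otherwise.
 * E.g. for an old hint "\\B\share" and a new referral list
 * {"\\A\share", "\\B\share"}:
 *
 *	ce->tlist: "\\B\share" -> "\\A\share"
 *	READ_ONCE(ce->tgthint)->name == "\\B\share"
 */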
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
/* Remove all referrals that have a single target or oldest entry */
static void purge_cache(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *oldest = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (ce->numtgts == 1)
				flush_cache_ent(ce);
			else if (!oldest ||
				 timespec64_compare(&ce->etime,
						    &oldest->etime) < 0)
				oldest = ce;
		}
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
		flush_cache_ent(oldest);
}
/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;
	int ttl;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		purge_cache();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
	atomic_set(&dfs_cache_ttl, ttl);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}
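
/*
 * Editor's note (illustrative): @dfs_cache_ttl tracks the smallest TTL seen
 * across cached entries so the refresh worker wakes up often enough for the
 * shortest-lived referral.  E.g. with CACHE_DEFAULT_TTL == 300 and a new
 * entry whose TTL clamps to CACHE_MIN_TTL == 120:
 *
 *	ttl = min_t(int, 300, 120);		// -> 120
 *	atomic_set(&dfs_cache_ttl, ttl);	// next refresh in ~120s
 */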
/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}
static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-ENOENT);
}
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
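
/*
 * Editor's note (illustrative): for a request path with more than two
 * components, the walk above tries successively shorter whole-component
 * prefixes, e.g.:
 *
 *	lookup_cache_entry("\\srv\\root\\a\\b") tries, in order:
 *		"\\srv\\root\\a\\b"
 *		"\\srv\\root\\a"
 *	and returns the first match; "\\srv\\root" itself is only matched
 *	when passed in directly (the cnt < 3 branch above).
 */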
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}

	return rc;
}
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one.  Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc = 0;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral.  The @ses used for performing the I/O could be
	 * reconnecting, and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
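
/*
 * Editor's note (usage sketch under assumed callers, not from this file):
 * a typical consumer resolves a path and walks the returned target list
 * with the iterator helpers from dfs_cache.h:
 *
 *	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			pr_debug("target: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_tgt_list_free(&tl);
 *	}
 */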
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 */
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	struct cache_dfs_tgt *t;
	struct cache_entry *ce;

	if (!path || !it)
		return;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce))
		goto out_unlock;

	t = READ_ONCE(ce->tgthint);

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			WRITE_ONCE(ce->tgthint, t);
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_read(&htable_rw_lock);
}
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	s = target + len;
	return s + strspn(s, seps);
}
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}
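
/*
 * Editor's note (worked example, illustrative): for a referral request path
 * "\\srv\\dfs\\link\\dir" where it_path_consumed covers "\\srv\\dfs\\link"
 * and the target is "\\fileserver\\share\\base":
 *
 *	parse_target_share()  -> *share = "\\fileserver\\share",
 *				 target prefix path = "base"
 *	leftover of @path     -> "dir"
 *	merged *prefix        -> "base\\dir"  (joined with the target's
 *					       leading delimiter)
 */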
static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct sockaddr_storage ss;
	const char *host;
	const char *s2 = &tcon->tree_name[1];
	size_t hostlen;
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	bool match;
	int rc;

	if (strcasecmp(s2, s1))
		return false;

	/*
	 * Resolve share's hostname and check if server address matches.  Otherwise just ignore it,
	 * as we either have no upcall to resolve the hostname or failed to convert the ip address.
	 */
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	cifs_server_lock(server);
	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	cifs_server_unlock(server);

	return match;
}
static bool is_ses_good(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon = ses->tcon_ipc;
	bool ret;

	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	ret = !cifs_chan_needs_reconnect(ses, server) &&
		ses->ses_status == SES_GOOD &&
		!tcon->need_reconnect;
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	return ret;
}
static char *get_ses_refpath(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	char *path = ERR_PTR(-ENOENT);

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath) {
		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
		if (!path)
			path = ERR_PTR(-ENOMEM);
	}
	mutex_unlock(&server->refpath_lock);
	return path;
}
/* Refresh dfs referral of @ses */
static void refresh_ses_referral(struct cifs_ses *ses)
{
	struct cache_entry *ce;
	unsigned int xid;
	char *path = NULL;
	int rc = 0;

	xid = get_xid();

	path = get_ses_refpath(ses);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	ce = cache_refresh_path(xid, ses, path, false);
	if (!IS_ERR(ce))
		up_read(&htable_rw_lock);
	else
		rc = PTR_ERR(ce);

out:
	free_xid(xid);
	kfree(path);
}
static int __refresh_tcon_referral(struct cifs_tcon *tcon,
				   const char *path,
				   struct dfs_info3_param *refs,
				   int numrefs, bool force_refresh)
{
	struct cache_entry *ce;
	bool reconnect = force_refresh;
	int rc = 0;
	int i;

	if (unlikely(!numrefs))
		return 0;

	if (force_refresh) {
		for (i = 0; i < numrefs; i++) {
			/* TODO: include prefix paths in the matching */
			if (target_share_equal(tcon, refs[i].node_name)) {
				reconnect = false;
				break;
			}
		}
	}

	down_write(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce))
			rc = update_cache_entry_locked(ce, refs, numrefs);
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}
	up_write(&htable_rw_lock);

	if (IS_ERR(ce))
		rc = PTR_ERR(ce);
	if (reconnect) {
		cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
		cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
	}
	return rc;
}
static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	struct cifs_ses *ses;
	unsigned int xid;
	bool needs_refresh;
	char *path = NULL;
	int numrefs = 0;
	int rc = 0;

	xid = get_xid();
	ses = tcon->ses;

	path = get_ses_refpath(ses);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!needs_refresh) {
		up_read(&htable_rw_lock);
		goto out;
	}
	up_read(&htable_rw_lock);

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (!rc) {
		rc = __refresh_tcon_referral(tcon, path, refs,
					     numrefs, force_refresh);
	}

out:
	free_xid(xid);
	kfree(path);
	free_dfs_info_array(refs, numrefs);
}
/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
 * match any of the new targets, mark it for reconnect.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return -EINVAL;
	}
	spin_unlock(&tcon->tc_lock);

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino.  This prevents dentry revalidation from treating dentries as stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

	refresh_tcon_referral(tcon, true);
	return 0;
}
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
	struct cifs_tcon *tcon;
	struct cifs_ses *ses;

	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);

	list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
		refresh_ses_referral(ses);
	refresh_tcon_referral(tcon, false);

	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);
}