fs/smb/client/dfs_cache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE	512
#define CACHE_MAX_ENTRIES	1024
#define CACHE_MIN_TTL		120 /* 2 minutes */
#define CACHE_DEFAULT_TTL	300 /* 5 minutes */
struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

static struct kmem_cache *cache_slab __read_mostly;
struct workqueue_struct *dfscache_wq;

atomic_t dfs_cache_ttl;

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path on success, otherwise an error pointer.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}
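/*
 * Example (illustrative): a path passed in as "//srv/dfsroot/link" in
 * the mount's codepage comes back from dfs_cache_canonical_path() as
 *
 *	\srv\dfsroot\link
 *
 * i.e. recoded into @cache_cp's charset with delimiters converted to
 * backslashes, which is the form used as the cache key everywhere below.
 */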
static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
				   ce->path_consumed, str_yes_no(cache_entry_expired(ce)));

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
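/*
 * The cache can be inspected and flushed from userspace through the
 * procfs entry these ops are registered for (typically
 * /proc/fs/cifs/dfscache):
 *
 *	cat /proc/fs/cifs/dfscache	# dump all entries and targets
 *	echo 0 > /proc/fs/cifs/dfscache	# flush the entire cache
 *
 * Writing any value other than '0' fails with -EINVAL.
 */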
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
		 ce->path_consumed,
		 str_yes_no(cache_entry_expired(ce)));
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
				      0);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}
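/*
 * A sketch of the case-insensitive behavior; both calls below yield the
 * same bucket index because every code point is upcased before being
 * fed to jhash():
 *
 *	unsigned int h1, h2;
 *
 *	cache_entry_hash("\\srv\\share", strlen("\\srv\\share"), &h1);
 *	cache_entry_hash("\\SRV\\SHARE", strlen("\\SRV\\SHARE"), &h2);
 *	// h1 == h2
 */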
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}
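/*
 * Worked example of the @tgthint handling above: with referral targets
 * \a\share and \b\share and an old hint of "\b\share", the matching
 * target is list_add()'ed at the head, so the rebuilt list becomes
 * { \b\share, \a\share } and the new ce->tgthint (the first entry)
 * still points at the share the caller was using before the refresh.
 */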
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Remove all referrals that have a single target or, if the cache is still full, the oldest entry */
static void purge_cache(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *oldest = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (ce->numtgts == 1)
				flush_cache_ent(ce);
			else if (!oldest ||
				 timespec64_compare(&ce->etime,
						    &oldest->etime) < 0)
				oldest = ce;
		}
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
		flush_cache_ent(oldest);
}
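/*
 * Example of the eviction policy above: with cache_count at
 * CACHE_MAX_ENTRIES (1024), the scan first drops every entry that has a
 * single target (there is no failover value in keeping it); if the
 * cache is still full afterwards, the entry with the smallest etime,
 * i.e. the one closest to expiring, is dropped as well.
 */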
/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;
	int ttl;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		purge_cache();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
	atomic_set(&dfs_cache_ttl, ttl);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}
/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}
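/*
 * For instance, dfs_path_equal("\\srv\\share", 10, "\\SRV\\SHARE", 10)
 * returns true: the byte lengths match and each pair of decoded code
 * points compares equal after case folding with cifs_toupper().
 */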
static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
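/*
 * Worked example of the prefix walk above: for a request path of
 *
 *	\srv\dfsroot\dir1\dir2
 *
 * the lookup first tries the full path and then backs off one whole
 * component at a time (\srv\dfsroot\dir1), returning the first cached
 * match.  Paths with no more than two components (e.g. \srv\dfsroot)
 * are handled by the direct single-hash lookup instead.
 */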
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one.  If it was found but
 * expired, it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with the read lock held, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral.  The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
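/*
 * Typical usage (a sketch -- error handling trimmed; @xid, @ses, @cp and
 * @remap are assumed to come from the caller's mount context, and the
 * iterator helpers live in dfs_cache.h):
 *
 *	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cp, remap, path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */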
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 */
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	struct cache_dfs_tgt *t;
	struct cache_entry *ce;

	if (!path || !it)
		return;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce))
		goto out_unlock;

	t = READ_ONCE(ce->tgthint);

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			WRITE_ONCE(ce->tgthint, t);
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_read(&htable_rw_lock);
}
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	s = target + len;
	return s + strspn(s, seps);
}
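/*
 * For example, with a target of "\srv\share\dir1\dir2",
 * parse_target_share() sets *share to "\srv\share" and returns a
 * pointer into the target at "dir1\dir2", past any leading separators.
 */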
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}
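/*
 * Example of the merge above: for a referral path of \dfsroot\link\foo
 * where it_path_consumed covers \dfsroot\link, and a target of
 * \srv\share\tdir, the result is *share = "\srv\share" and
 * *prefix = "tdir\foo": the target's own prefix first, then whatever
 * part of the request path the referral did not consume.
 */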
static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct sockaddr_storage ss;
	const char *host;
	const char *s2 = &tcon->tree_name[1];
	size_t hostlen;
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	bool match;
	int rc;

	if (strcasecmp(s2, s1))
		return false;

	/*
	 * Resolve share's hostname and check if server address matches.
	 * Otherwise just ignore it, as we may lack the upcall needed to
	 * resolve the hostname, or the ip address conversion may have failed.
	 */
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	cifs_server_lock(server);
	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	cifs_server_unlock(server);

	return match;
}
static bool is_ses_good(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon = ses->tcon_ipc;
	bool ret;

	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	ret = !cifs_chan_needs_reconnect(ses, server) &&
		ses->ses_status == SES_GOOD &&
		!tcon->need_reconnect;
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	return ret;
}

static char *get_ses_refpath(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	char *path = ERR_PTR(-ENOENT);

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath) {
		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
		if (!path)
			path = ERR_PTR(-ENOMEM);
	}
	mutex_unlock(&server->refpath_lock);
	return path;
}

/* Refresh dfs referral of @ses */
static void refresh_ses_referral(struct cifs_ses *ses)
{
	struct cache_entry *ce;
	unsigned int xid;
	char *path;
	int rc = 0;

	xid = get_xid();

	path = get_ses_refpath(ses);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	ce = cache_refresh_path(xid, ses, path, false);
	if (!IS_ERR(ce))
		up_read(&htable_rw_lock);
	else
		rc = PTR_ERR(ce);

out:
	free_xid(xid);
	kfree(path);
}
static int __refresh_tcon_referral(struct cifs_tcon *tcon,
				   const char *path,
				   struct dfs_info3_param *refs,
				   int numrefs, bool force_refresh)
{
	struct cache_entry *ce;
	bool reconnect = force_refresh;
	int rc = 0;
	int i;

	if (unlikely(!numrefs))
		return 0;

	if (force_refresh) {
		for (i = 0; i < numrefs; i++) {
			/* TODO: include prefix paths in the matching */
			if (target_share_equal(tcon, refs[i].node_name)) {
				reconnect = false;
				break;
			}
		}
	}

	down_write(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce))
			rc = update_cache_entry_locked(ce, refs, numrefs);
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}
	up_write(&htable_rw_lock);

	if (IS_ERR(ce))
		rc = PTR_ERR(ce);
	if (reconnect) {
		cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
		cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
	}
	return rc;
}
static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	struct cifs_ses *ses;
	unsigned int xid;
	bool needs_refresh;
	char *path;
	int numrefs = 0;
	int rc = 0;

	xid = get_xid();
	ses = tcon->ses;

	path = get_ses_refpath(ses);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!needs_refresh) {
		up_read(&htable_rw_lock);
		goto out;
	}
	up_read(&htable_rw_lock);

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (!rc) {
		rc = __refresh_tcon_referral(tcon, path, refs,
					     numrefs, force_refresh);
	}

out:
	free_xid(xid);
	kfree(path);
	free_dfs_info_array(refs, numrefs);
}
/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
 * match any of the new targets, mark it for reconnect.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return 0;
	}
	spin_unlock(&tcon->tc_lock);

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation from thinking dentries are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

	refresh_tcon_referral(tcon, true);
	return 0;
}
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
	struct cifs_tcon *tcon;
	struct cifs_ses *ses;

	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);

	list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
		refresh_ses_referral(ses);
	refresh_tcon_referral(tcon, false);

	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);
}