// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}
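/* Note: net->cells_outstanding counts pending pieces of cell-management work
 * (queued manager runs, pending timers and cells awaiting destruction);
 * afs_cell_purge() waits for it to drop to zero before the network namespace
 * is allowed to go away.
 */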
/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}
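/* Note: timer_reduce() returns nonzero if the timer was already pending, in
 * which case that earlier timer already carries a count on cells_outstanding
 * and the count taken just above is surplus, so it is dropped again.
 */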
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				ret = 0;
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	if (ret != 0 && cell)
		afs_put_cell(net, cell);

	return ret == 0 ? cell : ERR_PTR(ret);
}
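/* Note: the lookup above runs locklessly first (a seqlock read pass) and only
 * takes cells_lock if the sequence count indicates the tree changed under it.
 * atomic_inc_not_zero() refuses to resurrect a cell whose refcount has already
 * reached zero and is on its way to destruction; the search is then repeated
 * with the lock held.
 */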
/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];

		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->name = kmalloc(namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
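/* Note: a freshly allocated cell starts with a usage count of 2: one reference
 * for the caller of afs_lookup_cell() and one for the net->cells tree, which
 * is the reference the garbage collector eventually drops.
 */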
/**
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_FAILED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;

error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
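/* Note: the smp_load_acquire() in the wait condition above pairs with the
 * smp_store_release() calls in afs_manage_cell(), so cell->error is guaranteed
 * to be visible by the time AFS_CELL_FAILED is observed.
 */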
/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	return 0;
}
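/* Note: the rootcell string accepted above has the form
 * "<cellname>[:<vlserver-addresses>]"; anything after the first colon is
 * passed to afs_lookup_cell() as the VL server address list.  Setting
 * AFS_CELL_FL_NO_GC pins the root cell with an extra reference so that the
 * garbage collector leaves it alone.
 */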
/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	} else {
		old = vllist;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
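/* Note: dns_lookup_count is bumped with smp_store_release() only after
 * dns_source and dns_status have been updated, and wake_up_var() then prods
 * anybody waiting on a fresh lookup result for this cell.
 */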
/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
	key_put(cell->anonymous_key);
	kfree(cell->name);
	kfree(cell);

	_leave(" [destroyed]");
}
/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}
/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}
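/* Note: dropping the count to 1 does not free the cell; it just records
 * last_inactive and arms the management timer.  The manager work item does
 * the actual garbage collection once the cell has been idle for
 * afs_cell_gc_delay seconds (or immediately if the cell has no VL servers).
 */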
/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}
/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}
/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave(" [deact]");
}
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			_leave(" [done %u]", cell->state);
			return;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that
 * are still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	afs_queue_cell_manager(net);

	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
}