/*
 * fs/kernfs/dir.c - kernfs directory implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>

#include "kernfs-internal.h"

DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by rename_lock */

#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)

static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_mutex);
	return atomic_read(&kn->active) >= 0;
}

static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}

static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
}

static char * __must_check kernfs_path_locked(struct kernfs_node *kn, char *buf,
					      size_t buflen)
{
	char *p = buf + buflen;
	int len;

	*--p = '\0';

	do {
		len = strlen(kn->name);
		if (p - buf < len + 1) {
			buf[0] = '\0';
			p = NULL;
			break;
		}
		p -= len;
		memcpy(p, kn->name, len);
		*--p = '/';
		kn = kn->parent;
	} while (kn && kn->parent);

	return p;
}

/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes. The behavior is
 * similar to strlcpy(). It returns the length of @kn's name and if @buf
 * isn't long enough, it's filled up to @buflen-1 and nul terminated.
 *
 * This function can be called from any context.
 */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}

/**
 * kernfs_path - build full path of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Builds and returns the full path of @kn in @buf of @buflen bytes. The
 * path is built from the end of @buf so the returned pointer usually
 * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
 * and %NULL is returned.
 */
char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	char *p;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	p = kernfs_path_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return p;
}
EXPORT_SYMBOL_GPL(kernfs_path);

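/*
 * Example (illustrative sketch, not part of the original file): a caller
 * printing the full path of a node into a stack buffer.  The node pointer
 * "kn", the buffer size and the pr_info() reporting are assumptions made
 * for the example only.
 *
 *	static void example_print_path(struct kernfs_node *kn)
 *	{
 *		char buf[256];
 *		char *p = kernfs_path(kn, buf, sizeof(buf));
 *
 *		if (p)
 *			pr_info("kernfs node at %s\n", p);
 *		else
 *			pr_info("kernfs path longer than %zu bytes\n", sizeof(buf));
 *	}
 *
 * Note that the returned pointer points into @buf (the path is built from
 * the end of the buffer), so @buf must stay in scope while @p is used.
 */
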
/**
 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	pr_cont("%s", kernfs_pr_cont_buf);

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}

/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
	char *p;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	p = kernfs_path_locked(kn, kernfs_pr_cont_buf,
			       sizeof(kernfs_pr_cont_buf));
	if (p)
		pr_cont("%s", p);
	else
		pr_cont("<name too long>");

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}

/**
 * kernfs_get_parent - determine the parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins and returns it. This function can be
 * called from any context.
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}

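/*
 * Example (illustrative sketch, not part of the original file): the
 * reference obtained from kernfs_get_parent() must be dropped with
 * kernfs_put() once the caller is done with it.  "kn" is an assumption
 * for the example.
 *
 *	struct kernfs_node *parent = kernfs_get_parent(kn);
 *
 *	if (parent) {
 *		... use parent->name, parent->priv, etc. ...
 *		kernfs_put(parent);
 *	}
 */
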
/**
 * kernfs_name_hash
 * @name: Null terminated string to hash
 * @ns: Namespace tag to hash
 *
 * Returns 31 bit hash of ns + name (so it fits in an off_t)
 */
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
	unsigned long hash = init_name_hash();
	unsigned int len = strlen(name);

	while (len--)
		hash = partial_name_hash(*name++, hash);
	hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
	hash &= 0x7fffffffU;
	/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
	if (hash < 2)
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

static int kernfs_name_compare(unsigned int hash, const char *name,
			       const void *ns, const struct kernfs_node *kn)
{
	if (hash != kn->hash)
		return hash - kn->hash;
	if (ns != kn->ns)
		return ns - kn->ns;
	return strcmp(name, kn->name);
}

static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}

/**
 * kernfs_link_sibling - link kernfs_node into sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Link @kn into its sibling rbtree which starts from
 * @kn->parent->dir.children.
 *
 * Locking:
 * mutex_lock(kernfs_mutex)
 *
 * RETURNS:
 * 0 on success, -EEXIST on failure.
 */
static int kernfs_link_sibling(struct kernfs_node *kn)
{
	struct rb_node **node = &kn->parent->dir.children.rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		struct kernfs_node *pos;
		int result;

		pos = rb_to_kn(*node);
		parent = *node;
		result = kernfs_sd_compare(kn, pos);
		if (result < 0)
			node = &pos->rb.rb_left;
		else if (result > 0)
			node = &pos->rb.rb_right;
		else
			return -EEXIST;
	}

	/* add new node and rebalance the tree */
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);

	/* successfully added, account subdir number */
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;

	return 0;
}

/**
 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Try to unlink @kn from its sibling rbtree which starts from
 * kn->parent->dir.children. Returns %true if @kn was actually
 * removed, %false if @kn wasn't on the rbtree.
 *
 * Locking:
 * mutex_lock(kernfs_mutex)
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;

	rb_erase(&kn->rb, &kn->parent->dir.children);
	RB_CLEAR_NODE(&kn->rb);
	return true;
}

/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn. This function is a noop if @kn is NULL.
 *
 * RETURNS:
 * Pointer to @kn on success, NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}

/**
 * kernfs_put_active - put an active reference to kernfs_node
 * @kn: kernfs_node to put an active reference to
 *
 * Put an active reference to @kn. This function is a noop if @kn is NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	struct kernfs_root *root = kernfs_root(kn);
	int v;

	if (unlikely(!kn))
		return;

	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	wake_up_all(&root->deactivate_waitq);
}

/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	WARN_ON_ONCE(kernfs_active(kn));

	mutex_unlock(&kernfs_mutex);

	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}

	kernfs_unmap_bin_file(kn);

	mutex_lock(&kernfs_mutex);
}

/**
 * kernfs_get - get a reference count on a kernfs_node
 * @kn: the target kernfs_node
 */
void kernfs_get(struct kernfs_node *kn)
{
	if (kn) {
		WARN_ON(!atomic_read(&kn->count));
		atomic_inc(&kn->count);
	}
}
EXPORT_SYMBOL_GPL(kernfs_get);

/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);
	if (!(kn->flags & KERNFS_STATIC_NAME))
		kfree(kn->name);
	if (kn->iattr) {
		if (kn->iattr->ia_secdata)
			security_release_secctx(kn->iattr->ia_secdata,
						kn->iattr->ia_secdata_len);
		simple_xattrs_free(&kn->iattr->xattrs);
	}
	kfree(kn->iattr);
	ida_simple_remove(&root->ino_ida, kn->ino);
	kmem_cache_free(kernfs_node_cache, kn);

	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		ida_destroy(&root->ino_ida);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);

static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Always perform fresh lookup for negatives */
	if (!dentry->d_inode)
		goto out_bad_unlocked;

	kn = dentry->d_fsdata;
	mutex_lock(&kernfs_mutex);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (dentry->d_parent->d_fsdata != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	mutex_unlock(&kernfs_mutex);
out_valid:
	return 1;
out_bad:
	mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
	/*
	 * @dentry doesn't match the underlying kernfs node, drop the
	 * dentry and force lookup. If we have submounts we must allow the
	 * vfs caches to lie about the state of the filesystem to prevent
	 * leaks and other nasty things, so use check_submounts_and_drop()
	 * instead of d_drop().
	 */
	if (check_submounts_and_drop(dentry) != 0)
		goto out_valid;

	return 0;
}

static void kernfs_dop_release(struct dentry *dentry)
{
	kernfs_put(dentry->d_fsdata);
}

const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
	.d_release	= kernfs_dop_release,
};

/**
 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
 * @dentry: the dentry in question
 *
 * Return the kernfs_node associated with @dentry. If @dentry is not a
 * kernfs one, %NULL is returned.
 *
 * While the returned kernfs_node will stay accessible as long as @dentry
 * is accessible, the returned node can be in any state and the caller is
 * fully responsible for determining what's accessible.
 */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
	if (dentry->d_sb->s_op == &kernfs_sops)
		return dentry->d_fsdata;
	return NULL;
}

static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	char *dup_name = NULL;
	struct kernfs_node *kn;
	int ret;

	if (!(flags & KERNFS_STATIC_NAME)) {
		name = dup_name = kstrdup(name, GFP_KERNEL);
		if (!name)
			return NULL;
	}

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree(dup_name);
	return NULL;
}

struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
				    const char *name, umode_t mode,
				    unsigned flags)
{
	struct kernfs_node *kn;

	kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
	if (kn) {
		kernfs_get(parent);
		kn->parent = parent;
	}
	return kn;
}

/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent. This
 * function increments nlink of the parent's inode if @kn is a
 * directory and links @kn into the children list of the parent.
 *
 * RETURNS:
 * 0 on success, -EEXIST if entry with the given name already exists.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
		ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
	}

	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate(). A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}

/**
 * kernfs_find_ns - find kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent. Returns pointer to
 * the found kernfs_node on success, %NULL on failure.
 */
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
					  const unsigned char *name,
					  const void *ns)
{
	struct rb_node *node = parent->dir.children.rb_node;
	bool has_ns = kernfs_ns_enabled(parent);
	unsigned int hash;

	lockdep_assert_held(&kernfs_mutex);

	if (has_ns != (bool)ns) {
		WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		     has_ns ? "required" : "invalid", parent->name, name);
		return NULL;
	}

	hash = kernfs_name_hash(name, ns);
	while (node) {
		struct kernfs_node *kn;
		int result;

		kn = rb_to_kn(node);
		result = kernfs_name_compare(hash, name, ns, kn);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return kn;
	}
	return NULL;
}

/**
 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent and get a reference
 * if found. This function may sleep and returns pointer to the found
 * kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
{
	struct kernfs_node *kn;

	mutex_lock(&kernfs_mutex);
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
	mutex_unlock(&kernfs_mutex);

	return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);

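/*
 * Example (illustrative sketch, not part of the original file): looking up
 * a child by name and dropping the reference afterwards.  "parent" and the
 * "power" attribute name are assumptions; a NULL @ns is used because the
 * parent is assumed to have namespaces disabled.
 *
 *	struct kernfs_node *kn;
 *
 *	kn = kernfs_find_and_get_ns(parent, "power", NULL);
 *	if (kn) {
 *		... inspect or use kn ...
 *		kernfs_put(kn);
 *	}
 *
 * kernfs.h also provides kernfs_find_and_get() as a shorthand for the
 * NULL-namespace case.
 */
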
/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ida_init(&root->ino_ida);
	INIT_LIST_HEAD(&root->supers);

	kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       KERNFS_DIR);
	if (!kn) {
		ida_destroy(&root->ino_ida);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	kernfs_remove(root->kn);	/* will also free @root */
}

/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
 */
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 void *priv, const void *ns)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
	kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->dir.root = parent->dir.root;
	kn->ns = ns;
	kn->priv = priv;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}

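/*
 * Example (illustrative sketch, not part of the original file): creating a
 * small hierarchy.  The error handling is abbreviated and the names are
 * assumptions made for the example.
 *
 *	struct kernfs_root *root;
 *	struct kernfs_node *dir;
 *
 *	root = kernfs_create_root(NULL, 0, NULL);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	dir = kernfs_create_dir_ns(root->kn, "example", 0755, NULL, NULL);
 *	if (IS_ERR(dir)) {
 *		kernfs_destroy_root(root);
 *		return PTR_ERR(dir);
 *	}
 *
 * The hierarchy becomes visible to userland once the kernfs user mounts it
 * (e.g. via kernfs_mount()), which is outside the scope of this file.
 */
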
static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct dentry *ret;
	struct kernfs_node *parent = dentry->d_parent->d_fsdata;
	struct kernfs_node *kn;
	struct inode *inode;
	const void *ns = NULL;

	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);

	/* no such entry */
	if (!kn || !kernfs_active(kn)) {
		ret = NULL;
		goto out_unlock;
	}
	kernfs_get(kn);
	dentry->d_fsdata = kn;

	/* attach dentry and inode */
	inode = kernfs_get_inode(dir->i_sb, kn);
	if (!inode) {
		ret = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	/* instantiate and hash dentry */
	ret = d_materialise_unique(dentry, inode);
 out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}

static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
			    umode_t mode)
{
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
	int ret;

	if (!scops || !scops->mkdir)
		return -EPERM;

	if (!kernfs_get_active(parent))
		return -ENODEV;

	ret = scops->mkdir(parent, dentry->d_name.name, mode);

	kernfs_put_active(parent);
	return ret;
}

static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct kernfs_node *kn = dentry->d_fsdata;
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (!scops || !scops->rmdir)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ret = scops->rmdir(kn);

	kernfs_put_active(kn);
	return ret;
}

static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	struct kernfs_node *kn = old_dentry->d_fsdata;
	struct kernfs_node *new_parent = new_dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (!scops || !scops->rename)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	if (!kernfs_get_active(new_parent)) {
		kernfs_put_active(kn);
		return -ENODEV;
	}

	ret = scops->rename(kn, new_parent, new_dentry->d_name.name);

	kernfs_put_active(new_parent);
	kernfs_put_active(kn);
	return ret;
}

const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.setxattr	= kernfs_iop_setxattr,
	.removexattr	= kernfs_iop_removexattr,
	.getxattr	= kernfs_iop_getxattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};

static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
{
	struct kernfs_node *last;

	while (true) {
		struct rb_node *rbn;

		last = pos;

		if (kernfs_type(pos) != KERNFS_DIR)
			break;

		rbn = rb_first(&pos->dir.children);
		if (!rbn)
			break;

		pos = rb_to_kn(rbn);
	}

	return last;
}

/**
 * kernfs_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: kernfs_node whose descendants to walk
 *
 * Find the next descendant to visit for post-order traversal of @root's
 * descendants. @root is included in the iteration and the last node to be
 * visited.
 */
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
						       struct kernfs_node *root)
{
	struct rb_node *rbn;

	lockdep_assert_held(&kernfs_mutex);

	/* if first iteration, visit leftmost descendant which may be root */
	if (!pos)
		return kernfs_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	rbn = rb_next(&pos->rb);
	if (rbn)
		return kernfs_leftmost_descendant(rb_to_kn(rbn));

	/* no sibling left, visit parent */
	return pos->parent;
}

/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
 * needs to be explicitly activated. A node which hasn't been activated
 * isn't visible to userland and deactivation is skipped during its
 * removal. This is useful to construct atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
 *
 * The caller is responsible for ensuring that this function is not called
 * after kernfs_remove*() is invoked on @kn.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	mutex_lock(&kernfs_mutex);

	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		if (!pos || (pos->flags & KERNFS_ACTIVATED))
			continue;

		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);

		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
		pos->flags |= KERNFS_ACTIVATED;
	}

	mutex_unlock(&kernfs_mutex);
}

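/*
 * Example (illustrative sketch, not part of the original file): with
 * KERNFS_ROOT_CREATE_DEACTIVATED, several nodes can be created invisibly
 * and then published atomically.  Error handling and the node names are
 * assumptions made for the example.
 *
 *	root = kernfs_create_root(NULL, KERNFS_ROOT_CREATE_DEACTIVATED, NULL);
 *	dir = kernfs_create_dir_ns(root->kn, "a", 0755, NULL, NULL);
 *	... create more nodes under dir ...
 *
 *	kernfs_activate(root->kn);
 *
 * Nothing under the root is visible to userland until kernfs_activate()
 * is called; if any intermediate creation fails, the whole subtree can be
 * removed without ever having been exposed.
 */
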
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns. Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated. This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node. Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
				ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
			}

			kernfs_put(pos);
		}

		kernfs_put(pos);
	} while (pos != kn);
}

/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	mutex_lock(&kernfs_mutex);
	__kernfs_remove(kn);
	mutex_unlock(&kernfs_mutex);
}

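/*
 * Example (illustrative sketch, not part of the original file): removal is
 * recursive, so tearing down a directory created earlier only needs the
 * top node; "dir" is an assumption made for the example.
 *
 *	kernfs_remove(dir);
 *
 * Removing an entire hierarchy goes through kernfs_destroy_root(), which
 * removes root->kn and frees @root.
 */
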
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops. Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding. Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain. If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}

/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation. Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or be in the
 * process of being removed. Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything. If @kn
	 * is alive, nothing changes. If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state. If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}

/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops. This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself. This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself. It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self(). The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path. kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node. Only the first one actually performs removal and
 * returns %true. All others will wait until the kernfs operation which
 * won self-removal finishes and return %false. Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration. This can be used to
 * guarantee, for example, that all concurrent writes to a "delete" file
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations. Only
	 * the first one will actually perform removal. When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex. The ones which lost arbitration
	 * wait for SUICIDED && drained, which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() has finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}

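/*
 * Example (illustrative sketch, not part of the original file): a "delete"
 * attribute implemented with kernfs_remove_self().  The write callback
 * signature follows struct kernfs_ops; the teardown helper is an
 * assumption made for the example.
 *
 *	static ssize_t delete_write(struct kernfs_open_file *of, char *buf,
 *				    size_t bytes, loff_t off)
 *	{
 *		if (kernfs_remove_self(of->kn))
 *			example_teardown_device(of->kn->priv);
 *		return bytes;
 *	}
 *
 * Only the invocation that wins the arbitration (the one that sees %true)
 * performs the teardown; the losers return after the winner's enclosing
 * kernfs operation has finished.
 */
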
/**
 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
 * @parent: parent of the target
 * @name: name of the kernfs_node to remove
 * @ns: namespace tag of the kernfs_node to remove
 *
 * Look for the kernfs_node with @name and @ns under @parent and remove it.
 * Returns 0 on success, -ENOENT if such entry doesn't exist.
 */
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
			     const void *ns)
{
	struct kernfs_node *kn;

	if (!parent) {
		WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
			name);
		return -ENOENT;
	}

	mutex_lock(&kernfs_mutex);

	kn = kernfs_find_ns(parent, name, ns);
	if (kn)
		__kernfs_remove(kn);

	mutex_unlock(&kernfs_mutex);

	if (kn)
		return 0;
	else
		return -ENOENT;
}

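/*
 * Example (illustrative sketch, not part of the original file): removing a
 * single child by name and ignoring a missing entry.  "parent" and the
 * attribute name are assumptions made for the example.
 *
 *	if (kernfs_remove_by_name_ns(parent, "power", NULL) == -ENOENT)
 *		pr_debug("attribute already gone\n");
 *
 * kernfs.h also provides kernfs_remove_by_name() as a shorthand for the
 * NULL-namespace case.
 */
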
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		if (!(kn->flags & KERNFS_STATIC_NAME))
			old_name = kn->name;
		kn->flags &= ~KERNFS_STATIC_NAME;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	kernfs_put(old_parent);
	kfree(old_name);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}

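/*
 * Example (illustrative sketch, not part of the original file): renaming a
 * node in place, keeping its parent and namespace tag.  "kn" is an
 * assumption made for the example.
 *
 *	int err = kernfs_rename_ns(kn, kn->parent, "new-name", kn->ns);
 *
 *	if (err)
 *		pr_warn("kernfs rename failed: %d\n", err);
 */
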
/* Relationship between s_mode and the DT_xxx types */
static inline unsigned char dt_type(struct kernfs_node *kn)
{
	return (kn->mode >> 12) & 15;
}

static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}

static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}

static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
	struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
{
	pos = kernfs_dir_pos(ns, parent, ino, pos);
	if (pos)
		do {
			struct rb_node *node = rb_next(&pos->rb);
			if (!node)
				pos = NULL;
			else
				pos = rb_to_kn(node);
		} while (pos && (!kernfs_active(pos) || pos->ns != ns));
	return pos;
}

static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = dentry->d_fsdata;
	struct kernfs_node *pos = file->private_data;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = pos->ino;

		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;
	return 0;
}

static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
				    int whence)
{
	struct inode *inode = file_inode(file);
	loff_t ret;

	mutex_lock(&inode->i_mutex);
	ret = generic_file_llseek(file, offset, whence);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= kernfs_dir_fop_llseek,
};