/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"
/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
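/*
 * Editor's illustrative sketch (not part of the original file): the
 * ordering above means any path needing both i_lock and d_lock must
 * take i_lock first:
 *
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dentry->d_lock);
 *	...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&inode->i_lock);
 *
 * Code that already holds d_lock and discovers it needs i_lock must
 * either spin_trylock() it (see dentry_kill()) or drop d_lock and
 * retake both locks in order (see shrink_lock_dentry()).
 */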
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}
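/*
 * Editor's worked example (assuming the usual dcache_init() setup): if
 * the table is sized to 2^20 buckets, d_hash_shift is stored as
 * 32 - 20 = 12, so d_hash() indexes the table by the top 20 bits of
 * the 32-bit name hash.
 */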
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We expect to get better
 * code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes offline, we just keep its counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' comes from a dentry, so the allocation is aligned for
 * this particular component. We don't strictly need the
 * load_unaligned_zeropad() safety there, but it doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
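/*
 * Editor's worked example (assuming a 64-bit little-endian machine):
 * for a 5-byte tail, bytemask_from_count(5) is 0x000000ffffffffff, so
 * only the low five bytes of a^b participate in the final comparison;
 * any garbage read past the end of the shorter string is masked off.
 */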
#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
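/*
 * Editor's note: short names (up to DNAME_INLINE_LEN-1 bytes) live in
 * dentry->d_iname; longer ones get a refcounted struct external_name,
 * with dentry->d_name.name pointing at ext->name[] (see __d_alloc()
 * below).  That is why container_of() above can recover the enclosing
 * structure from the name pointer alone.
 */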
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}
static void __d_free_external_name(struct rcu_head *head)
{
	struct external_name *name = container_of(head, struct external_name,
						  u.head);

	mod_node_page_state(page_pgdat(virt_to_page(name)),
			    NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -ksize(name));

	kfree(name);
}
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	__d_free_external_name(&external_name(dentry)->u.head);

	kmem_cache_free(dentry_cache, dentry);
}
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			call_rcu(&p->u.head, __d_free_external_name);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
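/*
 * Illustrative usage sketch (editor's addition): a caller such as
 * fsnotify takes a snapshot so the name stays valid after d_lock is
 * dropped, even if the dentry is renamed or freed meanwhile:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	do_something(snap.name);	// hypothetical consumer
 *	release_dentry_name_snapshot(&snap);
 */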
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
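/*
 * Editor's note: D_FLAG_VERIFY() encodes the three legal states of the
 * (DCACHE_LRU_LIST, DCACHE_SHRINK_LIST) pair:
 *
 *	0					- on neither list
 *	DCACHE_LRU_LIST				- on the superblock LRU
 *	DCACHE_LRU_LIST|DCACHE_SHRINK_LIST	- on a private shrink list
 *
 * DCACHE_SHRINK_LIST alone is never a valid state.
 */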
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated.  However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next.  And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around.  I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}
	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);
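/*
 * Editor's illustrative equivalence: the loop in dput() is the
 * hand-unrolled form of the natural recursion
 *
 *	parent = dentry_kill(dentry);	// may hand back the parent
 *	if (parent)
 *		dput(parent);		// tail call, made iterative above
 *
 * which would otherwise risk deep stacks on long ancestor chains.
 */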
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == READ_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
/*
 * Lock a dentry from shrink list.
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry has been disrupted or grabbed, leaving
 * the caller to kick it off-list.  Otherwise, return true and have
 * that dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}
static void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		__dentry_kill(dentry);
		if (parent == dentry)
			continue;
		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
			dentry = dentry_kill(dentry);
	}
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list.  It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
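/*
 * Editor's note: the superblock shrinker (super_cache_scan() in
 * fs/super.c) is the expected caller here, passing down the
 * shrink_control it received from the memory-management layer.
 */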
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter()
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
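/*
 * Illustrative d_walk() usage sketch (editor's addition; count_one is
 * a hypothetical callback, not part of this file):
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *d)
 *	{
 *		(*(unsigned long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	unsigned long n = 0;
 *	d_walk(parent, &n, count_one);
 *
 * The callback runs with d_lock held, so it must not sleep or take
 * locks that rank above d_lock.
 */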
struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);
/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct external_name *ext = NULL;
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);

		ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
		if (!ext) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&ext->u.count, 1);
		dname = ext->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	if (unlikely(ext)) {
		pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
		mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
				    ksize(ext));
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);

struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
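/*
 * Editor's usage sketch: d_alloc_name() is the convenience wrapper
 * filesystems use for fixed, well-known entries, e.g.:
 *
 *	struct dentry *d = d_alloc_name(parent, "lost+found");
 *	if (!d)
 *		return -ENOMEM;
 */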
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry - The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()).  This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else.  Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = d_alloc_anon(root_inode->i_sb);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

out_iput:
	iput(inode);
	return res;
}

struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
);
2018 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2019 * @inode: inode to allocate the dentry for
2021 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2022 * similar open by handle operations. The returned dentry may be anonymous,
2023 * or may have a full name (if the inode was already in the cache).
2025 * When called on a directory inode, we must ensure that the inode only ever
2026 * has one dentry. If a dentry is found, that is returned instead of
2027 * allocating a new one.
2029 * On successful return, the reference to the inode has been transferred
2030 * to the dentry. In case of an error the reference on the inode is released.
2031 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2032 * be passed in and the error will be propagated to the return value,
2033 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2035 struct dentry
*d_obtain_alias(struct inode
*inode
)
2037 return __d_obtain_alias(inode
, true);
2039 EXPORT_SYMBOL(d_obtain_alias
);
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in, and the error
 * will be propagated to the return value, with a %NULL @inode
 * replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names for the
 * same inode; only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
static inline bool d_same_name(const struct dentry *dentry,
				const struct dentry *parent,
				const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) is costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	int isdir = d_is_dir(dentry);

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If it is, just return it;
		 * otherwise we really have to repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);
/* inode->i_lock held if inode is non-NULL */

static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
			       dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
			       target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
		       target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		call_rcu(&old_name->u.head, __d_free_external_name);
}
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target is not a descendent of dentry->d_parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					 DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry == old_parent)
			dentry->d_flags |= DCACHE_RCUACCESS;
		else
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	ret = -EBUSY;
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
 * Returns false otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock to protect against the d_parent trashing
		 * due to d_move
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
EXPORT_SYMBOL(is_subdir);
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}

EXPORT_SYMBOL(d_genocide);
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);
void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}