/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - childrens' d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
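
/*
 * Illustrative sketch (not part of the original file): taking d_lock on
 * two dentries with no ancestor relationship, in the address order the
 * comment above prescribes. The helper name is hypothetical.
 */
static void __maybe_unused example_lock_pair(struct dentry *dentry1,
					     struct dentry *dentry2)
{
	if (dentry2 < dentry1)
		swap(dentry1, dentry2);
	spin_lock(&dentry1->d_lock);
	spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
}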

int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}
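
/*
 * Sketch of how a caller derives the bucket for a (parent, name) pair;
 * the qstr's hash field is assumed to have been filled in by
 * full_name_hash() over the name bytes, as d_alloc_name() does below.
 * The helper name is hypothetical.
 */
static struct hlist_bl_head * __maybe_unused example_bucket(const struct dentry *parent,
							    const struct qstr *name)
{
	return d_hash(parent, name->hash);
}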

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void dentry_free(struct dentry *dentry)
{
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	__d_clear_type(dentry);
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);
}

static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
}

/*
 * dentry_lru_(add|del) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
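
/*
 * Usage sketch (hypothetical caller, not from this file): a network
 * filesystem that notices a cached name went stale on the server simply
 * drops the dentry; the next lookup misses the dcache and re-queries.
 */
static void __maybe_unused example_invalidate_stale(struct dentry *dentry)
{
	d_drop(dentry);		/* takes and releases d_lock itself */
}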

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	cpu_relax();
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely((int)dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
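
/*
 * Usage sketch: dput() is the universal "I'm done" for any counted
 * dentry reference, whether it came from dget(), d_lookup() or
 * dget_parent(). A minimal pairing, with hypothetical names:
 */
static void __maybe_unused example_use_dentry(struct dentry *dentry)
{
	struct dentry *ref = dget(dentry);	/* counted reference */
	/* ... safely dereference ref->d_inode, ref->d_name here ... */
	dput(ref);				/* may end up killing the dentry */
}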

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 * We also need to leave mountpoints alone,
	 * directory or not.
	 */
	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			/*
			 * inform the fs via d_prune that this dentry
			 * is about to be unhashed and destroyed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if ((int)dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}

static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list. It is thus, always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan : number of entries to try to free
 * @nid: which node to scan for freeable entities
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		rcu_read_lock();
		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/*
		 * might go back up the wrong parent if we have had a rename
		 * or deletion
		 */
		if (this_parent != child->d_parent ||
			 (child->d_flags & DCACHE_DENTRY_KILLED) ||
			 need_seqretry(&rename_lock, seq)) {
			spin_unlock(&this_parent->d_lock);
			rcu_read_unlock();
			goto rename_retry;
		}
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq)) {
		spin_unlock(&this_parent->d_lock);
		goto rename_retry;
	}
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	if (!retry)
		return;
	seq = 1;
	goto again;
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. check_submounts_and_drop() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;

	if (d_mountpoint(dentry)) {
		data->found = -EBUSY;
		return D_WALK_QUIT;
	}

	return select_collect(_data, dentry);
}

static void check_and_drop(void *_data)
{
	struct select_data *data = _data;

	if (d_mountpoint(data->start))
		data->found = -EBUSY;
	if (!data->found)
		__d_drop(data->start);
}

/**
 * check_submounts_and_drop - prune dcache, check for submounts and drop
 *
 * All done as a single atomic operation relative to has_unlinked_ancestor().
 * Returns 0 if successfully unhashed @parent. If there were submounts then
 * return -EBUSY.
 *
 * @dentry: dentry to prune and drop
 */
int check_submounts_and_drop(struct dentry *dentry)
{
	int ret = 0;

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		goto out;
	}

	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = dentry;
		data.found = 0;

		d_walk(dentry, &data, check_and_collect, check_and_drop);
		ret = data.found;

		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);

		if (ret <= 0)
			break;

		cond_resched();
	}

out:
	return ret;
}
EXPORT_SYMBOL(check_submounts_and_drop);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);

static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_FILE_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->follow_link))
			add_flags = DCACHE_SYMLINK_TYPE;
		else
			inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);

	spin_lock(&dentry->d_lock);
	__d_set_type(dentry, add_flags);
	if (inode)
		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
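
/*
 * Usage sketch (typical pattern in a simple filesystem's create path,
 * simplified): allocate a named child and bind it to a new inode.
 * example_new_inode() is a hypothetical helper standing in for the
 * filesystem's own inode allocation.
 */
static int __maybe_unused example_create(struct dentry *parent)
{
	struct inode *inode = example_new_inode(parent->d_sb); /* hypothetical */
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;
	dentry = d_alloc_name(parent, "example");
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	d_instantiate(dentry, inode);	/* consumes the inode reference */
	return 0;
}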

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));

	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
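
/*
 * Usage sketch (typical fill_super tail, simplified): hand the root
 * inode straight to d_make_root(); on failure it has already dropped
 * the inode reference, so the caller only reports the error.
 */
static int __maybe_unused example_fill_super_tail(struct super_block *sb,
						  struct inode *root)
{
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;	/* root inode already released */
	return 0;
}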

static struct dentry *__d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	static const struct qstr anonstring = QSTR_INIT("/", 1);
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= add_flags;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}

/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);
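
/*
 * Usage sketch (typical exportfs ->fh_to_dentry shape, simplified):
 * because d_obtain_alias() accepts NULL and IS_ERR inodes, the decode
 * error paths collapse into a single call. example_iget() is a
 * hypothetical stand-in for the filesystem's inode lookup.
 */
static struct dentry * __maybe_unused example_fh_to_dentry(struct super_block *sb,
							   u64 ino)
{
	struct inode *inode = example_iget(sb, ino);	/* hypothetical */

	return d_obtain_alias(inode);	/* consumes the inode reference */
}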

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return found;
}
EXPORT_SYMBOL(d_add_ci);
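
/*
 * Usage sketch (hypothetical ->lookup tail of a case-insensitive
 * filesystem): the fs resolves the request to the on-disk, case-exact
 * spelling and lets d_add_ci() reuse or splice the canonical dentry.
 * @exact_name is assumed to hold that spelling.
 */
static struct dentry * __maybe_unused example_ci_lookup(struct dentry *dentry,
							struct inode *inode,
							struct qstr *exact_name)
{
	/* d_add_ci() consumes the inode reference on all paths */
	return d_add_ci(dentry, inode, exact_name);
}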

/*
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};

static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}
1994 * __d_lookup_rcu - search for a dentry (racy, store-free)
1995 * @parent: parent dentry
1996 * @name: qstr of name we wish to find
1997 * @seqp: returns d_seq value at the point where the dentry was found
1998 * Returns: dentry, or NULL
2000 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2001 * resolution (store-free path walking) design described in
2002 * Documentation/filesystems/path-lookup.txt.
2004 * This is not to be used outside core vfs.
2006 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2007 * held, and rcu_read_lock held. The returned dentry must not be stored into
2008 * without taking d_lock and checking d_seq sequence count against @seq
2009 * returned here.
2011 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2012 * function.
2014 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2015 * the returned dentry, so long as its parent's seqlock is checked after the
2016 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2017 * is formed, giving integrity down the path walk.
2019 * NOTE! The caller *has* to check the resulting dentry against the sequence
2020 * number we've returned before using any of the resulting dentry state!
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			*seqp = seq;
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_OK:
				return dentry;
			case D_COMP_NOMATCH:
				continue;
			default:
				goto seqretry;
			}
		}

		if (dentry->d_name.hash_len != hashlen)
			continue;
		*seqp = seq;
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
			return dentry;
	}
	return NULL;
}
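/*
 * Illustrative fragment (a sketch, not part of the original file): the
 * contract above mirrors what the rcu-walk code in fs/namei.c does with
 * the returned sequence number, roughly:
 *
 *	dentry = __d_lookup_rcu(parent, name, &seq);
 *	...read dentry fields...
 *	if (read_seqcount_retry(&dentry->d_seq, seq))
 *		goto drop_to_ref_walk;	(hypothetical label)
 */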
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
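/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * a minimal d_lookup() caller. The points of interest are the qstr setup
 * and the dput() that balances the reference d_lookup() takes.
 */
static bool __maybe_unused example_child_exists(struct dentry *parent,
						const char *name)
{
	struct qstr q;
	struct dentry *child;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	child = d_lookup(parent, &q);
	if (!child)
		return false;
	dput(child);		/* balance the reference d_lookup() took */
	return true;
}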
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
				goto next;
		} else {
			if (dentry->d_name.len != len)
				goto next;
			if (dentry_cmp(dentry, str, len))
				goto next;
		}

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
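/*
 * Illustrative sketch (hypothetical helper): unlike a raw d_lookup(),
 * d_hash_and_lookup() needs no precomputed hash and respects a
 * filesystem's own ->d_hash(), e.g. on case-insensitive filesystems.
 */
static struct dentry * __maybe_unused example_lookup_raw(struct dentry *dir,
							 const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	return d_hash_and_lookup(dir, &q);	/* NULL, or ERR_PTR on bad name */
}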
/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
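/*
 * Illustrative sketch (hypothetical helper): d_rehash() is the inverse of
 * d_drop(). A filesystem that successfully revalidates a dentry it had
 * dropped earlier could make it visible to lookups again like this,
 * assuming the caller serializes against other hash-state changes:
 */
static void __maybe_unused example_reattach(struct dentry *dentry)
{
	if (d_unhashed(dentry))
		d_rehash(dentry);	/* findable by d_lookup() again */
}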
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}

static void dentry_unlock_parents_for_move(struct dentry *dentry,
					struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/*
	 * Unhash the target (d_delete() is not usable here).  If exchanging
	 * the two dentries, then rehash onto the other's hash queue.
	 */
	__d_drop(target);
	if (exchange) {
		__d_rehash(target,
			   d_hash(dentry->d_parent, dentry->d_name.hash));
	}

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	if (exchange)
		fsnotify_d_move(target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
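/*
 * Illustrative sketch (hypothetical helper): d_ancestor() is the building
 * block for loop checks, as in the BUG_ON()s of __d_move() above. A
 * pre-move check might read:
 */
static int __maybe_unused example_check_loop(struct dentry *dentry,
					     struct dentry *target)
{
	/* moving dentry on top of one of its own descendants would loop */
	if (d_ancestor(dentry, target))
		return -ELOOP;
	return 0;
}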
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret = ERR_PTR(-EBUSY);

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	if (likely(!d_mountpoint(alias))) {
		__d_move(alias, dentry, false);
		ret = alias;
	}
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent;

	dentry_lock_for_move(anon, dentry);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);

	dparent = dentry->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dentry->d_parent = dentry;
	list_del_init(&dentry->d_u.d_child);
	anon->d_parent = dparent;
	if (likely(!d_unhashed(anon))) {
		hlist_bl_lock(&anon->d_sb->s_anon);
		__hlist_bl_del(&anon->d_hash);
		anon->d_hash.pprev = NULL;
		hlist_bl_unlock(&anon->d_sb->s_anon);
	}
	list_move(&anon->d_u.d_child, &dparent->d_subdirs);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_any_alias(inode);
		if (new) {
			if (!IS_ROOT(new)) {
				spin_unlock(&inode->i_lock);
				dput(new);
				return ERR_PTR(-EIO);
			}
			if (d_ancestor(new, dentry)) {
				spin_unlock(&inode->i_lock);
				dput(new);
				return ERR_PTR(-EIO);
			}
			write_seqlock(&rename_lock);
			__d_materialise_dentry(dentry, new);
			write_sequnlock(&rename_lock);
			_d_rehash(new);
			spin_unlock(&new->d_lock);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else {
		d_instantiate(dentry, inode);
		if (d_unhashed(dentry))
			d_rehash(dentry);
	}
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
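/*
 * Illustrative sketch (hypothetical function): the usual shape of an
 * exportable filesystem's ->lookup() tail, handing whatever inode was
 * found (possibly NULL, leaving a negative dentry) to d_splice_alias().
 */
static struct dentry * __maybe_unused example_lookup_tail(struct inode *inode,
							  struct dentry *dentry)
{
	/* an ERR_PTR inode simply comes back out as ERR_CAST(inode) */
	return d_splice_alias(inode, dentry);
}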
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
				spin_unlock(&inode->i_lock);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_materialise_dentry(dentry, alias);
				write_sequnlock(&rename_lock);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing. This drops inode->i_lock */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual)) {
				if (PTR_ERR(actual) == -ELOOP)
					pr_warn_ratelimited(
						"VFS: Lookup of '%s' in %s %s"
						" would have caused loop\n",
						dentry->d_name.name,
						inode->i_sb->s_type->name,
						inode->i_sb->s_id);
				dput(alias);
			}
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else
		BUG_ON(!d_unhashed(actual));

	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
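/*
 * Worked example (illustrative): with a 6-byte buffer, "/a/bc" is built
 * right to left. Starting with p one past the end of the buffer and
 * len == 6:
 *
 *	prepend(&p, &len, "\0", 1);	[ . . . . . \0 ]  len == 5
 *	prepend(&p, &len, "/bc", 3);	[ . . / b c \0 ]  len == 2
 *	prepend(&p, &len, "/a", 2);	[ / a / b c \0 ]  len == 0
 *
 * One more prepend() would drive len negative and fail with
 * -ENAMETOOLONG; this is why the callers below pass the *end* of their
 * buffers and work backwards.
 */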
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be a mismatch between length and pointer.
 * The length cannot be trusted; we need to copy the name byte-by-byte until
 * the length is reached or a null byte is found. It also prepends "/" at
 * the beginning of the name. The sequence number check at the caller will
 * retry it again when a d_move() does happen. So any garbage in the buffer
 * due to a mismatched pointer and length will be discarded.
 */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
			/* Global root? */
			if (mnt != parent) {
				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			/*
			 * Filesystems needing to implement special "root names"
			 * should do so with ->d_dname()
			 */
			if (IS_ROOT(dentry) &&
			   (dentry->d_name.len != 1 ||
			    dentry->d_name.name[0] != '/')) {
				WARN(1, "Root dentry has weird name <%.*s>\n",
				     (int) dentry->d_name.len,
				     dentry->d_name.name);
			}
			if (!error)
				error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 *
	 * Some pseudo inodes are mountable.  When they are mounted
	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
	 * and instead have d_path return the mounted path.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
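/*
 * Illustrative sketch (hypothetical helper): the classic d_path() calling
 * convention. The *returned* pointer names the path; the string is built
 * from the end of the buffer, so the buffer start is meaningless.
 */
static void __maybe_unused example_print_path(const struct path *path)
{
	char *buf = __getname();	/* PATH_MAX-sized scratch buffer */
	char *p;

	if (!buf)
		return;
	p = d_path(path, buf, PATH_MAX);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "path: %s\n", p);
	__putname(buf);
}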
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
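/*
 * Example ->d_dname() built on dynamic_dname(), mirroring what pipefs
 * does for its anonymous dentries; the "example:" prefix is hypothetical.
 */
static char * __maybe_unused example_dname(struct dentry *dentry,
					   char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}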
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
EXPORT_SYMBOL(simple_dname);
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right */
	retval = end-1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock to protect against the d_parent
		 * changing under us due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
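/*
 * Illustrative sketch (hypothetical helper): is_subdir() answers
 * containment questions, e.g. whether a dentry stays inside a subtree
 * a policy confines it to.
 */
static bool __maybe_unused example_confined_to(struct dentry *dentry,
					       struct dentry *root)
{
	return is_subdir(dentry, root) != 0;
}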
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
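/*
 * Example (illustrative): the table size can be pinned from the kernel
 * command line instead of being sized from available memory, e.g.
 *
 *	dhash_entries=65536
 *
 * The value is parsed by set_dhash_entries() above and handed to
 * alloc_large_system_hash() below.
 */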
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}