/*
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

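/*
 * Note on the cache layout: each merged-dir entry is kept on both a list
 * and an rb-tree.  The list (ovl_dir_cache.entries) preserves a stable
 * iteration order that readdir offsets can index into, while the rb-tree
 * (ovl_dir_cache.root) gives O(log n) duplicate detection when entries
 * from multiple layers are merged.
 */
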
static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

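/*
 * The tree below is ordered by name, using a prefix comparison over the
 * probe name's length with the entry length as tie-breaker.  E.g. "a"
 * sorts before "ab": strncmp("a", "ab", 1) == 0, but len 1 < tmp->len 2
 * sends the search left.  This makes lookups by (name, len) pairs exact.
 */
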
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(rdd->dentry->d_sb))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

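/*
 * Whiteouts are represented in the upper layer as character devices with
 * device number 0/0, so a DT_CHR entry seen during readdir is only a
 * whiteout *candidate*: ovl_cache_entry_new() below chains it on
 * first_maybe_whiteout, and ovl_check_whiteouts() verifies by lookup once
 * the whole directory has been read.
 */
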
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

/*
 * Can we iterate real dir directly?
 *
 * Non-merge dir may contain whiteouts from a time it was a merge upper, before
 * lower dir was removed under it and possibly before it was rotated from upper
 * to lower layer.
 */
static bool ovl_dir_is_real(struct dentry *dir)
{
	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

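/*
 * A merged directory is read in two passes over the layers: all non-lowest
 * layers fill the list and rb-tree through ovl_cache_entry_add_rb(), then
 * the lowest layer runs with is_lowest set, so entries unique to it collect
 * on the temporary "middle" list and end up spliced in before the upper
 * layer's entries.
 */
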
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

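/*
 * With the "xino" feature, the high bits of the 64-bit inode number encode
 * the layer's fsid.  For example, with xinobits == 8 and fsid == 2, lower
 * inode 0x123456 is reported as:
 *
 *	0x123456 | (2ULL << (64 - 8)) == 0x0200000000123456
 *
 * An inode number that already uses any of the top xinobits bits cannot be
 * remapped and is reported as-is, with a ratelimited warning below.
 */
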
/* Map inode number to lower fs unique range */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen)
{
	if (ino >> (64 - xinobits)) {
		pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
				    namelen, name, ino, xinobits);
		return ino;
	}

	return ino | ((u64)fsid) << (64 - xinobits);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layer are on same fs, report real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(dir->d_sb);
	int err = 0;

	if (!ovl_same_sb(dir->d_sb) && !xinobits)
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		/*
		 * Directory inode is always on overlay st_dev.
		 * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
		 * of xino bits overflow.
		 */
		WARN_ON_ONCE(S_ISDIR(stat.mode) &&
			     dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len);
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(dir->d_sb),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

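/*
 * For a merged directory, f_pos is an index into the stable entry cache
 * rather than a real directory offset: ovl_seek_cursor() simply walks the
 * cache list pos entries forward, and each emitted entry advances pos by
 * one.
 */
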
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_xino_bits(dentry->d_sb) ||
		    (ovl_same_sb(dentry->d_sb) &&
		     (ovl_is_impure_dir(file) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

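/*
 * ovl_dir_fsync() below may need to open the upper directory after a copy
 * up that happened while the merged dir was already open.  od->upperfile
 * is published with smp_store_release() and re-checked under inode_lock(),
 * so a racing reader either sees NULL and takes the lock or sees a fully
 * initialized file; READ_ONCE() pairs with the release store.
 */
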
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/* Nothing to sync for lower */
	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return 0;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		struct inode *inode = file_inode(file);

		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	revert_creds(old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir, they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 not supported/unknown. Negative values
 * if error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}

int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(indexdir, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}