/*
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

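/*
 * A merged directory is cached as a list of entries. Each entry is kept
 * on two index structures at once: l_node chains it on the ordered list
 * that readdir offsets index into, and node links it into an rbtree keyed
 * by name so merging can cheaply detect names already seen in a higher
 * layer.
 */
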
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

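/*
 * Per-open directory state: a non-merge dir is iterated straight through
 * realfile, while a merge dir iterates the refcounted cache via cursor.
 * upperfile is only populated lazily, by fsync on a dir that was copied
 * up while open (see ovl_dir_fsync()).
 */
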
static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

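/*
 * Tree order: names are compared with strncmp() over the probe's length.
 * Cached names are NUL-terminated, so a zero result with a shorter probe
 * means the probe is a proper prefix and sorts to the left; equality is
 * only reported for an exact match (same bytes, same length).
 */
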
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

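/*
 * Every DT_CHR entry is chained on first_maybe_whiteout above because an
 * overlayfs whiteout is a 0/0 character device; whether a candidate really
 * is one is decided later, in ovl_check_whiteouts(), where the lookups can
 * be done in one locked pass.
 */
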
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

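/*
 * The lookups above run with the overlay mounter's credentials and with
 * the real directory locked; taking i_rwsem with down_write_killable()
 * lets the wait be interrupted by a fatal signal.
 */
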
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

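/*
 * A single iterate_dir() call is not guaranteed to reach end of directory,
 * so the loop above keeps calling it until a pass produces no entries
 * (rdd->count stays zero) or an error is recorded in rdd->err.
 */
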
static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	enum ovl_path_type type = ovl_path_type(dentry);

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
	if (od->is_real && OVL_TYPE_MERGE(type))
		od->is_real = false;
}

static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

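/*
 * ovl_path_next() walks the layers from the top down. Every layer except
 * the lowest is merged through the rbtree, so a name seen in an upper
 * layer shadows the same name below; the final pass with is_lowest set
 * only adds names not already present (and repositions ones that are)
 * relative to the "middle" marker spliced in above.
 */
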
static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

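/* A merge dir's f_pos is simply an index into the cached entry list. */
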
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for
 * upper entries.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int err = 0;

	if (!ovl_same_sb(dir->d_sb))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

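/*
 * Unlike the refcounted merge cache, this impure cache keeps only entries
 * whose d_ino needed correction (ino != real_ino); everything else was
 * dropped in ovl_dir_read_impure() right after ovl_cache_update_ino().
 */
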
struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0)
		ino = rdt->parent_ino;
	else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

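/*
 * ovl_fill_real() only rewrites d_ino: '..' gets the parent overlay ino
 * and names found in the impure cache get their corrected ino; every
 * entry is then forwarded unchanged to the original dir_context actor.
 */
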
static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
	};

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_same_sb(dentry->d_sb) &&
		    (ovl_is_impure_dir(file) ||
		     OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

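/*
 * Two regimes above: a real (non-merge) dir is a passthrough to
 * iterate_dir() on the underlying file, detouring through
 * ovl_iterate_real() only when d_ino values need translation; a merge
 * dir is always emitted from the refcounted cache, resolving d_ino
 * values that ovl_cache_entry_new() deferred (p->ino == 0) on the fly.
 */
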
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/* Nothing to sync for lower */
	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return 0;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		struct inode *inode = file_inode(file);

		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

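/*
 * od->upperfile is published with smp_store_release() and read with
 * READ_ONCE() so that once it is non-NULL a concurrent fsync can use it
 * without taking the inode lock; the lock only serializes the first
 * open of the upper file.
 */
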
static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = !OVL_TYPE_MERGE(type);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p;
	struct rb_root root = RB_ROOT;

	err = ovl_dir_read_merged(dentry, list, &root);
	if (err)
		return err;

	err = 0;

	list_for_each_entry(p, list, l_node) {
		if (p->is_whiteout)
			continue;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		err = -ENOTEMPTY;
		break;
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown. Negative
 * values if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}

int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
			 struct path *lowerstack, unsigned int numlower)
{
	int err;
	struct dentry *index = NULL;
	struct inode *dir = dentry->d_inode;
	struct path path = { .mnt = mnt, .dentry = dentry };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, dentry, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(index, lowerstack, numlower);
		/* Cleanup stale and orphan index entries */
		if (err && (err == -ESTALE || err == -ENOENT))
			err = ovl_cleanup(dir, index);
		if (err)
			break;

		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}