/*
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_whiteout;
	char name[];
};
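/* Merged directory listing, cached per inode and shared by open dir files */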
struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};
struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};
struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};
static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}
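/*
 * Find the rb-tree position for @name: returns true if an entry with this
 * name already exists, otherwise leaves *link and *parent set up for
 * rb_link_node().
 */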
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}
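/* Look up a cached entry by name; returns NULL if not present */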
static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}
static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}
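/*
 * Allocate and initialize a cache entry for one directory entry.  DT_CHR
 * entries may turn out to be whiteouts, so they are chained up for the
 * later whiteout check in ovl_check_whiteouts().
 */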
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}
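/*
 * Actor for the lowest layer: an entry whose name was already seen in an
 * upper layer is moved to the "middle" list, otherwise a new entry is
 * added there, keeping lowest layer entries grouped at the front of the
 * merged list.
 */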
static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}
void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}
void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}
static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}
static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}
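/*
 * Read all entries of one real directory into the rdd list, then resolve
 * which of the DT_CHR candidates are actually whiteouts.
 */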
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}
static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	enum ovl_path_type type = ovl_path_type(dentry);

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
	if (od->is_real && OVL_TYPE_MERGE(type))
		od->is_real = false;
}
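/*
 * Build the merged entry list for a directory, reading each layer in turn
 * from the uppermost downwards.
 */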
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}
static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}
/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for
 * upper entries.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int err = 0;

	if (!ovl_same_sb(dir->d_sb))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}
static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}
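/*
 * Read the upper layer of an impure directory and keep only the entries
 * whose overlay d_ino differs from the real ino (i.e. copied up entries),
 * indexed by name for translation in ovl_fill_real().
 */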
static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}
static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}
struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
};
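/*
 * Translating actor: substitutes the overlay d_ino for ".." and for copied
 * up entries found in the impure cache before forwarding to the original
 * actor.
 */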
static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0)
		ino = rdt->parent_ino;
	else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}
static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}
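/*
 * Iterate the real directory while fixing up d_ino: needed when the parent
 * is a merge dir (for "..") or when this dir is impure (for copied up
 * entries).
 */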
static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
	};

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_same_sb(dentry->d_sb) &&
		    (ovl_is_impure_dir(file) ||
		     OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/* Nothing to sync for lower */
	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return 0;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		struct inode *inode = file_inode(file);

		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}
static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}
static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = !OVL_TYPE_MERGE(type);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}
const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};
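/*
 * Check whether a merged directory is empty, ignoring whiteouts and the
 * "." and ".." entries.  The merged list is returned to the caller for a
 * later ovl_cleanup_whiteouts() pass.
 */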
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p;
	struct rb_root root = RB_ROOT;

	err = ovl_dir_read_merged(dentry, list, &root);
	if (err)
		return err;

	err = 0;

	list_for_each_entry(p, list, l_node) {
		if (p->is_whiteout)
			continue;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		err = -ENOTEMPTY;
		break;
	}

	return err;
}
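/* Remove all whiteout entries in @list from the upper directory */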
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}
static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}
/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, or a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}
static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}
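/*
 * Remove a leftover workdir entry; if rmdir fails on a non-empty directory,
 * delete its contents first.  The level argument bounds the recursion.
 */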
void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}
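/*
 * Verify each entry in the index directory against the lower layer stack
 * and clean up index entries that turn out to be stale or orphaned.
 */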
int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
			 struct path *lowerstack, unsigned int numlower)
{
	int err;
	struct dentry *index = NULL;
	struct inode *dir = dentry->d_inode;
	struct path path = { .mnt = mnt, .dentry = dentry };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, dentry, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(index, lowerstack, numlower);
		/* Cleanup stale and orphan index entries */
		if (err && (err == -ESTALE || err == -ENOENT))
			err = ovl_cleanup(dir, index);
		if (err)
			break;

		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}