/*
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"
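
/*
 * One cached directory entry in the merged view.  Each entry lives both in
 * an rb-tree keyed by name (used to detect duplicates across layers) and on
 * a linked list that preserves the order in which entries are emitted.
 */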
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_whiteout;
	char name[];
};
struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
};
struct ovl_readdir_data {
	struct dir_context ctx;
	bool is_lowest;
	struct rb_root root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool d_type_supported;
};
struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};
static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return container_of(n, struct ovl_cache_entry, node);
}
static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->ino = ino;
	p->is_whiteout = false;

	/* Character devices may turn out to be whiteouts; remember them */
	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root.rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	while (*newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			return 0;
	}

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL)
		return -ENOMEM;

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, &rdd->root);

	return 0;
}
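
/*
 * Fill callback used for the lowest layer.  Entries already seen in an
 * upper layer are moved onto the temporary "middle" list; new ones are
 * added there directly.  The middle list is later spliced in ahead of the
 * upper entries so that directory offsets stay reasonably constant (see
 * ovl_dir_read_merged()).
 */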
static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(&rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}
void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}
static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(dentry) == cache)
			ovl_set_dir_cache(dentry, NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}
static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
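
/*
 * Character device entries collected during iteration may be whiteouts.
 * Look each candidate up (temporarily raising CAP_DAC_OVERRIDE so the
 * lookup cannot fail on permissions) and record the result in the cache
 * entry.
 */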
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;
	struct cred *override_cred;

	override_cred = prepare_creds();
	if (!override_cred)
		return -ENOMEM;

	/*
	 * CAP_DAC_OVERRIDE for lookup
	 */
	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
	old_cred = override_creds(override_cred);

	err = mutex_lock_killable(&dir->d_inode->i_mutex);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);
	put_cred(override_cred);

	return err;
}
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}
static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	enum ovl_path_type type = ovl_path_type(dentry);

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
	if (od->is_real && OVL_TYPE_MERGE(type))
		od->is_real = false;
}
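
/*
 * Read all layers of a merged directory into a single list, walking the
 * layers from the top down so that upper entries take precedence over
 * lower ones.
 */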
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.list = list,
		.root = RB_ROOT,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}
static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(dentry);
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(dentry, NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);

	res = ovl_dir_read_merged(dentry, &cache->entries);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(dentry, cache);

	return cache;
}
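
/*
 * For a non-merged directory simply forward to the underlying directory.
 * For a merged directory, emit entries from the cached merged listing,
 * skipping whiteouts.
 */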
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real)
		return iterate_dir(od->realfile, ctx);

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout)
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
		struct inode *inode = file_inode(file);

		realfile = lockless_dereference(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);
			smp_mb__before_spinlock();
			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				od->upperfile = realfile;
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}
static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}
static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = !OVL_TYPE_MERGE(type);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}
const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};
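
/*
 * A merged directory is considered empty if it contains nothing but ".",
 * ".." and whiteout entries.
 */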
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p;

	err = ovl_dir_read_merged(dentry, list);
	if (err)
		return err;

	err = 0;

	list_for_each_entry(p, list, l_node) {
		if (p->is_whiteout)
			continue;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		err = -ENOTEMPTY;
		break;
	}

	return err;
}
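
/*
 * Delete the whiteout entries recorded in @list from the upper directory
 * @upper.
 */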
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}
static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}
/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}