// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/ratelimit.h>
#include <linux/fiemap.h>
#include "overlayfs.h"
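
/*
 * Setattr on an overlay dentry is applied to the upper inode only: the
 * dentry is copied up first (with data when truncating), and the change is
 * then performed on the upper dentry with the mounter's credentials via
 * notify_change().
 */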

int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	bool full_copy_up = false;
	struct dentry *upperdentry;
	const struct cred *old_cred;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	if (attr->ia_valid & ATTR_SIZE) {
		struct inode *realinode = d_inode(ovl_dentry_real(dentry));

		err = -ETXTBSY;
		if (atomic_read(&realinode->i_writecount) < 0)
			goto out_drop_write;

		/* Truncate should trigger data copy up as well */
		full_copy_up = true;
	}

	if (!full_copy_up)
		err = ovl_copy_up(dentry);
	else
		err = ovl_copy_up_with_data(dentry);
	if (!err) {
		struct inode *winode = NULL;

		upperdentry = ovl_dentry_upper(dentry);

		if (attr->ia_valid & ATTR_SIZE) {
			winode = d_inode(upperdentry);
			err = get_write_access(winode);
			if (err)
				goto out_drop_write;
		}

		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
			attr->ia_valid &= ~ATTR_MODE;

		/*
		 * We might have to translate ovl file into real file object
		 * once use cases emerge.  For now, simply don't let underlying
		 * filesystem rely on attr->ia_file
		 */
		attr->ia_valid &= ~ATTR_FILE;

		/*
		 * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
		 * set.  Overlayfs does not pass O_TRUNC flag to underlying
		 * filesystem during open -> do not pass ATTR_OPEN.  This
		 * disables optimization in fuse which assumes open(O_TRUNC)
		 * already set file size to 0.  But we never passed O_TRUNC to
		 * fuse.  So by clearing ATTR_OPEN, fuse will be forced to send
		 * setattr request to server.
		 */
		attr->ia_valid &= ~ATTR_OPEN;

		inode_lock(upperdentry->d_inode);
		old_cred = ovl_override_creds(dentry->d_sb);
		err = notify_change(upperdentry, attr, NULL);
		revert_creds(old_cred);
		if (!err)
			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
		inode_unlock(upperdentry->d_inode);

		if (winode)
			put_write_access(winode);
	}
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
{
	bool samefs = ovl_same_fs(dentry->d_sb);
	unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
	unsigned int xinoshift = 64 - xinobits;

	if (samefs) {
		/*
		 * When all layers are on the same fs, all real inode
		 * numbers are unique, so we use the overlay st_dev,
		 * which is friendly to du -x.
		 */
		stat->dev = dentry->d_sb->s_dev;
		return 0;
	} else if (xinobits) {
		/*
		 * All inode numbers of underlying fs should not be using the
		 * high xinobits, so we use high xinobits to partition the
		 * overlay st_ino address space. The high bits hold the fsid
		 * (upper fsid is 0). The lowest xinobit is reserved for mapping
		 * the non-persistent inode numbers range in case of overflow.
		 * This way all overlay inode numbers are unique and use the
		 * overlay st_dev.
		 */
		if (likely(!(stat->ino >> xinoshift))) {
			stat->ino |= ((u64)fsid) << (xinoshift + 1);
			stat->dev = dentry->d_sb->s_dev;
			return 0;
		} else if (ovl_xino_warn(dentry->d_sb)) {
			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
					    dentry, stat->ino, xinobits);
		}
	}

	/* The inode could not be mapped to a unified st_ino address space */
	if (S_ISDIR(dentry->d_inode->i_mode)) {
		/*
		 * Always use the overlay st_dev for directories, so 'find
		 * -xdev' will scan the entire overlay mount and won't cross the
		 * overlay mount boundaries.
		 *
		 * If not all layers are on the same fs the pair {real st_ino;
		 * overlay st_dev} is not unique, so use the non-persistent
		 * overlay st_ino for directories.
		 */
		stat->dev = dentry->d_sb->s_dev;
		stat->ino = dentry->d_inode->i_ino;
	} else {
		/*
		 * For non-samefs setup, if we cannot map all layers st_ino
		 * to a unified address space, we need to make sure that st_dev
		 * is unique per underlying fs, so we use the unique anonymous
		 * bdev assigned to the underlying fs.
		 */
		stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
	}

	return 0;
}
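
/*
 * Example of the xino partitioning above: with xinobits == 8 (xinoshift == 56),
 * a real inode number 0x1234 on the layer with fsid 2 is reported as
 * st_ino 0x0400000000001234 - the fsid occupies bits 57..63 and bit 56
 * (the lowest xinobit) stays clear for persistent inode numbers.
 */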

int ovl_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int flags)
{
	struct dentry *dentry = path->dentry;
	enum ovl_path_type type;
	struct path realpath;
	const struct cred *old_cred;
	bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
	int fsid = 0;
	int err;
	bool metacopy_blocks = false;

	metacopy_blocks = ovl_is_metacopy_dentry(dentry);

	type = ovl_path_real(dentry, &realpath);
	old_cred = ovl_override_creds(dentry->d_sb);
	err = vfs_getattr(&realpath, stat, request_mask, flags);
	if (err)
		goto out;

	/*
	 * For non-dir or same fs, we use st_ino of the copy up origin.
	 * This guarantees constant st_dev/st_ino across copy up.
	 * With xino feature and non-samefs, we use st_ino of the copy up
	 * origin masked with high bits that represent the layer id.
	 *
	 * If lower filesystem supports NFS file handles, this also guarantees
	 * persistent st_ino across mount cycle.
	 */
	if (!is_dir || ovl_same_dev(dentry->d_sb)) {
		if (!OVL_TYPE_UPPER(type)) {
			fsid = ovl_layer_lower(dentry)->fsid;
		} else if (OVL_TYPE_ORIGIN(type)) {
			struct kstat lowerstat;
			u32 lowermask = STATX_INO | STATX_BLOCKS |
					(!is_dir ? STATX_NLINK : 0);

			ovl_path_lower(dentry, &realpath);
			err = vfs_getattr(&realpath, &lowerstat,
					  lowermask, flags);
			if (err)
				goto out;

			/*
			 * Lower hardlinks may be broken on copy up to different
			 * upper files, so we cannot use the lower origin st_ino
			 * for those different files, even for the same fs case.
			 *
			 * Similarly, several redirected dirs can point to the
			 * same dir on a lower layer. With the "verify_lower"
			 * feature, we do not use the lower origin st_ino, if
			 * we haven't verified that this redirect is unique.
			 *
			 * With inodes index enabled, it is safe to use st_ino
			 * of an indexed origin. The index validates that the
			 * upper hardlink is not broken and that a redirected
			 * dir is the only redirect to that origin.
			 */
			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
			    (!ovl_verify_lower(dentry->d_sb) &&
			     (is_dir || lowerstat.nlink == 1))) {
				fsid = ovl_layer_lower(dentry)->fsid;
				stat->ino = lowerstat.ino;
			}

			/*
			 * If we are querying a metacopy dentry and lower
			 * dentry is data dentry, then use the blocks we
			 * queried just now. We don't have to do additional
			 * vfs_getattr(). If lower itself is metacopy, then
			 * additional vfs_getattr() is unavoidable.
			 */
			if (metacopy_blocks &&
			    realpath.dentry == ovl_dentry_lowerdata(dentry)) {
				stat->blocks = lowerstat.blocks;
				metacopy_blocks = false;
			}
		}

		if (metacopy_blocks) {
			/*
			 * If lower is not same as lowerdata or if there was
			 * no origin on upper, we can end up here.
			 */
			struct kstat lowerdatastat;
			u32 lowermask = STATX_BLOCKS;

			ovl_path_lowerdata(dentry, &realpath);
			err = vfs_getattr(&realpath, &lowerdatastat,
					  lowermask, flags);
			if (err)
				goto out;
			stat->blocks = lowerdatastat.blocks;
		}
	}

	err = ovl_map_dev_ino(dentry, stat, fsid);
	if (err)
		goto out;

	/*
	 * It's probably not worth it to count subdirs to get the
	 * correct link count.  nlink=1 seems to pacify 'find' and
	 * other utilities.
	 */
	if (is_dir && OVL_TYPE_MERGE(type))
		stat->nlink = 1;

	/*
	 * Return the overlay inode nlinks for indexed upper inodes.
	 * Overlay inode nlink counts the union of the upper hardlinks
	 * and non-covered lower hardlinks. It does not include the upper
	 * index hardlink.
	 */
	if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
		stat->nlink = dentry->d_inode->i_nlink;

out:
	revert_creds(old_cred);

	return err;
}

int ovl_permission(struct inode *inode, int mask)
{
	struct inode *upperinode = ovl_inode_upper(inode);
	struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
	const struct cred *old_cred;
	int err;

	/* Careful in RCU walk mode */
	if (!realinode) {
		WARN_ON(!(mask & MAY_NOT_BLOCK));
		return -ECHILD;
	}

	/*
	 * Check overlay inode with the creds of task and underlying inode
	 * with creds of mounter
	 */
	err = generic_permission(inode, mask);
	if (err)
		return err;

	old_cred = ovl_override_creds(inode->i_sb);
	if (!upperinode &&
	    !special_file(realinode->i_mode) && mask & MAY_WRITE) {
		mask &= ~(MAY_WRITE | MAY_APPEND);
		/* Make sure mounter can read file for copy up later */
		mask |= MAY_READ;
	}
	err = inode_permission(realinode, mask);
	revert_creds(old_cred);

	return err;
}

static const char *ovl_get_link(struct dentry *dentry,
				struct inode *inode,
				struct delayed_call *done)
{
	const struct cred *old_cred;
	const char *p;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	old_cred = ovl_override_creds(dentry->d_sb);
	p = vfs_get_link(ovl_dentry_real(dentry), done);
	revert_creds(old_cred);
	return p;
}

bool ovl_is_private_xattr(const char *name)
{
	return strncmp(name, OVL_XATTR_PREFIX,
		       sizeof(OVL_XATTR_PREFIX) - 1) == 0;
}

int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
		  const void *value, size_t size, int flags)
{
	int err;
	struct dentry *upperdentry = ovl_i_dentry_upper(inode);
	struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
	const struct cred *old_cred;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	if (!value && !upperdentry) {
		err = vfs_getxattr(realdentry, name, NULL, 0);
		if (err < 0)
			goto out_drop_write;
	}

	if (!upperdentry) {
		err = ovl_copy_up(dentry);
		if (err)
			goto out_drop_write;

		realdentry = ovl_dentry_upper(dentry);
	}

	old_cred = ovl_override_creds(dentry->d_sb);
	if (value)
		err = vfs_setxattr(realdentry, name, value, size, flags);
	else {
		WARN_ON(flags != XATTR_REPLACE);
		err = vfs_removexattr(realdentry, name);
	}
	revert_creds(old_cred);

	/* copy c/mtime */
	ovl_copyattr(d_inode(realdentry), inode);

out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
		  void *value, size_t size)
{
	ssize_t res;
	const struct cred *old_cred;
	struct dentry *realdentry =
		ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);

	old_cred = ovl_override_creds(dentry->d_sb);
	res = vfs_getxattr(realdentry, name, value, size);
	revert_creds(old_cred);
	return res;
}

static bool ovl_can_list(const char *s)
{
	/* List all non-trusted xattrs */
	if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
		return true;

	/* Never list trusted.overlay, list other trusted for superuser only */
	return !ovl_is_private_xattr(s) &&
	       ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
}
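
/*
 * In effect: user.*, system.* and security.* xattrs are always listed,
 * trusted.overlay.* is never listed, and other trusted.* xattrs are listed
 * only if the caller has CAP_SYS_ADMIN in the initial user namespace.
 */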

ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
	struct dentry *realdentry = ovl_dentry_real(dentry);
	ssize_t res;
	size_t len;
	char *s;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	res = vfs_listxattr(realdentry, list, size);
	revert_creds(old_cred);
	if (res <= 0 || size == 0)
		return res;

	/* filter out private xattrs */
	for (s = list, len = res; len;) {
		size_t slen = strnlen(s, len) + 1;

		/* underlying fs providing us with a broken xattr list? */
		if (WARN_ON(slen > len))
			return -EIO;

		len -= slen;
		if (!ovl_can_list(s)) {
			res -= slen;
			memmove(s, s + slen, len);
		} else {
			s += slen;
		}
	}

	return res;
}

struct posix_acl *ovl_get_acl(struct inode *inode, int type)
{
	struct inode *realinode = ovl_inode_real(inode);
	const struct cred *old_cred;
	struct posix_acl *acl;

	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
		return NULL;

	old_cred = ovl_override_creds(inode->i_sb);
	acl = get_acl(realinode, type);
	revert_creds(old_cred);

	return acl;
}

int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
{
	if (flags & S_ATIME) {
		struct ovl_fs *ofs = inode->i_sb->s_fs_info;
		struct path upperpath = {
			.mnt = ovl_upper_mnt(ofs),
			.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
		};

		if (upperpath.dentry) {
			touch_atime(&upperpath);
			inode->i_atime = d_inode(upperpath.dentry)->i_atime;
		}
	}
	return 0;
}

static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		      u64 start, u64 len)
{
	int err;
	struct inode *realinode = ovl_inode_real(inode);
	const struct cred *old_cred;

	if (!realinode->i_op->fiemap)
		return -EOPNOTSUPP;

	old_cred = ovl_override_creds(inode->i_sb);
	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
	revert_creds(old_cred);

	return err;
}

static const struct inode_operations ovl_file_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_acl	= ovl_get_acl,
	.update_time	= ovl_update_time,
	.fiemap		= ovl_fiemap,
};

static const struct inode_operations ovl_symlink_inode_operations = {
	.setattr	= ovl_setattr,
	.get_link	= ovl_get_link,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.update_time	= ovl_update_time,
};

static const struct inode_operations ovl_special_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_acl	= ovl_get_acl,
	.update_time	= ovl_update_time,
};

static const struct address_space_operations ovl_aops = {
	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
	.direct_IO		= noop_direct_IO,
};

/*
 * It is possible to stack overlayfs instance on top of another
 * overlayfs instance as lower layer. We need to annotate the
 * stackable i_mutex locks according to stack level of the super
 * block instance. An overlayfs instance can never be in stack
 * depth 0 (there is always a real fs below it).  An overlayfs
 * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
 *
 * For example, here is a snip from /proc/lockdep_chains after
 * dir_iterate of nested overlayfs:
 *
 * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
 * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
 * [...] &type->i_mutex_dir_key        (stack_depth=0)
 *
 * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
 *
 * This chain is valid:
 * - inode->i_rwsem			(inode_lock[2])
 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 *
 * And this chain is valid:
 * - inode->i_rwsem			(inode_lock[2])
 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 * - lowerinode->i_rwsem		(inode_lock[1])
 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 *
 * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
 * held, because it is in reverse order of the non-nested case using the same
 * upper fs:
 * - inode->i_rwsem			(inode_lock[1])
 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 * - OVL_I(inode)->lock			(ovl_inode_lock[1])
 */
#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH

static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];

	int depth = inode->i_sb->s_stack_depth - 1;

	if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
		depth = 0;

	if (S_ISDIR(inode->i_mode))
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
	else
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);

	lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
#endif
}
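
/*
 * Example: an overlay mounted directly over a regular filesystem has
 * s_stack_depth == 1 and uses the depth 0 keys; an overlay stacked on top of
 * another overlay has s_stack_depth == 2 and uses the depth 1 keys, matching
 * the nested lockdep chain shown in the comment above.
 */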

static void ovl_next_ino(struct inode *inode)
{
	struct ovl_fs *ofs = inode->i_sb->s_fs_info;

	inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
	if (unlikely(!inode->i_ino))
		inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
}

static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
{
	int xinobits = ovl_xino_bits(inode->i_sb);
	unsigned int xinoshift = 64 - xinobits;

	/*
	 * When d_ino is consistent with st_ino (samefs or i_ino has enough
	 * bits to encode layer), set the same value used for st_ino to i_ino,
	 * so inode number exposed via /proc/locks and the like will be
	 * consistent with d_ino and st_ino values. An i_ino value inconsistent
	 * with d_ino also causes nfsd readdirplus to fail.
	 */
	inode->i_ino = ino;
	if (ovl_same_fs(inode->i_sb)) {
		return;
	} else if (xinobits && likely(!(ino >> xinoshift))) {
		inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
		return;
	}

	/*
	 * For directory inodes on non-samefs with xino disabled or xino
	 * overflow, we allocate a non-persistent inode number, to be used for
	 * resolving st_ino collisions in ovl_map_dev_ino().
	 *
	 * To avoid ino collision with legitimate xino values from upper
	 * layer (fsid 0), use the lowest xinobit to map the non
	 * persistent inode numbers to the unified st_ino address space.
	 */
	if (S_ISDIR(inode->i_mode)) {
		ovl_next_ino(inode);
		if (xinobits) {
			inode->i_ino &= ~0UL >> xinobits;
			inode->i_ino |= 1UL << xinoshift;
		}
	}
}
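
/*
 * Example of the non-persistent mapping above: with xinobits == 8, the inode
 * number returned by ovl_next_ino() is masked to the low 56 bits and bit 56
 * is set, so it cannot collide with xino-mapped persistent inode numbers,
 * which always have bit 56 cleared.
 */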

void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
		    unsigned long ino, int fsid)
{
	struct inode *realinode;

	if (oip->upperdentry)
		OVL_I(inode)->__upperdentry = oip->upperdentry;
	if (oip->lowerpath && oip->lowerpath->dentry)
		OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry));
	if (oip->lowerdata)
		OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata));

	realinode = ovl_inode_real(inode);
	ovl_copyattr(realinode, inode);
	ovl_copyflags(realinode, inode);
	ovl_map_ino(inode, ino, fsid);
}

static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	inode->i_flags |= S_NOCMTIME;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
#endif

	ovl_lockdep_annotate_inode_mutex_key(inode);

	switch (mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &ovl_file_inode_operations;
		inode->i_fop = &ovl_file_operations;
		inode->i_mapping->a_ops = &ovl_aops;
		break;

	case S_IFDIR:
		inode->i_op = &ovl_dir_inode_operations;
		inode->i_fop = &ovl_dir_operations;
		break;

	case S_IFLNK:
		inode->i_op = &ovl_symlink_inode_operations;
		break;

	default:
		inode->i_op = &ovl_special_inode_operations;
		init_special_inode(inode, mode, rdev);
		break;
	}
}

/*
 * With inodes index enabled, an overlay inode nlink counts the union of upper
 * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
 * upper inode, the following nlink modifying operations can happen:
 *
 * 1. Lower hardlink copy up
 * 2. Upper hardlink created, unlinked or renamed over
 * 3. Lower hardlink whiteout or renamed over
 *
 * For the first, copy up case, the union nlink does not change, whether the
 * operation succeeds or fails, but the upper inode nlink may change.
 * Therefore, before copy up, we store the union nlink value relative to the
 * lower inode nlink in the index inode xattr trusted.overlay.nlink.
 *
 * For the second, upper hardlink case, the union nlink should be incremented
 * or decremented IFF the operation succeeds, aligned with nlink change of the
 * upper inode. Therefore, before link/unlink/rename, we store the union nlink
 * value relative to the upper inode nlink in the index inode.
 *
 * For the last, lower cover up case, we simplify things by preceding the
 * whiteout or cover up with copy up. This makes sure that there is an index
 * upper inode where the nlink xattr can be stored before the copied up upper
 * inode nlink is modified.
 */

#define OVL_NLINK_ADD_UPPER	(1 << 0)

/*
 * On-disk format for indexed nlink:
 *
 * nlink relative to the upper inode - "U[+-]NUM"
 * nlink relative to the lower inode - "L[+-]NUM"
 */

static int ovl_set_nlink_common(struct dentry *dentry,
				struct dentry *realdentry, const char *format)
{
	struct inode *inode = d_inode(dentry);
	struct inode *realinode = d_inode(realdentry);
	char buf[13];
	int len;

	len = snprintf(buf, sizeof(buf), format,
		       (int) (inode->i_nlink - realinode->i_nlink));

	if (WARN_ON(len >= sizeof(buf)))
		return -EIO;

	return ovl_do_setxattr(ovl_dentry_upper(dentry),
			       OVL_XATTR_NLINK, buf, len, 0);
}

int ovl_set_nlink_upper(struct dentry *dentry)
{
	return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
}

int ovl_set_nlink_lower(struct dentry *dentry)
{
	return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
}
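
/*
 * Example: if the overlay (union) nlink is 3 and the upper inode nlink is 2,
 * ovl_set_nlink_upper() stores "U+1"; ovl_get_nlink() below adds that diff
 * back to the current upper inode nlink to recover the union nlink.
 */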

unsigned int ovl_get_nlink(struct dentry *lowerdentry,
			   struct dentry *upperdentry,
			   unsigned int fallback)
{
	int nlink_diff;
	int nlink;
	char buf[13];
	int err;

	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
		return fallback;

	err = vfs_getxattr(upperdentry, OVL_XATTR_NLINK, &buf, sizeof(buf) - 1);
	if (err < 0)
		goto fail;

	buf[err] = '\0';
	if ((buf[0] != 'L' && buf[0] != 'U') ||
	    (buf[1] != '+' && buf[1] != '-'))
		goto fail;

	err = kstrtoint(buf + 1, 10, &nlink_diff);
	if (err < 0)
		goto fail;

	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
	nlink += nlink_diff;

	if (nlink <= 0)
		goto fail;

	return nlink;

fail:
	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
			    upperdentry, err);
	return fallback;
}

struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode)
		ovl_fill_inode(inode, mode, rdev);

	return inode;
}

static int ovl_inode_test(struct inode *inode, void *data)
{
	return inode->i_private == data;
}

static int ovl_inode_set(struct inode *inode, void *data)
{
	inode->i_private = data;
	return 0;
}

static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
			     struct dentry *upperdentry, bool strict)
{
	/*
	 * For directories, @strict verify from lookup path performs consistency
	 * checks, so NULL lower/upper in dentry must match NULL lower/upper in
	 * inode. Non @strict verify from NFS handle decode path passes NULL for
	 * 'unknown' lower/upper.
	 */
	if (S_ISDIR(inode->i_mode) && strict) {
		/* Real lower dir moved to upper layer under us? */
		if (!lowerdentry && ovl_inode_lower(inode))
			return false;

		/* Lookup of an uncovered redirect origin? */
		if (!upperdentry && ovl_inode_upper(inode))
			return false;
	}

	/*
	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
	 * This happens when finding a copied up overlay inode for a renamed
	 * or hardlinked overlay dentry and lower dentry cannot be followed
	 * by origin because lower fs does not support file handles.
	 */
	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
		return false;

	/*
	 * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
	 * This happens when finding a lower alias for a copied up hard link.
	 */
	if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
		return false;

	return true;
}

struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
			       bool is_upper)
{
	struct inode *inode, *key = d_inode(real);

	inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
	if (!inode)
		return NULL;

	if (!ovl_verify_inode(inode, is_upper ? NULL : real,
			      is_upper ? real : NULL, false)) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
{
	struct inode *key = d_inode(dir);
	struct inode *trap;
	bool res;

	trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
	if (!trap)
		return false;

	res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
				  !ovl_inode_lower(trap);

	iput(trap);
	return res;
}

/*
 * Create an inode cache entry for layer root dir, that will intentionally
 * fail ovl_verify_inode(), so any lookup that will find some layer root
 * will fail.
 */
struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
{
	struct inode *key = d_inode(dir);
	struct inode *trap;

	if (!d_is_dir(dir))
		return ERR_PTR(-ENOTDIR);

	trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
			    ovl_inode_set, key);
	if (!trap)
		return ERR_PTR(-ENOMEM);

	if (!(trap->i_state & I_NEW)) {
		/* Conflicting layer roots? */
		iput(trap);
		return ERR_PTR(-ELOOP);
	}

	trap->i_mode = S_IFDIR;
	trap->i_flags = S_DEAD;
	unlock_new_inode(trap);

	return trap;
}

/*
 * Does overlay inode need to be hashed by lower inode?
 */
static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
			     struct dentry *lower, bool index)
{
	struct ovl_fs *ofs = sb->s_fs_info;

	/* No, if pure upper */
	if (!lower)
		return false;

	/* Yes, if already indexed */
	if (index)
		return true;

	/* Yes, if won't be copied up */
	if (!ovl_upper_mnt(ofs))
		return true;

	/* No, if lower hardlink is or will be broken on copy up */
	if ((upper || !ovl_indexdir(sb)) &&
	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
		return false;

	/* No, if non-indexed upper with NFS export */
	if (sb->s_export_op && upper)
		return false;

	/* Otherwise, hash by lower inode for fsnotify */
	return true;
}
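
/*
 * Example: a lower-only regular file with nlink == 1 on an overlay that has
 * an upper layer falls through to the last case above and is hashed by the
 * lower inode, since copy up will not break it and fsnotify marks should stay
 * attached to the same overlay inode across copy up.
 */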

static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
			       struct inode *key)
{
	return newinode ? inode_insert5(newinode, (unsigned long) key,
					 ovl_inode_test, ovl_inode_set, key) :
			  iget5_locked(sb, (unsigned long) key,
				       ovl_inode_test, ovl_inode_set, key);
}

struct inode *ovl_get_inode(struct super_block *sb,
			    struct ovl_inode_params *oip)
{
	struct dentry *upperdentry = oip->upperdentry;
	struct ovl_path *lowerpath = oip->lowerpath;
	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
	struct inode *inode;
	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
					oip->index);
	int fsid = bylower ? lowerpath->layer->fsid : 0;
	bool is_dir;
	unsigned long ino = 0;
	int err = oip->newinode ? -EEXIST : -ENOMEM;

	if (!realinode)
		realinode = d_inode(lowerdentry);

	/*
	 * Copy up origin (lower) may exist for non-indexed upper, but we must
	 * not use lower as hash key if this is a broken hardlink.
	 */
	is_dir = S_ISDIR(realinode->i_mode);
	if (upperdentry || bylower) {
		struct inode *key = d_inode(bylower ? lowerdentry :
						      upperdentry);
		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;

		inode = ovl_iget5(sb, oip->newinode, key);
		if (!inode)
			goto out_err;
		if (!(inode->i_state & I_NEW)) {
			/*
			 * Verify that the underlying files stored in the inode
			 * match those in the dentry.
			 */
			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
					      true)) {
				iput(inode);
				err = -ESTALE;
				goto out_err;
			}

			dput(upperdentry);
			kfree(oip->redirect);
			goto out;
		}

		/* Recalculate nlink for non-dir due to indexing */
		if (!is_dir)
			nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
		set_nlink(inode, nlink);
		ino = key->i_ino;
	} else {
		/* Lower hardlink that will be broken on copy up */
		inode = new_inode(sb);
		if (!inode) {
			err = -ENOMEM;
			goto out_err;
		}
		ino = realinode->i_ino;
		fsid = lowerpath->layer->fsid;
	}
	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
	ovl_inode_init(inode, oip, ino, fsid);

	if (upperdentry && ovl_is_impuredir(upperdentry))
		ovl_set_flag(OVL_IMPURE, inode);

	if (oip->index)
		ovl_set_flag(OVL_INDEX, inode);

	OVL_I(inode)->redirect = oip->redirect;

	if (bylower)
		ovl_set_flag(OVL_CONST_INO, inode);

	/* Check for non-merge dir that may have whiteouts */
	if (is_dir) {
		if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
		    ovl_check_origin_xattr(upperdentry ?: lowerdentry)) {
			ovl_set_flag(OVL_WHITEOUTS, inode);
		}
	}

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
out:
	return inode;

out_err:
	pr_warn_ratelimited("failed to get inode (%i)\n", err);
	inode = ERR_PTR(err);
	goto out;
}