// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include <trace/events/timestamp.h>

#include "internal.h"
#include "mount.h"
/**
 * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED
 * @stat: where to store the resulting values
 * @request_mask: STATX_* values requested
 * @inode: inode from which to grab the c/mtime
 *
 * Given @inode, grab the ctime and mtime out of it and store the result
 * in @stat. When fetching the value, flag it as QUERIED (if not already)
 * so the next write will record a distinct timestamp.
 *
 * NB: The QUERIED flag is tracked in the ctime, but we set it there even
 * if only the mtime was requested, as that ensures that the next mtime
 * change will be distinct.
 */
void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode)
{
	atomic_t *pcn = (atomic_t *)&inode->i_ctime_nsec;

	/* If neither time was requested, then don't report them */
	if (!(request_mask & (STATX_CTIME|STATX_MTIME))) {
		stat->result_mask &= ~(STATX_CTIME|STATX_MTIME);
		return;
	}

	stat->mtime = inode_get_mtime(inode);
	stat->ctime.tv_sec = inode->i_ctime_sec;
	stat->ctime.tv_nsec = (u32)atomic_read(pcn);
	if (!(stat->ctime.tv_nsec & I_CTIME_QUERIED))
		stat->ctime.tv_nsec = ((u32)atomic_fetch_or(I_CTIME_QUERIED, pcn));
	stat->ctime.tv_nsec &= ~I_CTIME_QUERIED;
	trace_fill_mg_cmtime(inode, &stat->ctime, &stat->mtime);
}
EXPORT_SYMBOL(fill_mg_cmtime);
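/*
 * Illustrative sketch (not part of this file): with multigrain timestamps,
 * a reader that has observed the current ctime forces the next ctime update
 * to pick a distinct, newer value.  Roughly:
 *
 *	fill_mg_cmtime(&stat, STATX_CTIME, inode);	// marks ctime QUERIED
 *	...						// inode is written later
 *	inode_set_ctime_current(inode);			// must now advance ctime
 *
 * The call sequence above is only an assumed usage example; the QUERIED
 * bookkeeping itself lives in inode->i_ctime_nsec as handled above.
 */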
/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);

	if (is_mgtime(inode)) {
		fill_mg_cmtime(stat, request_mask, inode);
	} else {
		stat->ctime = inode_get_ctime(inode);
		stat->mtime = inode_get_mtime(inode);
	}

	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}
}
EXPORT_SYMBOL(generic_fillattr);
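/*
 * Illustrative sketch (assumed, simplified): a filesystem with no special
 * attribute handling can implement its ->getattr() by delegating to
 * generic_fillattr() and then overriding individual fields, e.g.:
 *
 *	static int foo_getattr(struct mnt_idmap *idmap, const struct path *path,
 *			       struct kstat *stat, u32 request_mask,
 *			       unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(idmap, request_mask, inode, stat);
 *		stat->blksize = 4096;	// hypothetical fs-specific override
 *		return 0;
 *	}
 *
 * "foo_getattr" and the 4096 value are placeholders, not taken from this file.
 */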
/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);
/**
 * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
 * @stat: Where to fill in the attribute flags
 * @unit_min: Minimum supported atomic write length in bytes
 * @unit_max: Maximum supported atomic write length in bytes
 *
 * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
 * atomic write unit_min and unit_max values.
 */
void generic_fill_statx_atomic_writes(struct kstat *stat,
				      unsigned int unit_min,
				      unsigned int unit_max)
{
	/* Confirm that the request type is known */
	stat->result_mask |= STATX_WRITE_ATOMIC;

	/* Confirm that the file attribute type is known */
	stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;

	if (unit_min) {
		stat->atomic_write_unit_min = unit_min;
		stat->atomic_write_unit_max = unit_max;
		/* Initially only allow 1x segment */
		stat->atomic_write_segments_max = 1;

		/* Confirm atomic writes are actually supported */
		stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
	}
}
EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);
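/*
 * Illustrative sketch (assumed values): a filesystem that supports atomic
 * writes would typically call this helper from its ->getattr() when the
 * caller asked for the information, e.g.:
 *
 *	if (request_mask & STATX_WRITE_ATOMIC)
 *		generic_fill_statx_atomic_writes(stat, 4096, 65536);
 *
 * The 4096/65536 unit_min/unit_max values are placeholders; real filesystems
 * derive them from the device and filesystem geometry.
 */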
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask, query_flags);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must use
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieving.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
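/*
 * Illustrative in-kernel usage (assumed caller, not from this file): a
 * subsystem that already holds a path can query just the attributes it
 * needs, synchronised as a normal stat() would be:
 *
 *	struct kstat stat;
 *	int err = vfs_getattr(&file->f_path, &stat,
 *			      STATX_SIZE | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
 *
 * "file" here stands for whatever struct file the caller owns.
 */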
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	CLASS(fd_raw, f)(fd);

	if (fd_empty(f))
		return -EBADF;
	return vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
}
static int statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;

	return lookup_flags;
}
static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
			  u32 request_mask)
{
	int error = vfs_getattr(path, stat, request_mask, flags);
	if (error)
		return error;

	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path->mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path_mounted(path))
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/*
	 * If this is a block device inode, override the filesystem
	 * attributes with the block device specific parameters that need to be
	 * obtained from the bdev backing inode.
	 */
	if (S_ISBLK(stat->mode))
		bdev_statx(path, stat, request_mask);

	return 0;
}
static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
			u32 request_mask)
{
	CLASS(fd_raw, f)(fd);

	if (fd_empty(f))
		return -EBADF;
	return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_statx_path(&path, flags, stat, request_mask);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name = getname_maybe_null(filename, flags);

	if (!name && dfd >= 0)
		return vfs_fstat(dfd, stat);

	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user *statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */
#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif /* __ARCH_WANT_NEW_STAT */
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	struct filename *name;
	int error;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	name = getname_flags(pathname, lookup_flags);
	error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(error)) {
		putname(name);
		return error;
	}

	/*
	 * AFS mountpoints allow readlink(2) but are not symlinks
	 */
	if (d_is_symlink(path.dentry) ||
	    d_backing_inode(path.dentry)->i_op->readlink) {
		error = security_inode_readlink(path.dentry);
		if (!error) {
			touch_atime(&path);
			error = vfs_readlink(path.dentry, buf, bufsiz);
		}
	} else {
		error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
	}
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;

	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_subvol = stat->subvol;
	tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
	tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
	tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
		struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx_fd(fd, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat, or NULL (or "") with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
 * in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name = getname_maybe_null(filename, flags);

	if (!name && dfd >= 0)
		return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);

	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
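/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * fstat()-style usage described in the comment above looks roughly like:
 *
 *	struct statx stx;
 *
 *	// stat by path, relative to the current directory
 *	statx(AT_FDCWD, "somefile", 0, STATX_BASIC_STATS | STATX_BTIME, &stx);
 *
 *	// emulate fstat(fd, ...): pass the fd as dfd and a NULL filename
 *	statx(fd, NULL, AT_EMPTY_PATH, STATX_BASIC_STATS, &stx);
 *
 * Field and flag names come from the statx(2) uapi; error handling omitted.
 */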
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;

	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);

	return error;
}
#endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_STAT */
/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);
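/*
 * Worked example (illustrative): i_blocks counts 512-byte units and i_bytes
 * the sub-block remainder, so adding 1300 bytes to an inode that currently
 * has i_bytes == 300 proceeds as:
 *
 *	i_blocks += 1300 >> 9;		// +2 blocks (1024 bytes)
 *	i_bytes  += 1300 & 511;		// 300 + 276 = 576
 *	// 576 >= 512, so carry one more block:
 *	i_blocks += 1; i_bytes -= 512;	// i_bytes ends at 64
 *
 * i.e. the total byte count is always i_blocks * 512 + i_bytes.
 */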
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);