/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/exportfs.h>
/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_lpath((const char __user *)hreq->path, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
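/*
 * Illustrative only (not part of this file): a minimal userspace sketch of
 * requesting a filesystem handle for a path through this interface; names
 * follow the structures used above, error handling omitted.
 *
 *	xfs_fsop_handlereq_t hreq = { 0 };
 *	xfs_handle_t handle;
 *	__s32 hlen;
 *
 *	hreq.path = (void *)"/mnt/test";
 *	hreq.ohandle = &handle;
 *	hreq.ohandlen = &hlen;
 *	ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);
 */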
/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}
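/*
 * Open a file by the handle produced by xfs_find_handle().  This is a
 * privileged (CAP_SYS_ADMIN) operation; regular files opened this way are
 * marked O_NOATIME and FMODE_NOCMTIME so handle based access does not
 * perturb timestamps.
 */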
int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}
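/*
 * Update the DMAPI event mask and state in the on-disk inode; this is the
 * backend for XFS_IOC_FSSETDM and XFS_IOC_FSSETDM_BY_HANDLE.
 */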
int
xfs_set_dmattrs(
	xfs_inode_t	*ip,
	uint		evmask,
	uint16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	struct xfs_trans *tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate  = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);

	return error;
}
STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	error = mnt_want_write_file(parfilp);
	if (error)
		return error;

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry)) {
		mnt_drop_write_file(parfilp);
		return PTR_ERR(dentry);
	}

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

 out:
	mnt_drop_write_file(parfilp);
	dput(dentry);
	return error;
}
STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	struct xfs_fsop_attrlist_handlereq __user	*p = arg;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}
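/*
 * Helpers for XFS_IOC_ATTRMULTI_BY_HANDLE: copy a single extended attribute
 * value between kernel and user buffers (get/set) or remove it, on behalf of
 * one entry in the multi-op array.
 */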
int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = -EFAULT;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;
	kbuf = kmem_zalloc_large(*len, KM_SLEEP);
	if (!kbuf)
		return -ENOMEM;

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
	return error;
}

int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(kbuf);
	return error;
}

int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	uint32_t		flags)
{
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	error = xfs_attr_remove(XFS_I(inode), name, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	return error;
}
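/*
 * XFS_IOC_ATTRMULTI_BY_HANDLE: resolve the handle to a dentry, copy in the
 * array of attribute operations and dispatch each entry to the get/set/remove
 * helpers above, recording the per-op result in am_error.
 */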
STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}
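/*
 * xfs_ioc_space() implements the preallocation ioctls (XFS_IOC_ALLOCSP*,
 * FREESP*, RESVSP*, UNRESVSP* and ZERO_RANGE): validate the flock64 range,
 * take the IO and mmap locks, then hand off to the bmap_util helpers.
 */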
int
xfs_ioc_space(
	struct file		*filp,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	/*
	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	 */
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * length of <= 0 for resv/unresv/zero is invalid.  length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass below.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > inode->i_sb->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		flags |= XFS_PREALLOC_SET;
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		flags |= XFS_PREALLOC_SET;
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
				XFS_BMAPI_PREALLOC);
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		flags |= XFS_PREALLOC_CLEAR;
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
	}

	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}
STATIC int
xfs_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -EFAULT;

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if ((count = bulkreq.icount) <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
					sizeof(xfs_bstat_t), NULL, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
				     sizeof(xfs_bstat_t), bulkreq.ubuffer,
				     &done);

	if (error)
		return error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -EFAULT;

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -EFAULT;
	}

	return 0;
}
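/*
 * Illustrative only (not part of this file): userspace normally fetches the
 * geometry with a plain ioctl on any file descriptor on the filesystem, e.g.
 *
 *	struct xfs_fsop_geom geo;
 *
 *	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) == 0)
 *		printf("agcount = %u\n", geo.agcount);
 */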
STATIC int
xfs_ioc_fsgeometry_v1(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return error;

	/*
	 * Caller should have passed an argument of type
	 * xfs_fsop_geom_v1_t.  This is a proper subset of the
	 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
	 */
	if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 4);
	if (error)
		return error;

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -EFAULT;
	return 0;
}
/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}
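/*
 * xfs_ioc_fsgetxattr() fills the generic struct fsxattr from the XFS inode:
 * xflags, the extent size hints, the project id and the extent count of
 * either the data or the attribute fork.
 */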
STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	memset(&fa, 0, sizeof(struct fsxattr));

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa.fsx_nextents = xfs_iext_count(ip->i_afp);
			else
				fa.fsx_nextents = ip->i_d.di_anextents;
		} else
			fa.fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = xfs_iext_count(&ip->i_df);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}
STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & FS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
#if 0	/* disabled until the flag switching races are sorted out */
	if (xflags & FS_XFLAG_DAX)
		inode->i_flags |= S_DAX;
	else
		inode->i_flags &= ~S_DAX;
#endif
}
static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		di_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;

	/* Don't allow us to set DAX mode for a reflinked file for now. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
		return -EINVAL;

	/*
	 * Can't modify an immutable/append-only file unless
	 * we have appropriate permission.
	 */
	if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
	     (fa->fsx_xflags & (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND))) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	/* diflags2 only valid for v3 inodes. */
	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (di_flags2 && ip->i_d.di_version < 3)
		return -EINVAL;

	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_d.di_flags2 = di_flags2;

	xfs_diflags_to_linux(ip);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}
/*
 * If we are changing DAX flags, we have to ensure the file is clean and any
 * cached objects in the address space are invalidated and removed. This
 * requires us to lock out other IO and page faults similar to a truncate
 * operation. The locks need to be held until the transaction has been committed
 * so that the cache invalidation is atomic with respect to the DAX flag
 * manipulation.
 */
static int
xfs_ioctl_setattr_dax_invalidate(
	struct xfs_inode	*ip,
	struct fsxattr		*fa,
	int			*join_flags)
{
	struct inode		*inode = VFS_I(ip);
	struct super_block	*sb = inode->i_sb;
	int			error;

	*join_flags = 0;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems where the block size is equal to the page
	 * size. On directories it serves as an inherit hint.
	 */
	if (fa->fsx_xflags & FS_XFLAG_DAX) {
		if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
			return -EINVAL;
		if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
			return -EINVAL;
	}

	/* If the DAX state is not changing, we have nothing to do here. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
		return 0;
	if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
		return 0;

	/* lock, flush and invalidate mapping in preparation for flag change */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (error)
		goto out_unlock;

	*join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;
}
/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 *
 * The inode might already be IO locked on call. If this is the case, it is
 * indicated in @join_flags and we take full responsibility for ensuring they
 * are unlocked from now on. Hence if we have an error here, we still have to
 * unlock them. Otherwise, once they are joined to the transaction, they will
 * be unlocked on commit/cancel.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	int			join_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out_unlock;
	error = -EIO;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return ERR_PTR(error);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal to the file owner
	 * ID, except in cases where the CAP_FSETID capability is applicable.
	 */
	if (!inode_owner_or_capable(VFS_I(ip))) {
		error = -EPERM;
		goto out_cancel;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	return tp;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	if (join_flags)
		xfs_iunlock(ip, join_flags);
	return ERR_PTR(error);
}
/*
 * extent size hint validation is somewhat cumbersome. Rules are:
 *
 * 1. extent size hint is only valid for directories and regular files
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. can only be changed on regular files if no extents are allocated
 * 5. can be changed on directories at any time
 * 6. extsize hint of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 * 8. for non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    maximum AG size.
 *
 * Please keep this function in sync with xfs_scrub_inode_extsize.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
		return -EINVAL;

	if (fa->fsx_extsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	extsize_fsb;

		extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
		if (extsize_fsb > MAXEXTLEN)
			return -EINVAL;

		if (XFS_IS_REALTIME_INODE(ip) ||
		    (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
			size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
		} else {
			size = mp->m_sb.sb_blocksize;
			if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
				return -EINVAL;
		}

		if (fa->fsx_extsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);

	return 0;
}
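/*
 * For example (illustrative numbers only): with 4096 byte blocks and
 * 1048576 blocks per AG, a requested fsx_extsize of 6144 bytes fails rule 7
 * (not a multiple of the block size), and any hint larger than 524288 blocks
 * fails rule 8 (more than half the AG).
 */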
/*
 * CoW extent size hint validation rules are:
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 *
 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
 */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
		return 0;

	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
	    ip->i_d.di_version != 3)
		return -EINVAL;

	if (!S_ISREG(VFS_I(ip)->i_mode) && !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (fa->fsx_cowextsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	cowextsize_fsb;

		cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		if (cowextsize_fsb > MAXEXTLEN)
			return -EINVAL;

		size = mp->m_sb.sb_blocksize;
		if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
			return -EINVAL;

		if (fa->fsx_cowextsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}
static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;

	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (xfs_get_projid(ip) != fa->fsx_projid)
		return -EINVAL;
	if ((fa->fsx_xflags & FS_XFLAG_PROJINHERIT) !=
	    (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
		return -EINVAL;

	return 0;
}
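/*
 * xfs_ioctl_setattr() is the backend of XFS_IOC_FSSETXATTR: attach dquots if
 * quota is enabled, invalidate the mapping if the DAX flag is changing, then
 * run the projid/extsize/cowextsize/xflags checks and apply the changes in a
 * single transaction.
 */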
STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;
	int			join_flags = 0;

	trace_xfs_ioctl_setattr(ip);

	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
		if (code)
			return code;
	}

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
	if (code)
		goto error_free_dquots;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}

	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    xfs_get_projid(ip) != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
				capable(CAP_FOWNER) ?  XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (xfs_get_projid(ip) != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ASSERT(ip->i_d.di_version > 1);
		xfs_set_projid(ip, fa->fsx_projid);
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;
	if (ip->i_d.di_version == 3 &&
	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
				mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_cowextsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);
	return code;
}
STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	unsigned int		flags;
	int			join_flags = 0;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
	if (error)
		goto out_drop_write;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
static inline bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}

STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		/*FALLTHRU*/
	case XFS_IOC_GETBMAP:
		if (file->f_mode & FMODE_NOCMTIME)
			bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;
	if (bmx.bmv_count > ULONG_MAX / recsize)
		return -ENOMEM;

	buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kmem_free(buf);
	return error;
}
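/*
 * FS_IOC_GETFSMAP support: getfsmap_info carries the userspace destination
 * buffer and a running index while xfs_getfsmap() walks the reverse mapping
 * records and calls xfs_getfsmap_format() for each one.
 */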
struct getfsmap_info {
	struct xfs_mount	*mp;
	struct fsmap_head __user *data;
	unsigned int		idx;
	__u32			last_flags;
};

STATIC int
xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
{
	struct getfsmap_info	*info = priv;
	struct fsmap		fm;

	trace_xfs_getfsmap_mapping(info->mp, xfm);

	info->last_flags = xfm->fmr_flags;
	xfs_fsmap_from_internal(&fm, xfm);
	if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
			sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct getfsmap_info	info = { NULL };
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	bool			aborted = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	info.mp = ip->i_mount;
	info.data = arg;
	error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		aborted = true;
	} else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.idx) {
		info.last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
				&info.last_flags, sizeof(info.last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}
STATIC int
xfs_ioc_scrub_metadata(
	struct xfs_inode		*ip,
	void				__user *arg)
{
	struct xfs_scrub_metadata	scrub;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;

	error = xfs_scrub_metadata(ip, &scrub);
	if (error)
		return error;

	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;

	return 0;
}
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}
/*
 * Note: some of the ioctl's return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_ZERO_RANGE: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct dioattr	da;
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem =  da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
				dmi.fsd_dmstate);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(filp, arg);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks eofb;
		struct xfs_eofblocks keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		return xfs_icache_free_eofblocks(mp, &keofb);