/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_dfrag.h"
#include "xfs_fsops.h"

#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
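/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of XFS_IOC_PATH_TO_FSHANDLE.  The <xfs/xfs.h> header name, the
 * mount point and the file path are assumptions; the xfs_fsop_handlereq
 * fields used (path, ohandle, ohandlen) are the same ones consumed below,
 * and the kernel writes the handle and its size back through the output
 * pointers.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	xfs_handle_t		handle;
 *	__u32			hlen = 0;
 *	int			fd = open("/mnt/xfs", O_RDONLY);
 *
 *	hreq.path     = "/mnt/xfs/some/file";
 *	hreq.ohandle  = &handle;
 *	hreq.ohandlen = &hlen;
 *	if (ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_FSHANDLE");
 */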
	xfs_fsop_handlereq_t	hreq;

	if (copy_from_user(&hreq, arg, sizeof(hreq)))
		return -XFS_ERROR(EFAULT);

	memset((char *)&handle, 0, sizeof(handle));

	case XFS_IOC_PATH_TO_FSHANDLE:
	case XFS_IOC_PATH_TO_HANDLE: {
		error = user_path_walk_link((const char __user *)hreq.path, &nd);

		ASSERT(nd.dentry->d_inode);
		inode = igrab(nd.dentry->d_inode);

	case XFS_IOC_FD_TO_HANDLE: {
		file = fget(hreq.fd);

		ASSERT(file->f_path.dentry);
		ASSERT(file->f_path.dentry->d_inode);
		inode = igrab(file->f_path.dentry->d_inode);

		return -XFS_ERROR(EINVAL);

	if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
		/* we're not in XFS anymore, Toto */
		return -XFS_ERROR(EINVAL);

	switch (inode->i_mode & S_IFMT) {
		return -XFS_ERROR(EBADF);

	/* we need the vnode */
	vp = vn_from_inode(inode);

	/* now we can grab the fsid */
	memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
	hsize = sizeof(xfs_fsid_t);

	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
		/* need to get access to the xfs_inode to read the generation */
		lock_mode = xfs_ilock_map_shared(ip);

		/* fill in fid section of handle from inode */
		handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.xfs_fid_len);
		handle.ha_fid.xfs_fid_pad = 0;
		handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
		handle.ha_fid.xfs_fid_ino = ip->i_ino;

		xfs_iunlock_map_shared(ip, lock_mode);

		hsize = XFS_HSIZE(handle);

	/* now copy our handle into the user buffer & write out the size */
	if (copy_to_user(hreq.ohandle, &handle, hsize) ||
	    copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
		return -XFS_ERROR(EFAULT);
/*
 * Convert userspace handle data into vnode (and inode).
 * We [ab]use the fact that all the fsop_handlereq ioctl calls
 * have a data structure argument whose first component is always
 * an xfs_fsop_handlereq_t, so we can cast to and from this type.
 * This allows us to optimise the copy_from_user calls and gives
 * a handy, shared routine.
 *
 * If no error, caller must always VN_RELE the returned vp.
 */
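/*
 * Illustrative only, not part of the original source: the layout convention
 * the comment above relies on.  Each *_by_handle request structure starts
 * with the common xfs_fsop_handlereq_t, which is why the callers below can
 * pass e.g. &al_hreq.hreq, &dmhreq.hreq or &am_hreq.hreq straight to this
 * routine.  The field list after hreq is approximate (taken from the
 * attrlist request as used later in this file); exact types live in the
 * userspace-visible XFS headers.
 *
 *	typedef struct xfs_fsop_attrlist_handlereq {
 *		struct xfs_fsop_handlereq	hreq;	   (common header first)
 *		struct xfs_attrlist_cursor	pos;
 *		__u32				flags;
 *		__u32				buflen;
 *		void				*buffer;
 *	} xfs_fsop_attrlist_handlereq_t;
 */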
xfs_vget_fsop_handlereq(
	struct inode		*parinode,	/* parent inode pointer    */
	xfs_fsop_handlereq_t	*hreq,
	struct inode		**inode)
	xfs_handle_t		*handlep;
	struct inode		*inodep;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.xfs_fid_len !=
				(hlen - sizeof(handlep->ha_fsid)
					- sizeof(handlep->ha_fid.xfs_fid_len))
		    || handlep->ha_fid.xfs_fid_pad)
			return XFS_ERROR(EINVAL);

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
		ino  = xfid->xfs_fid_ino;
		igen = xfid->xfs_fid_gen;
		return XFS_ERROR(EINVAL);

	/*
	 * Get the XFS inode, building a vnode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
		return XFS_ERROR(EIO);
	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);

	inodep = vn_to_inode(vpp);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	struct file		*parfilp,
	struct inode		*parinode)
	struct dentry		*dentry;
	xfs_fsop_handlereq_t	hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		return -XFS_ERROR(EINVAL);

#if BITS_PER_LONG != 32
	hreq.oflags |= O_LARGEFILE;

	/* Put open permission in namei format. */
	permflag = hreq.oflags;
	if ((permflag+1) & O_ACCMODE)
	if (permflag & O_TRUNC)

	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		return -XFS_ERROR(EPERM);

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		return -XFS_ERROR(EACCES);

	/* Can't write directories. */
	if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		return -XFS_ERROR(EISDIR);

	if ((new_fd = get_unused_fd()) < 0) {

	dentry = d_alloc_anon(inode);
	if (dentry == NULL) {
		put_unused_fd(new_fd);
		return -XFS_ERROR(ENOMEM);

	/* Ensure umount returns EBUSY on umounts while this file is open. */
	mntget(parfilp->f_path.mnt);

	/* Create file pointer. */
	filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
		put_unused_fd(new_fd);
		return -XFS_ERROR(-PTR_ERR(filp));

	if (inode->i_mode & S_IFREG) {
		/* invisible operation should not change atime */
		filp->f_flags |= O_NOATIME;
		filp->f_op = &xfs_invis_file_operations;

	fd_install(new_fd, filp);
xfs_readlink_by_handle(
	struct inode		*parinode)
	xfs_fsop_handlereq_t	hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);

	/* Restrict this handle operation to symlinks only. */
	if (!S_ISLNK(inode->i_mode)) {
		return -XFS_ERROR(EINVAL);

	if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
		return -XFS_ERROR(EFAULT);

	aiov.iov_base = hreq.ohandle;

	auio.uio_iov = (struct kvec *)&aiov;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_resid = olen;

	error = bhv_vop_readlink(vp, &auio, IO_INVIS, NULL);

	return (olen - auio.uio_resid);
xfs_fssetdm_by_handle(
	struct inode		*parinode)
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode);

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		return -XFS_ERROR(EPERM);

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		return -XFS_ERROR(EFAULT);

	bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
	error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
xfs_attrlist_by_handle(
	struct inode		*parinode)
	attrlist_cursor_kern_t	*cursor;
	xfs_fsop_attrlist_handlereq_t al_hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq,

	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = bhv_vop_attr_list(vp, kbuf, al_hreq.buflen, al_hreq.flags,

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
xfs_attrmulti_attr_get(
	if (*len > XATTR_SIZE_MAX)
	kbuf = kmalloc(*len, GFP_KERNEL);

	error = bhv_vop_attr_get(vp, name, kbuf, len, flags, NULL);

	if (copy_to_user(ubuf, kbuf, *len))

xfs_attrmulti_attr_set(
	const char		__user *ubuf,
	if (IS_RDONLY(&vp->v_inode))
	if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
	if (len > XATTR_SIZE_MAX)

	kbuf = kmalloc(len, GFP_KERNEL);
	if (copy_from_user(kbuf, ubuf, len))

	error = bhv_vop_attr_set(vp, name, kbuf, len, flags, NULL);

xfs_attrmulti_attr_remove(
	if (IS_RDONLY(&vp->v_inode))
	if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
	return bhv_vop_attr_remove(vp, name, flags, NULL);
xfs_attrmulti_by_handle(
	struct inode		*parinode)
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode);

	size = am_hreq.opcount * sizeof(attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)

	ops = kmalloc(size, GFP_KERNEL);

	if (copy_from_user(ops, am_hreq.ops, size))

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);

	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user(attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
		if (ops[i].am_error < 0)

		switch (ops[i].am_opcode) {
			ops[i].am_error = xfs_attrmulti_attr_get(vp,
					attr_name, ops[i].am_attrvalue,
					&ops[i].am_length, ops[i].am_flags);
			ops[i].am_error = xfs_attrmulti_attr_set(vp,
					attr_name, ops[i].am_attrvalue,
					ops[i].am_length, ops[i].am_flags);
			ops[i].am_error = xfs_attrmulti_attr_remove(vp,
					attr_name, ops[i].am_flags);
			ops[i].am_error = EINVAL;

	if (copy_to_user(am_hreq.ops, ops, size))
		error = XFS_ERROR(EFAULT);
/* prototypes for a few of the stack-hungry cases that have
 * their own functions.  Functions are defined after their use
 * so gcc doesn't get fancy and inline them with -O3 */
xfs_ioc_fsgeometry_v1(

	vp = vn_from_inode(inode);

	vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);

	ip = XFS_BHVTOI(bdp);

	case XFS_IOC_ALLOCSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
		/*
		 * Only allow the sys admin to reserve space unless
		 * unwritten extents are enabled.
		 */
		if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
		    !capable(CAP_SYS_ADMIN))

		return xfs_ioc_space(bdp, inode, filp, ioflags, cmd, arg);

	case XFS_IOC_DIOINFO: {
		xfs_buftarg_t		*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -XFS_ERROR(EFAULT);
	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_GETXFLAGS:
	case XFS_IOC_SETXFLAGS:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_xattr(vp, ip, filp, cmd, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -XFS_ERROR(EFAULT);

		error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(bdp, ioflags, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(bdp, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE:
		return xfs_find_handle(cmd, arg);

	case XFS_IOC_OPEN_BY_HANDLE:
		return xfs_open_by_handle(mp, arg, filp, inode);

	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(mp, arg, inode);

	case XFS_IOC_READLINK_BY_HANDLE:
		return xfs_readlink_by_handle(mp, arg, inode);

	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(mp, arg, inode);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(mp, arg, inode);

	case XFS_IOC_SWAPEXT: {
		error = xfs_swapext((struct xfs_swapext __user *)arg);
	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

		/* input parameter is passed in resblks field of structure */
		error = xfs_reserve_blocks(mp, &in, &inout);

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))

		error = xfs_reserve_blocks(mp, NULL, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_data(mp, &in);

	case XFS_IOC_FSGROWFSLOG: {
		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_log(mp, &in);

	case XFS_IOC_FSGROWFSRT: {
		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_rt(mp, &in);

		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen == SB_UNFROZEN)
			freeze_bdev(inode->i_sb->s_bdev);

		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen != SB_UNFROZEN)
			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);

	case XFS_IOC_GOINGDOWN: {
		if (!capable(CAP_SYS_ADMIN))

		if (get_user(in, (__uint32_t __user *)arg))
			return -XFS_ERROR(EFAULT);

		error = xfs_fs_goingdown(mp, in);

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_errortag_add(in.errtag, mp);

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))

		error = xfs_errortag_clearall(mp);
	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -XFS_ERROR(EPERM);

	if (!(filp->f_mode & FMODE_WRITE))
		return -XFS_ERROR(EBADF);

	if (!S_ISREG(inode->i_mode))
		return -XFS_ERROR(EINVAL);

	if (copy_from_user(&bf, arg, sizeof(bf)))
		return -XFS_ERROR(EFAULT);

	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		attr_flags |= ATTR_NONBLOCK;
	if (ioflags & IO_INVIS)
		attr_flags |= ATTR_DMI;

	error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */
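	/*
	 * Illustrative only, not part of the original source: a hedged
	 * userspace sketch of XFS_IOC_FSBULKSTAT.  The field names (lastip,
	 * icount, ubuffer, ocount) match the ones read below; the buffer
	 * size and termination condition are assumptions.
	 *
	 *	__u64			lastip = 0;
	 *	__s32			ocount = 0;
	 *	struct xfs_bstat	buf[64];
	 *	xfs_fsop_bulkreq_t	breq = {
	 *		.lastip  = &lastip,
	 *		.icount  = 64,
	 *		.ubuffer = buf,
	 *		.ocount  = &ocount,
	 *	};
	 *
	 *	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &breq) == 0 && ocount > 0)
	 *		;	(process ocount entries in buf, then loop)
	 */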
	if (!capable(CAP_SYS_ADMIN))

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -XFS_ERROR(EFAULT);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_single(mp, &inlast,
						bulkreq.ubuffer, &done);
	else {	/* XFS_IOC_FSBULKSTAT */
		if (count == 1 && inlast != 0) {
			error = xfs_bulkstat_single(mp, &inlast,
						bulkreq.ubuffer, &done);
			error = xfs_bulkstat(mp, &inlast, &count,
				(bulkstat_one_pf)xfs_bulkstat_one, NULL,
				sizeof(xfs_bstat_t), bulkreq.ubuffer,
				BULKSTAT_FG_QUICK, &done);

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
xfs_ioc_fsgeometry_v1(
	xfs_fsop_geom_v1_t	fsgeo;

	error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);

	xfs_fsop_geom_t		fsgeo;

	error = xfs_fs_geometry(mp, &fsgeo, 4);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);
/*
 * Linux extended inode flags interface.
 */
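/*
 * Illustrative only, not part of the original source: a hedged userspace
 * sketch of the flag round trip the helpers below service.  The FS_*_FL
 * bits are the same ones tested in xfs_merge_ioc_xflags(); the flag word
 * width matches the copy_to_user/copy_from_user of an unsigned int done in
 * the GETXFLAGS/SETXFLAGS cases, and error handling is omitted.
 *
 *	unsigned int flags = 0;
 *
 *	ioctl(fd, XFS_IOC_GETXFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, XFS_IOC_SETXFLAGS, &flags);
 */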
xfs_merge_ioc_xflags(
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= XFS_XFLAG_IMMUTABLE;
		xflags &= ~XFS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= XFS_XFLAG_APPEND;
		xflags &= ~XFS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= XFS_XFLAG_SYNC;
		xflags &= ~XFS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= XFS_XFLAG_NOATIME;
		xflags &= ~XFS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= XFS_XFLAG_NODUMP;
		xflags &= ~XFS_XFLAG_NODUMP;

	__uint16_t		di_flags)
	unsigned int		flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	struct bhv_vattr	*vattr;

	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
	if (unlikely(!vattr))

	case XFS_IOC_FSGETXATTR: {
		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
				 XFS_AT_NEXTENTS | XFS_AT_PROJID;
		error = bhv_vop_getattr(vp, vattr, 0, NULL);
		if (unlikely(error)) {

		fa.fsx_xflags	= vattr->va_xflags;
		fa.fsx_extsize	= vattr->va_extsize;
		fa.fsx_nextents = vattr->va_nextents;
		fa.fsx_projid	= vattr->va_projid;

		if (copy_to_user(arg, &fa, sizeof(fa))) {

	case XFS_IOC_FSSETXATTR: {
		if (copy_from_user(&fa, arg, sizeof(fa))) {

		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
		vattr->va_xflags  = fa.fsx_xflags;
		vattr->va_extsize = fa.fsx_extsize;
		vattr->va_projid  = fa.fsx_projid;

		error = bhv_vop_setattr(vp, vattr, attr_flags, NULL);
			__vn_revalidate(vp, vattr);	/* update flags */

	case XFS_IOC_FSGETXATTRA: {
		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
				 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
		error = bhv_vop_getattr(vp, vattr, 0, NULL);
		if (unlikely(error)) {

		fa.fsx_xflags	= vattr->va_xflags;
		fa.fsx_extsize	= vattr->va_extsize;
		fa.fsx_nextents = vattr->va_anextents;
		fa.fsx_projid	= vattr->va_projid;

		if (copy_to_user(arg, &fa, sizeof(fa))) {

	case XFS_IOC_GETXFLAGS: {
		flags = xfs_di2lxflags(ip->i_d.di_flags);
		if (copy_to_user(arg, &flags, sizeof(flags)))

	case XFS_IOC_SETXFLAGS: {
		if (copy_from_user(&flags, arg, sizeof(flags))) {

		if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
			      FS_NOATIME_FL | FS_NODUMP_FL | \
			error = -EOPNOTSUPP;

		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS;
		vattr->va_xflags = xfs_merge_ioc_xflags(flags,

		error = bhv_vop_setattr(vp, vattr, attr_flags, NULL);
			__vn_revalidate(vp, vattr);	/* update flags */
	if (copy_from_user(&bm, arg, sizeof(bm)))
		return -XFS_ERROR(EFAULT);

	if (bm.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (ioflags & IO_INVIS)
		iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(bdp, &bm, (struct getbmap __user *)arg+1, iflags);

	if (copy_to_user(arg, &bm, sizeof(bm)))
		return -XFS_ERROR(EFAULT);

	struct getbmapx		bmx;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);

	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	/*
	 * Map input getbmapx structure to a getbmap
	 * structure for xfs_getbmap.
	 */
	GETBMAP_CONVERT(bmx, bm);

	iflags = bmx.bmv_iflags;

	if (iflags & (~BMV_IF_VALID))
		return -XFS_ERROR(EINVAL);

	iflags |= BMV_IF_EXTENDED;

	error = xfs_getbmap(bdp, &bm, (struct getbmapx __user *)arg+1, iflags);

	GETBMAP_CONVERT(bm, bmx);

	if (copy_to_user(arg, &bmx, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);