/*
 * linux/fs/ocfs2/ioctl.c
 *
 * Copyright (C) 2006 Herbert Poetzl
 * adapted from Remy Card's ext2/ioctl.c
 */

#include <linux/mount.h>
#include <linux/compat.h>

#include <cluster/masklog.h>

#include "refcounttree.h"
#include "buffer_head_io.h"
#include "move_extents.h"
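/*
 * Thin wrappers around copy_{from,to}_user() for the fixed-size
 * ocfs2_info_* request structures used by OCFS2_IOC_INFO.  Both return
 * the number of bytes that could not be copied, i.e. non-zero on
 * failure, which is why callers map a non-zero result to -EFAULT.
 */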
#define o2info_from_user(a, b)	\
		copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b)	\
		copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
/*
 * This call is void because we are already reporting an error that may
 * be -EFAULT.  The error will be returned from the ioctl(2) call.  It's
 * just a best-effort to tell userspace that this request caused the
 * error.
 */
static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
					    struct ocfs2_info_request __user *req)
{
	kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
	(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
}
static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags |= OCFS2_INFO_FL_FILLED;
}
static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
}
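/*
 * A request is "coherent" unless userspace set OCFS2_INFO_FL_NON_COHERENT.
 * Coherent requests take the cluster lock on the relevant system inode so
 * the returned numbers are cluster-wide and up to date; non-coherent
 * requests simply read the on-disk blocks locally, which is cheaper but
 * may return slightly stale values.
 */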
static inline int o2info_coherent(struct ocfs2_info_request *req)
{
	return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
}
static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
	int status;

	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	ocfs2_get_inode_flags(OCFS2_I(inode));
	*flags = OCFS2_I(inode)->ip_attr;
	ocfs2_inode_unlock(inode, 0);

	return status;
}
static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
				unsigned mask)
{
	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	struct buffer_head *bh = NULL;
	unsigned oldflags;
	int status;

	mutex_lock(&inode->i_mutex);

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto bail_unlock;

	if (!S_ISDIR(inode->i_mode))
		flags &= ~OCFS2_DIRSYNC_FL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	oldflags = ocfs2_inode->ip_attr;
	flags = flags & mask;
	flags |= oldflags & ~mask;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
	 * the relevant capability.
	 */
	status = -EPERM;
	if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
		(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
		if (!capable(CAP_LINUX_IMMUTABLE))
			goto bail_commit;
	}

	ocfs2_inode->ip_attr = flags;
	ocfs2_set_inode_flags(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail:
	mutex_unlock(&inode->i_mutex);

	brelse(bh);

	return status;
}
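/*
 * For illustration: in ocfs2_set_inode_attr() above, bits outside 'mask'
 * are always taken from the inode's current attributes and only bits
 * inside 'mask' come from the caller, so with mask == OCFS2_FL_MODIFIABLE
 * (as used by OCFS2_IOC_SETFLAGS) only the user-modifiable flags can
 * actually change.
 */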
int ocfs2_info_handle_blocksize(struct inode *inode,
				struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_blocksize oib;

	if (o2info_from_user(oib, req))
		goto bail;

	oib.ib_blocksize = inode->i_sb->s_blocksize;

	o2info_set_request_filled(&oib.ib_req);

	if (o2info_to_user(oib, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oib.ib_req, req);

	return status;
}
int ocfs2_info_handle_clustersize(struct inode *inode,
				  struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_clustersize oic;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oic, req))
		goto bail;

	oic.ic_clustersize = osb->s_clustersize;

	o2info_set_request_filled(&oic.ic_req);

	if (o2info_to_user(oic, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oic.ic_req, req);

	return status;
}
int ocfs2_info_handle_maxslots(struct inode *inode,
			       struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_maxslots oim;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oim, req))
		goto bail;

	oim.im_max_slots = osb->max_slots;

	o2info_set_request_filled(&oim.im_req);

	if (o2info_to_user(oim, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oim.im_req, req);

	return status;
}
int ocfs2_info_handle_label(struct inode *inode,
			    struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_label oil;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oil, req))
		goto bail;

	memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);

	o2info_set_request_filled(&oil.il_req);

	if (o2info_to_user(oil, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oil.il_req, req);

	return status;
}
int ocfs2_info_handle_uuid(struct inode *inode,
			   struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_uuid oiu;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oiu, req))
		goto bail;

	memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);

	o2info_set_request_filled(&oiu.iu_req);

	if (o2info_to_user(oiu, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiu.iu_req, req);

	return status;
}
int ocfs2_info_handle_fs_features(struct inode *inode,
				  struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_fs_features oif;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oif, req))
		goto bail;

	oif.if_compat_features = osb->s_feature_compat;
	oif.if_incompat_features = osb->s_feature_incompat;
	oif.if_ro_compat_features = osb->s_feature_ro_compat;

	o2info_set_request_filled(&oif.if_req);

	if (o2info_to_user(oif, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oif.if_req, req);

	return status;
}
int ocfs2_info_handle_journal_size(struct inode *inode,
				   struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_journal_size oij;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oij, req))
		goto bail;

	oij.ij_journal_size = osb->journal->j_inode->i_size;

	o2info_set_request_filled(&oij.ij_req);

	if (o2info_to_user(oij, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oij.ij_req, req);

	return status;
}
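/*
 * Per-slot helper for OCFS2_INFO_FREEINODE: read one slot's inode
 * allocator (under the cluster lock for coherent requests, via a raw
 * block read otherwise) and record its total and free inode counts in
 * fi->ifi_stat[slot].
 */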
int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
				struct inode *inode_alloc, u64 blkno,
				struct ocfs2_info_freeinode *fi, u32 slot)
{
	int status = 0, unlock = 0;

	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *dinode_alloc = NULL;

	if (inode_alloc)
		mutex_lock(&inode_alloc->i_mutex);

	if (o2info_coherent(&fi->ifi_req)) {
		status = ocfs2_inode_lock(inode_alloc, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	dinode_alloc = (struct ocfs2_dinode *)bh->b_data;

	fi->ifi_stat[slot].lfi_total =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
	fi->ifi_stat[slot].lfi_free =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);

bail:
	if (unlock)
		ocfs2_inode_unlock(inode_alloc, 0);

	if (inode_alloc)
		mutex_unlock(&inode_alloc->i_mutex);

	brelse(bh);

	return status;
}
int ocfs2_info_handle_freeinode(struct inode *inode,
				struct ocfs2_info_request __user *req)
{
	u32 i;
	u64 blkno = -1;
	char namebuf[40];
	int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
	struct ocfs2_info_freeinode *oifi = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *inode_alloc = NULL;

	oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
	if (!oifi) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	if (o2info_from_user(*oifi, req))
		goto bail;

	oifi->ifi_slotnum = osb->max_slots;

	for (i = 0; i < oifi->ifi_slotnum; i++) {
		if (o2info_coherent(&oifi->ifi_req)) {
			inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
			if (!inode_alloc) {
				mlog(ML_ERROR, "unable to get alloc inode in "
				     "slot %u\n", i);
				status = -EIO;
				goto bail;
			}
		} else {
			ocfs2_sprintf_system_inode_name(namebuf,
							sizeof(namebuf),
							type, i);
			status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
							    namebuf,
							    strlen(namebuf),
							    &blkno);
			if (status < 0) {
				status = -ENOENT;
				goto bail;
			}
		}

		status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);
		if (status < 0)
			goto bail;

		iput(inode_alloc);
		inode_alloc = NULL;
	}

	o2info_set_request_filled(&oifi->ifi_req);

	if (o2info_to_user(*oifi, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oifi->ifi_req, req);

	kfree(oifi);

	return status;
}
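/*
 * Free-space fragmentation bookkeeping for OCFS2_INFO_FREEFRAG.  Every
 * free chunk of 'chunksize' clusters goes into histogram bucket
 * ilog2(chunksize), capped at OCFS2_INFO_MAX_HIST - 1; e.g. a 12-cluster
 * chunk lands in bucket 3 since 2^3 <= 12 < 2^4.  fc_chunks counts the
 * chunks per bucket and fc_clusters sums their sizes.
 */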
static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
				   unsigned int chunksize)
{
	int index;

	index = __ilog2_u32(chunksize);
	if (index >= OCFS2_INFO_MAX_HIST)
		index = OCFS2_INFO_MAX_HIST - 1;

	hist->fc_chunks[index]++;
	hist->fc_clusters[index] += chunksize;
}
static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
			       unsigned int chunksize)
{
	if (chunksize > stats->ffs_max)
		stats->ffs_max = chunksize;

	if (chunksize < stats->ffs_min)
		stats->ffs_min = chunksize;

	stats->ffs_avg += chunksize;
	stats->ffs_free_chunks_real++;
}
void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
			   unsigned int chunksize)
{
	o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
	o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
}
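/*
 * Walk one chain of the global bitmap: follow bg_next_group through the
 * chain's group descriptors, divide each group into iff_chunksize-cluster
 * chunks and feed every maximal run of free clusters into
 * ocfs2_info_update_ffg().  A chunk that is entirely free also bumps
 * ffs_free_chunks.
 */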
int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
				   struct inode *gb_inode,
				   struct ocfs2_dinode *gb_dinode,
				   struct ocfs2_chain_rec *rec,
				   struct ocfs2_info_freefrag *ffg,
				   u32 chunks_in_group)
{
	int status = 0, used;
	u64 blkno;

	struct buffer_head *bh = NULL;
	struct ocfs2_group_desc *bg = NULL;

	unsigned int max_bits, num_clusters;
	unsigned int offset = 0, cluster, chunk;
	unsigned int chunk_free, last_chunksize = 0;

	if (!le32_to_cpu(rec->c_free))
		goto bail;

	do {
		if (!bg)
			blkno = le64_to_cpu(rec->c_blkno);
		else
			blkno = le64_to_cpu(bg->bg_next_group);

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		if (o2info_coherent(&ffg->iff_req))
			status = ocfs2_read_group_descriptor(gb_inode,
							     gb_dinode,
							     blkno, &bh);
		else
			status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);

		if (status < 0) {
			mlog(ML_ERROR, "Can't read the group descriptor # "
			     "%llu from device.", (unsigned long long)blkno);
			status = -EIO;
			goto bail;
		}

		bg = (struct ocfs2_group_desc *)bh->b_data;

		if (!le16_to_cpu(bg->bg_free_bits_count))
			continue;

		max_bits = le16_to_cpu(bg->bg_bits);
		offset = 0;

		for (chunk = 0; chunk < chunks_in_group; chunk++) {
			/*
			 * The last chunk may not be an entire one.
			 */
			if ((offset + ffg->iff_chunksize) > max_bits)
				num_clusters = max_bits - offset;
			else
				num_clusters = ffg->iff_chunksize;

			chunk_free = 0;
			for (cluster = 0; cluster < num_clusters; cluster++) {
				used = ocfs2_test_bit(offset,
						(unsigned long *)bg->bg_bitmap);
				/*
				 * - chunk_free counts free clusters in #N chunk.
				 * - last_chunksize records the size (in clusters)
				 *   of the last real free chunk being counted.
				 */
				if (!used) {
					last_chunksize++;
					chunk_free++;
				}

				if (used && last_chunksize) {
					ocfs2_info_update_ffg(ffg,
							      last_chunksize);
					last_chunksize = 0;
				}

				offset++;
			}

			if (chunk_free == ffg->iff_chunksize)
				ffg->iff_ffs.ffs_free_chunks++;
		}

		/*
		 * Need to update the info for the last free chunk.
		 */
		if (last_chunksize)
			ocfs2_info_update_ffg(ffg, last_chunksize);

	} while (le64_to_cpu(bg->bg_next_group));

bail:
	brelse(bh);

	return status;
}
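/*
 * Scan the whole global bitmap for OCFS2_INFO_FREEFRAG: grab the bitmap
 * inode (cluster-locked for coherent requests, raw block read otherwise),
 * reset the stats, run ocfs2_info_freefrag_scan_chain() over every chain
 * record, then turn the accumulated ffs_avg sum into an average free
 * chunk size.
 */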
int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
				    struct inode *gb_inode, u64 blkno,
				    struct ocfs2_info_freefrag *ffg)
{
	u32 chunks_in_group;
	int status = 0, unlock = 0, i;

	struct buffer_head *bh = NULL;
	struct ocfs2_chain_list *cl = NULL;
	struct ocfs2_chain_rec *rec = NULL;
	struct ocfs2_dinode *gb_dinode = NULL;

	if (gb_inode)
		mutex_lock(&gb_inode->i_mutex);

	if (o2info_coherent(&ffg->iff_req)) {
		status = ocfs2_inode_lock(gb_inode, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	gb_dinode = (struct ocfs2_dinode *)bh->b_data;
	cl = &(gb_dinode->id2.i_chain);

	/*
	 * Chunksize (in clusters) from userspace should be no larger
	 * than the number of clusters in a group.
	 */
	if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
		status = -EINVAL;
		goto bail;
	}

	memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));

	ffg->iff_ffs.ffs_min = ~0U;
	ffg->iff_ffs.ffs_clusters =
			le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
	ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
			le32_to_cpu(gb_dinode->id1.bitmap1.i_used);

	chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
		rec = &(cl->cl_recs[i]);
		status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
							gb_dinode, rec, ffg,
							chunks_in_group);
		if (status)
			goto bail;
	}

	if (ffg->iff_ffs.ffs_free_chunks_real)
		ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
					ffg->iff_ffs.ffs_free_chunks_real);
bail:
	if (unlock)
		ocfs2_inode_unlock(gb_inode, 0);

	if (gb_inode)
		mutex_unlock(&gb_inode->i_mutex);

	if (gb_inode)
		iput(gb_inode);

	brelse(bh);

	return status;
}
int ocfs2_info_handle_freefrag(struct inode *inode,
			       struct ocfs2_info_request __user *req)
{
	u64 blkno = -1;
	char namebuf[40];
	int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;

	struct ocfs2_info_freefrag *oiff;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *gb_inode = NULL;

	oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
	if (!oiff) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	if (o2info_from_user(*oiff, req))
		goto bail;
	/*
	 * Chunksize from userspace must be a power of 2.
	 */
	if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
	    (!oiff->iff_chunksize)) {
		status = -EINVAL;
		goto bail;
	}

	if (o2info_coherent(&oiff->iff_req)) {
		gb_inode = ocfs2_get_system_file_inode(osb, type,
						       OCFS2_INVALID_SLOT);
		if (!gb_inode) {
			mlog(ML_ERROR, "unable to get global_bitmap inode\n");
			status = -EIO;
			goto bail;
		}
	} else {
		ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
						OCFS2_INVALID_SLOT);
		status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
						    namebuf,
						    strlen(namebuf),
						    &blkno);
		if (status < 0) {
			status = -ENOENT;
			goto bail;
		}
	}

	status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
	if (status < 0)
		goto bail;

	o2info_set_request_filled(&oiff->iff_req);

	if (o2info_to_user(*oiff, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiff->iff_req, req);

	kfree(oiff);

	return status;
}
int ocfs2_info_handle_unknown(struct inode *inode,
			      struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	o2info_clear_request_filled(&oir);

	if (o2info_to_user(oir, req))
		goto bail;

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oir, req);

	return status;
}
/*
 * Validate and distinguish OCFS2_IOC_INFO requests.
 *
 * - validate the magic number.
 * - distinguish different requests.
 * - validate size of different requests.
 */
int ocfs2_info_handle_request(struct inode *inode,
			      struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	status = -EINVAL;
	if (oir.ir_magic != OCFS2_INFO_MAGIC)
		goto bail;

	switch (oir.ir_code) {
	case OCFS2_INFO_BLOCKSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_blocksize))
			status = ocfs2_info_handle_blocksize(inode, req);
		break;
	case OCFS2_INFO_CLUSTERSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_clustersize))
			status = ocfs2_info_handle_clustersize(inode, req);
		break;
	case OCFS2_INFO_MAXSLOTS:
		if (oir.ir_size == sizeof(struct ocfs2_info_maxslots))
			status = ocfs2_info_handle_maxslots(inode, req);
		break;
	case OCFS2_INFO_LABEL:
		if (oir.ir_size == sizeof(struct ocfs2_info_label))
			status = ocfs2_info_handle_label(inode, req);
		break;
	case OCFS2_INFO_UUID:
		if (oir.ir_size == sizeof(struct ocfs2_info_uuid))
			status = ocfs2_info_handle_uuid(inode, req);
		break;
	case OCFS2_INFO_FS_FEATURES:
		if (oir.ir_size == sizeof(struct ocfs2_info_fs_features))
			status = ocfs2_info_handle_fs_features(inode, req);
		break;
	case OCFS2_INFO_JOURNAL_SIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
			status = ocfs2_info_handle_journal_size(inode, req);
		break;
	case OCFS2_INFO_FREEINODE:
		if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
			status = ocfs2_info_handle_freeinode(inode, req);
		break;
	case OCFS2_INFO_FREEFRAG:
		if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
			status = ocfs2_info_handle_freefrag(inode, req);
		break;
	default:
		status = ocfs2_info_handle_unknown(inode, req);
		break;
	}

bail:
	return status;
}
int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
			  u64 *req_addr, int compat_flag)
{
	int status = -EFAULT;
	u64 __user *bp = NULL;

	/*
	 * bp stores the base address of an array of pointers, which
	 * collects the addresses of the separate requests.
	 */
	if (compat_flag)
		bp = (u64 __user *)(unsigned long)compat_ptr(info->oi_requests);
	else
		bp = (u64 __user *)(unsigned long)(info->oi_requests);

	if (o2info_from_user(*req_addr, bp + idx))
		goto bail;

	status = 0;
bail:
	return status;
}
/*
 * OCFS2_IOC_INFO handles an array of requests passed from userspace.
 *
 * ocfs2_info_handle() receives a large info aggregation, grabs and
 * validates the request count from the header, then breaks it into
 * small pieces so that the specific handlers can deal with them one
 * by one.
 *
 * The idea is to keep each separate request small enough to ensure
 * better backward and forward compatibility, since a small request
 * is less likely to break if the disk layout changes.
 */
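/*
 * Illustrative userspace sketch (not part of the original file): the
 * caller fills one struct per query, collects the struct addresses in a
 * u64 array and passes that array through struct ocfs2_info:
 *
 *	struct ocfs2_info_blocksize oib = {
 *		.ib_req = {
 *			.ir_magic = OCFS2_INFO_MAGIC,
 *			.ir_code  = OCFS2_INFO_BLOCKSIZE,
 *			.ir_size  = sizeof(oib),
 *		},
 *	};
 *	__u64 reqs[] = { (unsigned long)&oib };
 *	struct ocfs2_info info = {
 *		.oi_requests = (unsigned long)reqs,
 *		.oi_count    = 1,
 *	};
 *	ioctl(fd, OCFS2_IOC_INFO, &info);
 *
 * On return, each request's ir_flags carries OCFS2_INFO_FL_FILLED or
 * OCFS2_INFO_FL_ERROR for that particular request.
 */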
int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
		      int compat_flag)
{
	int i, status = 0;
	u64 req_addr;
	struct ocfs2_info_request __user *reqp;

	if ((info->oi_count > OCFS2_INFO_MAX_REQUEST) ||
	    (!info->oi_requests)) {
		status = -EINVAL;
		goto bail;
	}

	for (i = 0; i < info->oi_count; i++) {

		status = ocfs2_get_request_ptr(info, i, &req_addr, compat_flag);
		if (status)
			break;

		reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
		if (!reqp) {
			status = -EINVAL;
			goto bail;
		}

		status = ocfs2_info_handle_request(inode, reqp);
		if (status)
			break;
	}

bail:
	return status;
}
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	unsigned int flags;
	int new_clusters;
	int status;
	struct ocfs2_space_resv sr;
	struct ocfs2_new_group_input input;
	struct reflink_arguments args;
	const char __user *old_path;
	const char __user *new_path;
	bool preserve;
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC_GETFLAGS:
		status = ocfs2_get_inode_attr(inode, &flags);
		if (status < 0)
			return status;

		flags &= OCFS2_FL_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case OCFS2_IOC_SETFLAGS:
		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_set_inode_attr(inode, flags,
					      OCFS2_FL_MODIFIABLE);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		if (copy_from_user(&sr, (int __user *) arg, sizeof(sr)))
			return -EFAULT;

		return ocfs2_change_file_space(filp, cmd, &sr);
	case OCFS2_IOC_GROUP_EXTEND:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(new_clusters, (int __user *)arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_extend(inode, new_clusters);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_add(inode, &input);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		old_path = (const char __user *)(unsigned long)args.old_path;
		new_path = (const char __user *)(unsigned long)args.new_path;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 0);
	case FITRIM:
	{
		struct super_block *sb = inode->i_sb;
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;

		ret = ocfs2_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user(argp, &range, sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case OCFS2_IOC_MOVE_EXT:
		return ocfs2_ioctl_move_extents(filp, argp);
	default:
		return -ENOTTY;
	}
}
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	bool preserve;
	struct reflink_arguments args;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC32_GETFLAGS:
		cmd = OCFS2_IOC_GETFLAGS;
		break;
	case OCFS2_IOC32_SETFLAGS:
		cmd = OCFS2_IOC_SETFLAGS;
		break;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
	case OCFS2_IOC_GROUP_EXTEND:
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		break;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
					   compat_ptr(args.new_path), preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 1);
	case OCFS2_IOC_MOVE_EXT:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ocfs2_ioctl(file, cmd, arg);
}