1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Red Hat, Inc.
 */
9 #include <linux/compat.h>
10 #include <linux/fileattr.h>
11 #include <linux/fsverity.h>
13 static ssize_t
fuse_send_ioctl(struct fuse_mount
*fm
, struct fuse_args
*args
,
14 struct fuse_ioctl_out
*outarg
)
18 args
->out_args
[0].size
= sizeof(*outarg
);
19 args
->out_args
[0].value
= outarg
;
21 ret
= fuse_simple_request(fm
, args
);
23 /* Translate ENOSYS, which shouldn't be returned from fs */
27 if (ret
>= 0 && outarg
->result
== -ENOSYS
)
28 outarg
->result
= -ENOTTY
;
34 * CUSE servers compiled on 32bit broke on 64bit kernels because the
35 * ABI was defined to be 'struct iovec' which is different on 32bit
36 * and 64bit. Fortunately we can determine which structure the server
37 * used from the size of the reply.
39 static int fuse_copy_ioctl_iovec_old(struct iovec
*dst
, void *src
,
40 size_t transferred
, unsigned count
,
44 if (count
* sizeof(struct compat_iovec
) == transferred
) {
45 struct compat_iovec
*ciov
= src
;
49 * With this interface a 32bit server cannot support
50 * non-compat (i.e. ones coming from 64bit apps) ioctl
56 for (i
= 0; i
< count
; i
++) {
57 dst
[i
].iov_base
= compat_ptr(ciov
[i
].iov_base
);
58 dst
[i
].iov_len
= ciov
[i
].iov_len
;
64 if (count
* sizeof(struct iovec
) != transferred
)
67 memcpy(dst
, src
, transferred
);
71 /* Make sure iov_length() won't overflow */
72 static int fuse_verify_ioctl_iov(struct fuse_conn
*fc
, struct iovec
*iov
,
76 u32 max
= fc
->max_pages
<< PAGE_SHIFT
;
78 for (n
= 0; n
< count
; n
++, iov
++) {
79 if (iov
->iov_len
> (size_t) max
)
86 static int fuse_copy_ioctl_iovec(struct fuse_conn
*fc
, struct iovec
*dst
,
87 void *src
, size_t transferred
, unsigned count
,
91 struct fuse_ioctl_iovec
*fiov
= src
;
94 return fuse_copy_ioctl_iovec_old(dst
, src
, transferred
,
98 if (count
* sizeof(struct fuse_ioctl_iovec
) != transferred
)
101 for (i
= 0; i
< count
; i
++) {
102 /* Did the server supply an inappropriate value? */
103 if (fiov
[i
].base
!= (unsigned long) fiov
[i
].base
||
104 fiov
[i
].len
!= (unsigned long) fiov
[i
].len
)
107 dst
[i
].iov_base
= (void __user
*) (unsigned long) fiov
[i
].base
;
108 dst
[i
].iov_len
= (size_t) fiov
[i
].len
;
112 (ptr_to_compat(dst
[i
].iov_base
) != fiov
[i
].base
||
113 (compat_size_t
) dst
[i
].iov_len
!= fiov
[i
].len
))
121 /* For fs-verity, determine iov lengths from input */
122 static int fuse_setup_measure_verity(unsigned long arg
, struct iovec
*iov
)
125 struct fsverity_digest __user
*uarg
= (void __user
*)arg
;
127 if (copy_from_user(&digest_size
, &uarg
->digest_size
, sizeof(digest_size
)))
130 if (digest_size
> SIZE_MAX
- sizeof(struct fsverity_digest
))
133 iov
->iov_len
= sizeof(struct fsverity_digest
) + digest_size
;
138 static int fuse_setup_enable_verity(unsigned long arg
, struct iovec
*iov
,
139 unsigned int *in_iovs
)
141 struct fsverity_enable_arg enable
;
142 struct fsverity_enable_arg __user
*uarg
= (void __user
*)arg
;
143 const __u32 max_buffer_len
= FUSE_MAX_MAX_PAGES
* PAGE_SIZE
;
145 if (copy_from_user(&enable
, uarg
, sizeof(enable
)))
148 if (enable
.salt_size
> max_buffer_len
|| enable
.sig_size
> max_buffer_len
)
151 if (enable
.salt_size
> 0) {
155 iov
->iov_base
= u64_to_user_ptr(enable
.salt_ptr
);
156 iov
->iov_len
= enable
.salt_size
;
159 if (enable
.sig_size
> 0) {
163 iov
->iov_base
= u64_to_user_ptr(enable
.sig_ptr
);
164 iov
->iov_len
= enable
.sig_size
;
170 * For ioctls, there is no generic way to determine how much memory
171 * needs to be read and/or written. Furthermore, ioctls are allowed
172 * to dereference the passed pointer, so the parameter requires deep
173 * copying but FUSE has no idea whatsoever about what to copy in or
176 * This is solved by allowing FUSE server to retry ioctl with
177 * necessary in/out iovecs. Let's assume the ioctl implementation
178 * needs to read in the following structure.
185 * On the first callout to FUSE server, inarg->in_size and
186 * inarg->out_size will be NULL; then, the server completes the ioctl
187 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
188 * the actual iov array to
190 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
192 * which tells FUSE to copy in the requested area and retry the ioctl.
193 * On the second round, the server has access to the structure and
194 * from that it can tell what to look for next, so on the invocation,
195 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
197 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
198 * { .iov_base = a.buf, .iov_len = a.buflen } }
200 * FUSE will copy both struct a and the pointed buffer from the
201 * process doing the ioctl and retry ioctl with both struct a and the
204 * This time, FUSE server has everything it needs and completes ioctl
205 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
207 * Copying data out works the same way.
209 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
210 * automatically initializes in and out iovs by decoding @cmd with
211 * _IOC_* macros and the server is not allowed to request RETRY. This
212 * limits ioctl data transfers to well-formed ioctls and is the forced
213 * behavior for all FUSE servers.
215 long fuse_do_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
,
218 struct fuse_file
*ff
= file
->private_data
;
219 struct fuse_mount
*fm
= ff
->fm
;
220 struct fuse_ioctl_in inarg
= {
226 struct fuse_ioctl_out outarg
;
227 struct iovec
*iov_page
= NULL
;
228 struct iovec
*in_iov
= NULL
, *out_iov
= NULL
;
229 unsigned int in_iovs
= 0, out_iovs
= 0, max_pages
;
230 size_t in_size
, out_size
, c
;
234 struct fuse_args_pages ap
= {};
236 #if BITS_PER_LONG == 32
237 inarg
.flags
|= FUSE_IOCTL_32BIT
;
239 if (flags
& FUSE_IOCTL_COMPAT
) {
240 inarg
.flags
|= FUSE_IOCTL_32BIT
;
241 #ifdef CONFIG_X86_X32_ABI
242 if (in_x32_syscall())
243 inarg
.flags
|= FUSE_IOCTL_COMPAT_X32
;
248 /* assume all the iovs returned by client always fits in a page */
249 BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec
) * FUSE_IOCTL_MAX_IOV
> PAGE_SIZE
);
252 ap
.pages
= fuse_pages_alloc(fm
->fc
->max_pages
, GFP_KERNEL
, &ap
.descs
);
253 iov_page
= (struct iovec
*) __get_free_page(GFP_KERNEL
);
254 if (!ap
.pages
|| !iov_page
)
257 fuse_page_descs_length_init(ap
.descs
, 0, fm
->fc
->max_pages
);
260 * If restricted, initialize IO parameters as encoded in @cmd.
261 * RETRY from server is not allowed.
263 if (!(flags
& FUSE_IOCTL_UNRESTRICTED
)) {
264 struct iovec
*iov
= iov_page
;
266 iov
->iov_base
= (void __user
*)arg
;
267 iov
->iov_len
= _IOC_SIZE(cmd
);
269 if (_IOC_DIR(cmd
) & _IOC_WRITE
) {
274 if (_IOC_DIR(cmd
) & _IOC_READ
) {
281 case FS_IOC_MEASURE_VERITY
:
282 err
= fuse_setup_measure_verity(arg
, iov
);
284 case FS_IOC_ENABLE_VERITY
:
285 err
= fuse_setup_enable_verity(arg
, iov
, &in_iovs
);
293 inarg
.in_size
= in_size
= iov_length(in_iov
, in_iovs
);
294 inarg
.out_size
= out_size
= iov_length(out_iov
, out_iovs
);
297 * Out data can be used either for actual out data or iovs,
298 * make sure there always is at least one page.
300 out_size
= max_t(size_t, out_size
, PAGE_SIZE
);
301 max_pages
= DIV_ROUND_UP(max(in_size
, out_size
), PAGE_SIZE
);
303 /* make sure there are enough buffer pages and init request with them */
305 if (max_pages
> fm
->fc
->max_pages
)
307 while (ap
.num_pages
< max_pages
) {
308 ap
.pages
[ap
.num_pages
] = alloc_page(GFP_KERNEL
| __GFP_HIGHMEM
);
309 if (!ap
.pages
[ap
.num_pages
])
315 /* okay, let's send it to the client */
316 ap
.args
.opcode
= FUSE_IOCTL
;
317 ap
.args
.nodeid
= ff
->nodeid
;
318 ap
.args
.in_numargs
= 1;
319 ap
.args
.in_args
[0].size
= sizeof(inarg
);
320 ap
.args
.in_args
[0].value
= &inarg
;
322 ap
.args
.in_numargs
++;
323 ap
.args
.in_args
[1].size
= in_size
;
324 ap
.args
.in_pages
= true;
327 iov_iter_init(&ii
, ITER_SOURCE
, in_iov
, in_iovs
, in_size
);
328 for (i
= 0; iov_iter_count(&ii
) && !WARN_ON(i
>= ap
.num_pages
); i
++) {
329 c
= copy_page_from_iter(ap
.pages
[i
], 0, PAGE_SIZE
, &ii
);
330 if (c
!= PAGE_SIZE
&& iov_iter_count(&ii
))
335 ap
.args
.out_numargs
= 2;
336 ap
.args
.out_args
[1].size
= out_size
;
337 ap
.args
.out_pages
= true;
338 ap
.args
.out_argvar
= true;
340 transferred
= fuse_send_ioctl(fm
, &ap
.args
, &outarg
);
345 /* did it ask for retry? */
346 if (outarg
.flags
& FUSE_IOCTL_RETRY
) {
349 /* no retry if in restricted mode */
351 if (!(flags
& FUSE_IOCTL_UNRESTRICTED
))
354 in_iovs
= outarg
.in_iovs
;
355 out_iovs
= outarg
.out_iovs
;
358 * Make sure things are in boundary, separate checks
359 * are to protect against overflow.
362 if (in_iovs
> FUSE_IOCTL_MAX_IOV
||
363 out_iovs
> FUSE_IOCTL_MAX_IOV
||
364 in_iovs
+ out_iovs
> FUSE_IOCTL_MAX_IOV
)
367 vaddr
= kmap_local_page(ap
.pages
[0]);
368 err
= fuse_copy_ioctl_iovec(fm
->fc
, iov_page
, vaddr
,
369 transferred
, in_iovs
+ out_iovs
,
370 (flags
& FUSE_IOCTL_COMPAT
) != 0);
376 out_iov
= in_iov
+ in_iovs
;
378 err
= fuse_verify_ioctl_iov(fm
->fc
, in_iov
, in_iovs
);
382 err
= fuse_verify_ioctl_iov(fm
->fc
, out_iov
, out_iovs
);
390 if (transferred
> inarg
.out_size
)
394 iov_iter_init(&ii
, ITER_DEST
, out_iov
, out_iovs
, transferred
);
395 for (i
= 0; iov_iter_count(&ii
) && !WARN_ON(i
>= ap
.num_pages
); i
++) {
396 c
= copy_page_to_iter(ap
.pages
[i
], 0, PAGE_SIZE
, &ii
);
397 if (c
!= PAGE_SIZE
&& iov_iter_count(&ii
))
402 free_page((unsigned long) iov_page
);
404 __free_page(ap
.pages
[--ap
.num_pages
]);
407 return err
? err
: outarg
.result
;
409 EXPORT_SYMBOL_GPL(fuse_do_ioctl
);
411 long fuse_ioctl_common(struct file
*file
, unsigned int cmd
,
412 unsigned long arg
, unsigned int flags
)
414 struct inode
*inode
= file_inode(file
);
415 struct fuse_conn
*fc
= get_fuse_conn(inode
);
417 if (!fuse_allow_current_process(fc
))
420 if (fuse_is_bad(inode
))
423 return fuse_do_ioctl(file
, cmd
, arg
, flags
);
/* Native (non-compat) ioctl entry point. */
long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}
431 long fuse_file_compat_ioctl(struct file
*file
, unsigned int cmd
,
434 return fuse_ioctl_common(file
, cmd
, arg
, FUSE_IOCTL_COMPAT
);
437 static int fuse_priv_ioctl(struct inode
*inode
, struct fuse_file
*ff
,
438 unsigned int cmd
, void *ptr
, size_t size
)
440 struct fuse_mount
*fm
= ff
->fm
;
441 struct fuse_ioctl_in inarg
;
442 struct fuse_ioctl_out outarg
;
446 memset(&inarg
, 0, sizeof(inarg
));
450 #if BITS_PER_LONG == 32
451 inarg
.flags
|= FUSE_IOCTL_32BIT
;
453 if (S_ISDIR(inode
->i_mode
))
454 inarg
.flags
|= FUSE_IOCTL_DIR
;
456 if (_IOC_DIR(cmd
) & _IOC_READ
)
457 inarg
.out_size
= size
;
458 if (_IOC_DIR(cmd
) & _IOC_WRITE
)
459 inarg
.in_size
= size
;
461 args
.opcode
= FUSE_IOCTL
;
462 args
.nodeid
= ff
->nodeid
;
464 args
.in_args
[0].size
= sizeof(inarg
);
465 args
.in_args
[0].value
= &inarg
;
466 args
.in_args
[1].size
= inarg
.in_size
;
467 args
.in_args
[1].value
= ptr
;
468 args
.out_numargs
= 2;
469 args
.out_args
[1].size
= inarg
.out_size
;
470 args
.out_args
[1].value
= ptr
;
472 err
= fuse_send_ioctl(fm
, &args
, &outarg
);
474 if (outarg
.result
< 0)
476 else if (outarg
.flags
& FUSE_IOCTL_RETRY
)
482 static struct fuse_file
*fuse_priv_ioctl_prepare(struct inode
*inode
)
484 struct fuse_mount
*fm
= get_fuse_mount(inode
);
485 bool isdir
= S_ISDIR(inode
->i_mode
);
487 if (!fuse_allow_current_process(fm
->fc
))
488 return ERR_PTR(-EACCES
);
490 if (fuse_is_bad(inode
))
491 return ERR_PTR(-EIO
);
493 if (!S_ISREG(inode
->i_mode
) && !isdir
)
494 return ERR_PTR(-ENOTTY
);
496 return fuse_file_open(fm
, get_node_id(inode
), O_RDONLY
, isdir
);
499 static void fuse_priv_ioctl_cleanup(struct inode
*inode
, struct fuse_file
*ff
)
501 fuse_file_release(inode
, ff
, O_RDONLY
, NULL
, S_ISDIR(inode
->i_mode
));
504 int fuse_fileattr_get(struct dentry
*dentry
, struct fileattr
*fa
)
506 struct inode
*inode
= d_inode(dentry
);
507 struct fuse_file
*ff
;
512 ff
= fuse_priv_ioctl_prepare(inode
);
516 if (fa
->flags_valid
) {
517 err
= fuse_priv_ioctl(inode
, ff
, FS_IOC_GETFLAGS
,
518 &flags
, sizeof(flags
));
522 fileattr_fill_flags(fa
, flags
);
524 err
= fuse_priv_ioctl(inode
, ff
, FS_IOC_FSGETXATTR
,
529 fileattr_fill_xflags(fa
, xfa
.fsx_xflags
);
530 fa
->fsx_extsize
= xfa
.fsx_extsize
;
531 fa
->fsx_nextents
= xfa
.fsx_nextents
;
532 fa
->fsx_projid
= xfa
.fsx_projid
;
533 fa
->fsx_cowextsize
= xfa
.fsx_cowextsize
;
536 fuse_priv_ioctl_cleanup(inode
, ff
);
541 int fuse_fileattr_set(struct mnt_idmap
*idmap
,
542 struct dentry
*dentry
, struct fileattr
*fa
)
544 struct inode
*inode
= d_inode(dentry
);
545 struct fuse_file
*ff
;
546 unsigned int flags
= fa
->flags
;
550 ff
= fuse_priv_ioctl_prepare(inode
);
554 if (fa
->flags_valid
) {
555 err
= fuse_priv_ioctl(inode
, ff
, FS_IOC_SETFLAGS
,
556 &flags
, sizeof(flags
));
560 memset(&xfa
, 0, sizeof(xfa
));
561 xfa
.fsx_xflags
= fa
->fsx_xflags
;
562 xfa
.fsx_extsize
= fa
->fsx_extsize
;
563 xfa
.fsx_nextents
= fa
->fsx_nextents
;
564 xfa
.fsx_projid
= fa
->fsx_projid
;
565 xfa
.fsx_cowextsize
= fa
->fsx_cowextsize
;
567 err
= fuse_priv_ioctl(inode
, ff
, FS_IOC_FSSETXATTR
,
572 fuse_priv_ioctl_cleanup(inode
, ff
);