// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/read_write.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= filemap_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);
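
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem that
 * exposes plain read-only regular files backed by the page cache can simply
 * point the inode at generic_ro_fops. The "examplefs" names below are
 * hypothetical.
 *
 *	static void examplefs_init_file_inode(struct inode *inode)
 *	{
 *		inode->i_fop = &generic_ro_fops;
 *		inode->i_mapping->a_ops = &examplefs_aops;
 *	}
 */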
static inline bool unsigned_offsets(struct file *file)
{
	return file->f_op->fop_flags & FOP_UNSIGNED_OFFSET;
}
/**
 * vfs_setpos_cookie - update the file offset for lseek and reset cookie
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 * @cookie: cookie to reset
 *
 * Update the file offset to the value specified by @offset if the given
 * offset is valid and it is not equal to the current file offset and
 * reset the specified cookie to indicate that a seek happened.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
				loff_t maxsize, u64 *cookie)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (cookie)
			*cookie = 0;
	}
	return offset;
}
/**
 * vfs_setpos - update the file offset for lseek
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	return vfs_setpos_cookie(file, offset, maxsize, NULL);
}
EXPORT_SYMBOL(vfs_setpos);
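
/*
 * Usage sketch (illustrative only): a driver exposing a fixed 4 KiB register
 * window could validate and commit the new position with vfs_setpos(); the
 * "regwin" names are hypothetical.
 *
 *	static loff_t regwin_llseek(struct file *file, loff_t off, int whence)
 *	{
 *		if (whence == SEEK_CUR)
 *			off += file->f_pos;
 *		else if (whence == SEEK_END)
 *			off += SZ_4K;
 *		else if (whence != SEEK_SET)
 *			return -EINVAL;
 *		return vfs_setpos(file, off, SZ_4K);
 *	}
 */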
/**
 * must_set_pos - check whether f_pos has to be updated
 * @file: file to seek on
 * @offset: offset to use
 * @whence: type of seek operation
 * @eof: end of file
 *
 * Check whether f_pos needs to be updated and update @offset according
 * to @whence.
 *
 * Return: 0 if f_pos doesn't need to be updated, 1 if f_pos has to be
 * updated, and negative error code on failure.
 */
static int must_set_pos(struct file *file, loff_t *offset, int whence, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		*offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (*offset == 0) {
			*offset = file->f_pos;
			return 0;
		}
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		*offset = eof;
		break;
	}

	return 1;
}
/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @maxsize: max size of this file in file system
 * @eof: offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	int ret;

	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	if (whence == SEEK_CUR) {
		/*
		 * f_lock protects against read/modify/write race with
		 * other SEEK_CURs. Note that parallel writes and reads
		 * behave like SEEK_SET.
		 */
		guard(spinlock)(&file->f_lock);
		return vfs_setpos(file, file->f_pos + offset, maxsize);
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
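
/*
 * Usage sketch (illustrative only): a filesystem whose directory position is
 * a hash rather than a byte offset can pass its own maximum size and EOF
 * value; "examplefs" and EXAMPLEFS_MAX_HASH are hypothetical.
 *
 *	static loff_t examplefs_dir_llseek(struct file *file, loff_t off,
 *					   int whence)
 *	{
 *		return generic_file_llseek_size(file, off, whence,
 *						EXAMPLEFS_MAX_HASH,
 *						EXAMPLEFS_MAX_HASH);
 *	}
 */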
/**
 * generic_llseek_cookie - versioned llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @cookie: cookie to update
 *
 * See generic_file_llseek for a general description and locking assumptions.
 *
 * In contrast to generic_file_llseek, this function also resets a
 * specified cookie to indicate a seek took place.
 */
loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
			     u64 *cookie)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxsize = inode->i_sb->s_maxbytes;
	loff_t eof = i_size_read(inode);
	int ret;

	if (WARN_ON_ONCE(!cookie))
		return -EINVAL;

	/*
	 * Require that this is only used for directories that guarantee
	 * synchronization between readdir and seek so that an update to
	 * @cookie is correctly synchronized with concurrent readdir.
	 */
	if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS)))
		return -EINVAL;

	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	/* No need to hold f_lock because we know that f_pos_lock is held. */
	if (whence == SEEK_CUR)
		return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie);

	return vfs_setpos_cookie(file, offset, maxsize, cookie);
}
EXPORT_SYMBOL(generic_llseek_cookie);
/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is a generic implementation of ->llseek useable for all normal local
 * filesystems.  It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);
/**
 * fixed_size_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: size of the file
 *
 */
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR: case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						size, size);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(fixed_size_llseek);
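
/*
 * Usage sketch (illustrative only): a character device exposing a fixed-size
 * EEPROM image could forward its ->llseek to fixed_size_llseek(); the
 * "eeprom" names are hypothetical.
 *
 *	static loff_t eeprom_llseek(struct file *file, loff_t off, int whence)
 *	{
 *		return fixed_size_llseek(file, off, whence, EEPROM_SIZE);
 *	}
 */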
/**
 * no_seek_end_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 */
loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR:
		return generic_file_llseek_size(file, offset, whence,
						OFFSET_MAX, 0);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(no_seek_end_llseek);

/**
 * no_seek_end_llseek_size - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: maximal offset allowed
 *
 */
loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR:
		return generic_file_llseek_size(file, offset, whence,
						size, 0);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(no_seek_end_llseek_size);

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is an implementation of ->llseek useable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);
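
/*
 * Usage sketch (illustrative only): a debugfs-style file that ignores the
 * position but must not fail lseek() can wire up noop_llseek() directly;
 * "example_read" and "example_fops" are hypothetical.
 *
 *	static const struct file_operations example_fops = {
 *		.owner	= THIS_MODULE,
 *		.read	= example_read,
 *		.llseek	= noop_llseek,
 *	};
 */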
loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	inode_lock(inode);
	switch (whence) {
		case SEEK_END:
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			if (offset == 0) {
				retval = file->f_pos;
				goto out;
			}
			offset += file->f_pos;
			break;
		case SEEK_DATA:
			/*
			 * In the generic case the entire file is data, so as
			 * long as offset isn't at the end of the file then the
			 * offset is data.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			break;
		case SEEK_HOLE:
			/*
			 * There is a virtual hole at the end of the file, so
			 * as long as offset isn't i_size or larger, return
			 * i_size.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			offset = inode->i_size;
			break;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos)
			file->f_pos = offset;
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	if (!(file->f_mode & FMODE_LSEEK))
		return -ESPIPE;
	return file->f_op->llseek(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);
static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
{
	off_t retval;
	struct fd f = fdget_pos(fd);
	if (!fd_file(f))
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(fd_file(f), offset, whence);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput_pos(f);
	return retval;
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}
#endif

#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
	defined(__ARCH_WANT_SYS_LLSEEK)
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget_pos(fd);
	loff_t offset;

	if (!fd_file(f))
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (retval >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput_pos(f);
	return retval;
}
#endif
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	int mask = read_write == READ ? MAY_READ : MAY_WRITE;
	int ret;

	if (unlikely((ssize_t) count < 0))
		return -EINVAL;

	if (ppos) {
		loff_t pos = *ppos;

		if (unlikely(pos < 0)) {
			if (!unsigned_offsets(file))
				return -EINVAL;
			if (count >= -pos) /* both values are in 0..LLONG_MAX */
				return -EOVERFLOW;
		} else if (unlikely((loff_t) (pos + count) < 0)) {
			if (!unsigned_offsets(file))
				return -EINVAL;
		}
	}

	ret = security_file_permission(file, mask);
	if (ret)
		return ret;

	return fsnotify_file_area_perm(file, mask, ppos, count);
}
EXPORT_SYMBOL(rw_verify_area);

static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_DEST, buf, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

static int warn_unsupported(struct file *file, const char *op)
{
	pr_warn_ratelimited(
		"kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n",
		op, file, current->pid, current->comm);
	return -EINVAL;
}
ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
	struct kvec iov = {
		.iov_base	= buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
		return -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	/*
	 * Also fail if ->read_iter and ->read are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->read_iter || file->f_op->read))
		return warn_unsupported(file, "read");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
	ret = file->f_op->read_iter(&kiocb, &iter);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}
ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret)
		return ret;
	return __kernel_read(file, buf, count, pos);
}
EXPORT_SYMBOL(kernel_read);
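
/*
 * Usage sketch (illustrative only): in-kernel callers read file data into a
 * kernel buffer with kernel_read(), tracking the position themselves. The
 * helper below is hypothetical and only shows the calling convention.
 *
 *	static int example_read_header(struct file *file, void *hdr, size_t len)
 *	{
 *		loff_t pos = 0;
 *		ssize_t ret = kernel_read(file, hdr, len, &pos);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (ret != len)
 *			return -EIO;
 *		return 0;
 *	}
 */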
ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;

	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, pos);
	else if (file->f_op->read_iter)
		ret = new_sync_read(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}

static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ret > 0 && ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos)
{
	struct kiocb kiocb;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	/*
	 * Also fail if ->write_iter and ->write are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->write_iter || file->f_op->write))
		return warn_unsupported(file, "write");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	ret = file->f_op->write_iter(&kiocb, from);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}

/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
	struct kvec iov = {
		.iov_base	= (void *)buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct iov_iter iter;
	iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
	return __kernel_write_iter(file, &iter, pos);
}
/*
 * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
 * but autofs is one of the few internal kernel users that actually
 * wants this _and_ can be built as a module. So we need to export
 * this symbol for autofs, even though it really isn't appropriate
 * for any other kernel modules.
 */
EXPORT_SYMBOL_GPL(__kernel_write);
ssize_t kernel_write(struct file *file, const void *buf, size_t count,
			    loff_t *pos)
{
	ssize_t ret;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret)
		return ret;

	file_start_write(file);
	ret =  __kernel_write(file, buf, count, pos);
	file_end_write(file);
	return ret;
}
EXPORT_SYMBOL(kernel_write);
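
/*
 * Usage sketch (illustrative only): mirroring kernel_read(), an in-kernel
 * caller can loop over kernel_write() to handle short writes; names are
 * hypothetical.
 *
 *	static int example_write_all(struct file *file, const char *buf,
 *				     size_t len, loff_t *pos)
 *	{
 *		while (len) {
 *			ssize_t ret = kernel_write(file, buf, len, pos);
 *
 *			if (ret < 0)
 *				return ret;
 *			if (ret == 0)
 *				return -EIO;
 *			buf += ret;
 *			len -= ret;
 *		}
 *		return 0;
 *	}
 */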
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;
	file_start_write(file);
	if (file->f_op->write)
		ret = file->f_op->write(file, buf, count, pos);
	else if (file->f_op->write_iter)
		ret = new_sync_write(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	file_end_write(file);
	return ret;
}

/* file_ppos returns &file->f_pos or NULL if file is stream */
static inline loff_t *file_ppos(struct file *file)
{
	return file->f_mode & FMODE_STREAM ? NULL : &file->f_pos;
}

ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_read(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}
	return ret;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	return ksys_read(fd, buf, count);
}

ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_write(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	return ksys_write(fd, buf, count);
}
ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
		     loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PREAD)
			ret = vfs_read(fd_file(f), buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
			size_t, count, loff_t, pos)
{
	return ksys_pread64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif

ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
		      size_t count, loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PWRITE)
			ret = vfs_write(fd_file(f), buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
			 size_t, count, loff_t, pos)
{
	return ksys_pwrite64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = kiocb_set_rw_flags(&kiocb, flags, type);
	if (ret)
		return ret;
	kiocb.ki_pos = (ppos ? *ppos : 0);

	if (type == READ)
		ret = filp->f_op->read_iter(&kiocb, iter);
	else
		ret = filp->f_op->write_iter(&kiocb, iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	ssize_t ret = 0;

	if (flags & ~RWF_HIPRI)
		return -EOPNOTSUPP;

	while (iov_iter_count(iter)) {
		ssize_t nr;

		if (type == READ) {
			nr = filp->f_op->read(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		} else {
			nr = filp->f_op->write(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iter_iov_len(iter))
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}
ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
			   struct iov_iter *iter)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
	if (ret < 0)
		return ret;

	ret = file->f_op->read_iter(iocb, iter);
out:
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iocb_iter_read);

ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
		      rwf_t flags)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, ppos, tot_len);
	if (ret < 0)
		return ret;

	ret = do_iter_readv_writev(file, iter, ppos, READ, flags);
out:
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iter_read);
);
932 * Caller is responsible for calling kiocb_end_write() on completion
933 * if async iocb was queued.
935 ssize_t
vfs_iocb_iter_write(struct file
*file
, struct kiocb
*iocb
,
936 struct iov_iter
*iter
)
941 if (!file
->f_op
->write_iter
)
943 if (!(file
->f_mode
& FMODE_WRITE
))
945 if (!(file
->f_mode
& FMODE_CAN_WRITE
))
948 tot_len
= iov_iter_count(iter
);
951 ret
= rw_verify_area(WRITE
, file
, &iocb
->ki_pos
, tot_len
);
955 kiocb_start_write(iocb
);
956 ret
= file
->f_op
->write_iter(iocb
, iter
);
957 if (ret
!= -EIOCBQUEUED
)
958 kiocb_end_write(iocb
);
960 fsnotify_modify(file
);
964 EXPORT_SYMBOL(vfs_iocb_iter_write
);
966 ssize_t
vfs_iter_write(struct file
*file
, struct iov_iter
*iter
, loff_t
*ppos
,
972 if (!(file
->f_mode
& FMODE_WRITE
))
974 if (!(file
->f_mode
& FMODE_CAN_WRITE
))
976 if (!file
->f_op
->write_iter
)
979 tot_len
= iov_iter_count(iter
);
983 ret
= rw_verify_area(WRITE
, file
, ppos
, tot_len
);
987 file_start_write(file
);
988 ret
= do_iter_readv_writev(file
, iter
, ppos
, WRITE
, flags
);
990 fsnotify_modify(file
);
991 file_end_write(file
);
995 EXPORT_SYMBOL(vfs_iter_write
);
static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
			 unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;

	ret = rw_verify_area(READ, file, pos, tot_len);
	if (ret < 0)
		goto out;

	if (file->f_op->read_iter)
		ret = do_iter_readv_writev(file, &iter, pos, READ, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, READ, flags);
out:
	if (ret >= 0)
		fsnotify_access(file);
	kfree(iov);
	return ret;
}

static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
			  unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;

	ret = rw_verify_area(WRITE, file, pos, tot_len);
	if (ret < 0)
		goto out;

	file_start_write(file);
	if (file->f_op->write_iter)
		ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags);
	if (ret > 0)
		fsnotify_modify(file);
	file_end_write(file);
out:
	kfree(iov);
	return ret;
}
static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
			unsigned long vlen, rwf_t flags)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, rwf_t flags)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_writev(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
#undef HALF_LONG_BITS
}
static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, loff_t pos, rwf_t flags)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PREAD)
			ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
			  unsigned long vlen, loff_t pos, rwf_t flags)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PWRITE)
			ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_readv(fd, vec, vlen, 0);
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_writev(fd, vec, vlen, 0);
}

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_preadv(fd, vec, vlen, pos, 0);
}

SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);

	return do_preadv(fd, vec, vlen, pos, flags);
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_pwritev(fd, vec, vlen, pos, 0);
}

SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);

	return do_pwritev(fd, vec, vlen, pos, flags);
}
/*
 * Various compat syscalls.  Note that they all pretend to take a native
 * iovec - import_iovec will properly treat those as compat_iovecs based on
 * in_compat_syscall().
 */
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_preadv(fd, vec, vlen, pos, 0);
}
#endif

COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_preadv(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
		rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_pwritev(fd, vec, vlen, pos, 0);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_pwritev(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
		const struct iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif /* CONFIG_COMPAT */
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	struct pipe_inode_info *opipe;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!fd_file(in))
		goto out;
	if (!(fd_file(in)->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos) {
		pos = fd_file(in)->f_pos;
	} else {
		pos = *ppos;
		if (!(fd_file(in)->f_mode & FMODE_PREAD))
			goto fput_in;
	}
	retval = rw_verify_area(READ, fd_file(in), &pos, count);
	if (retval < 0)
		goto fput_in;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!fd_file(out))
		goto fput_in;
	if (!(fd_file(out)->f_mode & FMODE_WRITE))
		goto fput_out;
	in_inode = file_inode(fd_file(in));
	out_inode = file_inode(fd_file(out));
	out_pos = fd_file(out)->f_pos;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (fd_file(in)->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	opipe = get_pipe_info(fd_file(out), true);
	if (!opipe) {
		retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count);
		if (retval < 0)
			goto fput_out;
		retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos,
					  count, fl);
	} else {
		if (fd_file(out)->f_flags & O_NONBLOCK)
			fl |= SPLICE_F_NONBLOCK;

		retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl);
	}

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(fd_file(in));
		fsnotify_modify(fd_file(out));
		fd_file(out)->f_pos = out_pos;
		if (ppos)
			*ppos = pos;
		else
			fd_file(in)->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif
/*
 * Performs necessary checks before doing a file copy
 *
 * Can adjust amount of bytes to copy via @req_count argument.
 * Returns appropriate error code that caller should return or
 * zero in case the copy should be allowed.
 */
static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t *req_count, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	uint64_t count = *req_count;
	loff_t size_in;
	int ret;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret)
		return ret;

	/*
	 * We allow some filesystems to handle cross sb copy, but passing
	 * a file of the wrong filesystem type to filesystem driver can result
	 * in an attempt to dereference the wrong type of ->private_data, so
	 * avoid doing that until we really have a good reason.
	 *
	 * nfs and cifs define several different file_system_type structures
	 * and several different sets of file_operations, but they all end up
	 * using the same ->copy_file_range() function pointer.
	 */
	if (flags & COPY_FILE_SPLICE) {
		/* cross sb splice is allowed */
	} else if (file_out->f_op->copy_file_range) {
		if (file_in->f_op->copy_file_range !=
		    file_out->f_op->copy_file_range)
			return -EXDEV;
	} else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
		return -EXDEV;
	}

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EOVERFLOW;

	/* Shorten the copy to EOF */
	size_in = i_size_read(inode_in);
	if (pos_in >= size_in)
		count = 0;
	else
		count = min(count, size_in - (uint64_t)pos_in);

	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/* Don't allow overlapped copying within the same file. */
	if (inode_in == inode_out &&
	    pos_out + count > pos_in &&
	    pos_out < pos_in + count)
		return -EINVAL;

	*req_count = count;
	return 0;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success.  When it does so is up to
 * the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	ssize_t ret;
	bool splice = flags & COPY_FILE_SPLICE;
	bool samesb = file_inode(file_in)->i_sb == file_inode(file_out)->i_sb;

	if (flags & ~COPY_FILE_SPLICE)
		return -EINVAL;

	ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
				       flags);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (len == 0)
		return 0;

	file_start_write(file_out);

	/*
	 * Cloning is supported by more file systems, so we implement copy on
	 * same sb using clone, but for filesystems where both clone and copy
	 * are supported (e.g. nfs,cifs), we only call the copy method.
	 */
	if (!splice && file_out->f_op->copy_file_range) {
		ret = file_out->f_op->copy_file_range(file_in, pos_in,
						      file_out, pos_out,
						      len, flags);
	} else if (!splice && file_in->f_op->remap_file_range && samesb) {
		ret = file_in->f_op->remap_file_range(file_in, pos_in,
				file_out, pos_out,
				min_t(loff_t, MAX_RW_COUNT, len),
				REMAP_FILE_CAN_SHORTEN);
		/* fallback to splice */
		if (ret <= 0)
			splice = true;
	} else if (samesb) {
		/* Fallback to splice for same sb copy for backward compat */
		splice = true;
	}

	file_end_write(file_out);

	if (!splice)
		goto done;

	/*
	 * We can get here for same sb copy of filesystems that do not implement
	 * ->copy_file_range() in case filesystem does not support clone or in
	 * case filesystem supports clone but rejected the clone request (e.g.
	 * because it was not block aligned).
	 *
	 * In both cases, fall back to kernel copy so we are able to maintain a
	 * consistent story about which filesystems support copy_file_range()
	 * and which filesystems do not, that will allow userspace tools to
	 * make consistent decisions w.r.t using copy_file_range().
	 *
	 * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE
	 * for server-side-copy between any two sb.
	 *
	 * In any case, we call do_splice_direct() and not splice_file_range(),
	 * without file_start_write() held, to avoid possible deadlocks related
	 * to splicing from input file, while file_start_write() is held on
	 * the output file on a different sb.
	 */
	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
			       min_t(size_t, len, MAX_RW_COUNT), 0);
done:
	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}

	inc_syscr(current);
	inc_syscw(current);

	return ret;
}
EXPORT_SYMBOL(vfs_copy_file_range);
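
/*
 * Usage sketch (illustrative only): an in-kernel server-side-copy caller
 * (nfsd-like) may pass COPY_FILE_SPLICE to allow the splice fallback across
 * superblocks; the wrapper below is hypothetical.
 *
 *	static ssize_t example_server_copy(struct file *in, loff_t pos_in,
 *					   struct file *out, loff_t pos_out,
 *					   size_t len)
 *	{
 *		return vfs_copy_file_range(in, pos_in, out, pos_out, len,
 *					   COPY_FILE_SPLICE);
 *	}
 */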
SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	loff_t pos_in;
	loff_t pos_out;
	struct fd f_in;
	struct fd f_out;
	ssize_t ret = -EBADF;

	f_in = fdget(fd_in);
	if (!fd_file(f_in))
		goto out2;

	f_out = fdget(fd_out);
	if (!fd_file(f_out))
		goto out1;

	ret = -EFAULT;
	if (off_in) {
		if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
			goto out;
	} else {
		pos_in = fd_file(f_in)->f_pos;
	}

	if (off_out) {
		if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
			goto out;
	} else {
		pos_out = fd_file(f_out)->f_pos;
	}

	ret = -EINVAL;
	if (flags != 0)
		goto out;

	ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len,
				  flags);
	if (ret > 0) {
		pos_in += ret;
		pos_out += ret;

		if (off_in) {
			if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_in)->f_pos = pos_in;
		}

		if (off_out) {
			if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_out)->f_pos = pos_out;
		}
	}

out:
	fdput(f_out);
out1:
	fdput(f_in);
out2:
	return ret;
}
/*
 * Don't operate on ranges the page cache doesn't support, and don't exceed the
 * LFS limits.  If pos is under the limit it becomes a short access.  If it
 * exceeds the limit we return -EFBIG.
 */
int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count)
{
	struct inode *inode = file->f_mapping->host;
	loff_t max_size = inode->i_sb->s_maxbytes;
	loff_t limit = rlimit(RLIMIT_FSIZE);

	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		*count = min(*count, limit - pos);
	}

	if (!(file->f_flags & O_LARGEFILE))
		max_size = MAX_NON_LFS;

	if (unlikely(pos >= max_size))
		return -EFBIG;

	*count = min(*count, max_size - pos);

	return 0;
}
EXPORT_SYMBOL_GPL(generic_write_check_limits);

/* Like generic_write_checks(), but takes size of write instead of iter. */
int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	if (!*count)
		return 0;

	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !((iocb->ki_flags & IOCB_DIRECT) ||
	      (file->f_op->fop_flags & FOP_BUFFER_WASYNC)))
		return -EINVAL;

	return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count);
}
EXPORT_SYMBOL(generic_write_checks_count);
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	loff_t count = iov_iter_count(from);
	int ret;

	ret = generic_write_checks_count(iocb, &count);
	if (ret)
		return ret;

	iov_iter_truncate(from, count);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);
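
/*
 * Usage sketch (illustrative only): a typical ->write_iter implementation
 * runs generic_write_checks() under the inode lock and bails out on zero or
 * error before doing any work; "examplefs" is hypothetical.
 *
 *	static ssize_t examplefs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = __generic_file_write_iter(iocb, from);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */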
/*
 * Performs common checks before doing a file copy/clone
 * from @file_in to @file_out.
 */
int generic_file_rw_checks(struct file *file_in, struct file *file_out)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);

	/* Don't copy dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	return 0;
}

bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos)
{
	size_t len = iov_iter_count(iter);

	if (!iter_is_ubuf(iter))
		return false;

	if (!is_power_of_2(len))
		return false;

	if (!IS_ALIGNED(pos, len))
		return false;

	return true;
}