/*
 *  linux/fs/read_write.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include "read_write.h"

#include <asm/uaccess.h>
#include <asm/unistd.h>

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/**
 * generic_file_llseek_unlocked - lockless generic llseek implementation
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @origin:	type of seek
 *
 * Updates the file offset to the value specified by @offset and @origin.
 * Locking must be provided by the caller.
 */
loff_t
generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;

	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		offset += file->f_pos;
		break;
	}

	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > inode->i_sb->s_maxbytes)
		return -EINVAL;

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

	return offset;
}
EXPORT_SYMBOL(generic_file_llseek_unlocked);

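/*
 * Illustrative sketch (not part of this file): a filesystem that already
 * holds its own lock across the seek can call the unlocked variant from its
 * ->llseek method; foo_llseek/foo_lock below are hypothetical names.
 *
 *	static loff_t foo_llseek(struct file *file, loff_t offset, int origin)
 *	{
 *		loff_t ret;
 *
 *		foo_lock(file);
 *		ret = generic_file_llseek_unlocked(file, offset, origin);
 *		foo_unlock(file);
 *		return ret;
 *	}
 */
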
/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @origin:	type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems.  It just updates the file offset to the value specified by
 * @offset and @origin under i_mutex.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t rval;

	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	rval = generic_file_llseek_unlocked(file, offset, origin);
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	return rval;
}
EXPORT_SYMBOL(generic_file_llseek);

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @origin:	type of seek
 *
 * This is an implementation of ->llseek usable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int origin)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);

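/*
 * no_llseek, by contrast, fails the seek outright with -ESPIPE; it is meant
 * for files on which a position makes no sense at all (pipes, sockets and
 * the like).
 */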
loff_t no_llseek(struct file *file, loff_t offset, int origin)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);

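/*
 * default_llseek does the same offset arithmetic as generic_file_llseek(),
 * but reads the size via i_size_read() and holds i_mutex across the whole
 * switch; drivers that want plain seekable behaviour can point their
 * ->llseek at it explicitly.
 */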
loff_t default_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;

	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	switch (origin) {
		case SEEK_END:
			offset += i_size_read(file->f_path.dentry->d_inode);
			break;
		case SEEK_CUR:
			if (offset == 0) {
				retval = file->f_pos;
				goto out;
			}
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

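/*
 * vfs_llseek dispatches a seek request: unless the file was opened seekable
 * (FMODE_LSEEK) and its file_operations provide ->llseek, the request is
 * refused via no_llseek().
 */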
loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t (*fn)(struct file *, loff_t, int);

	fn = no_llseek;
	if (file->f_mode & FMODE_LSEEK) {
		if (file->f_op && file->f_op->llseek)
			fn = file->f_op->llseek;
	}
	return fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
{
	off_t retval;
	struct file * file;
	int fput_needed;

	retval = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto bad;

	retval = -EINVAL;
	if (origin <= SEEK_MAX) {
		loff_t res = vfs_llseek(file, offset, origin);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fput_light(file, fput_needed);
bad:
	return retval;
}

#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, origin)
{
	int retval;
	struct file * file;
	loff_t offset;
	int fput_needed;

	retval = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto bad;

	retval = -EINVAL;
	if (origin > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
			origin);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fput_light(file, fput_needed);
bad:
	return retval;
}
#endif

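/*
 * Return convention used below: rw_verify_area() gives back a negative errno
 * on failure, otherwise the byte count to use, clamped to MAX_RW_COUNT;
 * vfs_read()/vfs_write() feed that value back in as the effective count.
 */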
/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 */
int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file->f_path.dentry->d_inode;
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely(pos < 0)) {
		if (!unsigned_offsets(file))
			return retval;
		if (count >= -pos) /* both values are in 0..LLONG_MAX */
			return -EOVERFLOW;
	} else if (unlikely((loff_t) (pos + count) < 0)) {
		if (!unsigned_offsets(file))
			return retval;
	}

	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}

static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (!kiocbIsKicked(iocb))
		schedule();
	else
		kiocbClearKicked(iocb);
	__set_current_state(TASK_RUNNING);
}

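/*
 * do_sync_read() and do_sync_write() below emulate a blocking read/write on
 * top of a driver's ->aio_read/->aio_write: they build a one-segment iovec
 * and a synchronous kiocb, retry while the aio method returns -EIOCBRETRY,
 * and wait for completion if the request was queued (-EIOCBQUEUED).
 */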
ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_read);

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->read)
			ret = file->f_op->read(file, buf, count, pos);
		else
			ret = do_sync_read(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_access(file);
			add_rchar(current, ret);
		}
		inc_syscr(current);
	}

	return ret;
}

EXPORT_SYMBOL(vfs_read);

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}

EXPORT_SYMBOL(vfs_write);

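/*
 * The plain read/write/readv/writev syscalls below copy f_pos into a local
 * via file_pos_read(), do the I/O on that copy and store the result back
 * with file_pos_write(); the pread/pwrite family works on a caller-supplied
 * position and leaves f_pos untouched.
 */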
static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}

static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_read(file, buf, count, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_write(file, buf, count, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
			size_t count, loff_t pos)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PREAD)
			ret = vfs_read(file, buf, count, &pos);
		fput_light(file, fput_needed);
	}

	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos)
{
	return SYSC_pread64((unsigned int) fd, (char __user *) buf,
			    (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pread64, SyS_pread64);
#endif

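/*
 * Background note (not from this file): on architectures that select
 * CONFIG_HAVE_SYSCALL_WRAPPERS the syscall arguments arrive as longs, so
 * syscalls taking a 64-bit loff_t get a hand-written SyS_* wrapper like the
 * one above to cast the register values back to their real types.
 */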
SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
			 size_t count, loff_t pos)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PWRITE)
			ret = vfs_write(file, buf, count, &pos);
		fput_light(file, fput_needed);
	}

	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos)
{
	return SYSC_pwrite64((unsigned int) fd, (const char __user *) buf,
			     (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pwrite64, SyS_pwrite64);
#endif

/*
 * Reduce an iovec's length in-place.  Return the resulting number of segments
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}
EXPORT_SYMBOL(iov_shorten);

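/*
 * Worked example (illustrative): shortening a 3-segment iovec of 100 bytes
 * per segment to 250 bytes leaves the first two segments untouched, trims
 * the third to 50 bytes and returns 3.
 */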
ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
	struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}

/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		if (len > MAX_RW_COUNT - ret) {
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}

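/*
 * do_readv_writev() is the common guts of readv(2)/writev(2): import and
 * validate the user iovec, prefer the driver's aio method when it has one
 * and otherwise loop over ->read/->write per segment, then send the
 * fsnotify event.  The '(ret + (type == READ)) > 0' test means a zero-byte
 * readv still generates an access event, while a zero-byte writev does not.
 */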
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;

	if (!file->f_op) {
		ret = -EINVAL;
		goto out;
	}

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
		  unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
		return -EINVAL;

	return do_readv_writev(READ, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_readv);

ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
		   unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
		return -EINVAL;

	return do_readv_writev(WRITE, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_writev);

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_readv(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_writev(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}

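/*
 * The shift above is done in two halves so that it stays well defined when
 * BITS_PER_LONG == 64: shifting a value by its full width in one go would
 * be undefined behaviour in C.
 */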
SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PREAD)
			ret = vfs_readv(file, vec, vlen, &pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(file, vec, vlen, &pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

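/*
 * do_sendfile() below is the common implementation behind sendfile(2) and
 * sendfile64(2): it validates both descriptors, clamps the count against
 * the smaller of the two filesystems' s_maxbytes when no explicit limit was
 * passed, and hands the actual transfer to do_splice_direct().
 */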
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct file * in_file, * out_file;
	struct inode * in_inode, * out_inode;
	loff_t pos;
	ssize_t retval;
	int fput_needed_in, fput_needed_out, fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in_file = fget_light(in_fd, &fput_needed_in);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in_file->f_pos;
	else
		if (!(in_file->f_mode & FMODE_PREAD))
			goto fput_in;
	retval = rw_verify_area(READ, in_file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out_file = fget_light(out_fd, &fput_needed_out);
	if (!out_file)
		goto fput_in;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = in_file->f_path.dentry->d_inode;
	out_inode = out_file->f_path.dentry->d_inode;
	retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in_file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	retval = do_splice_direct(in_file, ppos, out_file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fput_light(out_file, fput_needed_out);
fput_in:
	fput_light(in_file, fput_needed_in);
out:
	return retval;
}

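/*
 * sendfile(2) takes a 32-bit off_t position, so it caps the transfer at
 * MAX_NON_LFS; sendfile64(2) passes a limit of 0 and lets do_sendfile()
 * fall back to the filesystems' s_maxbytes.
 */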
SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}