2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
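/*
 * Illustrative userspace sketch (not part of this file, assumes the
 * splice(2) wrapper): a pipe acts as the in-kernel buffer between an
 * input fd and an output fd.
 *
 *	int pfd[2];
 *
 *	if (pipe(pfd) < 0)
 *		return -1;
 *	// file -> pipe: pages are linked into the pipe, ideally no copy
 *	ssize_t n = splice(in_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	// pipe -> file/socket: drain what we just filled
 *	if (n > 0)
 *		splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 */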
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page))
			try_to_release_page(page, GFP_KERNEL);

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
/*
 * Check whether the contents of buf is OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}
static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
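/*
 * Note on the two ops tables above: ->confirm() checks that a buffer's
 * contents are safe to access (page cache IO may still be in flight),
 * ->steal() tries to take the page away from its current owner so it can
 * be moved into another address space, ->release() drops the reference
 * the pipe holds on the page, and ->get()/->map()/->unmap() are the
 * generic reference and kmap helpers shared with plain pipe buffers.
 */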
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: pipe to fill
 * @spd: data to fill
 *
 * @spd contains a map of pages and len/offset tuples, along with
 * the struct pipe_buf_operations associated with these pages. This
 * function will link that data to the pipe.
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
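			/*
			 * PIPE_BUFFERS is a power of two, so masking with
			 * (PIPE_BUFFERS - 1) wraps the index around the
			 * circular buffer ring. E.g. with PIPE_BUFFERS == 16,
			 * curbuf == 14 and nrbufs == 3, the next free slot is
			 * (14 + 3) & 15 == 1.
			 */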
			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode) {
		mutex_unlock(&pipe->inode->i_mutex);

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
	}

	while (page_nr < spd_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
static ssize_t
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
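	/*
	 * Worked example: with 4k pages, *ppos == 5000 and len == 10000 give
	 * index == 1 (the page containing the offset), loff == 904 (offset
	 * inside that page) and nr_pages == (10000 + 904 + 4095) >> 12 == 3.
	 */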
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;
	/*
	 * Don't try to 2nd guess the read-ahead logic, call into
	 * page_cache_readahead() like the page cache reads would do.
	 */
	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest and fill in the holes.
	 */
	error = 0;
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}
	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];
		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))
					break;
			} else
				lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}
	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	return splice_to_pipe(pipe, &spd);
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe. Can be
 * used as long as the address_space operations for the source implement
 * a readpage() hook.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;
	loff_t isize, left;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = 0;
	spliced = 0;
	while (len && !spliced) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int ret, more;

	ret = buf->ops->confirm(pipe, buf);
	if (!ret) {
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
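/*
 * Roughly, the policy described above can be pictured as (sketch only,
 * not literal code from this function):
 *
 *	if ((sd->flags & SPLICE_F_MOVE) && the pipe page can be stolen)
 *		insert the stolen page into the output page cache;
 *	else
 *		find or allocate a page cache page and memcpy into it;
 */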
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;
find_page:
	page = find_lock_page(mapping, index);
	if (!page) {
		ret = -ENOMEM;
		page = page_cache_alloc_cold(mapping);
		if (unlikely(!page))
			goto out_ret;

		/*
		 * This will also lock the page
		 */
		ret = add_to_page_cache_lru(page, mapping, index,
					    GFP_KERNEL);
		if (unlikely(ret)) {
			page_cache_release(page);
			goto out_ret;
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}
	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret) {
		if (ret == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			goto find_page;
		}
		if (ret < 0)
			goto out;
		/*
		 * Partial write has happened, so 'ret' is already initialized
		 * to the number of bytes written; there is nothing more we
		 * have to do here.
		 */
	} else
		ret = this_len;
	/*
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	 */
	mark_page_accessed(page);
out:
	page_cache_release(page);
	unlock_page(page);
out_ret:
	return ret;
}
/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * This function does little more than loop over the pipe and call
 * @actor to do the actual moving of a single struct pipe_buffer to
 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
 * pipe_to_user.
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret, do_wakeup, err;

	ret = 0;
	do_wakeup = 0;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			const struct pipe_buf_operations *ops = buf->ops;

			sd->len = buf->len;
			if (sd->len > sd->total_len)
				sd->len = sd->total_len;

			err = actor(pipe, buf, sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd->len -= err;
			sd->pos += err;
			sd->total_len -= err;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd->total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->waiting_writers) {
			if (!ret)
				break;
		}

		if (sd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
EXPORT_SYMBOL(__splice_from_pipe);
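/*
 * Minimal actor sketch (illustrative only, not used anywhere in this file):
 * an actor is handed one pipe_buffer at a time together with the splice_desc
 * and returns how many bytes it handled, or a negative error.
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		int ret = buf->ops->confirm(pipe, buf);
 *
 *		if (unlikely(ret))
 *			return ret;
 *		return sd->len;		// pretend we consumed the data
 *	}
 */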
/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 *
 * See __splice_from_pipe. This function locks the input and output inodes,
 * otherwise it's identical to __splice_from_pipe().
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, &sd, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}
/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;
	int err;

	err = remove_suid(out->f_path.dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write_nolock);
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = should_remove_suid(out->f_path.dentry);
	if (unlikely(err)) {
		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_path.dentry, err);
		mutex_unlock(&inode->i_mutex);
		if (err)
			return err;
	}

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
generic_splice_sendpage(struct pipe_inode_info
*pipe
, struct file
*out
,
940 loff_t
*ppos
, size_t len
, unsigned int flags
)
942 return splice_from_pipe(pipe
, out
, ppos
, len
, flags
, pipe_to_sendpage
);
945 EXPORT_SYMBOL(generic_splice_sendpage
);
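/*
 * Illustrative userspace sketch (not part of this file): the classic
 * file -> socket path, which on the socket side typically lands in
 * generic_splice_sendpage(). SPLICE_F_MORE hints that more data follows.
 *
 *	ssize_t n = splice(file_fd, &off, pfd[1], NULL, chunk, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n,
 *		       SPLICE_F_MOVE | SPLICE_F_MORE);
 */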
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(out, MAY_WRITE);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(in, MAY_READ);
	if (unlikely(ret < 0))
		return ret;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 *
 * This is a special case helper to splice directly between two
 * points, without requiring an explicit pipe. Internally an allocated
 * pipe is cached in the process, and reused during the lifetime of
 * that process.
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0))
			goto out_release;

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len)
			goto out_release;
	}

	pipe->nrbufs = pipe->curbuf = 0;
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(splice_direct_to_actor);
);
1119 static int direct_splice_actor(struct pipe_inode_info
*pipe
,
1120 struct splice_desc
*sd
)
1122 struct file
*file
= sd
->u
.file
;
1124 return do_splice_from(pipe
, file
, &sd
->pos
, sd
->total_len
, sd
->flags
);
/**
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * For use by do_sendfile(). splice can easily emulate sendfile, but
 * doing it in the application would incur an extra system call
 * (splice in + splice out, as compared to just sendfile()). So this helper
 * can splice directly through a process-private pipe.
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos += ret;

	return ret;
}
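/*
 * Illustrative mapping (userspace view, not part of this file): a call such
 * as sendfile(out_fd, in_fd, &off, count) ends up here roughly as
 * do_splice_direct(in_file, &pos, out_file, count, 0), with the cached
 * per-task pipe acting as the intermediate buffer.
 */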
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;

	return NULL;
}
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = pipe_info(in->f_path.dentry->d_inode);
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = pipe_info(out->f_path.dentry->d_inode);
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * available.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
					      sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			sd->u.userptr += sd->len;
			return sd->len;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	if (ret > 0)
		sd->u.userptr += ret;
	buf->ops->unmap(pipe, buf, src);

	return ret;
}
/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;
			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (!ret)
		ret = error;

	return ret;
}
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.flags		= flags,
		.ops		= &user_page_pipe_buf_ops,
	};

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}
/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 */
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(file, iov, nr_segs, flags);
		else if (file->f_mode & FMODE_READ)
			error = vmsplice_to_user(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
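/*
 * Illustrative userspace sketch (not part of this file): gifting a user
 * buffer into a pipe with vmsplice(2). SPLICE_F_GIFT tells the kernel the
 * pages may later be stolen instead of copied, provided the buffer is page
 * aligned and a multiple of the page size (see get_iovec_page_array()).
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */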
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = link_opipe_prep(opipe, flags);
			if (!ret) {
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
					ret = -EAGAIN;
			}
		}
	}

	return ret;
}
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
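/*
 * Illustrative userspace sketch (not part of this file): duplicating data
 * from stdin to another pipe with tee(2), assuming stdin is itself a pipe
 * (tee() requires pipes on both ends). tee() never consumes its input, so
 * the same bytes can still be spliced elsewhere afterwards.
 *
 *	ssize_t n = tee(STDIN_FILENO, pfd[1], 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, log_fd, NULL, n, SPLICE_F_MOVE);
 */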