/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 */
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/module.h>
/*
 * NOTE(review): garbled partial extraction -- declarations are split across
 * physical lines and the "struct splice_desc {" opener/closer are missing
 * from this fragment. Code text kept byte-identical.
 *
 * Per-call state handed to the splice actor callbacks (see pipe_to_file /
 * pipe_to_sendpage below): current chunk length, remaining total, the
 * splice flags, the target file, and the file position.
 */
28 * Passed to the actors
31 unsigned int len
, total_len
; /* current and remaining length */
32 unsigned int flags
; /* splice flags */
33 struct file
*file
; /* file to read/write */
34 loff_t pos
; /* file position */
/*
 * NOTE(review): garbled partial extraction -- statements are split across
 * lines and several original source lines (braces, return paths) are
 * missing. Code text kept byte-identical.
 *
 * Attempt to "steal" the page backing a pipe buffer so the consumer can
 * take ownership of it. The visible fragments show: the page must be
 * locked and uptodate (WARN_ONs), it is removed from its address_space
 * via remove_mapping(), then taken off the zone LRU list under
 * zone->lru_lock. The success/failure return values are on lines missing
 * from this fragment -- TODO confirm against the full file.
 */
37 static int page_cache_pipe_buf_steal(struct pipe_inode_info
*info
,
38 struct pipe_buffer
*buf
)
40 struct page
*page
= buf
->page
;
/* sanity: a stealable page-cache buf page is expected locked + uptodate */
42 WARN_ON(!PageLocked(page
));
43 WARN_ON(!PageUptodate(page
));
/* detach the page from its mapping; failure path is on a missing line */
45 if (!remove_mapping(page_mapping(page
), page
))
49 struct zone
*zone
= page_zone(page
);
/* pull the page off the zone LRU under the LRU lock */
51 spin_lock_irq(&zone
->lru_lock
);
52 BUG_ON(!PageLRU(page
));
54 del_page_from_lru(zone
, page
);
55 spin_unlock_irq(&zone
->lru_lock
);
/*
 * NOTE(review): garbled partial extraction; braces missing. Kept
 * byte-identical.
 *
 * Release callback for a page-cache-backed pipe buffer: drops the
 * page-cache reference taken on buf->page.
 */
62 static void page_cache_pipe_buf_release(struct pipe_inode_info
*info
,
63 struct pipe_buffer
*buf
)
65 page_cache_release(buf
->page
);
/*
 * NOTE(review): garbled partial extraction -- several original lines
 * (braces, the page-lock step implied by the unmap counterpart) are
 * missing. Kept byte-identical.
 *
 * Map callback: returns a kernel mapping of the buffer's page via kmap(),
 * or ERR_PTR(-ENODATA) if the page is not uptodate (the visible fragment
 * shows the !PageUptodate branch ending in the ENODATA return).
 */
70 static void *page_cache_pipe_buf_map(struct file
*file
,
71 struct pipe_inode_info
*info
,
72 struct pipe_buffer
*buf
)
74 struct page
*page
= buf
->page
;
78 if (!PageUptodate(page
)) {
/* data never arrived (or was truncated) -- report as ENODATA */
85 return ERR_PTR(-ENODATA
);
88 return kmap(buf
->page
);
/*
 * NOTE(review): garbled partial extraction; braces missing. Kept
 * byte-identical.
 *
 * Unmap callback: the visible fragment only unlocks the page -- the
 * matching kunmap() for the kmap() done in ..._buf_map is presumably on
 * a missing line; verify against the full file.
 */
91 static void page_cache_pipe_buf_unmap(struct pipe_inode_info
*info
,
92 struct pipe_buffer
*buf
)
95 unlock_page(buf
->page
);
/*
 * NOTE(review): garbled partial extraction -- the closing "};" (and any
 * other initializers) are on missing lines. Kept byte-identical.
 *
 * Operations vector for pipe buffers whose pages come from the page
 * cache; wires up the map/unmap/release/steal callbacks defined above.
 */
99 static struct pipe_buf_operations page_cache_pipe_buf_ops
= {
101 .map
= page_cache_pipe_buf_map
,
102 .unmap
= page_cache_pipe_buf_unmap
,
103 .release
= page_cache_pipe_buf_release
,
104 .steal
= page_cache_pipe_buf_steal
,
/*
 * NOTE(review): garbled partial extraction -- large spans of the original
 * function (loop headers, break/continue/return paths, wait logic) are
 * missing between the visible fragments. Kept byte-identical.
 *
 * Fill the pipe behind @inode with up to @nr_pages pages. Visible logic:
 * under PIPE_MUTEX, raise SIGPIPE if there are no readers; while slots
 * remain (bufs < PIPE_BUFFERS) install each page into the next pipe
 * buffer slot (offset/ops = page_cache_pipe_buf_ops, nrbufs++); handle
 * pending signals; wake readers and send SIGIO/POLL_IN via kill_fasync;
 * bump/drop PIPE_WAITING_WRITERS around a (missing) wait; on exit release
 * any pages not consumed. Return-value computation is on missing lines.
 */
107 static ssize_t
move_to_pipe(struct inode
*inode
, struct page
**pages
,
108 int nr_pages
, unsigned long offset
,
111 struct pipe_inode_info
*info
;
112 int ret
, do_wakeup
, i
;
118 mutex_lock(PIPE_MUTEX(*inode
));
120 info
= inode
->i_pipe
;
/* no readers left: splice into a broken pipe raises SIGPIPE */
124 if (!PIPE_READERS(*inode
)) {
125 send_sig(SIGPIPE
, current
, 0);
/* room in the pipe: claim the next circular buffer slot */
132 if (bufs
< PIPE_BUFFERS
) {
133 int newbuf
= (info
->curbuf
+ bufs
) & (PIPE_BUFFERS
- 1);
134 struct pipe_buffer
*buf
= info
->bufs
+ newbuf
;
135 struct page
*page
= pages
[i
++];
136 unsigned long this_len
;
/* first chunk may start mid-page; clamp to end of page */
138 this_len
= PAGE_CACHE_SIZE
- offset
;
143 buf
->offset
= offset
;
145 buf
->ops
= &page_cache_pipe_buf_ops
;
146 info
->nrbufs
= ++bufs
;
156 if (bufs
< PIPE_BUFFERS
)
162 if (signal_pending(current
)) {
/* wake sleeping readers and notify async waiters */
169 wake_up_interruptible_sync(PIPE_WAIT(*inode
));
170 kill_fasync(PIPE_FASYNC_READERS(*inode
), SIGIO
,
175 PIPE_WAITING_WRITERS(*inode
)++;
177 PIPE_WAITING_WRITERS(*inode
)--;
180 mutex_unlock(PIPE_MUTEX(*inode
));
183 wake_up_interruptible(PIPE_WAIT(*inode
));
184 kill_fasync(PIPE_FASYNC_READERS(*inode
), SIGIO
, POLL_IN
);
/* drop references on pages we did not manage to splice in */
188 page_cache_release(pages
[i
++]);
/*
 * NOTE(review): garbled partial extraction -- loop bodies, error returns
 * and several statements are on missing lines. Kept byte-identical.
 *
 * Read up to @len bytes of @in's page cache starting at in->f_pos and
 * splice the pages into @pipe. Visible flow: compute index/offset and a
 * nr_pages count clamped to PIPE_BUFFERS; kick readahead; grab what the
 * page cache already has via find_get_pages(); if the result is not a
 * full contiguous run, scatter the found pages into shadow[] by index
 * and fill the holes with find_or_create_page() + ->readpage(); finally
 * hand the assembled page array to move_to_pipe().
 */
193 static int __generic_file_splice_read(struct file
*in
, struct inode
*pipe
,
196 struct address_space
*mapping
= in
->f_mapping
;
197 unsigned int offset
, nr_pages
;
198 struct page
*pages
[PIPE_BUFFERS
], *shadow
[PIPE_BUFFERS
];
/* split f_pos into page index + intra-page offset */
203 index
= in
->f_pos
>> PAGE_CACHE_SHIFT
;
204 offset
= in
->f_pos
& ~PAGE_CACHE_MASK
;
205 nr_pages
= (len
+ offset
+ PAGE_CACHE_SIZE
- 1) >> PAGE_CACHE_SHIFT
;
/* never exceed the pipe's buffer capacity in one go */
207 if (nr_pages
> PIPE_BUFFERS
)
208 nr_pages
= PIPE_BUFFERS
;
211 * initiate read-ahead on this page range
213 do_page_cache_readahead(mapping
, in
, index
, nr_pages
);
216 * Get as many pages from the page cache as possible..
217 * Start IO on the page cache entries we create (we
218 * can assume that any pre-existing ones we find have
219 * already had IO started on them).
221 i
= find_get_pages(mapping
, index
, nr_pages
, pages
);
224 * common case - we found all pages and they are contiguous,
227 if (i
&& (pages
[i
- 1]->index
== index
+ i
- 1))
231 * fill shadow[] with pages at the right locations, so we only
234 memset(shadow
, 0, i
* sizeof(struct page
*));
235 for (j
= 0, pidx
= index
; j
< i
; pidx
++, j
++)
236 shadow
[pages
[j
]->index
- pidx
] = pages
[j
];
239 * now fill in the holes
241 for (i
= 0, pidx
= index
; i
< nr_pages
; pidx
++, i
++) {
248 * no page there, look one up / create it
250 page
= find_or_create_page(mapping
, pidx
,
251 mapping_gfp_mask(mapping
));
255 if (PageUptodate(page
))
/* freshly created page: start I/O to bring it uptodate */
258 error
= mapping
->a_ops
->readpage(in
, page
);
260 if (unlikely(error
)) {
261 page_cache_release(page
);
/* error path: drop the shadow references we still hold */
269 for (i
= 0; i
< nr_pages
; i
++) {
271 page_cache_release(shadow
[i
]);
276 memcpy(pages
, shadow
, i
* sizeof(struct page
*));
279 * Now we splice them into the pipe..
282 return move_to_pipe(pipe
, pages
, i
, offset
, len
);
/*
 * NOTE(review): garbled partial extraction -- the surrounding loop /
 * bookkeeping (ret accumulation, f_pos advance, flag handling) is on
 * missing lines. Kept byte-identical.
 *
 * Public splice-read entry: delegates to __generic_file_splice_read()
 * for the actual page-cache -> pipe transfer.
 */
285 ssize_t
generic_file_splice_read(struct file
*in
, struct inode
*pipe
,
286 size_t len
, unsigned int flags
)
294 ret
= __generic_file_splice_read(in
, pipe
, len
);
/*
 * NOTE(review): garbled partial extraction -- declarations of ptr/offset/
 * ret and the return statement are on missing lines. Kept byte-identical.
 *
 * Splice actor: push one pipe buffer to a socket. Maps the buffer (to pin
 * the page, per the original comment), computes the intra-page offset from
 * sd->pos, calls the file's ->sendpage() with a "more data follows" hint
 * (sd->len < sd->total_len), then unmaps the buffer.
 */
311 * Send 'len' bytes to socket from 'file' at position 'pos' using sendpage().
313 static int pipe_to_sendpage(struct pipe_inode_info
*info
,
314 struct pipe_buffer
*buf
, struct splice_desc
*sd
)
316 struct file
*file
= sd
->file
;
317 loff_t pos
= sd
->pos
;
323 * sub-optimal, but we are limited by the pipe ->map. we don't
324 * need a kmap'ed buffer here, we just want to make sure we
325 * have the page pinned if the pipe page originates from the
328 ptr
= buf
->ops
->map(file
, info
, buf
);
332 offset
= pos
& ~PAGE_CACHE_MASK
;
/* last argument: nonzero while more of the total transfer remains */
334 ret
= file
->f_op
->sendpage(file
, buf
->page
, offset
, sd
->len
, &pos
,
335 sd
->len
< sd
->total_len
);
337 buf
->ops
->unmap(info
, buf
);
/*
 * NOTE(review): garbled partial extraction -- declarations (src, page,
 * index, offset, ret), error-handling branches, the repeat label and the
 * return statement are on missing lines. Kept byte-identical.
 *
 * Splice actor: write one pipe buffer into the page cache of sd->file.
 * Visible flow: map the source pipe buffer; compute target index/offset
 * from sd->pos; if SPLICE_F_MOVE, try to steal the pipe page and insert
 * it directly via add_to_page_cache_lru(), otherwise
 * find_or_create_page(); bring the destination uptodate (->readpage for
 * partial pages, SetPageUptodate when overwriting a whole page, with a
 * retry when the page got invalidated); then prepare_write, memcpy from
 * the mapped source, flush_dcache_page, commit_write; for (presumably)
 * SPLICE_F_MORE-less/sync cases mark dirty and write_one_page; finally
 * release the page and unmap the source buffer.
 */
345 * This is a little more tricky than the file -> pipe splicing. There are
346 * basically three cases:
348 * - Destination page already exists in the address space and there
349 * are users of it. For that case we have no other option that
350 * copying the data. Tough luck.
351 * - Destination page already exists in the address space, but there
352 * are no users of it. Make sure it's uptodate, then drop it. Fall
353 * through to last case.
354 * - Destination page does not exist, we can add the pipe page to
355 * the page cache and avoid the copy.
357 * For now we just do the slower thing and always copy pages over, it's
358 * easier than migrating pages from the pipe to the target file. For the
359 * case of doing file | file splicing, the migrate approach had some LRU
362 static int pipe_to_file(struct pipe_inode_info
*info
, struct pipe_buffer
*buf
,
363 struct splice_desc
*sd
)
365 struct file
*file
= sd
->file
;
366 struct address_space
*mapping
= file
->f_mapping
;
374 * after this, page will be locked and unmapped
376 src
= buf
->ops
->map(file
, info
, buf
);
380 index
= sd
->pos
>> PAGE_CACHE_SHIFT
;
381 offset
= sd
->pos
& ~PAGE_CACHE_MASK
;
384 * reuse buf page, if SPLICE_F_MOVE is set
386 if (sd
->flags
& SPLICE_F_MOVE
) {
387 if (buf
->ops
->steal(info
, buf
))
/* stolen page goes straight into the page cache, no copy */
391 if (add_to_page_cache_lru(page
, mapping
, index
,
392 mapping_gfp_mask(mapping
)))
397 page
= find_or_create_page(mapping
, index
,
398 mapping_gfp_mask(mapping
));
403 * If the page is uptodate, it is also locked. If it isn't
404 * uptodate, we can mark it uptodate if we are filling the
405 * full page. Otherwise we need to read it in first...
407 if (!PageUptodate(page
)) {
408 if (sd
->len
< PAGE_CACHE_SIZE
) {
409 ret
= mapping
->a_ops
->readpage(file
, page
);
415 if (!PageUptodate(page
)) {
417 * page got invalidated, repeat
419 if (!page
->mapping
) {
421 page_cache_release(page
);
428 WARN_ON(!PageLocked(page
));
429 SetPageUptodate(page
);
434 ret
= mapping
->a_ops
->prepare_write(file
, page
, 0, sd
->len
);
/* copy from the mapped pipe page into the destination page */
439 char *dst
= kmap_atomic(page
, KM_USER0
);
441 memcpy(dst
+ offset
, src
+ buf
->offset
, sd
->len
);
442 flush_dcache_page(page
);
443 kunmap_atomic(dst
, KM_USER0
);
446 ret
= mapping
->a_ops
->commit_write(file
, page
, 0, sd
->len
);
450 set_page_dirty(page
);
451 ret
= write_one_page(page
, 0);
456 page_cache_release(page
);
457 buf
->ops
->unmap(info
, buf
);
/*
 * Actor callback signature used by move_from_pipe(): consumes one pipe
 * buffer according to the splice_desc. (Garbled extraction; kept
 * byte-identical.)
 */
461 typedef int (splice_actor
)(struct pipe_inode_info
*, struct pipe_buffer
*,
462 struct splice_desc
*);
/*
 * NOTE(review): garbled partial extraction -- the main loop structure,
 * sd initialization, break/wait paths and the return are on missing
 * lines. Kept byte-identical.
 *
 * Drain the pipe behind @inode into @out by repeatedly invoking @actor
 * (pipe_to_file or pipe_to_sendpage) on the head buffer. Visible flow:
 * under PIPE_MUTEX, walk the circular buffer from info->curbuf; clamp
 * sd.len to sd.total_len; call the actor; on full consumption release
 * the buffer, advance curbuf, decrement nrbufs and sd.total_len; stop
 * conditions include no writers left and pending signals; wake writers
 * with SIGIO/POLL_OUT; the tail (presumably a sync path -- confirm)
 * takes and drops the output inode's i_mutex.
 */
464 static ssize_t
move_from_pipe(struct inode
*inode
, struct file
*out
,
465 size_t len
, unsigned int flags
,
468 struct pipe_inode_info
*info
;
469 int ret
, do_wakeup
, err
;
470 struct splice_desc sd
;
480 mutex_lock(PIPE_MUTEX(*inode
));
482 info
= inode
->i_pipe
;
484 int bufs
= info
->nrbufs
;
/* head of the circular buffer ring */
487 int curbuf
= info
->curbuf
;
488 struct pipe_buffer
*buf
= info
->bufs
+ curbuf
;
489 struct pipe_buf_operations
*ops
= buf
->ops
;
/* never hand the actor more than the remaining request */
492 if (sd
.len
> sd
.total_len
)
493 sd
.len
= sd
.total_len
;
495 err
= actor(info
, buf
, &sd
);
/* -ENODATA is not propagated as an error on the first failure */
497 if (!ret
&& err
!= -ENODATA
)
504 buf
->offset
+= sd
.len
;
/* buffer fully consumed: release it and advance the ring */
508 ops
->release(info
, buf
);
509 curbuf
= (curbuf
+ 1) & (PIPE_BUFFERS
- 1);
510 info
->curbuf
= curbuf
;
511 info
->nrbufs
= --bufs
;
516 sd
.total_len
-= sd
.len
;
523 if (!PIPE_WRITERS(*inode
))
525 if (!PIPE_WAITING_WRITERS(*inode
)) {
530 if (signal_pending(current
)) {
537 wake_up_interruptible_sync(PIPE_WAIT(*inode
));
538 kill_fasync(PIPE_FASYNC_WRITERS(*inode
),SIGIO
,POLL_OUT
);
545 mutex_unlock(PIPE_MUTEX(*inode
));
548 wake_up_interruptible(PIPE_WAIT(*inode
));
549 kill_fasync(PIPE_FASYNC_WRITERS(*inode
), SIGIO
, POLL_OUT
);
552 mutex_lock(&out
->f_mapping
->host
->i_mutex
);
554 mutex_unlock(&out
->f_mapping
->host
->i_mutex
);
/*
 * Splice a pipe into a regular file: thin wrapper that runs
 * move_from_pipe() with the pipe_to_file actor. (Garbled extraction;
 * braces on missing lines; kept byte-identical.)
 */
559 ssize_t
generic_file_splice_write(struct inode
*inode
, struct file
*out
,
560 size_t len
, unsigned int flags
)
562 return move_from_pipe(inode
, out
, len
, flags
, pipe_to_file
);
/*
 * Splice a pipe into a socket: thin wrapper that runs move_from_pipe()
 * with the pipe_to_sendpage actor. (Garbled extraction; braces on
 * missing lines; kept byte-identical.)
 */
565 ssize_t
generic_splice_sendpage(struct inode
*inode
, struct file
*out
,
566 size_t len
, unsigned int flags
)
568 return move_from_pipe(inode
, out
, len
, flags
, pipe_to_sendpage
)
;
/*
 * Module exports for the generic splice helpers. Only the file
 * read/write variants are exported in this fragment; an export for
 * generic_splice_sendpage, if any, would be on a missing line.
 */
571 EXPORT_SYMBOL(generic_file_splice_write
);
572 EXPORT_SYMBOL(generic_file_splice_read
);
/*
 * NOTE(review): garbled partial extraction -- error returns (-EINVAL /
 * -EBADF, presumably -- confirm) and the pos setup are on missing lines.
 * Kept byte-identical.
 *
 * Splice from a pipe inode to an output file: checks that the file has a
 * ->splice_write op and FMODE_WRITE, runs rw_verify_area(WRITE), then
 * dispatches to the file's ->splice_write().
 */
574 static long do_splice_from(struct inode
*pipe
, struct file
*out
, size_t len
,
580 if (!out
->f_op
|| !out
->f_op
->splice_write
)
583 if (!(out
->f_mode
& FMODE_WRITE
))
587 ret
= rw_verify_area(WRITE
, out
, &pos
, len
);
588 if (unlikely(ret
< 0))
591 return out
->f_op
->splice_write(pipe
, out
, len
, flags
);
/*
 * NOTE(review): garbled partial extraction -- error returns, the pos
 * setup and the use of 'left' to clamp len are on missing lines. Kept
 * byte-identical.
 *
 * Splice from an input file to a pipe inode: checks ->splice_read and
 * FMODE_READ, runs rw_verify_area(READ), bails if f_pos is at/past EOF
 * (via i_size_read), computes the bytes left to EOF, and dispatches to
 * the file's ->splice_read().
 */
594 static long do_splice_to(struct file
*in
, struct inode
*pipe
, size_t len
,
597 loff_t pos
, isize
, left
;
600 if (!in
->f_op
|| !in
->f_op
->splice_read
)
603 if (!(in
->f_mode
& FMODE_READ
))
607 ret
= rw_verify_area(READ
, in
, &pos
, len
);
608 if (unlikely(ret
< 0))
/* nothing to read at or beyond EOF */
611 isize
= i_size_read(in
->f_mapping
->host
);
612 if (unlikely(in
->f_pos
>= isize
))
615 left
= isize
- in
->f_pos
;
619 return in
->f_op
->splice_read(in
, pipe
, len
, flags
);
/*
 * NOTE(review): garbled partial extraction -- the S_ISFIFO checks that
 * presumably select which branch runs, and the fallthrough error return,
 * are on missing lines. Kept byte-identical.
 *
 * Determine splice direction: if the input is the pipe, splice from it
 * to the output file (do_splice_from); if the output is the pipe,
 * splice the input file into it (do_splice_to).
 */
622 static long do_splice(struct file
*in
, struct file
*out
, size_t len
,
627 pipe
= in
->f_dentry
->d_inode
;
629 return do_splice_from(pipe
, out
, len
, flags
);
631 pipe
= out
->f_dentry
->d_inode
;
633 return do_splice_to(in
, pipe
, len
, flags
);
/*
 * NOTE(review): garbled partial extraction -- the error initialization
 * (-EBADF, presumably), null-checks on the fget_light results and the
 * final return are on missing lines. Kept byte-identical.
 *
 * splice(2) syscall entry: look up both fds with fget_light, require
 * FMODE_READ on the input and FMODE_WRITE on the output, perform the
 * transfer via do_splice(), and drop both file references with
 * fput_light on the way out.
 */
638 asmlinkage
long sys_splice(int fdin
, int fdout
, size_t len
, unsigned int flags
)
641 struct file
*in
, *out
;
642 int fput_in
, fput_out
;
648 in
= fget_light(fdin
, &fput_in
);
650 if (in
->f_mode
& FMODE_READ
) {
651 out
= fget_light(fdout
, &fput_out
);
653 if (out
->f_mode
& FMODE_WRITE
)
654 error
= do_splice(in
, out
, len
, flags
);
655 fput_light(out
, fput_out
);
659 fput_light(in
, fput_in
);