[PATCH] md: make sure 64bit fields in version-1 metadata are 64-bit aligned
[linux/fpc-iii.git] / fs / splice.c
blobbfa42a277bb874ac94a34db4d3c7f593afc5410c
1 /*
2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files and fixing the initial implementation
13 * bugs.
15 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
19 #include <linux/fs.h>
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h>
27 #include <linux/module.h>
28 #include <linux/syscalls.h>
31 * Passed to the actors
/*
 * splice_desc carries the per-call state handed to a splice_actor
 * (pipe_to_file / pipe_to_sendpage): how much data to move this pass,
 * the SPLICE_F_* modifier flags, and the destination file + offset.
 */
33 struct splice_desc {
34 unsigned int len, total_len; /* current and remaining length */
35 unsigned int flags; /* splice flags */
36 struct file *file; /* file to read/write */
37 loff_t pos; /* file position */
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
47 struct pipe_buffer *buf)
49 struct page *page = buf->page;
50 struct address_space *mapping = page_mapping(page);
52 WARN_ON(!PageLocked(page));
53 WARN_ON(!PageUptodate(page));
56 * At least for ext2 with nobh option, we need to wait on writeback
57 * completing on this page, since we'll remove it from the pagecache.
58 * Otherwise truncate wont wait on the page, allowing the disk
59 * blocks to be reused by someone else before we actually wrote our
60 * data to them. fs corruption ensues.
62 wait_on_page_writeback(page);
64 if (PagePrivate(page))
65 try_to_release_page(page, mapping_gfp_mask(mapping));
67 if (!remove_mapping(mapping, page))
68 return 1;
70 buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
71 return 0;
74 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75 struct pipe_buffer *buf)
77 page_cache_release(buf->page);
78 buf->page = NULL;
79 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
82 static void *page_cache_pipe_buf_map(struct file *file,
83 struct pipe_inode_info *info,
84 struct pipe_buffer *buf)
86 struct page *page = buf->page;
88 lock_page(page);
90 if (!PageUptodate(page)) {
91 unlock_page(page);
92 return ERR_PTR(-EIO);
95 if (!page->mapping) {
96 unlock_page(page);
97 return ERR_PTR(-ENODATA);
100 return kmap(buf->page);
103 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
104 struct pipe_buffer *buf)
106 unlock_page(buf->page);
107 kunmap(buf->page);
/*
 * Buffer operations for pipe buffers whose pages come from the page
 * cache. can_merge = 0: presumably because these pages are shared with
 * the page cache, later pipe writes must not append into them — TODO
 * confirm against pipe_writev's merge logic.
 */
110 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
111 .can_merge = 0,
112 .map = page_cache_pipe_buf_map,
113 .unmap = page_cache_pipe_buf_unmap,
114 .release = page_cache_pipe_buf_release,
115 .steal = page_cache_pipe_buf_steal,
119 * Pipe output worker. This sets up our pipe format with the page cache
120 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
/*
 * Queues up to @nr_pages pages into @inode's pipe ring, starting @offset
 * bytes into the first page and moving at most @len bytes in total.
 * Takes ownership of the page references in @pages (unconsumed ones are
 * dropped at the end). Returns bytes queued, or a negative errno if
 * nothing could be queued.
 */
122 static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
123 int nr_pages, unsigned long offset,
124 unsigned long len, unsigned int flags)
126 struct pipe_inode_info *info;
127 int ret, do_wakeup, i;
129 ret = 0;
130 do_wakeup = 0;
131 i = 0;
133 mutex_lock(PIPE_MUTEX(*inode));
135 info = inode->i_pipe;
136 for (;;) {
137 int bufs;
/* no readers left: behave like a write to a broken pipe */
139 if (!PIPE_READERS(*inode)) {
140 send_sig(SIGPIPE, current, 0);
141 if (!ret)
142 ret = -EPIPE;
143 break;
/* room in the ring: install the next page as a pipe_buffer */
146 bufs = info->nrbufs;
147 if (bufs < PIPE_BUFFERS) {
148 int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
149 struct pipe_buffer *buf = info->bufs + newbuf;
150 struct page *page = pages[i++];
151 unsigned long this_len;
/* only the first page starts at a non-zero offset */
153 this_len = PAGE_CACHE_SIZE - offset;
154 if (this_len > len)
155 this_len = len;
157 buf->page = page;
158 buf->offset = offset;
159 buf->len = this_len;
160 buf->ops = &page_cache_pipe_buf_ops;
161 info->nrbufs = ++bufs;
162 do_wakeup = 1;
164 ret += this_len;
165 len -= this_len;
166 offset = 0;
167 if (!--nr_pages)
168 break;
169 if (!len)
170 break;
171 if (bufs < PIPE_BUFFERS)
172 continue;
174 break;
/* pipe full and caller asked not to block */
177 if (flags & SPLICE_F_NONBLOCK) {
178 if (!ret)
179 ret = -EAGAIN;
180 break;
183 if (signal_pending(current)) {
184 if (!ret)
185 ret = -ERESTARTSYS;
186 break;
/* wake sleeping readers before we block waiting for space */
189 if (do_wakeup) {
190 wake_up_interruptible_sync(PIPE_WAIT(*inode));
191 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
192 POLL_IN);
193 do_wakeup = 0;
196 PIPE_WAITING_WRITERS(*inode)++;
197 pipe_wait(inode);
198 PIPE_WAITING_WRITERS(*inode)--;
201 mutex_unlock(PIPE_MUTEX(*inode));
/* final wakeup after dropping the pipe mutex */
203 if (do_wakeup) {
204 wake_up_interruptible(PIPE_WAIT(*inode));
205 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
/* drop the references on any pages we did not consume */
208 while (i < nr_pages)
209 page_cache_release(pages[i++]);
211 return ret;
/*
 * One pass of splice-read: gather up to @len bytes worth of pagecache
 * pages (capped at PIPE_BUFFERS) from @in starting at in->f_pos, reading
 * in any holes, and queue the leading contiguous run into @pipe via
 * move_to_pipe(). Returns bytes queued, 0 if no page was usable, or a
 * negative errno from move_to_pipe().
 */
214 static int __generic_file_splice_read(struct file *in, struct inode *pipe,
215 size_t len, unsigned int flags)
217 struct address_space *mapping = in->f_mapping;
218 unsigned int offset, nr_pages;
219 struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
220 struct page *page;
221 pgoff_t index, pidx;
222 int i, j;
/* split the file position into page index and intra-page offset */
224 index = in->f_pos >> PAGE_CACHE_SHIFT;
225 offset = in->f_pos & ~PAGE_CACHE_MASK;
226 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
228 if (nr_pages > PIPE_BUFFERS)
229 nr_pages = PIPE_BUFFERS;
232 * initiate read-ahead on this page range
234 do_page_cache_readahead(mapping, in, index, nr_pages);
237 * Get as many pages from the page cache as possible..
238 * Start IO on the page cache entries we create (we
239 * can assume that any pre-existing ones we find have
240 * already had IO started on them).
242 i = find_get_pages(mapping, index, nr_pages, pages);
245 * common case - we found all pages and they are contiguous,
246 * kick them off
248 if (i && (pages[i - 1]->index == index + i - 1))
249 goto splice_them;
252 * fill shadow[] with pages at the right locations, so we only
253 * have to fill holes
255 memset(shadow, 0, nr_pages * sizeof(struct page *));
256 for (j = 0; j < i; j++)
257 shadow[pages[j]->index - index] = pages[j];
260 * now fill in the holes
262 for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
263 int error;
265 if (shadow[i])
266 continue;
269 * no page there, look one up / create it
271 page = find_or_create_page(mapping, pidx,
272 mapping_gfp_mask(mapping));
273 if (!page)
274 break;
/* page not uptodate (e.g. freshly created): kick off the read */
276 if (PageUptodate(page))
277 unlock_page(page);
278 else {
279 error = mapping->a_ops->readpage(in, page);
281 if (unlikely(error)) {
282 page_cache_release(page);
283 break;
286 shadow[i] = page;
/* not even the first page is usable: drop everything, report nothing */
289 if (!i) {
290 for (i = 0; i < nr_pages; i++) {
291 if (shadow[i])
292 page_cache_release(shadow[i]);
294 return 0;
/* the first i slots now form a contiguous run; splice those */
297 memcpy(pages, shadow, i * sizeof(struct page *));
300 * Now we splice them into the pipe..
302 splice_them:
303 return move_to_pipe(pipe, pages, i, offset, len, flags);
307 * generic_file_splice_read - splice data from file to a pipe
308 * @in: file to splice from
309 * @pipe: pipe to splice to
310 * @len: number of bytes to splice
311 * @flags: splice modifier flags
313 * Will read pages from given file and fill them into a pipe.
 *
 * Returns the total bytes spliced if any progress was made, otherwise
 * the 0 or negative return of the last __generic_file_splice_read()
 * attempt.
316 ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
317 size_t len, unsigned int flags)
319 ssize_t spliced;
320 int ret;
322 ret = 0;
323 spliced = 0;
324 while (len) {
325 ret = __generic_file_splice_read(in, pipe, len, flags);
327 if (ret <= 0)
328 break;
/* advance the file position by what was actually spliced */
330 in->f_pos += ret;
331 len -= ret;
332 spliced += ret;
/* in non-blocking mode, make a single pass only */
334 if (!(flags & SPLICE_F_NONBLOCK))
335 continue;
336 ret = -EAGAIN;
337 break;
/* partial progress wins over the final error code */
340 if (spliced)
341 return spliced;
343 return ret;
346 EXPORT_SYMBOL(generic_file_splice_read);
349 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
350 * using sendpage().
 *
 * splice_actor for socket destinations. Returns 0 when the full chunk
 * was accepted by ->sendpage, the ->map error, or -EIO on a short send.
352 static int pipe_to_sendpage(struct pipe_inode_info *info,
353 struct pipe_buffer *buf, struct splice_desc *sd)
355 struct file *file = sd->file;
356 loff_t pos = sd->pos;
357 unsigned int offset;
358 ssize_t ret;
359 void *ptr;
360 int more;
363 * sub-optimal, but we are limited by the pipe ->map. we don't
364 * need a kmap'ed buffer here, we just want to make sure we
365 * have the page pinned if the pipe page originates from the
366 * page cache
368 ptr = buf->ops->map(file, info, buf);
369 if (IS_ERR(ptr))
370 return PTR_ERR(ptr);
372 offset = pos & ~PAGE_CACHE_MASK;
/* flag MORE if the caller asked for it, or more splice data follows */
373 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
375 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
377 buf->ops->unmap(info, buf);
/* a short send counts as failure: all-or-nothing per chunk */
378 if (ret == sd->len)
379 return 0;
381 return -EIO;
385 * This is a little more tricky than the file -> pipe splicing. There are
386 * basically three cases:
388 * - Destination page already exists in the address space and there
389 * are users of it. For that case we have no other option that
390 * copying the data. Tough luck.
391 * - Destination page already exists in the address space, but there
392 * are no users of it. Make sure it's uptodate, then drop it. Fall
393 * through to last case.
394 * - Destination page does not exist, we can add the pipe page to
395 * the page cache and avoid the copy.
397 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
398 * sd->flags), we attempt to migrate pages from the pipe to the output
399 * file address space page cache. This is possible if no one else has
400 * the pipe page referenced outside of the pipe and page cache. If
401 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
402 * a new page in the output file page cache and fill/dirty that.
404 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
405 struct splice_desc *sd)
407 struct file *file = sd->file;
408 struct address_space *mapping = file->f_mapping;
409 gfp_t gfp_mask = mapping_gfp_mask(mapping);
410 unsigned int offset;
411 struct page *page;
412 pgoff_t index;
413 char *src;
414 int ret;
417 * after this, page will be locked and unmapped
419 src = buf->ops->map(file, info, buf);
420 if (IS_ERR(src))
421 return PTR_ERR(src);
423 index = sd->pos >> PAGE_CACHE_SHIFT;
424 offset = sd->pos & ~PAGE_CACHE_MASK;
427 * reuse buf page, if SPLICE_F_MOVE is set
429 if (sd->flags & SPLICE_F_MOVE) {
431 * If steal succeeds, buf->page is now pruned from the vm
432 * side (LRU and page cache) and we can reuse it.
434 if (buf->ops->steal(info, buf))
435 goto find_page;
437 page = buf->page;
438 if (add_to_page_cache(page, mapping, index, gfp_mask))
439 goto find_page;
441 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
442 lru_cache_add(page);
443 } else {
444 find_page:
445 ret = -ENOMEM;
446 page = find_or_create_page(mapping, index, gfp_mask);
447 if (!page)
448 goto out;
451 * If the page is uptodate, it is also locked. If it isn't
452 * uptodate, we can mark it uptodate if we are filling the
453 * full page. Otherwise we need to read it in first...
455 if (!PageUptodate(page)) {
456 if (sd->len < PAGE_CACHE_SIZE) {
457 ret = mapping->a_ops->readpage(file, page);
458 if (unlikely(ret))
459 goto out;
461 lock_page(page);
463 if (!PageUptodate(page)) {
465 * page got invalidated, repeat
467 if (!page->mapping) {
468 unlock_page(page);
469 page_cache_release(page);
470 goto find_page;
472 ret = -EIO;
473 goto out;
475 } else {
476 WARN_ON(!PageLocked(page));
477 SetPageUptodate(page);
482 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
483 if (ret == AOP_TRUNCATED_PAGE) {
484 page_cache_release(page);
485 goto find_page;
486 } else if (ret)
487 goto out;
489 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
490 char *dst = kmap_atomic(page, KM_USER0);
492 memcpy(dst + offset, src + buf->offset, sd->len);
493 flush_dcache_page(page);
494 kunmap_atomic(dst, KM_USER0);
497 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
498 if (ret == AOP_TRUNCATED_PAGE) {
499 page_cache_release(page);
500 goto find_page;
501 } else if (ret)
502 goto out;
504 balance_dirty_pages_ratelimited(mapping);
505 out:
506 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
507 page_cache_release(page);
508 unlock_page(page);
510 buf->ops->unmap(info, buf);
511 return ret;
/*
 * A splice_actor consumes up to sd->len bytes from one pipe_buffer,
 * directed by the splice_desc; it returns 0 on success or a negative
 * errno (move_from_pipe treats -ENODATA as retryable).
 */
514 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
515 struct splice_desc *);
518 * Pipe input worker. Most of this logic works like a regular pipe, the
519 * key here is the 'actor' worker passed in that actually moves the data
520 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
/*
 * Drains up to @len bytes from @inode's pipe through @actor into @out,
 * then publishes the new out->f_pos. Returns bytes consumed by the
 * actor, or a negative errno if nothing was moved.
 */
522 static ssize_t move_from_pipe(struct inode *inode, struct file *out,
523 size_t len, unsigned int flags,
524 splice_actor *actor)
526 struct pipe_inode_info *info;
527 int ret, do_wakeup, err;
528 struct splice_desc sd;
530 ret = 0;
531 do_wakeup = 0;
533 sd.total_len = len;
534 sd.flags = flags;
535 sd.file = out;
536 sd.pos = out->f_pos;
538 mutex_lock(PIPE_MUTEX(*inode));
540 info = inode->i_pipe;
541 for (;;) {
542 int bufs = info->nrbufs;
/* data queued: feed the head buffer to the actor */
544 if (bufs) {
545 int curbuf = info->curbuf;
546 struct pipe_buffer *buf = info->bufs + curbuf;
547 struct pipe_buf_operations *ops = buf->ops;
549 sd.len = buf->len;
550 if (sd.len > sd.total_len)
551 sd.len = sd.total_len;
/* -ENODATA from the actor is retryable and not reported to the caller */
553 err = actor(info, buf, &sd);
554 if (err) {
555 if (!ret && err != -ENODATA)
556 ret = err;
558 break;
561 ret += sd.len;
562 buf->offset += sd.len;
563 buf->len -= sd.len;
/* buffer fully consumed: release it and advance the ring */
564 if (!buf->len) {
565 buf->ops = NULL;
566 ops->release(info, buf);
567 curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
568 info->curbuf = curbuf;
569 info->nrbufs = --bufs;
570 do_wakeup = 1;
573 sd.pos += sd.len;
574 sd.total_len -= sd.len;
575 if (!sd.total_len)
576 break;
579 if (bufs)
580 continue;
/* pipe empty and no writers left: end of stream */
581 if (!PIPE_WRITERS(*inode))
582 break;
/* NOTE(review): if no writer is blocked mid-write, any partial
 * progress is returned instead of waiting — confirm this matches
 * intended pipe semantics */
583 if (!PIPE_WAITING_WRITERS(*inode)) {
584 if (ret)
585 break;
588 if (flags & SPLICE_F_NONBLOCK) {
589 if (!ret)
590 ret = -EAGAIN;
591 break;
594 if (signal_pending(current)) {
595 if (!ret)
596 ret = -ERESTARTSYS;
597 break;
/* wake sleeping writers before blocking for more data */
600 if (do_wakeup) {
601 wake_up_interruptible_sync(PIPE_WAIT(*inode));
602 kill_fasync(PIPE_FASYNC_WRITERS(*inode),SIGIO,POLL_OUT);
603 do_wakeup = 0;
606 pipe_wait(inode);
609 mutex_unlock(PIPE_MUTEX(*inode));
/* final writer wakeup after dropping the pipe mutex */
611 if (do_wakeup) {
612 wake_up_interruptible(PIPE_WAIT(*inode));
613 kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
/* publish the new position under the destination inode's mutex */
616 mutex_lock(&out->f_mapping->host->i_mutex);
617 out->f_pos = sd.pos;
618 mutex_unlock(&out->f_mapping->host->i_mutex);
619 return ret;
624 * generic_file_splice_write - splice data from a pipe to a file
625 * @inode: pipe inode
626 * @out: file to write to
627 * @len: number of bytes to splice
628 * @flags: splice modifier flags
630 * Will either move or copy pages (determined by @flags options) from
631 * the given pipe inode to the given file.
634 ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
635 size_t len, unsigned int flags)
637 struct address_space *mapping = out->f_mapping;
638 ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
641 * if file or inode is SYNC and we actually wrote some data, sync it
643 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
644 && ret > 0) {
645 struct inode *inode = mapping->host;
646 int err;
648 mutex_lock(&inode->i_mutex);
649 err = generic_osync_inode(mapping->host, mapping,
650 OSYNC_METADATA|OSYNC_DATA);
651 mutex_unlock(&inode->i_mutex);
653 if (err)
654 ret = err;
657 return ret;
660 EXPORT_SYMBOL(generic_file_splice_write);
663 * generic_splice_sendpage - splice data from a pipe to a socket
664 * @inode: pipe inode
665 * @out: socket to write to
666 * @len: number of bytes to splice
667 * @flags: splice modifier flags
669 * Will send @len bytes from the pipe to a network socket. No data copying
670 * is involved.
 *
 * Thin wrapper: drives move_from_pipe() with the pipe_to_sendpage actor.
 * Returns the number of bytes sent, or a negative errno.
673 ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
674 size_t len, unsigned int flags)
676 return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
679 EXPORT_SYMBOL(generic_splice_sendpage);
682 * Attempt to initiate a splice from pipe to file.
/*
 * Verifies that @out implements ->splice_write (-EINVAL otherwise), is
 * open for writing (-EBADF), and that the write range passes
 * rw_verify_area(); then delegates to the file's ->splice_write.
 * The verified @pos is local — the actual file position is updated by
 * the splice_write implementation (see move_from_pipe).
 */
684 static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
685 unsigned int flags)
687 loff_t pos;
688 int ret;
690 if (!out->f_op || !out->f_op->splice_write)
691 return -EINVAL;
693 if (!(out->f_mode & FMODE_WRITE))
694 return -EBADF;
696 pos = out->f_pos;
697 ret = rw_verify_area(WRITE, out, &pos, len);
698 if (unlikely(ret < 0))
699 return ret;
701 return out->f_op->splice_write(pipe, out, len, flags);
705 * Attempt to initiate a splice from a file to a pipe.
/*
 * Verifies that @in implements ->splice_read (-EINVAL otherwise), is
 * open for reading (-EBADF), and that the read range passes
 * rw_verify_area(). Clamps @len to the bytes remaining before EOF —
 * returning 0 outright if f_pos is already at or past the end — then
 * delegates to the file's ->splice_read.
 */
707 static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
708 unsigned int flags)
710 loff_t pos, isize, left;
711 int ret;
713 if (!in->f_op || !in->f_op->splice_read)
714 return -EINVAL;
716 if (!(in->f_mode & FMODE_READ))
717 return -EBADF;
719 pos = in->f_pos;
720 ret = rw_verify_area(READ, in, &pos, len);
721 if (unlikely(ret < 0))
722 return ret;
724 isize = i_size_read(in->f_mapping->host);
725 if (unlikely(in->f_pos >= isize))
726 return 0;
/* do not splice past end-of-file */
728 left = isize - in->f_pos;
729 if (left < len)
730 len = left;
732 return in->f_op->splice_read(in, pipe, len, flags);
736 * Determine where to splice to/from.
738 static long do_splice(struct file *in, struct file *out, size_t len,
739 unsigned int flags)
741 struct inode *pipe;
743 pipe = in->f_dentry->d_inode;
744 if (pipe->i_pipe)
745 return do_splice_from(pipe, out, len, flags);
747 pipe = out->f_dentry->d_inode;
748 if (pipe->i_pipe)
749 return do_splice_to(in, pipe, len, flags);
751 return -EINVAL;
/*
 * splice(2) system call: transfer up to @len bytes between two file
 * descriptors; one of them must refer to a pipe (checked in do_splice).
 * Returns the number of bytes transferred, 0 for a zero-length request,
 * or a negative errno (-EBADF if either fd is invalid or lacks the
 * required read/write mode).
 */
754 asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
756 long error;
757 struct file *in, *out;
758 int fput_in, fput_out;
760 if (unlikely(!len))
761 return 0;
/* default to -EBADF; overwritten only once both fds check out */
763 error = -EBADF;
764 in = fget_light(fdin, &fput_in);
765 if (in) {
766 if (in->f_mode & FMODE_READ) {
767 out = fget_light(fdout, &fput_out);
768 if (out) {
769 if (out->f_mode & FMODE_WRITE)
770 error = do_splice(in, out, len, flags);
771 fput_light(out, fput_out);
775 fput_light(in, fput_in);
778 return error;