/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
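
/*
 * Editorial sketch, not part of the original file: with the start+len
 * scheme above, a reader always drains pipe->bufs[pipe->curbuf] and a
 * writer appends at (curbuf + nrbufs) modulo PIPE_BUFFERS.  The two
 * helpers below only illustrate that index arithmetic; their names are
 * ours, not kernel API.
 */
static inline int pipe_ring_first(int curbuf)
{
	/* slot the next read drains; PIPE_BUFFERS is a power of two */
	return curbuf & (PIPE_BUFFERS - 1);
}

static inline int pipe_ring_next_free(int curbuf, int nrbufs)
{
	/* slot the next write fills, wrapping around the ring */
	return (curbuf + nrbufs) & (PIPE_BUFFERS - 1);
}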
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait,
			TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
	schedule();
	finish_wait(&pipe->wait, &wait);
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);
}
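
/*
 * Editorial note, not in the original file: pipe_wait() must be entered
 * with i_mutex held and is always called from a retry loop, since the
 * lock is dropped while sleeping.  A minimal sketch of the calling
 * pattern used by the read/write paths below (the condition is
 * illustrative only):
 *
 *	mutex_lock(&inode->i_mutex);
 *	for (;;) {
 *		if (data_or_space_available(pipe))
 *			break;
 *		pipe_wait(pipe);	(drops and re-takes i_mutex)
 *	}
 *	mutex_unlock(&inode->i_mutex);
 */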
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (copy_from_user(to, iov->iov_base, copy))
			return -EFAULT;
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (copy_to_user(iov->iov_base, from, copy))
			return -EFAULT;
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
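
/*
 * Editorial sketch, not in the original file: because the helpers above
 * consume the iovec in place (advancing iov_base and iov_len), a caller
 * can split one request across several copies, e.g.:
 *
 *	pipe_iov_copy_to_user(iov, addr1, n1);	(fills the front segments)
 *	pipe_iov_copy_to_user(iov, addr2, n2);	(resumes where n1 stopped)
 *
 * This is exactly how pipe_readv() below walks one pipe_buffer at a time.
 */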
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	buf->flags &= ~PIPE_BUF_FLAG_STOLEN;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}
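
/*
 * Editorial sketch, not in the original file: the one-deep cache above
 * pairs with the allocation in pipe_writev() below, roughly:
 *
 *	struct page *page = pipe->tmp_page;	(reuse the parked page)
 *	if (!page)
 *		page = alloc_page(GFP_HIGHUSER);	(slow path)
 *	...
 *	pipe->tmp_page = NULL;	(page now owned by a pipe_buffer)
 */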
static void * anon_pipe_buf_map(struct file *file, struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	return kmap(buf->page);
}

static void anon_pipe_buf_unmap(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	buf->flags |= PIPE_BUF_FLAG_STOLEN;
	return 0;
}
static struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge	= 1,
	.map		= anon_pipe_buf_map,
	.unmap		= anon_pipe_buf_unmap,
	.release	= anon_pipe_buf_release,
	.steal		= anon_pipe_buf_steal,
};
static ssize_t
pipe_readv(struct file *filp, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;

		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error;

			if (chars > total_len)
				chars = total_len;

			addr = ops->map(filp, pipe, buf);
			if (IS_ERR(addr)) {
				if (!ret)
					ret = PTR_ERR(addr);
				break;
			}
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
			ops->unmap(pipe, buf);
			if (unlikely(error)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);

	return ret;
}
static ssize_t
pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = count };

	return pipe_readv(filp, &iov, 1, ppos);
}
static ssize_t
pipe_writev(struct file *filp, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(PIPE_BUFFERS-1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			void *addr;
			int error;

			addr = ops->map(filp, pipe, buf);
			if (IS_ERR(addr)) {
				error = PTR_ERR(addr);
				goto out;
			}
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars);
			ops->unmap(pipe, buf);
			ret = error;
			do_wakeup = 1;
			if (error)
				goto out;
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int error;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			error = pipe_iov_copy_from_user(kmap(page), iov, chars);
			kunmap(page);
			if (unlikely(error)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < PIPE_BUFFERS)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);

	return ret;
}
static ssize_t
pipe_write(struct file *filp, const char __user *buf,
	   size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };

	return pipe_writev(filp, &iov, 1, ppos);
}
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}
static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}
static int
pipe_ioctl(struct inode *pino, struct file *filp,
	   unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(&inode->i_mutex);
			pipe = inode->i_pipe;
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (PIPE_BUFFERS-1);
			}
			mutex_unlock(&inode->i_mutex);

			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}
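
/*
 * Editorial example, not part of the original file: FIONREAD as seen
 * from userland; it reports how many bytes are currently buffered in
 * the pipe.
 *
 *	#include <sys/ioctl.h>
 *
 *	int queued;
 *	if (ioctl(pipefd[0], FIONREAD, &queued) == 0)
 *		printf("%d bytes ready\n", queued);
 */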
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
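
/*
 * Editorial example, not part of the original file: the POLLERR rule
 * above means a write-end poll from userland reports an error once all
 * readers are gone:
 *
 *	struct pollfd pfd = { .fd = pipefd[1], .events = POLLOUT };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLERR)
 *		...	(no readers left; write(2) would raise SIGPIPE)
 */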
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}
static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);

	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);

	if (retval >= 0)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);

	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	pipe_read_fasync(-1, filp, 0);
	return pipe_release(inode, 1, 0);
}
static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	pipe_write_fasync(-1, filp, 0);
	return pipe_release(inode, 0, 1);
}
static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	pipe_rdwr_fasync(-1, filp, 0);
	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}
static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	/* We could have perhaps used atomic_t, but this and friends
	   below are the only places.  So it doesn't seem worthwhile.  */
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->readers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	if (filp->f_mode & FMODE_READ)
		inode->i_pipe->readers++;
	if (filp->f_mode & FMODE_WRITE)
		inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}
/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 */
const struct file_operations read_fifo_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};
const struct file_operations write_fifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};
const struct file_operations rdwr_fifo_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
static struct file_operations read_pipe_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};
static struct file_operations write_pipe_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};
static struct file_operations rdwr_pipe_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->inode = inode;
	}

	return pipe;
}
void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe);
}
void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}
static struct vfsmount *pipe_mnt __read_mostly;

static int pipefs_delete_dentry(struct dentry *dentry)
{
	return 1;
}
static struct dentry_operations pipefs_dentry_operations = {
	.d_delete	= pipefs_delete_dentry,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipe_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_blksize = PAGE_SIZE;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int do_pipe(int *fd)
{
	struct qstr this;
	char name[32];
	struct dentry *dentry;
	struct inode * inode;
	struct file *f1, *f2;
	int error;
	int i, j;

	error = -ENFILE;
	f1 = get_empty_filp();
	if (!f1)
		goto no_files;

	f2 = get_empty_filp();
	if (!f2)
		goto close_f1;

	inode = get_pipe_inode();
	if (!inode)
		goto close_f12;

	error = get_unused_fd();
	if (error < 0)
		goto close_f12_inode;
	i = error;

	error = get_unused_fd();
	if (error < 0)
		goto close_f12_inode_i;
	j = error;

	error = -ENOMEM;
	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len = strlen(name);
	this.hash = inode->i_ino; /* will go */
	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto close_f12_inode_i_j;

	dentry->d_op = &pipefs_dentry_operations;
	d_add(dentry, inode);
	f1->f_vfsmnt = f2->f_vfsmnt = mntget(mntget(pipe_mnt));
	f1->f_dentry = f2->f_dentry = dget(dentry);
	f1->f_mapping = f2->f_mapping = inode->i_mapping;

	/* read file */
	f1->f_pos = f2->f_pos = 0;
	f1->f_flags = O_RDONLY;
	f1->f_op = &read_pipe_fops;
	f1->f_mode = FMODE_READ;
	f1->f_version = 0;

	/* write file */
	f2->f_flags = O_WRONLY;
	f2->f_op = &write_pipe_fops;
	f2->f_mode = FMODE_WRITE;
	f2->f_version = 0;

	fd_install(i, f1);
	fd_install(j, f2);
	fd[0] = i;
	fd[1] = j;

	return 0;

close_f12_inode_i_j:
	put_unused_fd(j);
close_f12_inode_i:
	put_unused_fd(i);
close_f12_inode:
	free_pipe_info(inode);
	iput(inode);
close_f12:
	put_filp(f2);
close_f1:
	put_filp(f1);
no_files:
	return error;
}
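
/*
 * Editorial example, not part of the original file: do_pipe() backs the
 * pipe(2) system call; fd[0] comes back read-only on read_pipe_fops and
 * fd[1] write-only on write_pipe_fops.  From userland:
 *
 *	int fd[2];
 *	char buf[2];
 *
 *	if (pipe(fd) == 0) {
 *		write(fd[1], "hi", 2);
 *		read(fd[0], buf, 2);
 *	}
 */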
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct super_block *
pipefs_get_sb(struct file_system_type *fs_type, int flags,
	      const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
}
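
/*
 * Editorial note, not part of the original file: the "pipe:" d_name set
 * above is what readlink(2) reports for pipe fds under /proc, e.g.:
 *
 *	char buf[64];
 *	ssize_t n = readlink("/proc/self/fd/3", buf, sizeof(buf) - 1);
 *	(buf now holds something like "pipe:[1234]")
 */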
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.get_sb		= pipefs_get_sb,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}

	return err;
}
static void __exit exit_pipe_fs(void)
{
	unregister_filesystem(&pipe_fs_type);
	mntput(pipe_mnt);
}
fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);