/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
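
/*
 * Worked example (illustrative; the exact numbers depend on the build):
 * with PIPE_DEF_BUFFERS = 16 and INR_OPEN_CUR = 1024, the default soft
 * limit is 16 * 1024 = 16384 pages, i.e. 64 MiB with 4 KiB pages, counted
 * across all pipes owned by a single unprivileged user.
 */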
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
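
/*
 * Sketch of why the address ordering above cannot deadlock (illustrative,
 * not kernel code): two tasks locking the same pair always take the
 * lower-addressed pipe first, so the classic AB/BA inversion is ruled out.
 *
 *	pipe_double_lock(a, b);		task 1
 *	pipe_double_lock(b, a);		task 2 - also locks min(a, b) first
 */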
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
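
/*
 * Userspace sketch of packet mode as selected by O_DIRECT above
 * (illustrative only): each write() becomes one packet, and a read()
 * consumes at most one packet, even with a larger buffer.
 *
 *	int fds[2];
 *	char buf[4096];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));	returns 2 ("ab"), not 4
 */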
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
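
/*
 * Userspace sketch of the FIONREAD path above (illustrative only): the
 * count is the number of bytes currently buffered in the pipe.
 *
 *	int fds[2], avail;
 *	pipe(fds);
 *	write(fds[1], "hello", 5);
 *	ioctl(fds[0], FIONREAD, &avail);	avail is now 5
 */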
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
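
/*
 * Userspace sketch of the mask computed above (illustrative only):
 * POLLIN | POLLRDNORM once data is buffered, POLLOUT | POLLWRNORM while
 * there is a free slot.
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *	poll(&pfd, 1, -1);	returns once a writer wakes pipe->wait
 */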
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);

	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = pipe_max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f)) {
		err = PTR_ERR(f);
		goto err_dentry;
	}

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0])) {
		err = PTR_ERR(res[0]);
		goto err_file;
	}

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
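
/*
 * Userspace sketch of the two entry points above (illustrative only):
 * pipe() is simply pipe2() with flags == 0, and __do_pipe_flags() rejects
 * anything outside O_CLOEXEC | O_NONBLOCK | O_DIRECT.
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		read end is fds[0], write end is fds[1]
 *	}
 */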
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
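
/*
 * Userspace sketch of the open rules implemented above (illustrative
 * only), for a FIFO created with mkfifo():
 *
 *	open("fifo", O_RDONLY | O_NONBLOCK);	succeeds with no writer
 *	open("fifo", O_WRONLY | O_NONBLOCK);	fails with ENXIO, no reader
 *	open("fifo", O_RDWR);			never blocks
 */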
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	if (size < pipe_min_size)
		size = pipe_min_size;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages == 0)
		return 0;

	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
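
/*
 * Worked example of the rounding above (assuming 4 KiB pages, so
 * PAGE_SHIFT == 12): round_pipe_size(5000) needs (5000 + 4095) >> 12 = 2
 * pages, and roundup_pow_of_two(2) << 12 = 8192. A request needing 3
 * pages would round up to 4 pages, i.e. 16384 bytes.
 */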
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	if (size == 0)
		return -EINVAL;
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}
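
/*
 * Worked example of the unwrap in pipe_set_size() (illustrative): with
 * pipe->buffers = 8, curbuf = 6 and nrbufs = 4, the live buffers occupy
 * slots 6, 7, 0, 1. Then tail = (6 + 4) & 7 = 2 and head = 4 - 2 = 2, so
 * slots 6..7 land in bufs[0..1], slots 0..1 in bufs[2..3], and curbuf
 * restarts at zero in the enlarged array.
 */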
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	unsigned int rounded_pipe_max_size;
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	rounded_pipe_max_size = round_pipe_size(pipe_max_size);
	if (rounded_pipe_max_size == 0)
		return -EINVAL;

	pipe_max_size = rounded_pipe_max_size;
	return ret;
}
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
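
/*
 * Userspace sketch of the commands dispatched above (illustrative only):
 * F_SETPIPE_SZ returns the rounded-up capacity in bytes and F_GETPIPE_SZ
 * reads it back.
 *
 *	long sz = fcntl(fds[1], F_SETPIPE_SZ, 5000);	8192 with 4 KiB pages
 *	sz = fcntl(fds[1], F_GETPIPE_SZ);		also 8192
 */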
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);