/*
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;
/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
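/*
 * These limits are exported as sysctls: /proc/sys/fs/pipe-max-size and (in
 * kernels that carry the per-user accounting) /proc/sys/fs/pipe-user-pages-soft
 * and /proc/sys/fs/pipe-user-pages-hard.
 */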
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
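/*
 * pipe_double_lock() below takes the two pipe mutexes in pointer order and
 * with distinct lockdep subclasses, so two tasks that lock the same pair of
 * pipes cannot deadlock against each other.
 */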
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge	= 1,
	.confirm	= generic_pipe_buf_confirm,
	.release	= anon_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};
static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge	= 0,
	.confirm	= generic_pipe_buf_confirm,
	.release	= anon_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};
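/*
 * The pipe proper is a ring of pipe->buffers (always a power of two)
 * struct pipe_buffer slots in pipe->bufs: curbuf indexes the first
 * non-empty slot, nrbufs counts the slots in use, and all index
 * arithmetic wraps with "& (pipe->buffers - 1)".  The packet table
 * above differs from the anonymous one only in that its buffers are
 * never merged with a following write.
 */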
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error = ops->confirm(pipe, buf);
			if (error) {
				ret = error;
				goto out;
			}

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += chars;
			ret = chars;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
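/*
 * Illustrative userspace usage (not part of this file): FIONREAD reports the
 * number of bytes currently buffered in the pipe, e.g.
 *
 *	int n;
 *	if (ioctl(pipefd[0], FIONREAD, &n) == 0)
 *		printf("%d bytes ready\n", n);
 */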
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
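/*
 * Summary of the mask above: POLLIN/POLLRDNORM when data is buffered,
 * POLLOUT/POLLWRNORM while at least one slot is free, POLLHUP once all
 * writers are gone (and this reader has seen a writer, per f_version vs
 * w_counter), and POLLERR for a writer left with no readers.
 */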
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
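/*
 * pipe->files counts the struct files attached to this inode's pipe and is
 * protected by inode->i_lock; the pipe itself is only freed once the last
 * reference is dropped here.
 */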
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
static void account_pipe_buffers(struct pipe_inode_info *pipe,
				 unsigned long old, unsigned long new)
{
	atomic_long_add(new - old, &pipe->user->pipe_bufs);
}
static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
	return pipe_user_pages_soft &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}
static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
	return pipe_user_pages_hard &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}
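/*
 * These checks are consumed by alloc_pipe_info() and pipe_fcntl(): crossing
 * the soft limit drops a new pipe to a single buffer, while the hard limit
 * (when set) makes the allocation fail outright for unprivileged users.
 */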
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
		struct user_struct *user = get_current_user();

		if (!too_many_pipe_buffers_hard(user)) {
			if (too_many_pipe_buffers_soft(user))
				pipe_bufs = 1;
			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
		}

		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = pipe_bufs;
			pipe->user = user;
			account_pipe_buffers(pipe, 0, pipe_bufs);
			mutex_init(&pipe->mutex);
			return pipe;
		}
		free_uid(user);
		kfree(pipe);
	}

	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	account_pipe_buffers(pipe, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;
/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}
static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f))
		goto err_dentry;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0]))
		goto err_file;

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
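/*
 * Illustrative userspace usage (not part of this file):
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		... fds[0] is the read end, fds[1] the write end ...
 *	}
 */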
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read		= new_sync_read,
	.read_iter	= pipe_read,
	.write		= new_sync_write,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
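/*
 * Worked example (assuming 4 KiB pages): round_pipe_size(70000) needs 18
 * pages, which rounds up to 32 pages, i.e. 131072 bytes.
 */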
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);

	return ret;
}
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
			    too_many_pipe_buffers_soft(pipe->user)) &&
			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}
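/*
 * Illustrative userspace usage (not part of this file):
 *
 *	fcntl(pipefd[1], F_SETPIPE_SZ, 1024 * 1024);	/~ ask for a 1 MiB pipe ~/
 *	long sz = fcntl(pipefd[1], F_GETPIPE_SZ);	/~ actual (rounded) size ~/
 *
 * The value actually applied is rounded up to a power-of-two number of pages
 * and is capped at pipe_max_size for unprivileged callers.
 */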
static const struct super_operations pipefs_ops = {
	.destroy_inode	= free_inode_nonrcu,
	.statfs		= simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);