// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * empty.
 */
#define PIPE_MIN_DEF_BUFFERS 2
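/*
 * Illustrative userspace sketch (example only, not part of this file): the
 * jobserver pattern referenced above uses a pipe as a counting semaphore.
 * A worker reads one token byte to acquire a job slot and writes it back to
 * release the slot; with fewer than two buffers the write-back could block.
 * The helper name and the tokens[] pipe are hypothetical.
 */
#if 0 /* example only, never compiled here */
#include <unistd.h>

static void run_with_job_slot(int tokens[2], void (*work)(void))
{
	char token;

	if (read(tokens[0], &token, 1) != 1)	/* acquire: blocks until a token is free */
		return;
	work();
	write(tokens[1], &token, 1);		/* release: put the token back */
}
#endif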
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
static unsigned int pipe_max_size = 1048576;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
static unsigned long pipe_user_pages_hard;
static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
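/*
 * Illustrative sketch (example only): with free-running indices and a
 * power-of-two ring, occupancy is a plain modular difference and the mask is
 * applied only when a slot is dereferenced, mirroring the "tail & mask"
 * lookups used throughout this file. The helper name is hypothetical.
 */
#if 0 /* example only, never compiled here */
static struct pipe_buffer *example_slot(struct pipe_inode_info *pipe,
					unsigned int index)
{
	/* The index wraps naturally at UINT_MAX; mask only on dereference. */
	return &pipe->bufs[index & (pipe->ring_size - 1)];
}
#endif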
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
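/*
 * Illustrative sketch (example only): pipe_double_lock() is an instance of
 * the generic address-ordered pattern for taking two locks of the same class
 * without ABBA deadlocks. The helper below is hypothetical, not kernel API.
 */
#if 0 /* example only, never compiled here */
static void lock_pair_ordered(struct mutex *a, struct mutex *b)
{
	if (a < b) {
		mutex_lock(a);
		mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(b);
		mutex_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}
#endif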
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}
static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}
/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	it wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}
static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf,
					    unsigned int tail)
{
	pipe_buf_release(pipe, buf);

	/*
	 * If the pipe has a watch_queue, we need additional protection
	 * by the spinlock because notifications get posted with only
	 * this spinlock, no mutex
	 */
	if (pipe_has_watch_queue(pipe)) {
		spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
		if (buf->flags & PIPE_BUF_FLAG_LOSS)
			pipe->note_loss = true;
#endif
		pipe->tail = ++tail;
		spin_unlock_irq(&pipe->rd_wait.lock);
		return tail;
	}

	/*
	 * Without a watch_queue, we can simply increment the tail
	 * without the spinlock - the mutex is enough.
	 */
	pipe->tail = ++tail;
	return tail;
}
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len)
				tail = pipe_update_tail(pipe, buf, tail);
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
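/*
 * Illustrative userspace sketch (example only): O_DIRECT at pipe2() time is
 * what is_packetized() tests for. Each write() then becomes one packet
 * (PIPE_BUF_FLAG_PACKET) and each read() consumes at most one packet.
 */
#if 0 /* example only, never compiled here */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int make_packet_pipe(int fds[2])
{
	return pipe2(fds, O_DIRECT);	/* packet mode, see is_packetized() */
}
#endif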
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	if (pipe_has_watch_queue(pipe)) {
		ret = -EXDEV;
		goto out;
	}

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			pipe->head = head + 1;

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
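/*
 * Illustrative sketch (example only): the register-first-then-test ordering
 * above is the standard ->poll() discipline. The waitqueue and data_ready()
 * predicate below are hypothetical placeholders.
 */
#if 0 /* example only, never compiled here */
static DECLARE_WAIT_QUEUE_HEAD(example_waitqueue);
static bool data_ready(void);

static __poll_t example_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(filp, &example_waitqueue, wait);	/* add to table first .. */
	if (data_ready())				/* .. then do the racy test */
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}
#endif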
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}
bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}
bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __ro_after_init;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	simple_inode_init_ts(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
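/*
 * Illustrative userspace sketch (example only): both syscalls above are
 * reachable through the glibc wrappers; pipe(2) is just pipe2(2) with
 * flags == 0. The helper name is hypothetical.
 */
#if 0 /* example only, never compiled here */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int make_cloexec_pipe(int fds[2])
{
	/* Atomic close-on-exec avoids a fcntl() race in threaded programs. */
	return pipe2(fds, O_CLOEXEC);
}
#endif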
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}
/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
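/*
 * Illustrative userspace sketch (example only): the O_WRONLY|O_NONBLOCK
 * branch above is what produces ENXIO when no reader has the FIFO open.
 * The helper name is hypothetical.
 */
#if 0 /* example only, never compiled here */
#include <errno.h>
#include <fcntl.h>

static int open_fifo_writer(const char *path)
{
	int fd = open(path, O_WRONLY | O_NONBLOCK);

	if (fd < 0 && errno == ENXIO)
		return -1;	/* no reader yet, as POSIX.1 requires */
	return fd;
}
#endif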
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
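/*
 * Illustrative sketch (example only): round_pipe_size(1) returns PAGE_SIZE
 * (the POSIX minimum), round_pipe_size(65537) returns 131072 (the next power
 * of two), and requests above 1U << 31 return 0 so callers reject them.
 * The compile-time checks below are hypothetical, not part of this file.
 */
#if 0 /* example only, never compiled here */
static void round_pipe_size_examples(void)
{
	BUILD_BUG_ON(roundup_pow_of_two(65537) != 131072);
	BUILD_BUG_ON(roundup_pow_of_two(4096) != 4096);	/* already a power of two */
}
#endif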
/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
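/*
 * Illustrative worked example (example only): the linearising copy above
 * means a ring that held entries in slots {6, 7, 0, 1} of an 8-slot array
 * ends up in slots {0, 1, 2, 3} of the new array, with tail = 0 and
 * head = n = 4, keeping the unmasked indices consistent with the new
 * power-of-two size.
 */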
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
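/*
 * Illustrative userspace sketch (example only): F_SETPIPE_SZ lands in
 * pipe_set_size() above. The kernel may round the request up, so the
 * fcntl() return value, not the argument, is the resulting capacity.
 * The helper name is hypothetical.
 */
#if 0 /* example only, never compiled here */
#include <fcntl.h>

static long grow_pipe(int fd, long bytes)
{
	return fcntl(fd, F_SETPIPE_SZ, bytes);	/* rounded-up size, or -1 */
}
#endif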
/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}
static int proc_dopipe_max_size(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}
static struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};
#endif
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}
fs_initcall(init_pipe_fs);