// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

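/*
 * Illustrative userspace sketch (not kernel code; numbers assume 4 KiB
 * pages): a fresh pipe starts with PIPE_DEF_BUFFERS (16) slots, i.e. 64 KiB,
 * and an unprivileged fcntl(F_SETPIPE_SZ) beyond /proc/sys/fs/pipe-max-size
 * (1 MiB by default) fails with EPERM; CAP_SYS_RESOURCE may exceed it:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 4 * 1024 * 1024) < 0)
 *		;	// EPERM expected without CAP_SYS_RESOURCE
 */
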
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf: the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

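/*
 * Illustrative userspace sketch (not kernel code): a pipe created with
 * O_DIRECT operates in "packet" mode -- each write() is delivered as one
 * packet and a read() consumes at most one packet, even when the read
 * buffer is larger:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[64];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));	// returns 2 ("ab"), not 4
 */
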
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

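/*
 * Illustrative userspace sketch (not kernel code): FIONREAD reports how
 * many bytes are currently queued in the pipe, which is what the loop
 * above sums over the in-use buffers:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int fds[2], queued = 0;
 *	pipe(fds);
 *	write(fds[1], "hello", 5);
 *	ioctl(fds[0], FIONREAD, &queued);	// queued == 5
 */
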
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

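/*
 * Illustrative userspace sketch (not kernel code): the mask computed above
 * is what poll(2)/epoll(7) report for a pipe end -- POLLIN once data is
 * queued, POLLHUP on the read end once every writer has closed:
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	write(fds[1], "x", 1);
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns with POLLIN set in pfd.revents
 *	close(fds[1]);		// later polls also report POLLHUP (no writers left)
 */
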
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

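/*
 * Illustrative userspace sketch (not kernel code): pipe2(2) accepts exactly
 * the flags checked in __do_pipe_flags() above -- O_CLOEXEC, O_NONBLOCK and
 * O_DIRECT -- and plain pipe(2) is equivalent to pipe2(fildes, 0):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		;	// any other flag yields EINVAL, per the check above
 */
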
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on a O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

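/*
 * Illustrative userspace sketch (not kernel code): the POSIX.1 rules quoted
 * in fifo_open() above, as seen from user space for a FIFO that has no peer
 * yet (the path below is just an example):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	mkfifo("/tmp/f", 0600);
 *	int r = open("/tmp/f", O_RDONLY | O_NONBLOCK);	// succeeds immediately
 *	int w = open("/tmp/f", O_WRONLY | O_NONBLOCK);	// would fail with ENXIO
 *							// if no reader had it open
 *	int rw = open("/tmp/f", O_RDWR);		// never blocks here
 */
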
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

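/*
 * Worked example (assuming 4 KiB pages): round_pipe_size(1) -> 4096,
 * round_pipe_size(100000) -> 131072 (the next power of two above ~97.7 KiB),
 * round_pipe_size(1 << 20) -> 1048576 (already a power of two), and anything
 * above 2^31 yields 0 so the caller can reject it.
 */
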
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

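/*
 * Illustrative userspace sketch (not kernel code): F_GETPIPE_SZ reports the
 * capacity in bytes and F_SETPIPE_SZ resizes it, rounded up by
 * round_pipe_size() to a power-of-two number of pages; the new size is
 * returned on success:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ, 0);	// typically 65536
 *	long new = fcntl(fds[1], F_SETPIPE_SZ, 100000);	// returns 131072
 */
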
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we don't
 * need any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);