/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

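/*
 * Lifetime note: a freshly initialized request starts with a refcount of
 * one (owned by the allocating caller) and with FR_PENDING set, meaning
 * the userspace daemon has not read it yet.  FR_PENDING is cleared in
 * fuse_dev_do_read() and the last reference is dropped through
 * fuse_put_request().
 */
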
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages,
					flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	if (fc->connected) {
		atomic_dec(&fc->num_waiting);
	} else if (atomic_dec_and_test(&fc->num_waiting)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

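/*
 * fc->num_waiting counts requests the connection still owes an answer
 * for.  fuse_wait_aborted() sleeps on fc->blocked_waitq until the count
 * drains to zero, which is why the decrement above must wake up all
 * waiters once the connection is no longer connected.
 */
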
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

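/*
 * Wire format reminder (struct definitions live in
 * include/uapi/linux/fuse.h): everything the daemon reads from
 * /dev/fuse starts with
 *
 *	struct fuse_in_header {
 *		uint32_t	len;		// total request size in bytes
 *		uint32_t	opcode;
 *		uint64_t	unique;		// echoed back in the reply
 *		uint64_t	nodeid;
 *		uint32_t	uid, gid, pid, padding;
 *	};
 *
 * followed by the opcode-specific argument structs; that is what the
 * in.h.len computation above accounts for.
 */
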
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		if (fc->num_background == fc->congestion_threshold && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
put_request:
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}

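/*
 * Example caller (a sketch loosely following fuse_access() in
 * fs/fuse/dir.c; the field values are illustrative):
 *
 *	struct fuse_access_in inarg = { .mask = mask };
 *	FUSE_ARGS(args);
 *
 *	args.in.h.opcode = FUSE_ACCESS;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	err = fuse_simple_request(fc, &args);
 *
 * fuse_simple_request() then takes care of request allocation, compat
 * adjustment, sending and teardown in one call.
 */
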
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

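/*
 * Background requests are throttled rather than sent eagerly: they sit
 * on fc->bg_queue until flush_bg_queue() moves them to the input queue,
 * at most fc->max_background at a time, and fc->blocked gates new
 * allocations once the limit is reached.  The congestion threshold
 * calls above additionally tell the VM that the backing device is busy.
 */
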
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);

	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

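/*
 * fuse_copy_state drives three possible transports for request data:
 * a plain iov_iter for read(2)/write(2) on the device, a set of pipe
 * buffers for splice(2), and (when move_pages is set) page stealing,
 * where whole pages are moved into the page cache instead of copied.
 */
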
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = 0;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);

	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}

		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;

	if (task_active_pid_ns(current) != fc->pid_ns) {
		rcu_read_lock();
		in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
		rcu_read_unlock();
	}

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);
	fuse_put_request(fc, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

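/*
 * Userspace view (sketch, error handling omitted): a daemon thread
 * typically loops doing
 *
 *	n = read(fuse_fd, buf, bufsize);	// one request per read
 *	struct fuse_in_header *in = (void *) buf;
 *	... dispatch on in->opcode ...
 *	write(fuse_fd, reply, replylen);	// reply echoes in->unique
 *
 * where bufsize must cover the maximum request size negotiated at
 * FUSE_INIT time; as seen above, an oversized request is answered with
 * -EIO (-E2BIG for SETXATTR) and the read restarts.
 */
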
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].offset = offset;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err) {
		fuse_retrieve_end(fc, req);
		fuse_put_request(fc, req);
	}

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

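/*
 * Userspace view (sketch): a notification is injected by writing a
 * fuse_out_header with unique == 0 and the notify code in the error
 * field, followed by the code-specific payload, e.g. to invalidate a
 * cached inode range:
 *
 *	struct fuse_notify_inval_inode_out arg = {
 *		.ino = ino, .off = 0, .len = -1,
 *	};
 *	struct fuse_out_header oh = {
 *		.error = FUSE_NOTIFY_INVAL_INODE,
 *		.len = sizeof(oh) + sizeof(arg),
 *	};
 *	// writev(fuse_fd, ...) with oh followed by arg
 *
 * fuse_dev_do_write() routes such writes here via fuse_notify().
 */
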
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header)) {
			fuse_put_request(fc, req);
			goto err_finish;
		}

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);
		fuse_put_request(fc, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

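/*
 * Besides the last-device-release path below, this abort is reachable
 * from userspace through fusectl: writing to
 * /sys/fs/fuse/connections/<dev>/abort ends up calling fuse_abort_conn().
 */
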
void fuse_wait_aborted(struct fuse_conn *fc)
{
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		list_splice_init(&fpq->processing, &to_end);
		spin_unlock(&fpq->lock);

		end_requests(fc, &to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}

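/*
 * Userspace view (sketch): a daemon clones an existing session onto a
 * fresh fd so that several threads can each read requests from their
 * own device queue.  Assumes 'session_fd' is an already mounted
 * /dev/fuse fd:
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	if (clone_fd != -1 &&
 *	    ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) == 0)
 *		... read/write requests on clone_fd ...
 */
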
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

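/*
 * Note: CUSE (fs/fuse/cuse.c) initializes its channel fops from this
 * table and overrides only open/release, which is why
 * fuse_dev_operations is exported.
 */
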
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}