/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/splice.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;
	return fuse_simple_request(fm, &args);
}
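/*
 * Illustrative note (not upstream code; field names vary across protocol
 * revisions): the request built above is an OPEN/OPENDIR message whose
 * single input argument is struct fuse_open_in and whose single output
 * argument is struct fuse_open_out, roughly:
 *
 *	struct fuse_open_in  { uint32_t flags; uint32_t open_flags; };
 *	struct fuse_open_out { uint64_t fh; uint32_t open_flags; ... };
 *
 * The server's reply fills outargp->fh (an opaque handle echoed back in
 * later READ/WRITE/RELEASE requests) and outargp->open_flags (FOPEN_*
 * bits such as FOPEN_KEEP_CACHE or FOPEN_DIRECT_IO).
 */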
struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (!ff)
		return NULL;

	ff->fm = fm;
	if (release) {
		ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
		if (!ff->args) {
			kfree(ff);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_release_args *ra = &ff->args->release_args;
		struct fuse_args *args = (ra ? &ra->args : NULL);

		if (ra && ra->inode)
			fuse_file_io_release(ff, ra->inode);

		if (!args) {
			/* Do nothing when server does not implement 'open' */
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	bool open = isdir ? !fc->no_opendir : !fc->no_open;

	ff = fuse_file_alloc(fm, open);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (open) {
		/* Store outarg for fuse_finish_open() */
		struct fuse_open_out *outargp = &ff->args->open_outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
		if (!err) {
			ff->fh = outargp->fh;
			ff->open_flags = outargp->open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			/* No release needed */
			kfree(ff->args);
			ff->args = NULL;
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}
int fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = fuse_file_io_open(file, inode);
	if (err)
		return err;

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);

	return 0;
}
static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	i_size_write(inode, 0);
	spin_unlock(&fi->lock);
	file_update_time(file);
	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int err;
	bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
	bool is_wb_truncate = is_truncate && fc->writeback_cache;
	bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err) {
		ff = file->private_data;
		err = fuse_finish_open(inode, file);
		if (err)
			fuse_sync_release(fi, ff, file->f_flags);
		else if (is_truncate)
			fuse_truncate_update_attr(inode, file);
	}

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		if (is_truncate)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode, bool sync)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = &ff->args->release_args;

	if (fuse_file_passthrough(ff))
		fuse_passthrough_release(ff, fuse_inode_backing(fi));

	/* Inode is NULL on error path of fuse_create_open() */
	if (fi) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	if (!ra)
		return;

	/* ff->args was used for open outarg */
	memset(ff->args, 0, sizeof(*ff->args));
	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;

	/*
	 * Hold inode until release is finished.
	 * From fuse_sync_release() the refcount is 1 and everything's
	 * synchronous, so we are fine with not doing igrab() here.
	 */
	ra->inode = sync ? NULL : igrab(&fi->inode);
}
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = &ff->args->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode, false);

	if (ra && ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy);
}
void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
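/*
 * Illustrative note (not upstream code): the loop above is the standard
 * 32-round XTEA encryption of the 64-bit block (v0, v1) under the 128-bit
 * fc->scramble_key, using the usual round constant 0x9E3779B9.  Because
 * XTEA is a permutation, the mapping is stable and collision-free:
 *
 *	u64 a = fuse_lock_owner_id(fc, id);
 *	u64 b = fuse_lock_owner_id(fc, id);
 *	// a == b always, and distinct ids map to distinct values,
 *	// so lock owners stay consistent across requests without
 *	// leaking the raw files_struct pointer.
 */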
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};
static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}
/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				   pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}
static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
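/*
 * Illustrative sketch (not upstream code): fuse_set_nowrite() drives
 * fi->writectr negative, which stops fuse_flush_writepages() from sending
 * new writepage requests, then sleeps until all in-flight ones complete;
 * fuse_release_nowrite() restores writectr and flushes anything queued in
 * the meantime.  A typical caller pattern looks like:
 *
 *	inode_lock(inode);
 *	fuse_sync_writes(inode);	// all sent writes have now completed
 *	inode_unlock(inode);
 */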
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In memory i_blocks is not maintained by fuse, if writeback cache is
	 * enabled, i_blocks from cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to implementation of fuse writeback
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
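/*
 * Illustrative sketch of the resulting request layout (hypothetical
 * values, not upstream code): a 4 KiB read at offset 8192 becomes a
 * FUSE_READ message with
 *
 *	in_args[0]  = struct fuse_read_in { .fh, .offset = 8192,
 *					    .size = 4096, ... }
 *	out_args[0] = up to 4096 bytes of file data (out_argvar permits a
 *		      shorter reply, which the callers treat as EOF)
 */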
static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}
static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}
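/*
 * Illustrative worked example for the 'pos' bookkeeping above (values
 * from the comment, not upstream code): a 64K DIO read split into two
 * 32K requests.  The fully completed first request reports pos == -1, so
 * io->bytes is untouched; the short second request (1K of 32K) reports
 * pos == 32K + 1K == 33K, so io->bytes becomes 33K and
 * fuse_get_res_by_io() returns 33K - the longest contiguous prefix that
 * was actually transferred.
 */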
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}
static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}
static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file.  Some data after the hole is in page cache, but has not
	 * reached the client fs yet.  So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}
static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false);

	fuse_io_free(ia);
}
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;

		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}
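/*
 * Illustrative sketch (not upstream code): unlike reads, a FUSE_WRITE
 * request carries two input arguments - the fixed header followed by the
 * payload - and one output:
 *
 *	in_args[0]  = struct fuse_write_in (or its pre-7.9 compat prefix)
 *	in_args[1]  = 'count' bytes of data
 *	out_args[0] = struct fuse_write_out { .size = bytes accepted }
 *
 * The callers treat out.size > count as a protocol error (-EIO).
 */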
static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}
static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}
bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}
static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}
fuse_fill_write_pages(struct fuse_io_args
*ia
,
1185 struct address_space
*mapping
,
1186 struct iov_iter
*ii
, loff_t pos
,
1187 unsigned int max_pages
)
1189 struct fuse_args_pages
*ap
= &ia
->ap
;
1190 struct fuse_conn
*fc
= get_fuse_conn(mapping
->host
);
1191 unsigned offset
= pos
& (PAGE_SIZE
- 1);
1195 ap
->args
.in_pages
= true;
1196 ap
->descs
[0].offset
= offset
;
1201 pgoff_t index
= pos
>> PAGE_SHIFT
;
1202 size_t bytes
= min_t(size_t, PAGE_SIZE
- offset
,
1203 iov_iter_count(ii
));
1205 bytes
= min_t(size_t, bytes
, fc
->max_write
- count
);
1209 if (fault_in_iov_iter_readable(ii
, bytes
))
1213 page
= grab_cache_page_write_begin(mapping
, index
);
1217 if (mapping_writably_mapped(mapping
))
1218 flush_dcache_page(page
);
1220 tmp
= copy_page_from_iter_atomic(page
, offset
, bytes
, ii
);
1221 flush_dcache_page(page
);
1230 ap
->pages
[ap
->num_pages
] = page
;
1231 ap
->descs
[ap
->num_pages
].length
= tmp
;
1237 if (offset
== PAGE_SIZE
)
1240 /* If we copied full page, mark it uptodate */
1241 if (tmp
== PAGE_SIZE
)
1242 SetPageUptodate(page
);
1244 if (PageUptodate(page
)) {
1247 ia
->write
.page_locked
= true;
1250 if (!fc
->big_writes
)
1252 } while (iov_iter_count(ii
) && count
< fc
->max_write
&&
1253 ap
->num_pages
< max_pages
&& offset
== 0);
1255 return count
> 0 ? count
: err
;
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
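/*
 * Illustrative worked example (hypothetical values, PAGE_SIZE == 4096):
 * pos = 4094, len = 10 touches bytes 4094..4103, i.e. pages 0 and 1, and
 * indeed ((4103 >> 12) - (4094 >> 12)) + 1 == 2; the result is then
 * clamped to max_pages.
 */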
static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t pos = iocb->ki_pos;
	ssize_t res = 0;
	int err = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (!res)
		return err;
	iocb->ki_pos += res;
	return res;
}
static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}
/*
 * @return true if an exclusive lock for direct IO writes is needed
 */
static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Server side has to advise that it supports parallel dio writes. */
	if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
		return true;

	/*
	 * Append will need to know the eventual EOF - always needs an
	 * exclusive lock.
	 */
	if (iocb->ki_flags & IOCB_APPEND)
		return true;

	/* shared locks are not allowed with parallel page cache IO */
	if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
		return true;

	/* Parallel dio beyond EOF is not supported, at least for now. */
	if (fuse_io_past_eof(iocb, from))
		return true;

	return false;
}
static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
			  bool *exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_file *ff = iocb->ki_filp->private_data;

	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
	if (*exclusive) {
		inode_lock(inode);
	} else {
		inode_lock_shared(inode);
		/*
		 * New parallel dio is allowed only if the inode is not in
		 * caching mode and it denies new opens in caching mode. This
		 * check should be performed only after taking the shared
		 * inode lock.  The previous past-eof check was done without
		 * the inode lock and might have raced, so check it again.
		 */
		if (fuse_io_past_eof(iocb, from) ||
		    fuse_file_uncached_io_start(inode, ff, NULL) != 0) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			*exclusive = true;
		}
	}
}
static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_file *ff = iocb->ki_filp->private_data;

	if (exclusive) {
		inode_unlock(inode);
	} else {
		/* Allow opens in caching mode after last parallel dio end */
		fuse_file_uncached_io_end(inode, ff);
		inode_unlock_shared(inode);
	}
}
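/*
 * Illustrative sketch of the intended pairing (not upstream code):
 * callers bracket a direct write with these helpers and let
 * fuse_dio_lock() pick the lock mode:
 *
 *	bool exclusive;
 *
 *	fuse_dio_lock(iocb, from, &exclusive);
 *	// ... perform the direct IO ...
 *	fuse_dio_unlock(iocb, exclusive);
 *
 * 'exclusive' must be passed back unchanged so the matching unlock (and,
 * in the shared case, fuse_file_uncached_io_end()) is used.
 */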
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&nop_mnt_idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;
		written = direct_write_fallback(iocb, from, written,
				fuse_perform_write(iocb, from));
	} else {
		written = fuse_perform_write(iocb, from);
	}
out:
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;

		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
					  *nbytesp - nbytes,
					  max_pages - ap->num_pages,
					  &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive;

	fuse_dio_lock(iocb, from, &exclusive);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	fuse_dio_unlock(iocb, exclusive);

	return res;
}
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_read_iter(iocb, to);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_read_iter(iocb, to);
	else
		return fuse_cache_read_iter(iocb, to);
}
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_write_iter(iocb, from);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_write_iter(iocb, from);
	else
		return fuse_cache_write_iter(iocb, from);
}
static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct fuse_file *ff = in->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);

	return filemap_splice_read(in, ppos, pipe, len, flags);
}
static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct fuse_file *ff = out->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false);

	kfree(ap->pages);
	kfree(wpa);
}
static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

 out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}
static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_pages);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_pages)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}
static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}
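/*
 * Illustrative note (not upstream code): fuse_insert_writeback() keys the
 * rb_tree by page-index ranges, so overlap detection falls out of the
 * descent.  For a node covering [curr_index, curr_index + num_pages), it
 * goes right when idx_from is past the node's end and left when idx_to is
 * before its start; any remaining case is an overlap, and the existing
 * entry is returned instead of inserting.  tree_insert() therefore WARNs
 * on a non-NULL result - callers use it only when an overlap is known to
 * be impossible.
 */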
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * server making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);

	WARN_ON(!ff);
	return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false);

	return err;
}
*fuse_writepage_args_alloc(void)
2012 struct fuse_writepage_args
*wpa
;
2013 struct fuse_args_pages
*ap
;
2015 wpa
= kzalloc(sizeof(*wpa
), GFP_NOFS
);
2019 ap
->pages
= fuse_pages_alloc(1, GFP_NOFS
, &ap
->descs
);
static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		goto err;
	ap = &wpa->ia.ap;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	wpa->ia.ff = fuse_write_file_get(fi);
	if (!wpa->ia.ff)
		goto err_nofile;

	fuse_writepage_add_to_bucket(fc, wpa);
	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	ap->args.in_pages = true;
	ap->num_pages = 1;
	ap->pages[0] = tmp_page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;
	wpa->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	tree_insert(&fi->writepages, wpa);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	kfree(wpa);
err:
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
	return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return AOP_WRITEPAGE_ACTIVATE;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
{
2136 struct fuse_writepage_args
*wpa
;
2137 struct fuse_file
*ff
;
2138 struct inode
*inode
;
2139 struct page
**orig_pages
;
2140 unsigned int max_pages
;
2143 static bool fuse_pages_realloc(struct fuse_fill_wb_data
*data
)
2145 struct fuse_args_pages
*ap
= &data
->wpa
->ia
.ap
;
2146 struct fuse_conn
*fc
= get_fuse_conn(data
->inode
);
2147 struct page
**pages
;
2148 struct fuse_page_desc
*descs
;
2149 unsigned int npages
= min_t(unsigned int,
2150 max_t(unsigned int, data
->max_pages
* 2,
2151 FUSE_DEFAULT_MAX_PAGES_PER_REQ
),
2153 WARN_ON(npages
<= data
->max_pages
);
2155 pages
= fuse_pages_alloc(npages
, GFP_NOFS
, &descs
);
2159 memcpy(pages
, ap
->pages
, sizeof(struct page
*) * ap
->num_pages
);
2160 memcpy(descs
, ap
->descs
, sizeof(struct fuse_page_desc
) * ap
->num_pages
);
2164 data
->max_pages
= npages
;
2169 static void fuse_writepages_send(struct fuse_fill_wb_data
*data
)
2171 struct fuse_writepage_args
*wpa
= data
->wpa
;
2172 struct inode
*inode
= data
->inode
;
2173 struct fuse_inode
*fi
= get_fuse_inode(inode
);
2174 int num_pages
= wpa
->ia
.ap
.num_pages
;
2177 wpa
->ia
.ff
= fuse_file_get(data
->ff
);
2178 spin_lock(&fi
->lock
);
2179 list_add_tail(&wpa
->queue_entry
, &fi
->queued_writes
);
2180 fuse_flush_writepages(inode
);
2181 spin_unlock(&fi
->lock
);
2183 for (i
= 0; i
< num_pages
; i
++)
2184 end_page_writeback(data
->orig_pages
[i
]);
/*
 * Check under fi->lock if the page is under writeback, and insert it onto the
 * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset.  If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one by
 * swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);
	new_ap->num_pages = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return false;
}
static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_pages);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	if (fuse_page_is_writeback(data->inode, page->index))
		return true;

	/* Reached max pages */
	if (ap->num_pages == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
		return true;

	return false;
}
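/*
 * Illustrative worked example (hypothetical values, not upstream code):
 * with fc->max_pages == 32 and a batch currently holding pages 10..14,
 * page 15 is appended to the batch, page 17 triggers the discontinuity
 * check, and a page already under writeback always forces the batch out
 * first - so one fuse_writepages_fill() pass over a sparse dirty range
 * produces one request per contiguous run.
 */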
static int fuse_writepages_fill(struct folio *folio,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		fuse_writepage_add_to_bucket(fc, wpa);

		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;
	}
	folio_start_writeback(folio);

	copy_highpage(tmp_page, &folio->page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	data->orig_pages[ap->num_pages] = &folio->page;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_pages++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, &folio->page)) {
		data->wpa = wpa;
	} else {
		folio_end_writeback(folio);
	}
out_unlock:
	folio_unlock(folio);

	return err;
}
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}
/*
 * It would be worth making sure that space is reserved on disk for the
 * write, but how to implement that without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

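/*
 * Counterpart of fuse_write_begin(): zero the unwritten tail of a
 * partially copied, not-uptodate page, extend i_size when the write
 * goes past it, and mark the page dirty so writeback picks it up
 * later.
 */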
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	if (pos > inode->i_size)
		i_size_write(inode, pos);

	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}

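/*
 * ->launder_folio() is called for a dirty folio that is about to be
 * invalidated: write it out synchronously and wait until no temporary
 * page for it remains in flight.
 */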
static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;
	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(&folio->page);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}

/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	int rc;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(file_inode(file)))
		return fuse_dax_mmap(file, vma);

	/* TODO: implement mmap to backing file */
	if (fuse_file_passthrough(ff))
		return -ENODEV;

	/*
	 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, as it
	 * does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
	 */
	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/*
		 * Can't provide the coherency needed for MAP_SHARED
		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
		 */
		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		if (!(vma->vm_flags & VM_MAYSHARE)) {
			/* MAP_PRIVATE */
			return generic_file_mmap(file, vma);
		}

		/*
		 * First mmap of direct_io file enters caching inode io mode.
		 * Also waits for parallel dio writers to go into serial mode
		 * (exclusive instead of shared lock).
		 */
		rc = fuse_file_cached_io_start(file_inode(file), ff);
		if (rc)
			return rc;
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

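/*
 * Translate a fuse_file_lock reply from the server into the VFS
 * struct file_lock.  Start/end are unsigned 64-bit values on the wire,
 * hence the OFFSET_MAX sanity checks.
 */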
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

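/*
 * FUSE_BMAP maps a file block to a device block and is only meaningful
 * for block-device-based (fuseblk) filesystems.  A -ENOSYS reply
 * disables further FUSE_BMAP requests on this connection.
 */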
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

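/*
 * SEEK_HOLE/SEEK_DATA lookups are forwarded to the server via
 * FUSE_LSEEK.  If the server does not implement it, fall back to
 * generic_file_llseek() after refreshing the size, which treats the
 * whole file as data.
 */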
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

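/*
 * Send a SETATTR truncating the file back to its current in-core size.
 * Used by fuse_direct_IO() below after a failed extending direct
 * write, so no stale size extension is left behind.
 */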
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}

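/*
 * Example: with fc->max_pages == 32 (the default) and 4K pages,
 * fuse_round_up(fc, off) rounds @off up to a 128K (32 << 12) boundary.
 * fuse_direct_IO() below uses this to trim a beyond-EOF read so that
 * the shortened length still spans whole maximum-sized requests.
 */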
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such a case the AIO will behave exactly like sync IO.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

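/*
 * Flush dirty pages from @start onwards and wait for all in-flight
 * FUSE writes on the inode to complete.  Note that writeback is
 * started up to LLONG_MAX rather than @end, presumably so that queued
 * writes extending the file beyond @end are flushed as well before
 * fuse_sync_writes() waits on them.
 */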
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}

static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/* mark unstable when write-back is not used, and file_out gets
	 * extended */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this a mapping->invalidate_lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}

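/*
 * If the server cannot copy (-EOPNOTSUPP) or the files are on
 * different superblocks (-EXDEV), fall back to the generic
 * splice-based copy loop in the VFS.
 */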
static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);
	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = splice_copy_file_range(src_file, src_off, dst_file,
					     dst_off, len);
	return ret;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= fuse_splice_read,
	.splice_write	= fuse_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};

static const struct address_space_operations fuse_file_aops = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	fi->iocachectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	init_waitqueue_head(&fi->direct_io_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}