/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/file.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}
	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}
struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};
struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fm, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fm);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		file_update_time(file);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	int err;
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;
	bool dax_truncate = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, isdir);
	if (!err)
		fuse_finish_open(inode, file);

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		struct fuse_file *ff = file->private_data;

		if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (fi) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(inode);

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy, isdir);
}
void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op, and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here.
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
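
/*
 * Illustration, not part of the driver: the loop above is a plain 32-round
 * XTEA encryption of the 64-bit owner value, keyed by the per-connection
 * scramble_key.  A self-contained sketch of the same transform, assuming a
 * caller-supplied 128-bit key k[4], would look like this (never compiled):
 */
#if 0	/* example sketch only */
static u64 xtea_scramble_example(u64 id, const u32 k[4])
{
	u32 v0 = id, v1 = id >> 32, sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;	/* XTEA round constant (delta) */
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
	}
	return (u64) v0 + ((u64) v1 << 32);
}
#endif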
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};
static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
						pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}

	return NULL;
}
/*
 * Check if any page in a range is under writeback.
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
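
/*
 * Sketch of what the two calls above accomplish (an assumption based on the
 * comment, not something defined in this file): fuse_set_nowrite() drives
 * fi->writectr negative, which blocks new writepage requests from being
 * sent, and waits for the in-flight ones to drain; fuse_release_nowrite()
 * restores writectr and requeues anything queued meanwhile via
 * fuse_flush_writepages().
 */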
struct fuse_flush_args {
	struct fuse_args args;
	struct fuse_flush_in inarg;
	struct work_struct work;
	struct file *file;
};
static int fuse_do_flush(struct fuse_flush_args *fa)
{
	int err;
	struct inode *inode = file_inode(fa->file);
	struct fuse_mount *fm = get_fuse_mount(inode);

	err = write_inode_now(inode, 1);
	if (err)
		goto inval_attr_out;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(fa->file->f_mapping);
	if (err)
		goto inval_attr_out;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	err = fuse_simple_request(fm, &fa->args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In-memory i_blocks is not maintained by fuse; if writeback cache is
	 * enabled, i_blocks from the cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);

	fput(fa->file);
	kfree(fa);
	return err;
}
static void fuse_flush_async(struct work_struct *work)
{
	struct fuse_flush_args *fa = container_of(work, typeof(*fa), work);

	fuse_do_flush(fa);
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct fuse_flush_args *fa;
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	fa = kzalloc(sizeof(*fa), GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	fa->inarg.fh = ff->fh;
	fa->inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	fa->args.opcode = FUSE_FLUSH;
	fa->args.nodeid = get_node_id(inode);
	fa->args.in_numargs = 1;
	fa->args.in_args[0].size = sizeof(fa->inarg);
	fa->args.in_args[0].value = &fa->inarg;
	fa->args.force = true;
	fa->file = get_file(file);

	/* Don't wait if the task is exiting */
	if (current->flags & PF_EXITING) {
		INIT_WORK(&fa->work, fuse_flush_async);
		schedule_work(&fa->work);
		return 0;
	}

	return fuse_do_flush(fa);
}
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes().
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
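
/*
 * Resulting wire layout (illustration): a FUSE_READ request carries one
 * input argument, struct fuse_read_in, and one variable-size output
 * argument of at most 'count' bytes; out_argvar permits the server to
 * reply short, which callers treat as EOF.
 */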
static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}
static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}
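
/*
 * Continuing the example in the comment above (illustration): the request
 * that completed in full reports pos == -1 and leaves io->bytes untouched;
 * the short request reports pos == 33K, and since io->bytes is still
 * negative, io->bytes becomes 33K, which fuse_get_res_by_io() then returns
 * as the result of the 64K read.
 */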
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}
static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}
static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file.  Some data after the hole is in page cache, but has not
	 * reached the client fs yet.  So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it.
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}
static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it.
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false, false);

	fuse_io_free(ia);
}
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}
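
/*
 * Worked example for the overflow guard above (illustration): a request
 * whose last page sits at the very top of the offset space has
 * pos + (count - 1) == LLONG_MAX, so one byte is trimmed from the last
 * page descriptor to keep the request's end offset from wrapping past
 * what loff_t can represent.
 */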
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;

		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}
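
/*
 * Resulting wire layout (illustration): a FUSE_WRITE request carries two
 * input arguments, struct fuse_write_in followed by 'count' bytes of
 * payload, and one output argument, struct fuse_write_out, in which the
 * server reports how many bytes it actually accepted.
 */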
static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}
static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}
bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}
static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}
fuse_fill_write_pages(struct fuse_io_args
*ia
,
1201 struct address_space
*mapping
,
1202 struct iov_iter
*ii
, loff_t pos
,
1203 unsigned int max_pages
)
1205 struct fuse_args_pages
*ap
= &ia
->ap
;
1206 struct fuse_conn
*fc
= get_fuse_conn(mapping
->host
);
1207 unsigned offset
= pos
& (PAGE_SIZE
- 1);
1211 ap
->args
.in_pages
= true;
1212 ap
->descs
[0].offset
= offset
;
1217 pgoff_t index
= pos
>> PAGE_SHIFT
;
1218 size_t bytes
= min_t(size_t, PAGE_SIZE
- offset
,
1219 iov_iter_count(ii
));
1221 bytes
= min_t(size_t, bytes
, fc
->max_write
- count
);
1225 if (fault_in_iov_iter_readable(ii
, bytes
))
1229 page
= grab_cache_page_write_begin(mapping
, index
);
1233 if (mapping_writably_mapped(mapping
))
1234 flush_dcache_page(page
);
1236 tmp
= copy_page_from_iter_atomic(page
, offset
, bytes
, ii
);
1237 flush_dcache_page(page
);
1246 ap
->pages
[ap
->num_pages
] = page
;
1247 ap
->descs
[ap
->num_pages
].length
= tmp
;
1253 if (offset
== PAGE_SIZE
)
1256 /* If we copied full page, mark it uptodate */
1257 if (tmp
== PAGE_SIZE
)
1258 SetPageUptodate(page
);
1260 if (PageUptodate(page
)) {
1263 ia
->write
.page_locked
= true;
1266 if (!fc
->big_writes
)
1268 } while (iov_iter_count(ii
) && count
< fc
->max_write
&&
1269 ap
->num_pages
< max_pages
&& offset
== 0);
1271 return count
> 0 ? count
: err
;
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
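
/*
 * Worked example (illustration): with 4K pages, pos = 3000 and len = 6000
 * span bytes 3000..8999, i.e. pages 0..2, so the formula yields
 * ((8999 >> 12) - (3000 >> 12)) + 1 = (2 - 0) + 1 = 3 pages, which is then
 * clamped to max_pages.
 */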
static ssize_t fuse_perform_write(struct kiocb *iocb,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	return res > 0 ? res : err;
}
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t endbyte = 0;

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&nop_mnt_idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(iocb, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;

		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
					  *nbytesp - nbytes,
					  max_pages - ap->num_pages,
					  &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
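
/*
 * Worked example of the trimming above (illustration): suppose
 * iov_iter_get_pages2() pins 6000 bytes starting at a page-aligned address
 * (start == 0) with 4K pages.  Then ret = 6000, npages =
 * DIV_ROUND_UP(6000, 4096) = 2, and the final descriptor is shortened by
 * (4096 - 6000) & 4095 = 2192 bytes, leaving lengths of 4096 and 1904.
 */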
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}
static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}
static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
					       struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive_lock =
		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
		iocb->ki_flags & IOCB_APPEND ||
		fuse_direct_write_extending_i_size(iocb, from);

	/*
	 * Take exclusive lock if
	 * - Parallel direct writes are disabled - a user space decision
	 * - Parallel direct writes are enabled and i_size is being extended.
	 *   This might not be needed at all, but needs further investigation.
	 */
	if (exclusive_lock)
		inode_lock(inode);
	else {
		inode_lock_shared(inode);

		/* A race with truncate might have come up as the decision for
		 * the lock type was done without holding the lock, check again.
		 */
		if (fuse_direct_write_extending_i_size(iocb, from)) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			exclusive_lock = true;
		}
	}

	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	if (exclusive_lock)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);

	return res;
}
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false, false);

	kfree(ap->pages);
	kfree(wpa);
}
static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

 out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}
static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_pages);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_pages)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}

static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}
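
/*
 * Illustration of the walk above: with an existing request covering pages
 * 8..11 (num_pages = 4), inserting pages 12..13 descends right
 * (12 >= 8 + 4), inserting pages 0..1 descends left (1 < 8), and inserting
 * pages 10..12 overlaps, so the existing request is returned instead of
 * linking the new node.
 */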
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * the server, making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);

	WARN_ON(!ff);
	return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false, false);

	return err;
}
static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->num_pages = 0;
		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->pages) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}
static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		goto err;
	ap = &wpa->ia.ap;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	wpa->ia.ff = fuse_write_file_get(fi);
	if (!wpa->ia.ff)
		goto err_nofile;

	fuse_writepage_add_to_bucket(fc, wpa);
	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	ap->args.in_pages = true;
	ap->num_pages = 1;
	ap->pages[0] = tmp_page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;
	wpa->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	tree_insert(&fi->writepages, wpa);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	kfree(wpa);
err:
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
	return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight.
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return AOP_WRITEPAGE_ACTIVATE;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};
static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}
/*
 * Check under fi->lock if the page is under writeback, and insert it onto the
 * rb_tree if not.  Otherwise iterate auxiliary write requests, to see if
 * there's one already added for a page at this offset.  If there's none, then
 * insert this new request onto the auxiliary list, otherwise reuse the
 * existing one by swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);
	new_ap->num_pages = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return false;
}
static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_pages);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	if (fuse_page_is_writeback(data->inode, page->index))
		return true;

	/* Reached max pages */
	if (ap->num_pages == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
		return true;

	return false;
}
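
/*
 * Example of the byte limit above (illustration): with 4K pages and
 * fc->max_write = 32768, a request already holding 8 pages spans exactly
 * 32768 bytes, so (8 + 1) * 4096 > 32768 forces it to be sent before a
 * ninth page is appended.
 */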
static int fuse_writepages_fill(struct folio *folio,
				struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		fuse_writepage_add_to_bucket(fc, wpa);

		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;
	}
	folio_start_writeback(folio);

	copy_highpage(tmp_page, &folio->page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	data->orig_pages[ap->num_pages] = &folio->page;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_pages++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, &folio->page)) {
		data->wpa = wpa;
	} else {
		folio_end_writeback(folio);
	}
out_unlock:
	folio_unlock(folio);

	return err;
}
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false, false);

	kfree(data.orig_pages);
out:
	return err;
}
/*
 * It's worth making sure that space is reserved on disk for the write,
 * but how to implement it without killing performance needs more thinking.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}
*file
, struct address_space
*mapping
,
2408 loff_t pos
, unsigned len
, unsigned copied
,
2409 struct page
*page
, void *fsdata
)
2411 struct inode
*inode
= page
->mapping
->host
;
2413 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
2418 if (!PageUptodate(page
)) {
2419 /* Zero any unwritten bytes at the end of the page */
2420 size_t endoff
= pos
& ~PAGE_MASK
;
2422 zero_user_segment(page
, endoff
, PAGE_SIZE
);
2423 SetPageUptodate(page
);
2426 if (pos
> inode
->i_size
)
2427 i_size_write(inode
, pos
);
2429 set_page_dirty(page
);
static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;

	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(&folio->page);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}
/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise an unprivileged userspace fs would be able to block
 * unrelated work, e.g.:
 *
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(file_inode(file)))
		return fuse_dax_mmap(file, vma);

	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/* Can't provide the coherency needed for MAP_SHARED */
		if (vma->vm_flags & VM_MAYSHARE)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		return generic_file_mmap(file, vma);
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/* mark unstable when write-back is not used, and file_out gets
	 * extended */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this a mapping->invalidate_lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}
static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);
	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};
static const struct address_space_operations fuse_file_aops  = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}