/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/splice.h>

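/*
 * Send FUSE_OPEN or FUSE_OPENDIR to the server and collect the reply
 * (file handle and open flags) in *outargp.
 */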
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}

struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	if (release) {
		ff->release_args = kzalloc(sizeof(*ff->release_args),
					   GFP_KERNEL_ACCOUNT);
		if (!ff->release_args) {
			kfree(ff);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

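/*
 * Drop a reference to the fuse_file.  When the last reference is gone
 * the prepared RELEASE request (if any) is sent, either synchronously
 * or in the background, and the fuse_file is freed.
 */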
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_release_args *ra = ff->release_args;
		struct fuse_args *args = (ra ? &ra->args : NULL);

		if (!args) {
			/* Do nothing when server does not implement 'open' */
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}

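/*
 * Allocate a fuse_file and open it on the server.  If the server does
 * not implement open (ENOSYS), fall back to a local no-open mode and
 * remember that for future opens on this connection.
 */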
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	bool open = isdir ? !fc->no_opendir : !fc->no_open;

	ff = fuse_file_alloc(fm, open);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			/* No release needed */
			kfree(ff->release_args);
			ff->release_args = NULL;
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

int fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);

	return 0;
}

static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	i_size_write(inode, 0);
	spin_unlock(&fi->lock);
	file_update_time(file);
	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}

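/*
 * Open callback for regular files.  Handles the serialization needed
 * for O_TRUNC with writeback cache or DAX before sending FUSE_OPEN.
 */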
static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int err;
	bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
	bool is_wb_truncate = is_truncate && fc->writeback_cache;
	bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err) {
		ff = file->private_data;
		err = fuse_finish_open(inode, file);
		if (err)
			fuse_sync_release(fi, ff, file->f_flags);
		else if (is_truncate)
			fuse_truncate_update_attr(inode, file);
	}

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		if (is_truncate)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode, bool sync)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	if (!ra)
		return;

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;

	/*
	 * Hold inode until release is finished.
	 * From fuse_sync_release() the refcount is 1 and everything's
	 * synchronous, so we are fine with not doing igrab() here.
	 */
	ra->inode = sync ? NULL : igrab(&fi->inode);
}

void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode, false);

	if (ra && ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy);
}

void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

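/*
 * Flush dirty data on close(): write back cached pages, wait for
 * outstanding writepages and then send FUSE_FLUSH unless the server
 * has announced it does not implement it.
 */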
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In memory i_blocks is not maintained by fuse, if writeback cache is
	 * enabled, i_blocks from cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}

static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file. Some data after the hole is in page cache, but has not
	 * reached the client fs yet. So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

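/*
 * Synchronously read a single page from the server, updating the
 * cached file size if the reply is short (EOF).
 */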
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF. If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}

static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false);

	fuse_io_free(ia);
}

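/*
 * Send a READ request for a batch of readahead pages, asynchronously
 * if the server supports async reads, otherwise synchronously.
 */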
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}

static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			  fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}

static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}

static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

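/*
 * Send a single WRITE request.  For synchronous requests the size
 * written by the server is returned; async requests complete through
 * fuse_aio_complete_req().
 */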
static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}

bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}

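/*
 * Send a WRITE request for pages filled by fuse_fill_write_pages() and
 * release them, clearing the uptodate flag on pages that were not
 * fully written.
 */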
static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}

static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			unlock_page(page);
		} else {
			ia->write.page_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}

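/*
 * Write-through path: copy user data into page cache pages and send
 * them to the server in chunks bounded by fc->max_write and
 * fc->max_pages.
 */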
596df33d 1260static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
ea9b9907 1261{
596df33d 1262 struct address_space *mapping = iocb->ki_filp->f_mapping;
ea9b9907
NP
1263 struct inode *inode = mapping->host;
1264 struct fuse_conn *fc = get_fuse_conn(inode);
06a7c3c2 1265 struct fuse_inode *fi = get_fuse_inode(inode);
596df33d 1266 loff_t pos = iocb->ki_pos;
ea9b9907
NP
1267 int err = 0;
1268 ssize_t res = 0;
1269
06a7c3c2
MP
1270 if (inode->i_size < pos + iov_iter_count(ii))
1271 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1272
ea9b9907 1273 do {
ea9b9907 1274 ssize_t count;
338f2e3f
MS
1275 struct fuse_io_args ia = {};
1276 struct fuse_args_pages *ap = &ia.ap;
5da784cc
CS
1277 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
1278 fc->max_pages);
ea9b9907 1279
338f2e3f
MS
1280 ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
1281 if (!ap->pages) {
1282 err = -ENOMEM;
ea9b9907
NP
1283 break;
1284 }
1285
4f06dd92 1286 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
ea9b9907
NP
1287 if (count <= 0) {
1288 err = count;
1289 } else {
338f2e3f
MS
1290 err = fuse_send_write_pages(&ia, iocb, inode,
1291 pos, count);
ea9b9907 1292 if (!err) {
338f2e3f
MS
1293 size_t num_written = ia.write.out.size;
1294
ea9b9907
NP
1295 res += num_written;
1296 pos += num_written;
1297
1298 /* break out of the loop on short write */
1299 if (num_written != count)
1300 err = -EIO;
1301 }
1302 }
338f2e3f 1303 kfree(ap->pages);
ea9b9907
NP
1304 } while (!err && iov_iter_count(ii));
1305
d347739a 1306 fuse_write_update_attr(inode, pos, res);
06a7c3c2 1307 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
ea9b9907 1308
70e986c3
CH
1309 if (!res)
1310 return err;
1311 iocb->ki_pos += res;
1312 return res;
ea9b9907
NP
1313}
1314
699cf824
BS
1315static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
1316{
1317 struct inode *inode = file_inode(iocb->ki_filp);
1318
1319 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
1320}
1321
1322/*
1323 * @return true if an exclusive lock for direct IO writes is needed
1324 */
1325static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
1326{
1327 struct file *file = iocb->ki_filp;
1328 struct fuse_file *ff = file->private_data;
1329 struct inode *inode = file_inode(iocb->ki_filp);
1330
1331 /* Server side has to advise that it supports parallel dio writes. */
1332 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
1333 return true;
1334
1335 /*
1336 * Append will need to know the eventual EOF - always needs an
1337 * exclusive lock.
1338 */
1339 if (iocb->ki_flags & IOCB_APPEND)
1340 return true;
1341
1342 /*
1343 * Combination of page access and direct-io is difficult, shared locks
1344 * actually introduce a conflict.
1345 */
1346 if (get_fuse_conn(inode)->direct_io_allow_mmap)
1347 return true;
1348
1349 /* Parallel dio beyond EOF is not supported, at least for now. */
1350 if (fuse_io_past_eof(iocb, from))
1351 return true;
1352
1353 return false;
1354}
1355
9bbb6717
BS
1356static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
1357 bool *exclusive)
1358{
1359 struct inode *inode = file_inode(iocb->ki_filp);
1360
1361 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
1362 if (*exclusive) {
1363 inode_lock(inode);
1364 } else {
1365 inode_lock_shared(inode);
1366 /*
1367 * Previous check was without inode lock and might have raced,
1368 * check again.
1369 */
1370 if (fuse_io_past_eof(iocb, from)) {
1371 inode_unlock_shared(inode);
1372 inode_lock(inode);
1373 *exclusive = true;
1374 }
1375 }
1376}
1377
1378static void fuse_dio_unlock(struct inode *inode, bool exclusive)
1379{
1380 if (exclusive) {
1381 inode_unlock(inode);
1382 } else {
1383 inode_unlock_shared(inode);
1384 }
1385}
1386
55752a3a 1387static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
ea9b9907
NP
1388{
1389 struct file *file = iocb->ki_filp;
1390 struct address_space *mapping = file->f_mapping;
ea9b9907
NP
1391 ssize_t written = 0;
1392 struct inode *inode = mapping->host;
1393 ssize_t err;
8981bdfd 1394 struct fuse_conn *fc = get_fuse_conn(inode);
ea9b9907 1395
8981bdfd 1396 if (fc->writeback_cache) {
4d99ff8f 1397 /* Update size (EOF optimization) and mode (SUID clearing) */
c6c745b8
MS
1398 err = fuse_update_attributes(mapping->host, file,
1399 STATX_SIZE | STATX_MODE);
4d99ff8f
PE
1400 if (err)
1401 return err;
1402
8981bdfd 1403 if (fc->handle_killpriv_v2 &&
9452e93e
CB
1404 setattr_should_drop_suidgid(&nop_mnt_idmap,
1405 file_inode(file))) {
8981bdfd
VG
1406 goto writethrough;
1407 }
1408
84c3d55c 1409 return generic_file_write_iter(iocb, from);
4d99ff8f
PE
1410 }
1411
8981bdfd 1412writethrough:
5955102c 1413 inode_lock(inode);
ea9b9907 1414
3309dd04
AV
1415 err = generic_write_checks(iocb, from);
1416 if (err <= 0)
ea9b9907
NP
1417 goto out;
1418
5fa8e0a1 1419 err = file_remove_privs(file);
ea9b9907
NP
1420 if (err)
1421 goto out;
1422
c3b2da31
JB
1423 err = file_update_time(file);
1424 if (err)
1425 goto out;
ea9b9907 1426
2ba48ce5 1427 if (iocb->ki_flags & IOCB_DIRECT) {
1af5bb49 1428 written = generic_file_direct_write(iocb, from);
84c3d55c 1429 if (written < 0 || !iov_iter_count(from))
4273b793 1430 goto out;
64d1b4dd
CH
1431 written = direct_write_fallback(iocb, from, written,
1432 fuse_perform_write(iocb, from));
4273b793 1433 } else {
596df33d 1434 written = fuse_perform_write(iocb, from);
4273b793 1435 }
ea9b9907 1436out:
5955102c 1437 inode_unlock(inode);
e1c0eecb
MS
1438 if (written > 0)
1439 written = generic_write_sync(iocb, written);
ea9b9907
NP
1440
1441 return written ? written : err;
1442}
1443
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
					  *nbytesp - nbytes,
					  max_pages - ap->num_pages,
					  &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

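/*
 * Core of direct I/O: split the iterator into server-sized requests and
 * submit them synchronously or asynchronously depending on io->async.
 */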
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive;

	fuse_dio_lock(iocb, from, &exclusive);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	fuse_dio_unlock(inode, exclusive);

	return res;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}

static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false);

	kfree(ap->pages);
	kfree(wpa);
}

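/*
 * Account the end of writeback for all pages of a writepage request
 * and wake up waiters on the inode's page_waitq.
 */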
fcee216b 1723static void fuse_writepage_finish(struct fuse_mount *fm,
33826ebb 1724 struct fuse_writepage_args *wpa)
3be5a52b 1725{
33826ebb
MS
1726 struct fuse_args_pages *ap = &wpa->ia.ap;
1727 struct inode *inode = wpa->inode;
3be5a52b 1728 struct fuse_inode *fi = get_fuse_inode(inode);
de1414a6 1729 struct backing_dev_info *bdi = inode_to_bdi(inode);
385b1268 1730 int i;
3be5a52b 1731
33826ebb 1732 for (i = 0; i < ap->num_pages; i++) {
93f78d88 1733 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
33826ebb 1734 dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
93f78d88 1735 wb_writeout_inc(&bdi->wb);
385b1268 1736 }
3be5a52b
MS
1737 wake_up(&fi->page_waitq);
1738}
1739
f15ecfef 1740/* Called under fi->lock, may release and reacquire it */
fcee216b 1741static void fuse_send_writepage(struct fuse_mount *fm,
33826ebb 1742 struct fuse_writepage_args *wpa, loff_t size)
f15ecfef
KT
1743__releases(fi->lock)
1744__acquires(fi->lock)
3be5a52b 1745{
33826ebb
MS
1746 struct fuse_writepage_args *aux, *next;
1747 struct fuse_inode *fi = get_fuse_inode(wpa->inode);
1748 struct fuse_write_in *inarg = &wpa->ia.write.in;
1749 struct fuse_args *args = &wpa->ia.ap.args;
1750 __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
1751 int err;
3be5a52b 1752
33826ebb 1753 fi->writectr++;
385b1268
PE
1754 if (inarg->offset + data_size <= size) {
1755 inarg->size = data_size;
3be5a52b 1756 } else if (inarg->offset < size) {
385b1268 1757 inarg->size = size - inarg->offset;
3be5a52b
MS
1758 } else {
1759 /* Got truncated off completely */
1760 goto out_free;
b6aeaded 1761 }
3be5a52b 1762
33826ebb
MS
1763 args->in_args[1].size = inarg->size;
1764 args->force = true;
1765 args->nocreds = true;
1766
fcee216b 1767 err = fuse_simple_background(fm, args, GFP_ATOMIC);
33826ebb
MS
1768 if (err == -ENOMEM) {
1769 spin_unlock(&fi->lock);
fcee216b 1770 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
33826ebb
MS
1771 spin_lock(&fi->lock);
1772 }
1773
f15ecfef 1774 /* Fails on broken connection only */
33826ebb 1775 if (unlikely(err))
f15ecfef
KT
1776 goto out_free;
1777
3be5a52b
MS
1778 return;
1779
1780 out_free:
33826ebb 1781 fi->writectr--;
69a6487a 1782 rb_erase(&wpa->writepages_entry, &fi->writepages);
fcee216b 1783 fuse_writepage_finish(fm, wpa);
f15ecfef 1784 spin_unlock(&fi->lock);
e2653bd5
MS
1785
1786 /* After fuse_writepage_finish() aux request list is private */
33826ebb
MS
1787 for (aux = wpa->next; aux; aux = next) {
1788 next = aux->next;
1789 aux->next = NULL;
1790 fuse_writepage_free(aux);
e2653bd5
MS
1791 }
1792
33826ebb 1793 fuse_writepage_free(wpa);
f15ecfef 1794 spin_lock(&fi->lock);
b6aeaded
MS
1795}
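/*
 * A rough worked example of the cropping in fuse_send_writepage(), assuming
 * 4K pages: for a one-page request (data_size == 4096) at offset 8192 with
 * i_size == 10000, the middle branch applies and only 10000 - 8192 = 1808
 * bytes are sent; at offset 12288 the request lies entirely beyond i_size,
 * so it is freed without being sent at all.
 */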
1796
3be5a52b
MS
1797/*
1798 * If fi->writectr is positive (no truncate or fsync going on) send
1799 * all queued writepage requests.
1800 *
f15ecfef 1801 * Called with fi->lock held
3be5a52b
MS
1802 */
1803void fuse_flush_writepages(struct inode *inode)
f15ecfef
KT
1804__releases(fi->lock)
1805__acquires(fi->lock)
b6aeaded 1806{
fcee216b 1807 struct fuse_mount *fm = get_fuse_mount(inode);
3be5a52b 1808 struct fuse_inode *fi = get_fuse_inode(inode);
9de5be06 1809 loff_t crop = i_size_read(inode);
33826ebb 1810 struct fuse_writepage_args *wpa;
3be5a52b
MS
1811
1812 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
33826ebb
MS
1813 wpa = list_entry(fi->queued_writes.next,
1814 struct fuse_writepage_args, queue_entry);
1815 list_del_init(&wpa->queue_entry);
fcee216b 1816 fuse_send_writepage(fm, wpa, crop);
3be5a52b
MS
1817 }
1818}
1819
c146024e
MS
1820static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
1821 struct fuse_writepage_args *wpa)
6b2fb799
MP
1822{
1823 pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
1824 pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
1825 struct rb_node **p = &root->rb_node;
1826 struct rb_node *parent = NULL;
1827
1828 WARN_ON(!wpa->ia.ap.num_pages);
1829 while (*p) {
1830 struct fuse_writepage_args *curr;
1831 pgoff_t curr_index;
1832
1833 parent = *p;
1834 curr = rb_entry(parent, struct fuse_writepage_args,
1835 writepages_entry);
1836 WARN_ON(curr->inode != wpa->inode);
1837 curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
1838
1839 if (idx_from >= curr_index + curr->ia.ap.num_pages)
1840 p = &(*p)->rb_right;
1841 else if (idx_to < curr_index)
1842 p = &(*p)->rb_left;
1843 else
c146024e 1844 return curr;
6b2fb799
MP
1845 }
1846
1847 rb_link_node(&wpa->writepages_entry, parent, p);
1848 rb_insert_color(&wpa->writepages_entry, root);
c146024e
MS
1849 return NULL;
1850}
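/*
 * Sketch of the overlap test in fuse_insert_writeback(): requests are keyed
 * by the page index range they cover.  If a node in the tree covers indices
 * 3..5 (curr_index == 3, num_pages == 3) and the new request covers 5..6,
 * then idx_from (5) is not past curr_index + num_pages (6) and idx_to (6)
 * is not below curr_index (3), so the overlapping node is returned instead
 * of inserting the new request.
 */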
1851
1852static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
1853{
1854 WARN_ON(fuse_insert_writeback(root, wpa));
6b2fb799
MP
1855}
1856
fcee216b 1857static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
33826ebb 1858 int error)
3be5a52b 1859{
33826ebb
MS
1860 struct fuse_writepage_args *wpa =
1861 container_of(args, typeof(*wpa), ia.ap.args);
1862 struct inode *inode = wpa->inode;
3be5a52b 1863 struct fuse_inode *fi = get_fuse_inode(inode);
3466958b 1864 struct fuse_conn *fc = get_fuse_conn(inode);
3be5a52b 1865
33826ebb 1866 mapping_set_error(inode->i_mapping, error);
3466958b
VG
1867 /*
 1868 * A writeback finished and this might have updated mtime/ctime on the
 1869 * server, making local mtime/ctime stale. Hence invalidate attrs.
1870 * Do this only if writeback_cache is not enabled. If writeback_cache
1871 * is enabled, we trust local ctime/mtime.
1872 */
1873 if (!fc->writeback_cache)
fa5eee57 1874 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
f15ecfef 1875 spin_lock(&fi->lock);
69a6487a 1876 rb_erase(&wpa->writepages_entry, &fi->writepages);
33826ebb 1877 while (wpa->next) {
fcee216b 1878 struct fuse_mount *fm = get_fuse_mount(inode);
33826ebb
MS
1879 struct fuse_write_in *inarg = &wpa->ia.write.in;
1880 struct fuse_writepage_args *next = wpa->next;
1881
1882 wpa->next = next->next;
1883 next->next = NULL;
1884 next->ia.ff = fuse_file_get(wpa->ia.ff);
6b2fb799 1885 tree_insert(&fi->writepages, next);
6eaf4782
MP
1886
1887 /*
1888 * Skip fuse_flush_writepages() to make it easy to crop requests
1889 * based on primary request size.
1890 *
1891 * 1st case (trivial): there are no concurrent activities using
1892 * fuse_set/release_nowrite. Then we're on safe side because
1893 * fuse_flush_writepages() would call fuse_send_writepage()
1894 * anyway.
1895 *
1896 * 2nd case: someone called fuse_set_nowrite and it is waiting
1897 * now for completion of all in-flight requests. This happens
1898 * rarely and no more than once per page, so this should be
1899 * okay.
1900 *
1901 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
1902 * of fuse_set_nowrite..fuse_release_nowrite section. The fact
1903 * that fuse_set_nowrite returned implies that all in-flight
1904 * requests were completed along with all of their secondary
1905 * requests. Further primary requests are blocked by negative
1906 * writectr. Hence there cannot be any in-flight requests and
1907 * no invocations of fuse_writepage_end() while we're in
1908 * fuse_set_nowrite..fuse_release_nowrite section.
1909 */
fcee216b 1910 fuse_send_writepage(fm, next, inarg->offset + inarg->size);
8b284dc4 1911 }
3be5a52b 1912 fi->writectr--;
fcee216b 1913 fuse_writepage_finish(fm, wpa);
f15ecfef 1914 spin_unlock(&fi->lock);
33826ebb 1915 fuse_writepage_free(wpa);
3be5a52b
MS
1916}
1917
a9667ac8 1918static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
adcadfa8 1919{
84840efc 1920 struct fuse_file *ff;
adcadfa8 1921
f15ecfef 1922 spin_lock(&fi->lock);
84840efc
MS
1923 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
1924 write_entry);
1925 if (ff)
72523425 1926 fuse_file_get(ff);
f15ecfef 1927 spin_unlock(&fi->lock);
adcadfa8
PE
1928
1929 return ff;
1930}
1931
a9667ac8 1932static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
1e18bda8 1933{
a9667ac8 1934 struct fuse_file *ff = __fuse_write_file_get(fi);
1e18bda8
MS
1935 WARN_ON(!ff);
1936 return ff;
1937}
1938
1939int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1940{
1e18bda8
MS
1941 struct fuse_inode *fi = get_fuse_inode(inode);
1942 struct fuse_file *ff;
1943 int err;
1944
5c791fe1
MS
1945 /*
1946 * Inode is always written before the last reference is dropped and
1947 * hence this should not be reached from reclaim.
1948 *
1949 * Writing back the inode from reclaim can deadlock if the request
1950 * processing itself needs an allocation. Allocations triggering
1951 * reclaim while serving a request can't be prevented, because it can
1952 * involve any number of unrelated userspace processes.
1953 */
1954 WARN_ON(wbc->for_reclaim);
1955
a9667ac8 1956 ff = __fuse_write_file_get(fi);
ab9e13f7 1957 err = fuse_flush_times(inode, ff);
1e18bda8 1958 if (ff)
e26ee4ef 1959 fuse_file_put(ff, false);
1e18bda8
MS
1960
1961 return err;
1962}
1963
33826ebb
MS
1964static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
1965{
1966 struct fuse_writepage_args *wpa;
1967 struct fuse_args_pages *ap;
1968
1969 wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
1970 if (wpa) {
1971 ap = &wpa->ia.ap;
1972 ap->num_pages = 0;
1973 ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
1974 if (!ap->pages) {
1975 kfree(wpa);
1976 wpa = NULL;
1977 }
1978 }
1979 return wpa;
1980
1981}
1982
660585b5
MS
1983static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
1984 struct fuse_writepage_args *wpa)
1985{
1986 if (!fc->sync_fs)
1987 return;
1988
1989 rcu_read_lock();
1990 /* Prevent resurrection of dead bucket in unlikely race with syncfs */
1991 do {
1992 wpa->bucket = rcu_dereference(fc->curr_bucket);
1993 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
1994 rcu_read_unlock();
1995}
1996
3be5a52b
MS
1997static int fuse_writepage_locked(struct page *page)
1998{
1999 struct address_space *mapping = page->mapping;
2000 struct inode *inode = mapping->host;
2001 struct fuse_conn *fc = get_fuse_conn(inode);
2002 struct fuse_inode *fi = get_fuse_inode(inode);
33826ebb
MS
2003 struct fuse_writepage_args *wpa;
2004 struct fuse_args_pages *ap;
3be5a52b 2005 struct page *tmp_page;
72523425 2006 int error = -ENOMEM;
3be5a52b
MS
2007
2008 set_page_writeback(page);
2009
33826ebb
MS
2010 wpa = fuse_writepage_args_alloc();
2011 if (!wpa)
3be5a52b 2012 goto err;
33826ebb 2013 ap = &wpa->ia.ap;
3be5a52b
MS
2014
2015 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2016 if (!tmp_page)
2017 goto err_free;
2018
72523425 2019 error = -EIO;
a9667ac8 2020 wpa->ia.ff = fuse_write_file_get(fi);
33826ebb 2021 if (!wpa->ia.ff)
27f1b363 2022 goto err_nofile;
72523425 2023
660585b5 2024 fuse_writepage_add_to_bucket(fc, wpa);
33826ebb 2025 fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
3be5a52b
MS
2026
2027 copy_highpage(tmp_page, page);
33826ebb
MS
2028 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2029 wpa->next = NULL;
2030 ap->args.in_pages = true;
2031 ap->num_pages = 1;
2032 ap->pages[0] = tmp_page;
2033 ap->descs[0].offset = 0;
2034 ap->descs[0].length = PAGE_SIZE;
2035 ap->args.end = fuse_writepage_end;
2036 wpa->inode = inode;
3be5a52b 2037
93f78d88 2038 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
11fb9989 2039 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
3be5a52b 2040
f15ecfef 2041 spin_lock(&fi->lock);
6b2fb799 2042 tree_insert(&fi->writepages, wpa);
33826ebb 2043 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
3be5a52b 2044 fuse_flush_writepages(inode);
f15ecfef 2045 spin_unlock(&fi->lock);
3be5a52b 2046
4a4ac4eb
MP
2047 end_page_writeback(page);
2048
3be5a52b
MS
2049 return 0;
2050
27f1b363
MP
2051err_nofile:
2052 __free_page(tmp_page);
3be5a52b 2053err_free:
33826ebb 2054 kfree(wpa);
3be5a52b 2055err:
9183976e 2056 mapping_set_error(page->mapping, error);
3be5a52b 2057 end_page_writeback(page);
72523425 2058 return error;
3be5a52b
MS
2059}
2060
2061static int fuse_writepage(struct page *page, struct writeback_control *wbc)
2062{
670d21c6 2063 struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
3be5a52b
MS
2064 int err;
2065
ff17be08
MS
2066 if (fuse_page_is_writeback(page->mapping->host, page->index)) {
2067 /*
2068 * ->writepages() should be called for sync() and friends. We
2069 * should only get here on direct reclaim and then we are
2070 * allowed to skip a page which is already in flight
2071 */
2072 WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
2073
2074 redirty_page_for_writepage(wbc, page);
d5880c7a 2075 unlock_page(page);
ff17be08
MS
2076 return 0;
2077 }
2078
670d21c6
N
2079 if (wbc->sync_mode == WB_SYNC_NONE &&
2080 fc->num_background >= fc->congestion_threshold)
2081 return AOP_WRITEPAGE_ACTIVATE;
2082
3be5a52b
MS
2083 err = fuse_writepage_locked(page);
2084 unlock_page(page);
2085
2086 return err;
2087}
2088
26d614df 2089struct fuse_fill_wb_data {
33826ebb 2090 struct fuse_writepage_args *wpa;
26d614df
PE
2091 struct fuse_file *ff;
2092 struct inode *inode;
2d033eaa 2093 struct page **orig_pages;
33826ebb 2094 unsigned int max_pages;
26d614df
PE
2095};
2096
33826ebb
MS
2097static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
2098{
2099 struct fuse_args_pages *ap = &data->wpa->ia.ap;
2100 struct fuse_conn *fc = get_fuse_conn(data->inode);
2101 struct page **pages;
2102 struct fuse_page_desc *descs;
2103 unsigned int npages = min_t(unsigned int,
2104 max_t(unsigned int, data->max_pages * 2,
2105 FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2106 fc->max_pages);
2107 WARN_ON(npages <= data->max_pages);
2108
2109 pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
2110 if (!pages)
2111 return false;
2112
2113 memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
2114 memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
2115 kfree(ap->pages);
2116 ap->pages = pages;
2117 ap->descs = descs;
2118 data->max_pages = npages;
2119
2120 return true;
2121}
2122
26d614df
PE
2123static void fuse_writepages_send(struct fuse_fill_wb_data *data)
2124{
33826ebb 2125 struct fuse_writepage_args *wpa = data->wpa;
26d614df 2126 struct inode *inode = data->inode;
26d614df 2127 struct fuse_inode *fi = get_fuse_inode(inode);
33826ebb 2128 int num_pages = wpa->ia.ap.num_pages;
2d033eaa 2129 int i;
26d614df 2130
33826ebb 2131 wpa->ia.ff = fuse_file_get(data->ff);
f15ecfef 2132 spin_lock(&fi->lock);
33826ebb 2133 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
26d614df 2134 fuse_flush_writepages(inode);
f15ecfef 2135 spin_unlock(&fi->lock);
2d033eaa
MP
2136
2137 for (i = 0; i < num_pages; i++)
2138 end_page_writeback(data->orig_pages[i]);
26d614df
PE
2139}
2140
7f305ca1 2141/*
c146024e
MS
2142 * Check under fi->lock if the page is under writeback, and insert it onto the
 2143 * rb_tree if not. Otherwise iterate the auxiliary write requests to see if there's
419234d5
MS
2144 * one already added for a page at this offset. If there's none, then insert
2145 * this new request onto the auxiliary list, otherwise reuse the existing one by
c146024e 2146 * swapping the new temp page with the old one.
7f305ca1 2147 */
c146024e
MS
2148static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
2149 struct page *page)
8b284dc4 2150{
33826ebb
MS
2151 struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
2152 struct fuse_writepage_args *tmp;
2153 struct fuse_writepage_args *old_wpa;
2154 struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
8b284dc4 2155
33826ebb 2156 WARN_ON(new_ap->num_pages != 0);
c146024e 2157 new_ap->num_pages = 1;
8b284dc4 2158
f15ecfef 2159 spin_lock(&fi->lock);
c146024e 2160 old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
33826ebb 2161 if (!old_wpa) {
f15ecfef 2162 spin_unlock(&fi->lock);
c146024e 2163 return true;
f6011081 2164 }
8b284dc4 2165
33826ebb 2166 for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
7f305ca1
MS
2167 pgoff_t curr_index;
2168
33826ebb
MS
2169 WARN_ON(tmp->inode != new_wpa->inode);
2170 curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
419234d5 2171 if (curr_index == page->index) {
33826ebb
MS
2172 WARN_ON(tmp->ia.ap.num_pages != 1);
2173 swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
7f305ca1 2174 break;
8b284dc4
MS
2175 }
2176 }
2177
7f305ca1 2178 if (!tmp) {
33826ebb
MS
2179 new_wpa->next = old_wpa->next;
2180 old_wpa->next = new_wpa;
7f305ca1 2181 }
41b6e41f 2182
f15ecfef 2183 spin_unlock(&fi->lock);
7f305ca1
MS
2184
2185 if (tmp) {
33826ebb 2186 struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
8b284dc4 2187
93f78d88 2188 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
33826ebb 2189 dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
93f78d88 2190 wb_writeout_inc(&bdi->wb);
33826ebb 2191 fuse_writepage_free(new_wpa);
8b284dc4 2192 }
7f305ca1 2193
c146024e 2194 return false;
8b284dc4
MS
2195}
2196
6ddf3af9
MS
2197static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
2198 struct fuse_args_pages *ap,
2199 struct fuse_fill_wb_data *data)
2200{
2201 WARN_ON(!ap->num_pages);
2202
2203 /*
 2204 * Being under writeback is unlikely but possible. For example, a direct
 2205 * read to an mmapped fuse file will set the page dirty twice: once when
 2206 * the pages are faulted with get_user_pages(), and again after the read
 2207 * completes.
2208 */
2209 if (fuse_page_is_writeback(data->inode, page->index))
2210 return true;
2211
2212 /* Reached max pages */
2213 if (ap->num_pages == fc->max_pages)
2214 return true;
2215
2216 /* Reached max write bytes */
2217 if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
2218 return true;
2219
2220 /* Discontinuity */
2221 if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
2222 return true;
2223
2224 /* Need to grow the pages array? If so, did the expansion fail? */
2225 if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
2226 return true;
2227
2228 return false;
2229}
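/*
 * For illustration (assuming 4K pages and a negotiated max_write of 128K,
 * which is a common but not guaranteed setting): the max_write check above
 * fires once a request already holds 32 pages, since (32 + 1) * 4096 =
 * 135168 > 131072, so the pending request is sent and the new page starts
 * a fresh one.
 */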
2230
d585bdbe 2231static int fuse_writepages_fill(struct folio *folio,
26d614df
PE
2232 struct writeback_control *wbc, void *_data)
2233{
2234 struct fuse_fill_wb_data *data = _data;
33826ebb
MS
2235 struct fuse_writepage_args *wpa = data->wpa;
2236 struct fuse_args_pages *ap = &wpa->ia.ap;
26d614df 2237 struct inode *inode = data->inode;
f15ecfef 2238 struct fuse_inode *fi = get_fuse_inode(inode);
26d614df
PE
2239 struct fuse_conn *fc = get_fuse_conn(inode);
2240 struct page *tmp_page;
2241 int err;
2242
2243 if (!data->ff) {
2244 err = -EIO;
a9667ac8 2245 data->ff = fuse_write_file_get(fi);
26d614df
PE
2246 if (!data->ff)
2247 goto out_unlock;
2248 }
2249
d585bdbe 2250 if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
8b284dc4 2251 fuse_writepages_send(data);
33826ebb 2252 data->wpa = NULL;
26d614df 2253 }
e52a8250 2254
26d614df
PE
2255 err = -ENOMEM;
2256 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2257 if (!tmp_page)
2258 goto out_unlock;
2259
2260 /*
2261 * The page must not be redirtied until the writeout is completed
2262 * (i.e. userspace has sent a reply to the write request). Otherwise
2263 * there could be more than one temporary page instance for each real
2264 * page.
2265 *
2266 * This is ensured by holding the page lock in page_mkwrite() while
2267 * checking fuse_page_is_writeback(). We already hold the page lock
2268 * since clear_page_dirty_for_io() and keep it held until we add the
33826ebb 2269 * request to the fi->writepages list and increment ap->num_pages.
26d614df
PE
2270 * After this fuse_page_is_writeback() will indicate that the page is
2271 * under writeback, so we can release the page lock.
2272 */
33826ebb 2273 if (data->wpa == NULL) {
26d614df 2274 err = -ENOMEM;
33826ebb
MS
2275 wpa = fuse_writepage_args_alloc();
2276 if (!wpa) {
26d614df
PE
2277 __free_page(tmp_page);
2278 goto out_unlock;
2279 }
660585b5
MS
2280 fuse_writepage_add_to_bucket(fc, wpa);
2281
33826ebb 2282 data->max_pages = 1;
26d614df 2283
33826ebb 2284 ap = &wpa->ia.ap;
d585bdbe 2285 fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
33826ebb
MS
2286 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2287 wpa->next = NULL;
2288 ap->args.in_pages = true;
2289 ap->args.end = fuse_writepage_end;
2290 ap->num_pages = 0;
2291 wpa->inode = inode;
26d614df 2292 }
d585bdbe 2293 folio_start_writeback(folio);
26d614df 2294
d585bdbe 2295 copy_highpage(tmp_page, &folio->page);
33826ebb
MS
2296 ap->pages[ap->num_pages] = tmp_page;
2297 ap->descs[ap->num_pages].offset = 0;
2298 ap->descs[ap->num_pages].length = PAGE_SIZE;
d585bdbe 2299 data->orig_pages[ap->num_pages] = &folio->page;
26d614df 2300
93f78d88 2301 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
11fb9989 2302 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
8b284dc4
MS
2303
2304 err = 0;
c146024e
MS
2305 if (data->wpa) {
2306 /*
2307 * Protected by fi->lock against concurrent access by
2308 * fuse_page_is_writeback().
2309 */
2310 spin_lock(&fi->lock);
2311 ap->num_pages++;
2312 spin_unlock(&fi->lock);
d585bdbe 2313 } else if (fuse_writepage_add(wpa, &folio->page)) {
c146024e
MS
2314 data->wpa = wpa;
2315 } else {
d585bdbe 2316 folio_end_writeback(folio);
8b284dc4 2317 }
26d614df 2318out_unlock:
d585bdbe 2319 folio_unlock(folio);
26d614df
PE
2320
2321 return err;
2322}
2323
2324static int fuse_writepages(struct address_space *mapping,
2325 struct writeback_control *wbc)
2326{
2327 struct inode *inode = mapping->host;
5da784cc 2328 struct fuse_conn *fc = get_fuse_conn(inode);
26d614df
PE
2329 struct fuse_fill_wb_data data;
2330 int err;
2331
2332 err = -EIO;
5d069dbe 2333 if (fuse_is_bad(inode))
26d614df
PE
2334 goto out;
2335
670d21c6
N
2336 if (wbc->sync_mode == WB_SYNC_NONE &&
2337 fc->num_background >= fc->congestion_threshold)
2338 return 0;
2339
26d614df 2340 data.inode = inode;
33826ebb 2341 data.wpa = NULL;
26d614df
PE
2342 data.ff = NULL;
2343
2d033eaa 2344 err = -ENOMEM;
5da784cc 2345 data.orig_pages = kcalloc(fc->max_pages,
f2b3455e 2346 sizeof(struct page *),
2d033eaa
MP
2347 GFP_NOFS);
2348 if (!data.orig_pages)
2349 goto out;
2350
26d614df 2351 err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
33826ebb 2352 if (data.wpa) {
33826ebb 2353 WARN_ON(!data.wpa->ia.ap.num_pages);
26d614df 2354 fuse_writepages_send(&data);
26d614df
PE
2355 }
2356 if (data.ff)
e26ee4ef 2357 fuse_file_put(data.ff, false);
2d033eaa
MP
2358
2359 kfree(data.orig_pages);
26d614df
PE
2360out:
2361 return err;
2362}
2363
6b12c1b3
PE
2364/*
 2365 * It would be worthwhile to make sure that space is reserved on disk for the
 2366 * write, but how to implement it without killing performance needs more thought.
2367 */
2368static int fuse_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 2369 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
6b12c1b3 2370{
09cbfeaf 2371 pgoff_t index = pos >> PAGE_SHIFT;
a455589f 2372 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
6b12c1b3
PE
2373 struct page *page;
2374 loff_t fsize;
2375 int err = -ENOMEM;
2376
2377 WARN_ON(!fc->writeback_cache);
2378
b7446e7c 2379 page = grab_cache_page_write_begin(mapping, index);
6b12c1b3
PE
2380 if (!page)
2381 goto error;
2382
2383 fuse_wait_on_page_writeback(mapping->host, page->index);
2384
09cbfeaf 2385 if (PageUptodate(page) || len == PAGE_SIZE)
6b12c1b3
PE
2386 goto success;
2387 /*
 2388 * Check if the start of this page comes after the end of file, in which
2389 * case the readpage can be optimized away.
2390 */
2391 fsize = i_size_read(mapping->host);
09cbfeaf
KS
2392 if (fsize <= (pos & PAGE_MASK)) {
2393 size_t off = pos & ~PAGE_MASK;
6b12c1b3
PE
2394 if (off)
2395 zero_user_segment(page, 0, off);
2396 goto success;
2397 }
2398 err = fuse_do_readpage(file, page);
2399 if (err)
2400 goto cleanup;
2401success:
2402 *pagep = page;
2403 return 0;
2404
2405cleanup:
2406 unlock_page(page);
09cbfeaf 2407 put_page(page);
6b12c1b3
PE
2408error:
2409 return err;
2410}
2411
2412static int fuse_write_end(struct file *file, struct address_space *mapping,
2413 loff_t pos, unsigned len, unsigned copied,
2414 struct page *page, void *fsdata)
2415{
2416 struct inode *inode = page->mapping->host;
2417
59c3b76c
MS
2418 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
2419 if (!copied)
2420 goto unlock;
2421
8c56e03d 2422 pos += copied;
6b12c1b3
PE
2423 if (!PageUptodate(page)) {
2424 /* Zero any unwritten bytes at the end of the page */
8c56e03d 2425 size_t endoff = pos & ~PAGE_MASK;
6b12c1b3 2426 if (endoff)
09cbfeaf 2427 zero_user_segment(page, endoff, PAGE_SIZE);
6b12c1b3
PE
2428 SetPageUptodate(page);
2429 }
2430
8c56e03d
MS
2431 if (pos > inode->i_size)
2432 i_size_write(inode, pos);
2433
6b12c1b3 2434 set_page_dirty(page);
59c3b76c
MS
2435
2436unlock:
6b12c1b3 2437 unlock_page(page);
09cbfeaf 2438 put_page(page);
6b12c1b3
PE
2439
2440 return copied;
2441}
2442
2bf06b8e 2443static int fuse_launder_folio(struct folio *folio)
3be5a52b
MS
2444{
2445 int err = 0;
2bf06b8e
MWO
2446 if (folio_clear_dirty_for_io(folio)) {
2447 struct inode *inode = folio->mapping->host;
3993382b
MS
2448
2449 /* Serialize with pending writeback for the same page */
2bf06b8e
MWO
2450 fuse_wait_on_page_writeback(inode, folio->index);
2451 err = fuse_writepage_locked(&folio->page);
3be5a52b 2452 if (!err)
2bf06b8e 2453 fuse_wait_on_page_writeback(inode, folio->index);
3be5a52b
MS
2454 }
2455 return err;
2456}
2457
2458/*
36ea2337
MS
2459 * Write back dirty data/metadata now (there may not be any suitable
2460 * open files later for data)
3be5a52b
MS
2461 */
2462static void fuse_vma_close(struct vm_area_struct *vma)
2463{
36ea2337
MS
2464 int err;
2465
2466 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2467 mapping_set_error(vma->vm_file->f_mapping, err);
3be5a52b
MS
2468}
2469
2470/*
2471 * Wait for writeback against this page to complete before allowing it
2472 * to be marked dirty again, and hence written back again, possibly
2473 * before the previous writepage completed.
2474 *
2475 * Block here, instead of in ->writepage(), so that the userspace fs
2476 * can only block processes actually operating on the filesystem.
2477 *
2478 * Otherwise unprivileged userspace fs would be able to block
2479 * unrelated:
2480 *
2481 * - page migration
2482 * - sync(2)
2483 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2484 */
46fb504a 2485static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
3be5a52b 2486{
c2ec175c 2487 struct page *page = vmf->page;
11bac800 2488 struct inode *inode = file_inode(vmf->vma->vm_file);
cca24370 2489
11bac800 2490 file_update_time(vmf->vma->vm_file);
cca24370
MS
2491 lock_page(page);
2492 if (page->mapping != inode->i_mapping) {
2493 unlock_page(page);
2494 return VM_FAULT_NOPAGE;
2495 }
3be5a52b
MS
2496
2497 fuse_wait_on_page_writeback(inode, page->index);
cca24370 2498 return VM_FAULT_LOCKED;
3be5a52b
MS
2499}
2500
f0f37e2f 2501static const struct vm_operations_struct fuse_file_vm_ops = {
3be5a52b
MS
2502 .close = fuse_vma_close,
2503 .fault = filemap_fault,
f1820361 2504 .map_pages = filemap_map_pages,
3be5a52b
MS
2505 .page_mkwrite = fuse_page_mkwrite,
2506};
2507
2508static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2509{
55752a3a 2510 struct fuse_file *ff = file->private_data;
e78662e8 2511 struct fuse_conn *fc = ff->fm->fc;
55752a3a 2512
2a9a609a
SH
2513 /* DAX mmap is superior to direct_io mmap */
2514 if (FUSE_IS_DAX(file_inode(file)))
2515 return fuse_dax_mmap(file, vma);
2516
55752a3a 2517 if (ff->open_flags & FOPEN_DIRECT_IO) {
9511176b
BS
2518 /*
2519 * Can't provide the coherency needed for MAP_SHARED
c55e0a55 2520 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
e78662e8 2521 */
c55e0a55 2522 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
55752a3a
MS
2523 return -ENODEV;
2524
2525 invalidate_inode_pages2(file->f_mapping);
2526
9511176b
BS
2527 if (!(vma->vm_flags & VM_MAYSHARE)) {
2528 /* MAP_PRIVATE */
2529 return generic_file_mmap(file, vma);
2530 }
55752a3a
MS
2531 }
2532
650b22b9
PE
2533 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2534 fuse_link_write_file(file);
2535
3be5a52b
MS
2536 file_accessed(file);
2537 vma->vm_ops = &fuse_file_vm_ops;
b6aeaded
MS
2538 return 0;
2539}
2540
0b6e9ea0
SF
2541static int convert_fuse_file_lock(struct fuse_conn *fc,
2542 const struct fuse_file_lock *ffl,
71421259
MS
2543 struct file_lock *fl)
2544{
2545 switch (ffl->type) {
2546 case F_UNLCK:
2547 break;
2548
2549 case F_RDLCK:
2550 case F_WRLCK:
2551 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2552 ffl->end < ffl->start)
2553 return -EIO;
2554
2555 fl->fl_start = ffl->start;
2556 fl->fl_end = ffl->end;
0b6e9ea0
SF
2557
2558 /*
9d5b86ac
BC
2559 * Convert pid into init's pid namespace. The locks API will
2560 * translate it into the caller's pid namespace.
0b6e9ea0
SF
2561 */
2562 rcu_read_lock();
9d5b86ac 2563 fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
0b6e9ea0 2564 rcu_read_unlock();
71421259
MS
2565 break;
2566
2567 default:
2568 return -EIO;
2569 }
2570 fl->fl_type = ffl->type;
2571 return 0;
2572}
2573
7078187a 2574static void fuse_lk_fill(struct fuse_args *args, struct file *file,
a9ff4f87 2575 const struct file_lock *fl, int opcode, pid_t pid,
7078187a 2576 int flock, struct fuse_lk_in *inarg)
71421259 2577{
6131ffaa 2578 struct inode *inode = file_inode(file);
9c8ef561 2579 struct fuse_conn *fc = get_fuse_conn(inode);
71421259 2580 struct fuse_file *ff = file->private_data;
7078187a
MS
2581
2582 memset(inarg, 0, sizeof(*inarg));
2583 inarg->fh = ff->fh;
2584 inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
2585 inarg->lk.start = fl->fl_start;
2586 inarg->lk.end = fl->fl_end;
2587 inarg->lk.type = fl->fl_type;
2588 inarg->lk.pid = pid;
a9ff4f87 2589 if (flock)
7078187a 2590 inarg->lk_flags |= FUSE_LK_FLOCK;
d5b48543
MS
2591 args->opcode = opcode;
2592 args->nodeid = get_node_id(inode);
2593 args->in_numargs = 1;
2594 args->in_args[0].size = sizeof(*inarg);
2595 args->in_args[0].value = inarg;
71421259
MS
2596}
2597
2598static int fuse_getlk(struct file *file, struct file_lock *fl)
2599{
6131ffaa 2600 struct inode *inode = file_inode(file);
fcee216b 2601 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a
MS
2602 FUSE_ARGS(args);
2603 struct fuse_lk_in inarg;
71421259
MS
2604 struct fuse_lk_out outarg;
2605 int err;
2606
7078187a 2607 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
d5b48543
MS
2608 args.out_numargs = 1;
2609 args.out_args[0].size = sizeof(outarg);
2610 args.out_args[0].value = &outarg;
fcee216b 2611 err = fuse_simple_request(fm, &args);
71421259 2612 if (!err)
fcee216b 2613 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
71421259
MS
2614
2615 return err;
2616}
2617
a9ff4f87 2618static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
71421259 2619{
6131ffaa 2620 struct inode *inode = file_inode(file);
fcee216b 2621 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a
MS
2622 FUSE_ARGS(args);
2623 struct fuse_lk_in inarg;
71421259 2624 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
0b6e9ea0 2625 struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
fcee216b 2626 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
71421259
MS
2627 int err;
2628
8fb47a4f 2629 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
48e90761
MS
2630 /* NLM needs asynchronous locks, which we don't support yet */
2631 return -ENOLCK;
2632 }
2633
71421259 2634 /* Unlock on close is handled by the flush method */
50f2112c 2635 if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
71421259
MS
2636 return 0;
2637
0b6e9ea0 2638 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
fcee216b 2639 err = fuse_simple_request(fm, &args);
71421259 2640
a4d27e75
MS
2641 /* locking is restartable */
2642 if (err == -EINTR)
2643 err = -ERESTARTSYS;
7078187a 2644
71421259
MS
2645 return err;
2646}
2647
2648static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2649{
6131ffaa 2650 struct inode *inode = file_inode(file);
71421259
MS
2651 struct fuse_conn *fc = get_fuse_conn(inode);
2652 int err;
2653
48e90761
MS
2654 if (cmd == F_CANCELLK) {
2655 err = 0;
2656 } else if (cmd == F_GETLK) {
71421259 2657 if (fc->no_lock) {
9d6a8c5c 2658 posix_test_lock(file, fl);
71421259
MS
2659 err = 0;
2660 } else
2661 err = fuse_getlk(file, fl);
2662 } else {
2663 if (fc->no_lock)
48e90761 2664 err = posix_lock_file(file, fl, NULL);
71421259 2665 else
a9ff4f87 2666 err = fuse_setlk(file, fl, 0);
71421259
MS
2667 }
2668 return err;
2669}
2670
a9ff4f87
MS
2671static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2672{
6131ffaa 2673 struct inode *inode = file_inode(file);
a9ff4f87
MS
2674 struct fuse_conn *fc = get_fuse_conn(inode);
2675 int err;
2676
37fb3a30 2677 if (fc->no_flock) {
4f656367 2678 err = locks_lock_file_wait(file, fl);
a9ff4f87 2679 } else {
37fb3a30
MS
2680 struct fuse_file *ff = file->private_data;
2681
a9ff4f87 2682 /* emulate flock with POSIX locks */
37fb3a30 2683 ff->flock = true;
a9ff4f87
MS
2684 err = fuse_setlk(file, fl, 1);
2685 }
2686
2687 return err;
2688}
2689
b2d2272f
MS
2690static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2691{
2692 struct inode *inode = mapping->host;
fcee216b 2693 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a 2694 FUSE_ARGS(args);
b2d2272f
MS
2695 struct fuse_bmap_in inarg;
2696 struct fuse_bmap_out outarg;
2697 int err;
2698
fcee216b 2699 if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
b2d2272f
MS
2700 return 0;
2701
b2d2272f
MS
2702 memset(&inarg, 0, sizeof(inarg));
2703 inarg.block = block;
2704 inarg.blocksize = inode->i_sb->s_blocksize;
d5b48543
MS
2705 args.opcode = FUSE_BMAP;
2706 args.nodeid = get_node_id(inode);
2707 args.in_numargs = 1;
2708 args.in_args[0].size = sizeof(inarg);
2709 args.in_args[0].value = &inarg;
2710 args.out_numargs = 1;
2711 args.out_args[0].size = sizeof(outarg);
2712 args.out_args[0].value = &outarg;
fcee216b 2713 err = fuse_simple_request(fm, &args);
b2d2272f 2714 if (err == -ENOSYS)
fcee216b 2715 fm->fc->no_bmap = 1;
b2d2272f
MS
2716
2717 return err ? 0 : outarg.block;
2718}
2719
0b5da8db
R
2720static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2721{
2722 struct inode *inode = file->f_mapping->host;
fcee216b 2723 struct fuse_mount *fm = get_fuse_mount(inode);
0b5da8db
R
2724 struct fuse_file *ff = file->private_data;
2725 FUSE_ARGS(args);
2726 struct fuse_lseek_in inarg = {
2727 .fh = ff->fh,
2728 .offset = offset,
2729 .whence = whence
2730 };
2731 struct fuse_lseek_out outarg;
2732 int err;
2733
fcee216b 2734 if (fm->fc->no_lseek)
0b5da8db
R
2735 goto fallback;
2736
d5b48543
MS
2737 args.opcode = FUSE_LSEEK;
2738 args.nodeid = ff->nodeid;
2739 args.in_numargs = 1;
2740 args.in_args[0].size = sizeof(inarg);
2741 args.in_args[0].value = &inarg;
2742 args.out_numargs = 1;
2743 args.out_args[0].size = sizeof(outarg);
2744 args.out_args[0].value = &outarg;
fcee216b 2745 err = fuse_simple_request(fm, &args);
0b5da8db
R
2746 if (err) {
2747 if (err == -ENOSYS) {
fcee216b 2748 fm->fc->no_lseek = 1;
0b5da8db
R
2749 goto fallback;
2750 }
2751 return err;
2752 }
2753
2754 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2755
2756fallback:
c6c745b8 2757 err = fuse_update_attributes(inode, file, STATX_SIZE);
0b5da8db
R
2758 if (!err)
2759 return generic_file_llseek(file, offset, whence);
2760 else
2761 return err;
2762}
2763
965c8e59 2764static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
5559b8f4
MS
2765{
2766 loff_t retval;
6131ffaa 2767 struct inode *inode = file_inode(file);
5559b8f4 2768
0b5da8db
R
2769 switch (whence) {
2770 case SEEK_SET:
2771 case SEEK_CUR:
2772 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
965c8e59 2773 retval = generic_file_llseek(file, offset, whence);
0b5da8db
R
2774 break;
2775 case SEEK_END:
5955102c 2776 inode_lock(inode);
c6c745b8 2777 retval = fuse_update_attributes(inode, file, STATX_SIZE);
0b5da8db
R
2778 if (!retval)
2779 retval = generic_file_llseek(file, offset, whence);
5955102c 2780 inode_unlock(inode);
0b5da8db
R
2781 break;
2782 case SEEK_HOLE:
2783 case SEEK_DATA:
5955102c 2784 inode_lock(inode);
0b5da8db 2785 retval = fuse_lseek(file, offset, whence);
5955102c 2786 inode_unlock(inode);
0b5da8db
R
2787 break;
2788 default:
2789 retval = -EINVAL;
2790 }
c07c3d19 2791
5559b8f4
MS
2792 return retval;
2793}
2794
95668a69
TH
2795/*
2796 * All files which have been polled are linked to RB tree
2797 * fuse_conn->polled_files which is indexed by kh. Walk the tree and
2798 * find the matching one.
2799 */
2800static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2801 struct rb_node **parent_out)
2802{
2803 struct rb_node **link = &fc->polled_files.rb_node;
2804 struct rb_node *last = NULL;
2805
2806 while (*link) {
2807 struct fuse_file *ff;
2808
2809 last = *link;
2810 ff = rb_entry(last, struct fuse_file, polled_node);
2811
2812 if (kh < ff->kh)
2813 link = &last->rb_left;
2814 else if (kh > ff->kh)
2815 link = &last->rb_right;
2816 else
2817 return link;
2818 }
2819
2820 if (parent_out)
2821 *parent_out = last;
2822 return link;
2823}
2824
2825/*
2826 * The file is about to be polled. Make sure it's on the polled_files
2827 * RB tree. Note that files once added to the polled_files tree are
2828 * not removed before the file is released. This is because a file
2829 * polled once is likely to be polled again.
2830 */
2831static void fuse_register_polled_file(struct fuse_conn *fc,
2832 struct fuse_file *ff)
2833{
2834 spin_lock(&fc->lock);
2835 if (RB_EMPTY_NODE(&ff->polled_node)) {
3f649ab7 2836 struct rb_node **link, *parent;
95668a69
TH
2837
2838 link = fuse_find_polled_node(fc, ff->kh, &parent);
2839 BUG_ON(*link);
2840 rb_link_node(&ff->polled_node, parent, link);
2841 rb_insert_color(&ff->polled_node, &fc->polled_files);
2842 }
2843 spin_unlock(&fc->lock);
2844}
2845
076ccb76 2846__poll_t fuse_file_poll(struct file *file, poll_table *wait)
95668a69 2847{
95668a69 2848 struct fuse_file *ff = file->private_data;
fcee216b 2849 struct fuse_mount *fm = ff->fm;
95668a69
TH
2850 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2851 struct fuse_poll_out outarg;
7078187a 2852 FUSE_ARGS(args);
95668a69
TH
2853 int err;
2854
fcee216b 2855 if (fm->fc->no_poll)
95668a69
TH
2856 return DEFAULT_POLLMASK;
2857
2858 poll_wait(file, &ff->poll_wait, wait);
c71d227f 2859 inarg.events = mangle_poll(poll_requested_events(wait));
95668a69
TH
2860
2861 /*
2862 * Ask for notification iff there's someone waiting for it.
2863 * The client may ignore the flag and always notify.
2864 */
2865 if (waitqueue_active(&ff->poll_wait)) {
2866 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
fcee216b 2867 fuse_register_polled_file(fm->fc, ff);
95668a69
TH
2868 }
2869
d5b48543
MS
2870 args.opcode = FUSE_POLL;
2871 args.nodeid = ff->nodeid;
2872 args.in_numargs = 1;
2873 args.in_args[0].size = sizeof(inarg);
2874 args.in_args[0].value = &inarg;
2875 args.out_numargs = 1;
2876 args.out_args[0].size = sizeof(outarg);
2877 args.out_args[0].value = &outarg;
fcee216b 2878 err = fuse_simple_request(fm, &args);
95668a69
TH
2879
2880 if (!err)
c71d227f 2881 return demangle_poll(outarg.revents);
95668a69 2882 if (err == -ENOSYS) {
fcee216b 2883 fm->fc->no_poll = 1;
95668a69
TH
2884 return DEFAULT_POLLMASK;
2885 }
a9a08845 2886 return EPOLLERR;
95668a69 2887}
08cbf542 2888EXPORT_SYMBOL_GPL(fuse_file_poll);
95668a69
TH
2889
2890/*
2891 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2892 * wakes up the poll waiters.
2893 */
2894int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2895 struct fuse_notify_poll_wakeup_out *outarg)
2896{
2897 u64 kh = outarg->kh;
2898 struct rb_node **link;
2899
2900 spin_lock(&fc->lock);
2901
2902 link = fuse_find_polled_node(fc, kh, NULL);
2903 if (*link) {
2904 struct fuse_file *ff;
2905
2906 ff = rb_entry(*link, struct fuse_file, polled_node);
2907 wake_up_interruptible_sync(&ff->poll_wait);
2908 }
2909
2910 spin_unlock(&fc->lock);
2911 return 0;
2912}
2913
efb9fa9e
MP
2914static void fuse_do_truncate(struct file *file)
2915{
2916 struct inode *inode = file->f_mapping->host;
2917 struct iattr attr;
2918
2919 attr.ia_valid = ATTR_SIZE;
2920 attr.ia_size = i_size_read(inode);
2921
2922 attr.ia_file = file;
2923 attr.ia_valid |= ATTR_FILE;
2924
62490330 2925 fuse_do_setattr(file_dentry(file), &attr, file);
efb9fa9e
MP
2926}
2927
5da784cc 2928static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
e5c5f05d 2929{
5da784cc 2930 return round_up(off, fc->max_pages << PAGE_SHIFT);
e5c5f05d
MP
2931}
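/*
 * Worked example, assuming 4K pages and the default fc->max_pages of 32:
 * the rounding granule is 32 << 12 = 128K, so fuse_round_up(fc, 5000)
 * returns 131072.  This is only used below to bound how far a short
 * direct read is truncated.
 */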
2932
4273b793 2933static ssize_t
c8b8e32d 2934fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
4273b793 2935{
9d5722b7 2936 DECLARE_COMPLETION_ONSTACK(wait);
4273b793 2937 ssize_t ret = 0;
60b9df7a
MS
2938 struct file *file = iocb->ki_filp;
2939 struct fuse_file *ff = file->private_data;
4273b793 2940 loff_t pos = 0;
bcba24cc
MP
2941 struct inode *inode;
2942 loff_t i_size;
933a3752 2943 size_t count = iov_iter_count(iter), shortened = 0;
c8b8e32d 2944 loff_t offset = iocb->ki_pos;
36cf66ed 2945 struct fuse_io_priv *io;
4273b793 2946
4273b793 2947 pos = offset;
bcba24cc
MP
2948 inode = file->f_mapping->host;
2949 i_size = i_size_read(inode);
4273b793 2950
933a3752 2951 if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
9fe55eea
SW
2952 return 0;
2953
bcba24cc 2954 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
36cf66ed
MP
2955 if (!io)
2956 return -ENOMEM;
bcba24cc 2957 spin_lock_init(&io->lock);
744742d6 2958 kref_init(&io->refcnt);
bcba24cc
MP
2959 io->reqs = 1;
2960 io->bytes = -1;
2961 io->size = 0;
2962 io->offset = offset;
6f673763 2963 io->write = (iov_iter_rw(iter) == WRITE);
bcba24cc 2964 io->err = 0;
bcba24cc
MP
2965 /*
2966 * By default, we want to optimize all I/Os with async request
60b9df7a 2967 * submission to the client filesystem if supported.
bcba24cc 2968 */
69456535 2969 io->async = ff->fm->fc->async_dio;
bcba24cc 2970 io->iocb = iocb;
7879c4e5 2971 io->blocking = is_sync_kiocb(iocb);
bcba24cc 2972
933a3752
AV
2973 /* optimization for short read */
2974 if (io->async && !io->write && offset + count > i_size) {
69456535 2975 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
933a3752
AV
2976 shortened = count - iov_iter_count(iter);
2977 count -= shortened;
2978 }
2979
bcba24cc 2980 /*
7879c4e5
AS
2981 * We cannot asynchronously extend the size of a file.
 2982 * In such a case the aio will behave exactly like sync io.
bcba24cc 2983 */
933a3752 2984 if ((offset + count > i_size) && io->write)
7879c4e5 2985 io->blocking = true;
4273b793 2986
7879c4e5 2987 if (io->async && io->blocking) {
744742d6
SF
2988 /*
2989 * Additional reference to keep io around after
2990 * calling fuse_aio_complete()
2991 */
2992 kref_get(&io->refcnt);
9d5722b7 2993 io->done = &wait;
744742d6 2994 }
9d5722b7 2995
6f673763 2996 if (iov_iter_rw(iter) == WRITE) {
6b775b18 2997 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
fa5eee57 2998 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
812408fb 2999 } else {
d22a943f 3000 ret = __fuse_direct_read(io, iter, &pos);
812408fb 3001 }
933a3752 3002 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
36cf66ed 3003
bcba24cc 3004 if (io->async) {
ebacb812
LC
3005 bool blocking = io->blocking;
3006
bcba24cc
MP
3007 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
3008
3009 /* we have a non-extending, async request, so return */
ebacb812 3010 if (!blocking)
bcba24cc
MP
3011 return -EIOCBQUEUED;
3012
9d5722b7
CH
3013 wait_for_completion(&wait);
3014 ret = fuse_get_res_by_io(io);
bcba24cc
MP
3015 }
3016
744742d6 3017 kref_put(&io->refcnt, fuse_io_release);
9d5722b7 3018
6f673763 3019 if (iov_iter_rw(iter) == WRITE) {
d347739a 3020 fuse_write_update_attr(inode, pos, ret);
15352405 3021 /* For extending writes we already hold exclusive lock */
d347739a 3022 if (ret < 0 && offset + count > i_size)
efb9fa9e
MP
3023 fuse_do_truncate(file);
3024 }
4273b793
AA
3025
3026 return ret;
3027}
3028
26eb3bae
MS
3029static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
3030{
e388164e 3031 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
26eb3bae
MS
3032
3033 if (!err)
3034 fuse_sync_writes(inode);
3035
3036 return err;
3037}
3038
cdadb11c
MS
3039static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3040 loff_t length)
05ba1f08
AP
3041{
3042 struct fuse_file *ff = file->private_data;
1c68271c 3043 struct inode *inode = file_inode(file);
0ab08f57 3044 struct fuse_inode *fi = get_fuse_inode(inode);
fcee216b 3045 struct fuse_mount *fm = ff->fm;
7078187a 3046 FUSE_ARGS(args);
05ba1f08
AP
3047 struct fuse_fallocate_in inarg = {
3048 .fh = ff->fh,
3049 .offset = offset,
3050 .length = length,
3051 .mode = mode
3052 };
3053 int err;
44361e8c
MS
3054 bool block_faults = FUSE_IS_DAX(inode) &&
3055 (!(mode & FALLOC_FL_KEEP_SIZE) ||
3056 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
6ae330ca 3057
6b1bdb56
RJ
3058 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3059 FALLOC_FL_ZERO_RANGE))
4adb8302
MS
3060 return -EOPNOTSUPP;
3061
fcee216b 3062 if (fm->fc->no_fallocate)
519c6040
MS
3063 return -EOPNOTSUPP;
3064
44361e8c
MS
3065 inode_lock(inode);
3066 if (block_faults) {
3067 filemap_invalidate_lock(inode->i_mapping);
3068 err = fuse_dax_break_layouts(inode, 0, 0);
3069 if (err)
3070 goto out;
3071 }
6ae330ca 3072
44361e8c
MS
3073 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
3074 loff_t endbyte = offset + length - 1;
26eb3bae 3075
44361e8c
MS
3076 err = fuse_writeback_range(inode, offset, endbyte);
3077 if (err)
3078 goto out;
3634a632
BF
3079 }
3080
0cbade02
LB
3081 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3082 offset + length > i_size_read(inode)) {
3083 err = inode_newsize_ok(inode, offset + length);
3084 if (err)
35d6fcbb 3085 goto out;
0cbade02
LB
3086 }
3087
4a6f278d
MS
3088 err = file_modified(file);
3089 if (err)
3090 goto out;
3091
0ab08f57
MP
3092 if (!(mode & FALLOC_FL_KEEP_SIZE))
3093 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3094
d5b48543
MS
3095 args.opcode = FUSE_FALLOCATE;
3096 args.nodeid = ff->nodeid;
3097 args.in_numargs = 1;
3098 args.in_args[0].size = sizeof(inarg);
3099 args.in_args[0].value = &inarg;
fcee216b 3100 err = fuse_simple_request(fm, &args);
519c6040 3101 if (err == -ENOSYS) {
fcee216b 3102 fm->fc->no_fallocate = 1;
519c6040
MS
3103 err = -EOPNOTSUPP;
3104 }
bee6c307
BF
3105 if (err)
3106 goto out;
3107
3108 /* we could have extended the file */
b0aa7606 3109 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
20235b43 3110 if (fuse_write_update_attr(inode, offset + length, length))
93d2269d 3111 file_update_time(file);
b0aa7606 3112 }
bee6c307 3113
6b1bdb56 3114 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
bee6c307
BF
3115 truncate_pagecache_range(inode, offset, offset + length - 1);
3116
fa5eee57 3117 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
bee6c307 3118
3634a632 3119out:
0ab08f57
MP
3120 if (!(mode & FALLOC_FL_KEEP_SIZE))
3121 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3122
6ae330ca 3123 if (block_faults)
8bcbbe9c 3124 filemap_invalidate_unlock(inode->i_mapping);
6ae330ca 3125
44361e8c 3126 inode_unlock(inode);
3634a632 3127
5c791fe1
MS
3128 fuse_flush_time_update(inode);
3129
05ba1f08
AP
3130 return err;
3131}
05ba1f08 3132
64bf5ff5
DC
3133static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3134 struct file *file_out, loff_t pos_out,
3135 size_t len, unsigned int flags)
88bc7d50
NV
3136{
3137 struct fuse_file *ff_in = file_in->private_data;
3138 struct fuse_file *ff_out = file_out->private_data;
a2bc9236 3139 struct inode *inode_in = file_inode(file_in);
88bc7d50
NV
3140 struct inode *inode_out = file_inode(file_out);
3141 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
fcee216b
MR
3142 struct fuse_mount *fm = ff_in->fm;
3143 struct fuse_conn *fc = fm->fc;
88bc7d50
NV
3144 FUSE_ARGS(args);
3145 struct fuse_copy_file_range_in inarg = {
3146 .fh_in = ff_in->fh,
3147 .off_in = pos_in,
3148 .nodeid_out = ff_out->nodeid,
3149 .fh_out = ff_out->fh,
3150 .off_out = pos_out,
3151 .len = len,
3152 .flags = flags
3153 };
3154 struct fuse_write_out outarg;
3155 ssize_t err;
3156 /* mark unstable when write-back is not used, and file_out gets
3157 * extended */
3158 bool is_unstable = (!fc->writeback_cache) &&
3159 ((pos_out + len) > inode_out->i_size);
3160
3161 if (fc->no_copy_file_range)
3162 return -EOPNOTSUPP;
3163
5dae222a
AG
3164 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
3165 return -EXDEV;
3166
2c4656df
MS
3167 inode_lock(inode_in);
3168 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
3169 inode_unlock(inode_in);
3170 if (err)
3171 return err;
a2bc9236 3172
88bc7d50
NV
3173 inode_lock(inode_out);
3174
fe0da9c0
AG
3175 err = file_modified(file_out);
3176 if (err)
3177 goto out;
3178
9b46418c
MS
3179 /*
3180 * Write out dirty pages in the destination file before sending the COPY
3181 * request to userspace. After the request is completed, truncate off
3182 * pages (including partial ones) from the cache that have been copied,
3183 * since these contain stale data at that point.
3184 *
3185 * This should be mostly correct, but if the COPY writes to partial
3186 * pages (at the start or end) and the parts not covered by the COPY are
3187 * written through a memory map after calling fuse_writeback_range(),
3188 * then these partial page modifications will be lost on truncation.
3189 *
3190 * It is unlikely that someone would rely on such mixed style
 3191 * modifications. Yet this does give fewer guarantees than if the
3192 * copying was performed with write(2).
3193 *
8bcbbe9c 3194 * To fix this a mapping->invalidate_lock could be used to prevent new
9b46418c
MS
3195 * faults while the copy is ongoing.
3196 */
2c4656df
MS
3197 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3198 if (err)
3199 goto out;
88bc7d50
NV
3200
3201 if (is_unstable)
3202 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3203
d5b48543
MS
3204 args.opcode = FUSE_COPY_FILE_RANGE;
3205 args.nodeid = ff_in->nodeid;
3206 args.in_numargs = 1;
3207 args.in_args[0].size = sizeof(inarg);
3208 args.in_args[0].value = &inarg;
3209 args.out_numargs = 1;
3210 args.out_args[0].size = sizeof(outarg);
3211 args.out_args[0].value = &outarg;
fcee216b 3212 err = fuse_simple_request(fm, &args);
88bc7d50
NV
3213 if (err == -ENOSYS) {
3214 fc->no_copy_file_range = 1;
3215 err = -EOPNOTSUPP;
3216 }
3217 if (err)
3218 goto out;
3219
9b46418c
MS
3220 truncate_inode_pages_range(inode_out->i_mapping,
3221 ALIGN_DOWN(pos_out, PAGE_SIZE),
3222 ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
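	/*
	 * Rough example of the range truncated above, assuming 4K pages:
	 * with pos_out == 5000 and outarg.size == 3000 the copy covers
	 * bytes 5000..7999, so page index 1 (4096..8191) is dropped in
	 * full, including the 4096..4999 part the copy did not touch,
	 * while page 0 (0..4095) stays in the cache.
	 */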
3223
20235b43
MS
3224 file_update_time(file_out);
3225 fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
88bc7d50
NV
3226
3227 err = outarg.size;
3228out:
3229 if (is_unstable)
3230 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3231
3232 inode_unlock(inode_out);
fe0da9c0 3233 file_accessed(file_in);
88bc7d50 3234
5c791fe1
MS
3235 fuse_flush_time_update(inode_out);
3236
88bc7d50
NV
3237 return err;
3238}
3239
64bf5ff5
DC
3240static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3241 struct file *dst_file, loff_t dst_off,
3242 size_t len, unsigned int flags)
3243{
3244 ssize_t ret;
3245
3246 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3247 len, flags);
3248
5dae222a 3249 if (ret == -EOPNOTSUPP || ret == -EXDEV)
705bcfcb
AG
3250 ret = splice_copy_file_range(src_file, src_off, dst_file,
3251 dst_off, len);
64bf5ff5
DC
3252 return ret;
3253}
3254
4b6f5d20 3255static const struct file_operations fuse_file_operations = {
5559b8f4 3256 .llseek = fuse_file_llseek,
37c20f16 3257 .read_iter = fuse_file_read_iter,
84c3d55c 3258 .write_iter = fuse_file_write_iter,
b6aeaded
MS
3259 .mmap = fuse_file_mmap,
3260 .open = fuse_open,
3261 .flush = fuse_flush,
3262 .release = fuse_release,
3263 .fsync = fuse_fsync,
71421259 3264 .lock = fuse_file_lock,
2a9a609a 3265 .get_unmapped_area = thp_get_unmapped_area,
a9ff4f87 3266 .flock = fuse_file_flock,
2cb1e089 3267 .splice_read = filemap_splice_read,
3c3db095 3268 .splice_write = iter_file_splice_write,
59efec7b
TH
3269 .unlocked_ioctl = fuse_file_ioctl,
3270 .compat_ioctl = fuse_file_compat_ioctl,
95668a69 3271 .poll = fuse_file_poll,
05ba1f08 3272 .fallocate = fuse_file_fallocate,
d4136d60 3273 .copy_file_range = fuse_copy_file_range,
413ef8cb
MS
3274};
3275
f5e54d6e 3276static const struct address_space_operations fuse_file_aops = {
5efd00e4 3277 .read_folio = fuse_read_folio,
76a0294e 3278 .readahead = fuse_readahead,
3be5a52b 3279 .writepage = fuse_writepage,
26d614df 3280 .writepages = fuse_writepages,
2bf06b8e 3281 .launder_folio = fuse_launder_folio,
187c82cb 3282 .dirty_folio = filemap_dirty_folio,
b2d2272f 3283 .bmap = fuse_bmap,
4273b793 3284 .direct_IO = fuse_direct_IO,
6b12c1b3
PE
3285 .write_begin = fuse_write_begin,
3286 .write_end = fuse_write_end,
b6aeaded
MS
3287};
3288
93a497b9 3289void fuse_init_file_inode(struct inode *inode, unsigned int flags)
b6aeaded 3290{
ab2257e9
MS
3291 struct fuse_inode *fi = get_fuse_inode(inode);
3292
45323fb7
MS
3293 inode->i_fop = &fuse_file_operations;
3294 inode->i_data.a_ops = &fuse_file_aops;
ab2257e9
MS
3295
3296 INIT_LIST_HEAD(&fi->write_files);
3297 INIT_LIST_HEAD(&fi->queued_writes);
3298 fi->writectr = 0;
3299 init_waitqueue_head(&fi->page_waitq);
6b2fb799 3300 fi->writepages = RB_ROOT;
c2d0ad00
VG
3301
3302 if (IS_ENABLED(CONFIG_FUSE_DAX))
93a497b9 3303 fuse_dax_inode_init(inode, flags);
b6aeaded 3304}