fs/iomap.c
1/*
2 * Copyright (C) 2010 Red Hat, Inc.
3 * Copyright (c) 2016 Christoph Hellwig.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/compiler.h>
16#include <linux/fs.h>
17#include <linux/iomap.h>
18#include <linux/uaccess.h>
19#include <linux/gfp.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/pagemap.h>
23#include <linux/file.h>
24#include <linux/uio.h>
25#include <linux/backing-dev.h>
26#include <linux/buffer_head.h>
27#include <linux/task_io_accounting_ops.h>
28#include <linux/dax.h>
29#include <linux/sched/signal.h>
30
31#include "internal.h"
32
33/*
34 * Execute an iomap write on a segment of the mapping that spans a
35 * contiguous range of pages that have identical block mapping state.
36 *
37 * This avoids the need to map pages individually, do individual allocations
38 * for each page and most importantly avoid the need for filesystem specific
39 * locking per page. Instead, all the operations are amortised over the entire
40 * range of pages. It is assumed that the filesystems will lock whatever
41 * resources they require in the iomap_begin call, and release them in the
42 * iomap_end call.
43 */
44loff_t
45iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
46 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
47{
48 struct iomap iomap = { 0 };
49 loff_t written = 0, ret;
50
51 /*
52 * Need to map a range from start position for length bytes. This can
53 * span multiple pages - it is only guaranteed to return a range of a
54 * single type of pages (e.g. all into a hole, all mapped or all
55 * unwritten). Failure at this point has nothing to undo.
56 *
57 * If allocation is required for this range, reserve the space now so
58 * that the allocation is guaranteed to succeed later on. Once we copy
59 * the data into the page cache pages, we cannot fail, otherwise we
60 * expose transient stale data. If the reserve fails, we can safely
61 * back out at this point as there is nothing to undo.
62 */
63 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
64 if (ret)
65 return ret;
66 if (WARN_ON(iomap.offset > pos))
67 return -EIO;
68 if (WARN_ON(iomap.length == 0))
69 return -EIO;
70
71 /*
72 * Cut down the length to the one actually provided by the filesystem,
73 * as it might not be able to give us the whole size that we requested.
74 */
75 if (iomap.offset + iomap.length < pos + length)
76 length = iomap.offset + iomap.length - pos;
77
78 /*
79 * Now that we have guaranteed that the space allocation will succeed,
80 * we can do the copy-in page by page without having to worry about
81 * failures exposing transient data.
82 */
83 written = actor(inode, pos, length, data, &iomap);
84
85 /*
86 * Now the data has been copied, commit the range we've copied. This
87 * should not fail unless the filesystem has had a fatal error.
88 */
89 if (ops->iomap_end) {
90 ret = ops->iomap_end(inode, pos, length,
91 written > 0 ? written : 0,
92 flags, &iomap);
93 }
94
95 return written ? written : ret;
96}
97
98static void
99iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
100{
101 loff_t i_size = i_size_read(inode);
102
103 /*
104 * Only truncate newly allocated pages beyond EOF, even if the
105 * write started inside the existing inode size.
106 */
107 if (pos + len > i_size)
108 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
109}
110
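/*
 * Lock (or create) the pagecache page covering @pos and prepare @len bytes
 * for a copy-in, using the block mapping the filesystem returned in @iomap.
 * On error the page is released and pagecache beyond EOF is trimmed again.
 */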
111static int
112iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
113 struct page **pagep, struct iomap *iomap)
114{
115 pgoff_t index = pos >> PAGE_SHIFT;
116 struct page *page;
117 int status = 0;
118
119 BUG_ON(pos + len > iomap->offset + iomap->length);
120
121 if (fatal_signal_pending(current))
122 return -EINTR;
123
124 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
125 if (!page)
126 return -ENOMEM;
127
128 status = __block_write_begin_int(page, pos, len, NULL, iomap);
129 if (unlikely(status)) {
130 unlock_page(page);
131 put_page(page);
132 page = NULL;
133
134 iomap_write_failed(inode, pos, len);
135 }
136
137 *pagep = page;
138 return status;
139}
140
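/*
 * Finish the copy into a page: run the buffer_head write_end path, and if
 * the copy was short trim any pagecache we instantiated beyond EOF.
 */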
141static int
142iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
143 unsigned copied, struct page *page)
144{
145 int ret;
146
147 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
148 copied, page, NULL);
149 if (ret < len)
150 iomap_write_failed(inode, pos, len);
151 return ret;
152}
153
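/*
 * Copy data from the iov_iter into the pagecache for one extent returned by
 * ->iomap_begin. This mirrors generic_perform_write(): fault the user pages
 * in first, copy with an atomic usercopy, and retry with a single-segment
 * copy if nothing could be copied, to avoid livelocking.
 */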
154static loff_t
155iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
156 struct iomap *iomap)
157{
158 struct iov_iter *i = data;
159 long status = 0;
160 ssize_t written = 0;
161 unsigned int flags = AOP_FLAG_NOFS;
162
163 do {
164 struct page *page;
165 unsigned long offset; /* Offset into pagecache page */
166 unsigned long bytes; /* Bytes to write to page */
167 size_t copied; /* Bytes copied from user */
168
169 offset = (pos & (PAGE_SIZE - 1));
170 bytes = min_t(unsigned long, PAGE_SIZE - offset,
171 iov_iter_count(i));
172again:
173 if (bytes > length)
174 bytes = length;
175
176 /*
177 * Bring in the user page that we will copy from _first_.
178 * Otherwise there's a nasty deadlock on copying from the
179 * same page as we're writing to, without it being marked
180 * up-to-date.
181 *
182 * Not only is this an optimisation, but it is also required
183 * to check that the address is actually valid, when atomic
184 * usercopies are used, below.
185 */
186 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
187 status = -EFAULT;
188 break;
189 }
190
191 status = iomap_write_begin(inode, pos, bytes, flags, &page,
192 iomap);
193 if (unlikely(status))
194 break;
195
196 if (mapping_writably_mapped(inode->i_mapping))
197 flush_dcache_page(page);
198
199 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
200
201 flush_dcache_page(page);
202
203 status = iomap_write_end(inode, pos, bytes, copied, page);
204 if (unlikely(status < 0))
205 break;
206 copied = status;
207
208 cond_resched();
209
210 iov_iter_advance(i, copied);
211 if (unlikely(copied == 0)) {
212 /*
213 * If we were unable to copy any data at all, we must
214 * fall back to a single segment length write.
215 *
216 * If we didn't fall back here, we could livelock
217 * because not all segments in the iov can be copied at
218 * once without a pagefault.
219 */
220 bytes = min_t(unsigned long, PAGE_SIZE - offset,
221 iov_iter_single_seg_count(i));
222 goto again;
223 }
224 pos += copied;
225 written += copied;
226 length -= copied;
227
228 balance_dirty_pages_ratelimited(inode->i_mapping);
229 } while (iov_iter_count(i) && length);
230
231 return written ? written : status;
232}
233
234ssize_t
235iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
236 const struct iomap_ops *ops)
237{
238 struct inode *inode = iocb->ki_filp->f_mapping->host;
239 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
240
241 while (iov_iter_count(iter)) {
242 ret = iomap_apply(inode, pos, iov_iter_count(iter),
243 IOMAP_WRITE, ops, iter, iomap_write_actor);
244 if (ret <= 0)
245 break;
246 pos += ret;
247 written += ret;
248 }
249
250 return written ? written : ret;
251}
252EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
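/*
 * Example usage of iomap_file_buffered_write() (illustrative only): a
 * filesystem's ->write_iter implementation typically calls it with the
 * inode lock held and its own iomap_ops, e.g.
 *
 *	written = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 *
 * where the ops structure supplies the iomap_begin/iomap_end callbacks.
 */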
253
254static struct page *
255__iomap_read_page(struct inode *inode, loff_t offset)
256{
257 struct address_space *mapping = inode->i_mapping;
258 struct page *page;
259
260 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
261 if (IS_ERR(page))
262 return page;
263 if (!PageUptodate(page)) {
264 put_page(page);
265 return ERR_PTR(-EIO);
266 }
267 return page;
268}
269
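/*
 * Re-dirty an already written range: read each page so it is uptodate, then
 * run it through write_begin/write_end unchanged so it gets marked dirty.
 * Used by iomap_file_dirty() below.
 */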
270static loff_t
271iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
272 struct iomap *iomap)
273{
274 long status = 0;
275 ssize_t written = 0;
276
277 do {
278 struct page *page, *rpage;
279 unsigned long offset; /* Offset into pagecache page */
280 unsigned long bytes; /* Bytes to write to page */
281
282 offset = (pos & (PAGE_SIZE - 1));
283 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
284
285 rpage = __iomap_read_page(inode, pos);
286 if (IS_ERR(rpage))
287 return PTR_ERR(rpage);
288
289 status = iomap_write_begin(inode, pos, bytes,
290 AOP_FLAG_NOFS, &page, iomap);
291 put_page(rpage);
292 if (unlikely(status))
293 return status;
294
295 WARN_ON_ONCE(!PageUptodate(page));
296
297 status = iomap_write_end(inode, pos, bytes, bytes, page);
298 if (unlikely(status <= 0)) {
299 if (WARN_ON_ONCE(status == 0))
300 return -EIO;
301 return status;
302 }
303
304 cond_resched();
305
306 pos += status;
307 written += status;
308 length -= status;
309
310 balance_dirty_pages_ratelimited(inode->i_mapping);
311 } while (length);
312
313 return written;
314}
315
316int
317iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
318 const struct iomap_ops *ops)
319{
320 loff_t ret;
321
322 while (len) {
323 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
324 iomap_dirty_actor);
325 if (ret <= 0)
326 return ret;
327 pos += ret;
328 len -= ret;
329 }
330
331 return 0;
332}
333EXPORT_SYMBOL_GPL(iomap_file_dirty);
334
335static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
336 unsigned bytes, struct iomap *iomap)
337{
338 struct page *page;
339 int status;
340
341 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
342 iomap);
343 if (status)
344 return status;
345
346 zero_user(page, offset, bytes);
347 mark_page_accessed(page);
348
349 return iomap_write_end(inode, pos, bytes, bytes, page);
350}
351
352static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
353 struct iomap *iomap)
354{
355 sector_t sector = (iomap->addr +
356 (pos & PAGE_MASK) - iomap->offset) >> 9;
357
358 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
359 offset, bytes);
360}
361
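/*
 * Zero a sub-range of one extent. Holes and unwritten extents already read
 * back as zeroes, so only written extents need pages zeroed (or a DAX block
 * zeroing for DAX inodes).
 */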
362static loff_t
363iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
364 void *data, struct iomap *iomap)
365{
366 bool *did_zero = data;
367 loff_t written = 0;
368 int status;
369
370 /* already zeroed? we're done. */
371 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
372 return count;
373
374 do {
375 unsigned offset, bytes;
376
377 offset = pos & (PAGE_SIZE - 1); /* Within page */
378 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
379
380 if (IS_DAX(inode))
381 status = iomap_dax_zero(pos, offset, bytes, iomap);
382 else
383 status = iomap_zero(inode, pos, offset, bytes, iomap);
384 if (status < 0)
385 return status;
386
387 pos += bytes;
388 count -= bytes;
389 written += bytes;
390 if (did_zero)
391 *did_zero = true;
392 } while (count > 0);
393
394 return written;
395}
396
397int
398iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
399 const struct iomap_ops *ops)
400{
401 loff_t ret;
402
403 while (len > 0) {
404 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
405 ops, did_zero, iomap_zero_range_actor);
406 if (ret <= 0)
407 return ret;
408
409 pos += ret;
410 len -= ret;
411 }
412
413 return 0;
414}
415EXPORT_SYMBOL_GPL(iomap_zero_range);
416
417int
418iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
419 const struct iomap_ops *ops)
420{
421 unsigned int blocksize = i_blocksize(inode);
422 unsigned int off = pos & (blocksize - 1);
423
424 /* Block boundary? Nothing to do */
425 if (!off)
426 return 0;
427 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
428}
429EXPORT_SYMBOL_GPL(iomap_truncate_page);
430
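/*
 * Map (and if necessary allocate) blocks under the locked page that took a
 * write fault, then commit them so the caller can safely dirty the page.
 */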
431static loff_t
432iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
433 void *data, struct iomap *iomap)
434{
435 struct page *page = data;
436 int ret;
437
438 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
439 if (ret)
440 return ret;
441
442 block_commit_write(page, 0, length);
443 return length;
444}
445
446int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
447{
448 struct page *page = vmf->page;
449 struct inode *inode = file_inode(vmf->vma->vm_file);
450 unsigned long length;
451 loff_t offset, size;
452 ssize_t ret;
453
454 lock_page(page);
455 size = i_size_read(inode);
456 if ((page->mapping != inode->i_mapping) ||
457 (page_offset(page) > size)) {
458 /* We overload EFAULT to mean page got truncated */
459 ret = -EFAULT;
460 goto out_unlock;
461 }
462
463 /* page is wholly or partially inside EOF */
464 if (((page->index + 1) << PAGE_SHIFT) > size)
465 length = size & ~PAGE_MASK;
466 else
467 length = PAGE_SIZE;
468
469 offset = page_offset(page);
470 while (length > 0) {
471 ret = iomap_apply(inode, offset, length,
472 IOMAP_WRITE | IOMAP_FAULT, ops, page,
473 iomap_page_mkwrite_actor);
474 if (unlikely(ret <= 0))
475 goto out_unlock;
476 offset += ret;
477 length -= ret;
478 }
479
480 set_page_dirty(page);
481 wait_for_stable_page(page);
482 return VM_FAULT_LOCKED;
483out_unlock:
484 unlock_page(page);
485 return block_page_mkwrite_return(ret);
486}
487EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
488
489struct fiemap_ctx {
490 struct fiemap_extent_info *fi;
491 struct iomap prev;
492};
493
494static int iomap_to_fiemap(struct fiemap_extent_info *fi,
495 struct iomap *iomap, u32 flags)
496{
497 switch (iomap->type) {
498 case IOMAP_HOLE:
499 /* skip holes */
500 return 0;
501 case IOMAP_DELALLOC:
502 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
503 break;
504 case IOMAP_UNWRITTEN:
505 flags |= FIEMAP_EXTENT_UNWRITTEN;
506 break;
507 case IOMAP_MAPPED:
508 break;
509 }
510
511 if (iomap->flags & IOMAP_F_MERGED)
512 flags |= FIEMAP_EXTENT_MERGED;
513 if (iomap->flags & IOMAP_F_SHARED)
514 flags |= FIEMAP_EXTENT_SHARED;
515 if (iomap->flags & IOMAP_F_DATA_INLINE)
516 flags |= FIEMAP_EXTENT_DATA_INLINE;
517
518 return fiemap_fill_next_extent(fi, iomap->offset,
519 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
520 iomap->length, flags);
521}
522
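/*
 * Report extents to fiemap. We always emit the previously seen mapping and
 * stash the current one in ctx->prev, so that the final extent can be
 * flagged FIEMAP_EXTENT_LAST by iomap_fiemap() once the walk is done.
 */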
523static loff_t
524iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
525 struct iomap *iomap)
526{
527 struct fiemap_ctx *ctx = data;
528 loff_t ret = length;
529
530 if (iomap->type == IOMAP_HOLE)
531 return length;
532
533 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
534 ctx->prev = *iomap;
535 switch (ret) {
536 case 0: /* success */
537 return length;
538 case 1: /* extent array full */
539 return 0;
540 default:
541 return ret;
542 }
543}
544
545int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
546 loff_t start, loff_t len, const struct iomap_ops *ops)
547{
548 struct fiemap_ctx ctx;
549 loff_t ret;
550
551 memset(&ctx, 0, sizeof(ctx));
552 ctx.fi = fi;
553 ctx.prev.type = IOMAP_HOLE;
554
555 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
556 if (ret)
557 return ret;
558
559 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
560 ret = filemap_write_and_wait(inode->i_mapping);
561 if (ret)
562 return ret;
563 }
564
565 while (len > 0) {
566 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
567 iomap_fiemap_actor);
568 /* inode with no (attribute) mapping will give ENOENT */
569 if (ret == -ENOENT)
570 break;
571 if (ret < 0)
572 return ret;
573 if (ret == 0)
574 break;
575
576 start += ret;
577 len -= ret;
578 }
579
580 if (ctx.prev.type != IOMAP_HOLE) {
581 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
582 if (ret < 0)
583 return ret;
584 }
585
586 return 0;
587}
588EXPORT_SYMBOL_GPL(iomap_fiemap);
589
590static loff_t
591iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
592 void *data, struct iomap *iomap)
593{
594 switch (iomap->type) {
595 case IOMAP_UNWRITTEN:
596 offset = page_cache_seek_hole_data(inode, offset, length,
597 SEEK_HOLE);
598 if (offset < 0)
599 return length;
600 /* fall through */
601 case IOMAP_HOLE:
602 *(loff_t *)data = offset;
603 return 0;
604 default:
605 return length;
606 }
607}
608
609loff_t
610iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
611{
612 loff_t size = i_size_read(inode);
613 loff_t length = size - offset;
614 loff_t ret;
615
616 /* Nothing to be found before or beyond the end of the file. */
617 if (offset < 0 || offset >= size)
618 return -ENXIO;
619
620 while (length > 0) {
621 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
622 &offset, iomap_seek_hole_actor);
623 if (ret < 0)
624 return ret;
625 if (ret == 0)
626 break;
627
628 offset += ret;
629 length -= ret;
630 }
631
632 return offset;
633}
634EXPORT_SYMBOL_GPL(iomap_seek_hole);
635
636static loff_t
637iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
638 void *data, struct iomap *iomap)
639{
640 switch (iomap->type) {
641 case IOMAP_HOLE:
642 return length;
643 case IOMAP_UNWRITTEN:
644 offset = page_cache_seek_hole_data(inode, offset, length,
645 SEEK_DATA);
646 if (offset < 0)
647 return length;
648 /*FALLTHRU*/
649 default:
650 *(loff_t *)data = offset;
651 return 0;
652 }
653}
654
655loff_t
656iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
657{
658 loff_t size = i_size_read(inode);
659 loff_t length = size - offset;
660 loff_t ret;
661
662 /* Nothing to be found before or beyond the end of the file. */
663 if (offset < 0 || offset >= size)
664 return -ENXIO;
665
666 while (length > 0) {
667 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
668 &offset, iomap_seek_data_actor);
669 if (ret < 0)
670 return ret;
671 if (ret == 0)
672 break;
673
674 offset += ret;
675 length -= ret;
676 }
677
678 if (length <= 0)
679 return -ENXIO;
680 return offset;
681}
682EXPORT_SYMBOL_GPL(iomap_seek_data);
683
684/*
685 * Private flags for iomap_dio, must not overlap with the public ones in
686 * iomap.h:
687 */
688#define IOMAP_DIO_WRITE_FUA (1 << 28)
689#define IOMAP_DIO_NEED_SYNC (1 << 29)
690#define IOMAP_DIO_WRITE (1 << 30)
691#define IOMAP_DIO_DIRTY (1 << 31)
692
693struct iomap_dio {
694 struct kiocb *iocb;
695 iomap_dio_end_io_t *end_io;
696 loff_t i_size;
697 loff_t size;
698 atomic_t ref;
699 unsigned flags;
700 int error;
701
702 union {
703 /* used during submission and for synchronous completion: */
704 struct {
705 struct iov_iter *iter;
706 struct task_struct *waiter;
707 struct request_queue *last_queue;
708 blk_qc_t cookie;
709 } submit;
710
711 /* used for aio completion: */
712 struct {
713 struct work_struct work;
714 } aio;
715 };
716};
717
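/*
 * Final completion of a direct I/O: call the filesystem's ->end_io handler,
 * trim short reads at EOF, advance ki_pos, invalidate pagecache that raced
 * with a write, and issue generic_write_sync() if a sync write still needs
 * a cache flush (i.e. it was not done entirely with FUA writes).
 */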
718static ssize_t iomap_dio_complete(struct iomap_dio *dio)
719{
720 struct kiocb *iocb = dio->iocb;
721 struct inode *inode = file_inode(iocb->ki_filp);
722 loff_t offset = iocb->ki_pos;
723 ssize_t ret;
724
725 if (dio->end_io) {
726 ret = dio->end_io(iocb,
727 dio->error ? dio->error : dio->size,
728 dio->flags);
729 } else {
730 ret = dio->error;
731 }
732
733 if (likely(!ret)) {
734 ret = dio->size;
735 /* check for short read */
736 if (offset + ret > dio->i_size &&
737 !(dio->flags & IOMAP_DIO_WRITE))
738 ret = dio->i_size - offset;
739 iocb->ki_pos += ret;
740 }
741
742 /*
743 * Try again to invalidate clean pages which might have been cached by
744 * non-direct readahead, or faulted in by get_user_pages() if the source
745 * of the write was an mmap'ed region of the file we're writing. Either
746 * one is a pretty crazy thing to do, so we don't support it 100%. If
747 * this invalidation fails, tough, the write still worked...
748 *
749 * And this page cache invalidation has to be after dio->end_io(), as
750 * some filesystems convert unwritten extents to real allocations in
751 * end_io() when necessary, otherwise a racing buffer read would cache
752 * zeros from unwritten extents.
753 */
754 if (!dio->error &&
755 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
756 int err;
757 err = invalidate_inode_pages2_range(inode->i_mapping,
758 offset >> PAGE_SHIFT,
759 (offset + dio->size - 1) >> PAGE_SHIFT);
760 if (err)
761 dio_warn_stale_pagecache(iocb->ki_filp);
762 }
763
764 /*
765 * If this is a DSYNC write, make sure we push it to stable storage now
766 * that we've written data.
767 */
768 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
769 ret = generic_write_sync(iocb, ret);
770
771 inode_dio_end(file_inode(iocb->ki_filp));
772 kfree(dio);
773
774 return ret;
775}
776
777static void iomap_dio_complete_work(struct work_struct *work)
778{
779 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
780 struct kiocb *iocb = dio->iocb;
781
782 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
783}
784
785/*
786 * Set an error in the dio if none is set yet. We have to use cmpxchg
787 * as the submission context and the completion context(s) can race to
788 * update the error.
789 */
790static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
791{
792 cmpxchg(&dio->error, 0, ret);
793}
794
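/*
 * Per-bio completion. Record the first error, and when the last reference
 * to the dio is dropped either wake a synchronous waiter, punt AIO write
 * completion to the superblock's dio workqueue, or complete AIO reads
 * inline. The bio's pages are released (and dirtied first for user-mapped
 * reads via bio_check_pages_dirty()).
 */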
795static void iomap_dio_bio_end_io(struct bio *bio)
796{
797 struct iomap_dio *dio = bio->bi_private;
798 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
799
800 if (bio->bi_status)
801 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
802
803 if (atomic_dec_and_test(&dio->ref)) {
804 if (is_sync_kiocb(dio->iocb)) {
805 struct task_struct *waiter = dio->submit.waiter;
806
807 WRITE_ONCE(dio->submit.waiter, NULL);
808 wake_up_process(waiter);
809 } else if (dio->flags & IOMAP_DIO_WRITE) {
810 struct inode *inode = file_inode(dio->iocb->ki_filp);
811
812 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
813 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
814 } else {
815 iomap_dio_complete_work(&dio->aio.work);
816 }
817 }
818
819 if (should_dirty) {
820 bio_check_pages_dirty(bio);
821 } else {
822 struct bio_vec *bvec;
823 int i;
824
825 bio_for_each_segment_all(bvec, bio, i)
826 put_page(bvec->bv_page);
827 bio_put(bio);
828 }
829}
830
831static blk_qc_t
832iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
833 unsigned len)
834{
835 struct page *page = ZERO_PAGE(0);
836 struct bio *bio;
837
838 bio = bio_alloc(GFP_KERNEL, 1);
839 bio_set_dev(bio, iomap->bdev);
840 bio->bi_iter.bi_sector =
841 (iomap->addr + pos - iomap->offset) >> 9;
842 bio->bi_private = dio;
843 bio->bi_end_io = iomap_dio_bio_end_io;
844
845 get_page(page);
846 if (bio_add_page(bio, page, len, 0) != len)
847 BUG();
848 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
849
850 atomic_inc(&dio->ref);
851 return submit_bio(bio);
852}
853
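/*
 * Build and submit bios for one extent of a direct I/O. Reads from holes
 * and unwritten extents are satisfied by zeroing the iter; writes into
 * newly allocated blocks have the leading and trailing sub-block ranges
 * zeroed; FUA is used when a datasync write needs no further metadata sync.
 */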
854static loff_t
855iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
856 void *data, struct iomap *iomap)
857{
858 struct iomap_dio *dio = data;
859 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
860 unsigned int fs_block_size = i_blocksize(inode), pad;
861 unsigned int align = iov_iter_alignment(dio->submit.iter);
862 struct iov_iter iter;
863 struct bio *bio;
864 bool need_zeroout = false;
865 bool use_fua = false;
866 int nr_pages, ret;
867 size_t copied = 0;
868
869 if ((pos | length | align) & ((1 << blkbits) - 1))
870 return -EINVAL;
871
872 switch (iomap->type) {
873 case IOMAP_HOLE:
874 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
875 return -EIO;
876 /*FALLTHRU*/
877 case IOMAP_UNWRITTEN:
878 if (!(dio->flags & IOMAP_DIO_WRITE)) {
879 length = iov_iter_zero(length, dio->submit.iter);
880 dio->size += length;
881 return length;
882 }
883 dio->flags |= IOMAP_DIO_UNWRITTEN;
884 need_zeroout = true;
885 break;
886 case IOMAP_MAPPED:
887 if (iomap->flags & IOMAP_F_SHARED)
888 dio->flags |= IOMAP_DIO_COW;
889 if (iomap->flags & IOMAP_F_NEW) {
890 need_zeroout = true;
891 } else {
892 /*
893 * Use a FUA write if we need datasync semantics, this
894 * is a pure data IO that doesn't require any metadata
895 * updates and the underlying device supports FUA. This
896 * allows us to avoid cache flushes on IO completion.
897 */
898 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
899 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
900 blk_queue_fua(bdev_get_queue(iomap->bdev)))
901 use_fua = true;
902 }
903 break;
904 default:
905 WARN_ON_ONCE(1);
906 return -EIO;
907 }
908
909 /*
910 * Operate on a partial iter trimmed to the extent we were called for.
911 * We'll update the iter in the dio once we're done with this extent.
912 */
913 iter = *dio->submit.iter;
914 iov_iter_truncate(&iter, length);
915
916 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
917 if (nr_pages <= 0)
918 return nr_pages;
919
920 if (need_zeroout) {
921 /* zero out from the start of the block to the write offset */
922 pad = pos & (fs_block_size - 1);
923 if (pad)
924 iomap_dio_zero(dio, iomap, pos - pad, pad);
925 }
926
927 do {
928 size_t n;
929 if (dio->error) {
930 iov_iter_revert(dio->submit.iter, copied);
931 return 0;
932 }
933
934 bio = bio_alloc(GFP_KERNEL, nr_pages);
935 bio_set_dev(bio, iomap->bdev);
936 bio->bi_iter.bi_sector =
937 (iomap->addr + pos - iomap->offset) >> 9;
938 bio->bi_write_hint = dio->iocb->ki_hint;
939 bio->bi_private = dio;
940 bio->bi_end_io = iomap_dio_bio_end_io;
941
942 ret = bio_iov_iter_get_pages(bio, &iter);
943 if (unlikely(ret)) {
944 bio_put(bio);
945 return copied ? copied : ret;
946 }
947
948 n = bio->bi_iter.bi_size;
949 if (dio->flags & IOMAP_DIO_WRITE) {
950 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
951 if (use_fua)
952 bio->bi_opf |= REQ_FUA;
953 else
954 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
955 task_io_account_write(n);
956 } else {
957 bio->bi_opf = REQ_OP_READ;
958 if (dio->flags & IOMAP_DIO_DIRTY)
959 bio_set_pages_dirty(bio);
960 }
961
962 iov_iter_advance(dio->submit.iter, n);
963
964 dio->size += n;
965 pos += n;
966 copied += n;
967
968 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
969
970 atomic_inc(&dio->ref);
971
972 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
973 dio->submit.cookie = submit_bio(bio);
974 } while (nr_pages);
975
976 if (need_zeroout) {
977 /* zero out from the end of the write to the end of the block */
978 pad = pos & (fs_block_size - 1);
979 if (pad)
980 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
981 }
982 return copied;
983}
984
985/*
986 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
987 * is being issued as AIO or not. This allows us to optimise pure data writes
988 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
989 * REQ_FLUSH post write. This is slightly tricky because a single request here
990 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
991 * may be pure data writes. In that case, we still need to do a full data sync
992 * completion.
993 */
994ssize_t
995iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
996 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
997{
998 struct address_space *mapping = iocb->ki_filp->f_mapping;
999 struct inode *inode = file_inode(iocb->ki_filp);
1000 size_t count = iov_iter_count(iter);
1001 loff_t pos = iocb->ki_pos, start = pos;
1002 loff_t end = iocb->ki_pos + count - 1, ret = 0;
1003 unsigned int flags = IOMAP_DIRECT;
1004 struct blk_plug plug;
1005 struct iomap_dio *dio;
1006
1007 lockdep_assert_held(&inode->i_rwsem);
1008
1009 if (!count)
1010 return 0;
1011
1012 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1013 if (!dio)
1014 return -ENOMEM;
1015
1016 dio->iocb = iocb;
1017 atomic_set(&dio->ref, 1);
1018 dio->size = 0;
1019 dio->i_size = i_size_read(inode);
1020 dio->end_io = end_io;
1021 dio->error = 0;
1022 dio->flags = 0;
1023
1024 dio->submit.iter = iter;
1025 if (is_sync_kiocb(iocb)) {
1026 dio->submit.waiter = current;
1027 dio->submit.cookie = BLK_QC_T_NONE;
1028 dio->submit.last_queue = NULL;
1029 }
1030
1031 if (iov_iter_rw(iter) == READ) {
1032 if (pos >= dio->i_size)
1033 goto out_free_dio;
1034
1035 if (iter->type == ITER_IOVEC)
1036 dio->flags |= IOMAP_DIO_DIRTY;
1037 } else {
1038 flags |= IOMAP_WRITE;
1039 dio->flags |= IOMAP_DIO_WRITE;
1040
1041 /* for data sync or sync, we need sync completion processing */
1042 if (iocb->ki_flags & IOCB_DSYNC)
1043 dio->flags |= IOMAP_DIO_NEED_SYNC;
1044
1045 /*
1046 * For datasync only writes, we optimistically try using FUA for
1047 * this IO. Any non-FUA write that occurs will clear this flag,
1048 * hence we know before completion whether a cache flush is
1049 * necessary.
1050 */
1051 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1052 dio->flags |= IOMAP_DIO_WRITE_FUA;
1053 }
1054
1055 if (iocb->ki_flags & IOCB_NOWAIT) {
1056 if (filemap_range_has_page(mapping, start, end)) {
1057 ret = -EAGAIN;
1058 goto out_free_dio;
1059 }
1060 flags |= IOMAP_NOWAIT;
1061 }
1062
1063 ret = filemap_write_and_wait_range(mapping, start, end);
1064 if (ret)
1065 goto out_free_dio;
1066
1067 /*
1068 * Try to invalidate cache pages for the range we're direct
1069 * writing. If this invalidation fails, tough, the write will
1070 * still work, but racing two incompatible write paths is a
1071 * pretty crazy thing to do, so we don't support it 100%.
1072 */
1073 ret = invalidate_inode_pages2_range(mapping,
1074 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1075 if (ret)
1076 dio_warn_stale_pagecache(iocb->ki_filp);
1077 ret = 0;
1078
1079 if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
1080 !inode->i_sb->s_dio_done_wq) {
1081 ret = sb_init_dio_done_wq(inode->i_sb);
1082 if (ret < 0)
1083 goto out_free_dio;
1084 }
1085
1086 inode_dio_begin(inode);
1087
1088 blk_start_plug(&plug);
1089 do {
1090 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1091 iomap_dio_actor);
1092 if (ret <= 0) {
1093 /* magic error code to fall back to buffered I/O */
1094 if (ret == -ENOTBLK)
1095 ret = 0;
1096 break;
1097 }
1098 pos += ret;
1099
1100 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1101 break;
1102 } while ((count = iov_iter_count(iter)) > 0);
1103 blk_finish_plug(&plug);
1104
1105 if (ret < 0)
1106 iomap_dio_set_error(dio, ret);
1107
1108 /*
1109 * If all the writes we issued were FUA, we don't need to flush the
1110 * cache on IO completion. Clear the sync flag for this case.
1111 */
1112 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1113 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1114
1115 if (!atomic_dec_and_test(&dio->ref)) {
1116 if (!is_sync_kiocb(iocb))
1117 return -EIOCBQUEUED;
1118
1119 for (;;) {
1120 set_current_state(TASK_UNINTERRUPTIBLE);
1121 if (!READ_ONCE(dio->submit.waiter))
1122 break;
1123
1124 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1125 !dio->submit.last_queue ||
1126 !blk_poll(dio->submit.last_queue,
1127 dio->submit.cookie))
1128 io_schedule();
1129 }
1130 __set_current_state(TASK_RUNNING);
1131 }
1132
1133 ret = iomap_dio_complete(dio);
1134
1135 return ret;
1136
1137out_free_dio:
1138 kfree(dio);
1139 return ret;
1140}
1141EXPORT_SYMBOL_GPL(iomap_dio_rw);