// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096
/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
        atomic_t                read_bytes_pending;
        atomic_t                write_bytes_pending;
        spinlock_t              uptodate_lock;
        unsigned long           uptodate[];
};
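/*
 * Example: with 1kB filesystem blocks, a 4kB folio needs 4 uptodate bits and
 * a 64kB folio needs 16.  When block size equals folio size there is nothing
 * to track per block and no iomap_page is attached at all.
 */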
static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
        if (folio_test_private(folio))
                return folio_get_private(folio);
        return NULL;
}

static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
{
        struct iomap_page *iop = to_iomap_page(folio);
        unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
        gfp_t gfp;

        if (iop || nr_blocks <= 1)
                return iop;

        if (flags & IOMAP_NOWAIT)
                gfp = GFP_NOWAIT;
        else
                gfp = GFP_NOFS | __GFP_NOFAIL;

        iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
                      gfp);
        if (iop) {
                spin_lock_init(&iop->uptodate_lock);
                if (folio_test_uptodate(folio))
                        bitmap_fill(iop->uptodate, nr_blocks);
                folio_attach_private(folio, iop);
        }
        return iop;
}
static void iomap_page_release(struct folio *folio)
{
        struct iomap_page *iop = folio_detach_private(folio);
        struct inode *inode = folio->mapping->host;
        unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

        if (!iop)
                return;
        WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
        WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
        WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
                        folio_test_uptodate(folio));
        kfree(iop);
}
/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
                loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
        struct iomap_page *iop = to_iomap_page(folio);
        loff_t orig_pos = *pos;
        loff_t isize = i_size_read(inode);
        unsigned block_bits = inode->i_blkbits;
        unsigned block_size = (1 << block_bits);
        size_t poff = offset_in_folio(folio, *pos);
        size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
        unsigned first = poff >> block_bits;
        unsigned last = (poff + plen - 1) >> block_bits;

        /*
         * If the block size is smaller than the page size, we need to check
         * the per-block uptodate status and adjust the offset and length if
         * needed to avoid reading in already uptodate ranges.
         */
        if (iop) {
                unsigned int i;

                /* move forward for each leading block marked uptodate */
                for (i = first; i <= last; i++) {
                        if (!test_bit(i, iop->uptodate))
                                break;
                        *pos += block_size;
                        poff += block_size;
                        plen -= block_size;
                        first++;
                }

                /* truncate len if we find any trailing uptodate block(s) */
                for ( ; i <= last; i++) {
                        if (test_bit(i, iop->uptodate)) {
                                plen -= (last - i + 1) * block_size;
                                last = i - 1;
                                break;
                        }
                }
        }

        /*
         * If the extent spans the block that contains the i_size, we need to
         * handle both halves separately so that we properly zero data in the
         * page cache for blocks that are entirely outside of i_size.
         */
        if (orig_pos <= isize && orig_pos + length > isize) {
                unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

                if (first <= end && last > end)
                        plen -= (last - end) * block_size;
        }

        *offp = poff;
        *lenp = plen;
}
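/*
 * Worked example: a 16kB folio with 4kB blocks where only block 0 is already
 * uptodate, *pos at the folio start and length 16kB, comes back with
 * *offp = 4096 and *lenp = 12288 - the uptodate leading block is skipped and
 * only the remaining three blocks need to be read.
 */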
static void iomap_iop_set_range_uptodate(struct folio *folio,
                struct iomap_page *iop, size_t off, size_t len)
{
        struct inode *inode = folio->mapping->host;
        unsigned first = off >> inode->i_blkbits;
        unsigned last = (off + len - 1) >> inode->i_blkbits;
        unsigned long flags;

        spin_lock_irqsave(&iop->uptodate_lock, flags);
        bitmap_set(iop->uptodate, first, last - first + 1);
        if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
                folio_mark_uptodate(folio);
        spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
static void iomap_set_range_uptodate(struct folio *folio,
                struct iomap_page *iop, size_t off, size_t len)
{
        if (iop)
                iomap_iop_set_range_uptodate(folio, iop, off, len);
        else
                folio_mark_uptodate(folio);
}
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
                size_t len, int error)
{
        struct iomap_page *iop = to_iomap_page(folio);

        if (unlikely(error)) {
                folio_clear_uptodate(folio);
                folio_set_error(folio);
        } else {
                iomap_set_range_uptodate(folio, iop, offset, len);
        }

        if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
                folio_unlock(folio);
}
static void iomap_read_end_io(struct bio *bio)
{
        int error = blk_status_to_errno(bio->bi_status);
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio)
                iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
        bio_put(bio);
}
struct iomap_readpage_ctx {
        struct folio            *cur_folio;
        bool                    cur_folio_in_bio;
        struct bio              *bio;
        struct readahead_control *rac;
};
/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
                struct folio *folio)
{
        struct iomap_page *iop;
        const struct iomap *iomap = iomap_iter_srcmap(iter);
        size_t size = i_size_read(iter->inode) - iomap->offset;
        size_t poff = offset_in_page(iomap->offset);
        size_t offset = offset_in_folio(folio, iomap->offset);
        void *addr;

        if (folio_test_uptodate(folio))
                return 0;

        if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
                return -EIO;
        if (WARN_ON_ONCE(size > PAGE_SIZE -
                         offset_in_page(iomap->inline_data)))
                return -EIO;
        if (WARN_ON_ONCE(size > iomap->length))
                return -EIO;
        if (offset > 0)
                iop = iomap_page_create(iter->inode, folio, iter->flags);
        else
                iop = to_iomap_page(folio);

        addr = kmap_local_folio(folio, offset);
        memcpy(addr, iomap->inline_data, size);
        memset(addr + size, 0, PAGE_SIZE - poff - size);
        kunmap_local(addr);
        iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
        return 0;
}
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
                loff_t pos)
{
        const struct iomap *srcmap = iomap_iter_srcmap(iter);

        return srcmap->type != IOMAP_MAPPED ||
                (srcmap->flags & IOMAP_F_NEW) ||
                pos >= i_size_read(iter->inode);
}
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
                struct iomap_readpage_ctx *ctx, loff_t offset)
{
        const struct iomap *iomap = &iter->iomap;
        loff_t pos = iter->pos + offset;
        loff_t length = iomap_length(iter) - offset;
        struct folio *folio = ctx->cur_folio;
        struct iomap_page *iop;
        loff_t orig_pos = pos;
        size_t poff, plen;
        sector_t sector;

        if (iomap->type == IOMAP_INLINE)
                return iomap_read_inline_data(iter, folio);

        /* zero post-eof blocks as the page may be mapped */
        iop = iomap_page_create(iter->inode, folio, iter->flags);
        iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
        if (plen == 0)
                goto done;

        if (iomap_block_needs_zeroing(iter, pos)) {
                folio_zero_range(folio, poff, plen);
                iomap_set_range_uptodate(folio, iop, poff, plen);
                goto done;
        }

        ctx->cur_folio_in_bio = true;
        if (iop)
                atomic_add(plen, &iop->read_bytes_pending);

        sector = iomap_sector(iomap, pos);
        if (!ctx->bio ||
            bio_end_sector(ctx->bio) != sector ||
            !bio_add_folio(ctx->bio, folio, plen, poff)) {
                gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
                gfp_t orig_gfp = gfp;
                unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

                if (ctx->bio)
                        submit_bio(ctx->bio);

                if (ctx->rac) /* same as readahead_gfp_mask */
                        gfp |= __GFP_NORETRY | __GFP_NOWARN;
                ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
                                     REQ_OP_READ, gfp);
                /*
                 * If the bio_alloc fails, try it again for a single page to
                 * avoid having to deal with partial page reads.  This emulates
                 * what do_mpage_read_folio does.
                 */
                if (!ctx->bio) {
                        ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
                                             orig_gfp);
                }
                if (ctx->rac)
                        ctx->bio->bi_opf |= REQ_RAHEAD;
                ctx->bio->bi_iter.bi_sector = sector;
                ctx->bio->bi_end_io = iomap_read_end_io;
                bio_add_folio(ctx->bio, folio, plen, poff);
        }

done:
        /*
         * Move the caller beyond our range so that it keeps making progress.
         * For that, we have to include any leading non-uptodate ranges, but
         * we can skip trailing ones as they will be handled in the next
         * iteration.
         */
        return pos - orig_pos + plen;
}
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = folio->mapping->host,
                .pos            = folio_pos(folio),
                .len            = folio_size(folio),
        };
        struct iomap_readpage_ctx ctx = {
                .cur_folio      = folio,
        };
        int ret;

        trace_iomap_readpage(iter.inode, 1);

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

        if (ret < 0)
                folio_set_error(folio);

        if (ctx.bio) {
                submit_bio(ctx.bio);
                WARN_ON_ONCE(!ctx.cur_folio_in_bio);
        } else {
                WARN_ON_ONCE(ctx.cur_folio_in_bio);
                folio_unlock(folio);
        }

        /*
         * Just like mpage_readahead and block_read_full_folio, we always
         * return 0 and just set the folio error flag on errors.  This
         * should be cleaned up throughout the stack eventually.
         */
        return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
                struct iomap_readpage_ctx *ctx)
{
        loff_t length = iomap_length(iter);
        loff_t done, ret;

        for (done = 0; done < length; done += ret) {
                if (ctx->cur_folio &&
                    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
                        if (!ctx->cur_folio_in_bio)
                                folio_unlock(ctx->cur_folio);
                        ctx->cur_folio = NULL;
                }
                if (!ctx->cur_folio) {
                        ctx->cur_folio = readahead_folio(ctx->rac);
                        ctx->cur_folio_in_bio = false;
                }
                ret = iomap_readpage_iter(iter, ctx, done);
                if (ret <= 0)
                        return ret;
        }

        return done;
}
/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode  = rac->mapping->host,
                .pos    = readahead_pos(rac),
                .len    = readahead_length(rac),
        };
        struct iomap_readpage_ctx ctx = {
                .rac    = rac,
        };

        trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

        while (iomap_iter(&iter, ops) > 0)
                iter.processed = iomap_readahead_iter(&iter, &ctx);

        if (ctx.bio)
                submit_bio(ctx.bio);
        if (ctx.cur_folio) {
                if (!ctx.cur_folio_in_bio)
                        folio_unlock(ctx.cur_folio);
        }
}
EXPORT_SYMBOL_GPL(iomap_readahead);
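/*
 * Filesystems normally wire the two read paths above straight into their
 * address_space_operations.  A minimal sketch (the iomap_ops table name
 * "example_iomap_ops" is made up for illustration):
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &example_iomap_ops);
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_iomap_ops);
 *	}
 */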
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
        struct iomap_page *iop = to_iomap_page(folio);
        struct inode *inode = folio->mapping->host;
        unsigned first, last, i;

        if (!iop)
                return false;

        /* Caller's range may extend past the end of this folio */
        count = min(folio_size(folio) - from, count);

        /* First and last blocks in range within folio */
        first = from >> inode->i_blkbits;
        last = (from + count - 1) >> inode->i_blkbits;

        for (i = first; i <= last; i++)
                if (!test_bit(i, iop->uptodate))
                        return false;
        return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
{
        unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
        struct folio *folio;

        if (iter->flags & IOMAP_NOWAIT)
                fgp |= FGP_NOWAIT;

        folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
                        fgp, mapping_gfp_mask(iter->inode->i_mapping));
        if (folio)
                return folio;

        if (iter->flags & IOMAP_NOWAIT)
                return ERR_PTR(-EAGAIN);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(iomap_get_folio);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
        trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
                        folio_size(folio));

        /*
         * mm accommodates an old ext3 case where clean folios might
         * not have had the dirty bit cleared.  Thus, it can send actual
         * dirty folios to ->release_folio() via shrink_active_list();
         * skip those here.
         */
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
                return false;
        iomap_page_release(folio);
        return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
        trace_iomap_invalidate_folio(folio->mapping->host,
                                        folio_pos(folio) + offset, len);

        /*
         * If we're invalidating the entire folio, clear the dirty state
         * from it and release it to avoid unnecessary buildup of the LRU.
         */
        if (offset == 0 && len == folio_size(folio)) {
                WARN_ON_ONCE(folio_test_writeback(folio));
                folio_cancel_dirty(folio);
                iomap_page_release(folio);
        } else if (folio_test_large(folio)) {
                /* Must release the iop so the page can be split */
                WARN_ON_ONCE(!folio_test_uptodate(folio) &&
                             folio_test_dirty(folio));
                iomap_page_release(folio);
        }
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
        loff_t i_size = i_size_read(inode);

        /*
         * Only truncate newly allocated pages beyond EOF, even if the
         * write started inside the existing inode size.
         */
        if (pos + len > i_size)
                truncate_pagecache_range(inode, max(pos, i_size),
                                         pos + len - 1);
}
static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
                size_t poff, size_t plen, const struct iomap *iomap)
{
        struct bio_vec bvec;
        struct bio bio;

        bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
        bio_add_folio(&bio, folio, plen, poff);
        return submit_bio_wait(&bio);
}
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
                size_t len, struct folio *folio)
{
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        struct iomap_page *iop;
        loff_t block_size = i_blocksize(iter->inode);
        loff_t block_start = round_down(pos, block_size);
        loff_t block_end = round_up(pos + len, block_size);
        unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
        size_t from = offset_in_folio(folio, pos), to = from + len;
        size_t poff, plen;

        if (folio_test_uptodate(folio))
                return 0;
        folio_clear_error(folio);

        iop = iomap_page_create(iter->inode, folio, iter->flags);
        if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
                return -EAGAIN;

        do {
                iomap_adjust_read_range(iter->inode, folio, &block_start,
                                block_end - block_start, &poff, &plen);
                if (plen == 0)
                        break;

                if (!(iter->flags & IOMAP_UNSHARE) &&
                    (from <= poff || from >= poff + plen) &&
                    (to <= poff || to >= poff + plen))
                        continue;

                if (iomap_block_needs_zeroing(iter, block_start)) {
                        if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
                                return -EIO;
                        folio_zero_segments(folio, poff, from, to, poff + plen);
                } else {
                        int status;

                        if (iter->flags & IOMAP_NOWAIT)
                                return -EAGAIN;

                        status = iomap_read_folio_sync(block_start, folio,
                                        poff, plen, srcmap);
                        if (status)
                                return status;
                }
                iomap_set_range_uptodate(folio, iop, poff, plen);
        } while ((block_start += plen) < block_end);

        return 0;
}
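/*
 * Example: a 512 byte write into the middle of a non-uptodate 4kB block makes
 * the loop above read that whole block in synchronously first (or zero it if
 * the extent is new, unwritten or a hole), so the copy that follows never
 * exposes stale data.  A write that exactly covers whole blocks skips the
 * read entirely via the "continue" above.
 */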
static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
                size_t len)
{
        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

        if (folio_ops && folio_ops->get_folio)
                return folio_ops->get_folio(iter, pos, len);
        else
                return iomap_get_folio(iter, pos);
}
static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
                struct folio *folio)
{
        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

        if (folio_ops && folio_ops->put_folio) {
                folio_ops->put_folio(iter->inode, pos, ret, folio);
        } else {
                folio_unlock(folio);
                folio_put(folio);
        }
}
static int iomap_write_begin_inline(const struct iomap_iter *iter,
                struct folio *folio)
{
        /* needs more work for the tailpacking case; disable for now */
        if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
                return -EIO;
        return iomap_read_inline_data(iter, folio);
}
static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
                size_t len, struct folio **foliop)
{
        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        struct folio *folio;
        int status = 0;

        BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
        if (srcmap != &iter->iomap)
                BUG_ON(pos + len > srcmap->offset + srcmap->length);

        if (fatal_signal_pending(current))
                return -EINTR;

        if (!mapping_large_folio_support(iter->inode->i_mapping))
                len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

        folio = __iomap_get_folio(iter, pos, len);
        if (IS_ERR(folio))
                return PTR_ERR(folio);

        /*
         * Now we have a locked folio, before we do anything with it we need to
         * check that the iomap we have cached is not stale.  The inode extent
         * mapping can change due to concurrent IO in flight (e.g.
         * IOMAP_UNWRITTEN state can change and memory reclaim could have
         * reclaimed a previously partially written page at this index after IO
         * completion before this write reaches this file offset) and hence we
         * could do the wrong thing here (zero a page range incorrectly or fail
         * to zero) and corrupt data.
         */
        if (folio_ops && folio_ops->iomap_valid) {
                bool iomap_valid = folio_ops->iomap_valid(iter->inode,
                                                         &iter->iomap);
                if (!iomap_valid) {
                        iter->iomap.flags |= IOMAP_F_STALE;
                        status = 0;
                        goto out_unlock;
                }
        }

        if (pos + len > folio_pos(folio) + folio_size(folio))
                len = folio_pos(folio) + folio_size(folio) - pos;

        if (srcmap->type == IOMAP_INLINE)
                status = iomap_write_begin_inline(iter, folio);
        else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
                status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
        else
                status = __iomap_write_begin(iter, pos, len, folio);

        if (unlikely(status))
                goto out_unlock;

        *foliop = folio;
        return 0;

out_unlock:
        __iomap_put_folio(iter, pos, 0, folio);
        iomap_write_failed(iter->inode, pos, len);

        return status;
}
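/*
 * A typical ->iomap_valid implementation samples a per-inode sequence counter
 * into the iomap at ->iomap_begin time and compares it against the current
 * value here; if the counter has moved the mapping is stale and the iteration
 * is retried.  (XFS works roughly this way; the exact revalidation mechanism
 * is up to the filesystem.)
 */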
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
                size_t copied, struct folio *folio)
{
        struct iomap_page *iop = to_iomap_page(folio);

        flush_dcache_folio(folio);

        /*
         * The blocks that were entirely written will now be uptodate, so we
         * don't have to worry about a read_folio reading them and overwriting a
         * partial write.  However, if we've encountered a short write and only
         * partially written into a block, it will not be marked uptodate, so a
         * read_folio might come in and destroy our partial write.
         *
         * Do the simplest thing and just treat any short write to a
         * non-uptodate page as a zero-length write, and force the caller to
         * redo the whole thing.
         */
        if (unlikely(copied < len && !folio_test_uptodate(folio)))
                return 0;
        iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
        filemap_dirty_folio(inode->i_mapping, folio);
        return copied;
}
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
                struct folio *folio, loff_t pos, size_t copied)
{
        const struct iomap *iomap = &iter->iomap;
        void *addr;

        WARN_ON_ONCE(!folio_test_uptodate(folio));
        BUG_ON(!iomap_inline_data_valid(iomap));

        flush_dcache_folio(folio);
        addr = kmap_local_folio(folio, pos);
        memcpy(iomap_inline_data(iomap, pos), addr, copied);
        kunmap_local(addr);

        mark_inode_dirty(iter->inode);
        return copied;
}
/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
                size_t copied, struct folio *folio)
{
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        loff_t old_size = iter->inode->i_size;
        size_t ret;

        if (srcmap->type == IOMAP_INLINE) {
                ret = iomap_write_end_inline(iter, folio, pos, copied);
        } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
                ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
                                copied, &folio->page, NULL);
        } else {
                ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
        }

        /*
         * Update the in-memory inode size after copying the data into the page
         * cache.  It's up to the file system to write the updated size to disk,
         * preferably after I/O completion so that no stale data is exposed.
         */
        if (pos + ret > old_size) {
                i_size_write(iter->inode, pos + ret);
                iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
        }
        __iomap_put_folio(iter, pos, ret, folio);

        if (old_size < pos)
                pagecache_isize_extended(iter->inode, old_size, pos);
        if (ret < len)
                iomap_write_failed(iter->inode, pos + ret, len - ret);
        return ret;
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
        loff_t length = iomap_length(iter);
        loff_t pos = iter->pos;
        ssize_t written = 0;
        long status = 0;
        struct address_space *mapping = iter->inode->i_mapping;
        unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

        do {
                struct folio *folio;
                struct page *page;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */

                offset = offset_in_page(pos);
                bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_count(i));
again:
                status = balance_dirty_pages_ratelimited_flags(mapping,
                                                               bdp_flags);
                if (unlikely(status))
                        break;

                if (bytes > length)
                        bytes = length;

                /*
                 * Bring in the user page that we'll copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 *
                 * For async buffered writes the assumption is that the user
                 * page has already been faulted in.  This can be optimized by
                 * faulting the user page.
                 */
                if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
                        status = -EFAULT;
                        break;
                }

                status = iomap_write_begin(iter, pos, bytes, &folio);
                if (unlikely(status))
                        break;
                if (iter->iomap.flags & IOMAP_F_STALE)
                        break;

                page = folio_file_page(folio, pos >> PAGE_SHIFT);
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                copied = copy_page_from_iter_atomic(page, offset, bytes, i);

                status = iomap_write_end(iter, pos, bytes, copied, folio);

                if (unlikely(copied != status))
                        iov_iter_revert(i, copied - status);

                cond_resched();
                if (unlikely(status == 0)) {
                        /*
                         * A short copy made iomap_write_end() reject the
                         * thing entirely.  Might be memory poisoning
                         * halfway through, might be a race with munmap,
                         * might be severe memory pressure.
                         */
                        if (copied)
                                bytes = copied;
                        goto again;
                }
                pos += status;
                written += status;
                length -= status;
        } while (iov_iter_count(i) && length);

        if (status == -EAGAIN) {
                iov_iter_revert(i, written);
                return -EAGAIN;
        }
        return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = iocb->ki_filp->f_mapping->host,
                .pos            = iocb->ki_pos,
                .len            = iov_iter_count(i),
                .flags          = IOMAP_WRITE,
        };
        int ret;

        if (iocb->ki_flags & IOCB_NOWAIT)
                iter.flags |= IOMAP_NOWAIT;

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_write_iter(&iter, i);
        if (iter.pos == iocb->ki_pos)
                return ret;
        return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
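/*
 * A filesystem's ->write_iter typically calls this with the inode lock held,
 * after generic_write_checks().  A minimal sketch, with illustrative names
 * only:
 *
 *	static ssize_t example_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */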
/*
 * Scan the data range passed to us for dirty page cache folios.  If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been read faults in which case they
 * contain zeroes and we can remove the delalloc backing range and any new
 * writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents
 * over the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
                loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
                int (*punch)(struct inode *inode, loff_t offset, loff_t length))
{
        while (start_byte < end_byte) {
                struct folio    *folio;

                /* grab locked page */
                folio = filemap_lock_folio(inode->i_mapping,
                                start_byte >> PAGE_SHIFT);
                if (!folio) {
                        start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
                                        PAGE_SIZE;
                        continue;
                }

                /* if dirty, punch up to offset */
                if (folio_test_dirty(folio)) {
                        if (start_byte > *punch_start_byte) {
                                int     error;

                                error = punch(inode, *punch_start_byte,
                                                start_byte - *punch_start_byte);
                                if (error) {
                                        folio_unlock(folio);
                                        folio_put(folio);
                                        return error;
                                }
                        }

                        /*
                         * Make sure the next punch start is correctly bound to
                         * the end of this data range, not the end of the folio.
                         */
                        *punch_start_byte = min_t(loff_t, end_byte,
                                        folio_next_index(folio) << PAGE_SHIFT);
                }

                /* move offset to start of next folio in range */
                start_byte = folio_next_index(folio) << PAGE_SHIFT;
                folio_unlock(folio);
                folio_put(folio);
        }
        return 0;
}
/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes.  This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF.  e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date.  A write page fault can then mark it dirty.  If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out.  Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty.  This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF.  Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data().  i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end).  Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
                loff_t start_byte, loff_t end_byte,
                int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
        loff_t punch_start_byte = start_byte;
        loff_t scan_end_byte = min(i_size_read(inode), end_byte);
        int error = 0;

        /*
         * Lock the mapping to avoid races with page faults re-instantiating
         * folios and dirtying them via ->page_mkwrite whilst we walk the
         * cache and perform delalloc extent removal.  Failing to do this can
         * leave dirty pages with no space reservation in the cache.
         */
        filemap_invalidate_lock(inode->i_mapping);
        while (start_byte < scan_end_byte) {
                loff_t          data_end;

                start_byte = mapping_seek_hole_data(inode->i_mapping,
                                start_byte, scan_end_byte, SEEK_DATA);
                /*
                 * If there is no more data to scan, all that is left is to
                 * punch out the remaining range.
                 */
                if (start_byte == -ENXIO || start_byte == scan_end_byte)
                        break;
                if (start_byte < 0) {
                        error = start_byte;
                        goto out_unlock;
                }
                WARN_ON_ONCE(start_byte < punch_start_byte);
                WARN_ON_ONCE(start_byte > scan_end_byte);

                /*
                 * We find the end of this contiguous cached data range by
                 * seeking from start_byte to the beginning of the next hole.
                 */
                data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
                                scan_end_byte, SEEK_HOLE);
                if (data_end < 0) {
                        error = data_end;
                        goto out_unlock;
                }
                WARN_ON_ONCE(data_end <= start_byte);
                WARN_ON_ONCE(data_end > scan_end_byte);

                error = iomap_write_delalloc_scan(inode, &punch_start_byte,
                                start_byte, data_end, punch);
                if (error)
                        goto out_unlock;

                /* The next data search starts at the end of this one. */
                start_byte = data_end;
        }

        if (punch_start_byte < end_byte)
                error = punch(inode, punch_start_byte,
                                end_byte - punch_start_byte);
out_unlock:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}
/*
 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method.  For
 * filesystems that use delayed allocation, we need to punch out delalloc
 * extents from the range that are not dirty in the page cache.  As the write
 * can race with page faults, there can be dirty pages over the delalloc extent
 * outside the range of a short write but still within the delalloc extent
 * allocated for this iomap.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it.  It must skip over all other types of extents in the range and leave
 * them completely unchanged.  It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
                struct iomap *iomap, loff_t pos, loff_t length,
                ssize_t written,
                int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
        loff_t                  start_byte;
        loff_t                  end_byte;
        int                     blocksize = i_blocksize(inode);

        if (iomap->type != IOMAP_DELALLOC)
                return 0;

        /* If we didn't reserve the blocks, we're not allowed to punch them. */
        if (!(iomap->flags & IOMAP_F_NEW))
                return 0;

        /*
         * start_byte refers to the first unused block after a short write.  If
         * nothing was written, round offset down to point at the first block
         * in the range.
         */
        if (unlikely(!written))
                start_byte = round_down(pos, blocksize);
        else
                start_byte = round_up(pos + written, blocksize);
        end_byte = round_up(pos + length, blocksize);

        /* Nothing to do if we've written the entire delalloc extent */
        if (start_byte >= end_byte)
                return 0;

        return iomap_write_delalloc_release(inode, start_byte, end_byte,
                                        punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
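/*
 * Example with 4kB blocks: a 10kB write at pos 0 that only copied 5kB has
 * written = 5120, so start_byte = round_up(5120, 4096) = 8192 and
 * end_byte = round_up(10240, 4096) = 12288 - only the last reserved block is
 * a candidate for punching.  If nothing was copied at all, start_byte rounds
 * down to 0 and the whole reserved range is considered.
 */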
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
        struct iomap *iomap = &iter->iomap;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        loff_t pos = iter->pos;
        loff_t length = iomap_length(iter);
        loff_t written = 0;
        long status;

        /* don't bother with blocks that are not shared to start with */
        if (!(iomap->flags & IOMAP_F_SHARED))
                return length;
        /* don't bother with holes or unwritten extents */
        if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
                return length;

        do {
                unsigned long offset = offset_in_page(pos);
                unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
                struct folio *folio;

                status = iomap_write_begin(iter, pos, bytes, &folio);
                if (unlikely(status))
                        return status;
                if (iter->iomap.flags & IOMAP_F_STALE)
                        break;

                status = iomap_write_end(iter, pos, bytes, bytes, folio);
                if (WARN_ON_ONCE(status == 0))
                        return -EIO;

                cond_resched();

                pos += status;
                written += status;
                length -= status;

                balance_dirty_pages_ratelimited(iter->inode->i_mapping);
        } while (length);

        return written;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = inode,
                .pos            = pos,
                .len            = len,
                .flags          = IOMAP_WRITE | IOMAP_UNSHARE,
        };
        int ret;

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_unshare_iter(&iter);
        return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        loff_t pos = iter->pos;
        loff_t length = iomap_length(iter);
        loff_t written = 0;

        /* already zeroed?  we're done. */
        if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
                return length;

        do {
                struct folio *folio;
                int status;
                size_t offset;
                size_t bytes = min_t(u64, SIZE_MAX, length);

                status = iomap_write_begin(iter, pos, bytes, &folio);
                if (status)
                        return status;
                if (iter->iomap.flags & IOMAP_F_STALE)
                        break;

                offset = offset_in_folio(folio, pos);
                if (bytes > folio_size(folio) - offset)
                        bytes = folio_size(folio) - offset;

                folio_zero_range(folio, offset, bytes);
                folio_mark_accessed(folio);

                bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
                if (WARN_ON_ONCE(bytes == 0))
                        return -EIO;

                pos += bytes;
                length -= bytes;
                written += bytes;
        } while (length > 0);

        if (did_zero)
                *did_zero = true;
        return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = inode,
                .pos            = pos,
                .len            = len,
                .flags          = IOMAP_ZERO,
        };
        int ret;

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_zero_iter(&iter, did_zero);
        return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops)
{
        unsigned int blocksize = i_blocksize(inode);
        unsigned int off = pos & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!off)
                return 0;
        return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
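/*
 * Example with 4kB blocks: truncating to pos 6000 gives off = 6000 & 4095 =
 * 1904, so the remaining 2192 bytes of the final block (6000..8191) are
 * zeroed; truncating to a block aligned position (off == 0) does nothing.
 */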
static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
                struct folio *folio)
{
        loff_t length = iomap_length(iter);
        int ret;

        if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
                ret = __block_write_begin_int(folio, iter->pos, length, NULL,
                                              &iter->iomap);
                if (ret)
                        return ret;
                block_commit_write(&folio->page, 0, length);
        } else {
                WARN_ON_ONCE(!folio_test_uptodate(folio));
                folio_mark_dirty(folio);
        }

        return length;
}
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = file_inode(vmf->vma->vm_file),
                .flags          = IOMAP_WRITE | IOMAP_FAULT,
        };
        struct folio *folio = page_folio(vmf->page);
        ssize_t ret;

        folio_lock(folio);
        ret = folio_mkwrite_check_truncate(folio, iter.inode);
        if (ret < 0)
                goto out_unlock;
        iter.pos = folio_pos(folio);
        iter.len = ret;
        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

        if (ret < 0)
                goto out_unlock;
        folio_wait_stable(folio);
        return VM_FAULT_LOCKED;
out_unlock:
        folio_unlock(folio);
        return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
                size_t len, int error)
{
        struct iomap_page *iop = to_iomap_page(folio);

        if (error) {
                folio_set_error(folio);
                mapping_set_error(inode->i_mapping, error);
        }

        WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
        WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

        if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
                folio_end_writeback(folio);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
        struct inode *inode = ioend->io_inode;
        struct bio *bio = &ioend->io_inline_bio;
        struct bio *last = ioend->io_bio, *next;
        u64 start = bio->bi_iter.bi_sector;
        loff_t offset = ioend->io_offset;
        bool quiet = bio_flagged(bio, BIO_QUIET);
        u32 folio_count = 0;

        for (bio = &ioend->io_inline_bio; bio; bio = next) {
                struct folio_iter fi;

                /*
                 * For the last bio, bi_private points to the ioend, so we
                 * need to explicitly end the iteration here.
                 */
                if (bio == last)
                        next = NULL;
                else
                        next = bio->bi_private;

                /* walk all folios in bio, ending page IO on them */
                bio_for_each_folio_all(fi, bio) {
                        iomap_finish_folio_write(inode, fi.folio, fi.length,
                                        error);
                        folio_count++;
                }
                bio_put(bio);
        }
        /* The ioend has been freed by bio_put() */

        if (unlikely(error && !quiet)) {
                printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
                        inode->i_sb->s_id, inode->i_ino, offset, start);
        }
        return folio_count;
}
/*
 * Ioend completion routine for merged bios.  This can only be called from task
 * contexts as merged ioends can be of unbound length.  Hence we have to break
 * up the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs.  We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
        struct list_head tmp;
        u32 completions;

        might_sleep();

        list_replace_init(&ioend->io_list, &tmp);
        completions = iomap_finish_ioend(ioend, error);

        while (!list_empty(&tmp)) {
                if (completions > IOEND_BATCH_SIZE * 8) {
                        cond_resched();
                        completions = 0;
                }
                ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
                list_del_init(&ioend->io_list);
                completions += iomap_finish_ioend(ioend, error);
        }
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
        if (ioend->io_bio->bi_status != next->io_bio->bi_status)
                return false;
        if ((ioend->io_flags & IOMAP_F_SHARED) ^
            (next->io_flags & IOMAP_F_SHARED))
                return false;
        if ((ioend->io_type == IOMAP_UNWRITTEN) ^
            (next->io_type == IOMAP_UNWRITTEN))
                return false;
        if (ioend->io_offset + ioend->io_size != next->io_offset)
                return false;
        /*
         * Do not merge physically discontiguous ioends.  The filesystem
         * completion functions will have to iterate the physical
         * discontiguities even if we merge the ioends at a logical level, so
         * we don't gain anything by merging physical discontiguities here.
         *
         * We cannot use bio->bi_iter.bi_sector here as it is modified during
         * submission so does not point to the start sector of the bio at
         * completion.
         */
        if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
                return false;
        return true;
}
void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
        struct iomap_ioend *next;

        INIT_LIST_HEAD(&ioend->io_list);

        while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
                        io_list))) {
                if (!iomap_ioend_can_merge(ioend, next))
                        break;
                list_move_tail(&next->io_list, &ioend->io_list);
                ioend->io_size += next->io_size;
        }
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
static int
iomap_ioend_compare(void *priv, const struct list_head *a,
                const struct list_head *b)
{
        struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
        struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

        if (ia->io_offset < ib->io_offset)
                return -1;
        if (ia->io_offset > ib->io_offset)
                return 1;
        return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
        list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
static void iomap_writepage_end_bio(struct bio *bio)
{
        struct iomap_ioend *ioend = bio->bi_private;

        iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}
/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
                int error)
{
        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

        if (wpc->ops->prepare_ioend)
                error = wpc->ops->prepare_ioend(ioend, error);
        if (error) {
                /*
                 * If we're failing the IO now, just mark the ioend with an
                 * error and finish it.  This will run IO completion immediately
                 * as there is only one reference to the ioend at this point in
                 * time.
                 */
                ioend->io_bio->bi_status = errno_to_blk_status(error);
                bio_endio(ioend->io_bio);
                return error;
        }

        submit_bio(ioend->io_bio);
        return 0;
}
static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
                loff_t offset, sector_t sector, struct writeback_control *wbc)
{
        struct iomap_ioend *ioend;
        struct bio *bio;

        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
                               REQ_OP_WRITE | wbc_to_write_flags(wbc),
                               GFP_NOFS, &iomap_ioend_bioset);
        bio->bi_iter.bi_sector = sector;
        wbc_init_bio(wbc, bio);

        ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
        INIT_LIST_HEAD(&ioend->io_list);
        ioend->io_type = wpc->iomap.type;
        ioend->io_flags = wpc->iomap.flags;
        ioend->io_inode = inode;
        ioend->io_size = 0;
        ioend->io_folios = 0;
        ioend->io_offset = offset;
        ioend->io_bio = bio;
        ioend->io_sector = sector;
        return ioend;
}
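/*
 * The container_of() above works because iomap_ioend_bioset (see iomap_init()
 * at the bottom of this file) front-pads every bio it allocates with
 * offsetof(struct iomap_ioend, io_inline_bio) bytes, so the ioend and its
 * first (embedded) bio are a single allocation.
 */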
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
        struct bio *new;

        new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
        bio_clone_blkg_association(new, prev);
        new->bi_iter.bi_sector = bio_end_sector(prev);

        bio_chain(prev, new);
        bio_get(prev);          /* for iomap_finish_ioend */
        submit_bio(prev);
        return new;
}
static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
                sector_t sector)
{
        if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
            (wpc->ioend->io_flags & IOMAP_F_SHARED))
                return false;
        if (wpc->iomap.type != wpc->ioend->io_type)
                return false;
        if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
                return false;
        if (sector != bio_end_sector(wpc->ioend->io_bio))
                return false;
        /*
         * Limit ioend bio chain lengths to minimise IO completion latency.
         * This also prevents long tight loops ending page writeback on all
         * the folios in the ioend.
         */
        if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
                return false;
        return true;
}
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
                struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct list_head *iolist)
{
        sector_t sector = iomap_sector(&wpc->iomap, pos);
        unsigned len = i_blocksize(inode);
        size_t poff = offset_in_folio(folio, pos);

        if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
                if (wpc->ioend)
                        list_add(&wpc->ioend->io_list, iolist);
                wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
        }

        if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
                wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
                bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
        }

        if (iop)
                atomic_add(len, &iop->write_bytes_pending);
        wpc->ioend->io_size += len;
        wbc_account_cgroup_owner(wbc, &folio->page, len);
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct inode *inode,
                struct folio *folio, u64 end_pos)
{
        struct iomap_page *iop = iomap_page_create(inode, folio, 0);
        struct iomap_ioend *ioend, *next;
        unsigned len = i_blocksize(inode);
        unsigned nblocks = i_blocks_per_folio(inode, folio);
        u64 pos = folio_pos(folio);
        int error = 0, count = 0, i;
        LIST_HEAD(submit_list);

        WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

        /*
         * Walk through the folio to find areas to write back.  If we
         * run off the end of the current map or find the current map
         * invalid, grab a new one.
         */
        for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
                if (iop && !test_bit(i, iop->uptodate))
                        continue;

                error = wpc->ops->map_blocks(wpc, inode, pos);
                if (error)
                        break;
                trace_iomap_writepage_map(inode, &wpc->iomap);
                if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
                        continue;
                if (wpc->iomap.type == IOMAP_HOLE)
                        continue;
                iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
                                 &submit_list);
                count++;
        }
        if (count)
                wpc->ioend->io_folios++;

        WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
        WARN_ON_ONCE(!folio_test_locked(folio));
        WARN_ON_ONCE(folio_test_writeback(folio));
        WARN_ON_ONCE(folio_test_dirty(folio));

        /*
         * We cannot cancel the ioend directly here on error.  We may have
         * already set other pages under writeback and hence we have to run I/O
         * completion to mark the error state of the pages under writeback
         * appropriately.
         */
        if (unlikely(error)) {
                /*
                 * Let the filesystem know what portion of the current page
                 * failed to map.  If the page hasn't been added to ioend, it
                 * won't be affected by I/O completion and we must unlock it
                 * now.
                 */
                if (wpc->ops->discard_folio)
                        wpc->ops->discard_folio(folio, pos);
                if (!count) {
                        folio_unlock(folio);
                        goto done;
                }
        }

        folio_start_writeback(folio);
        folio_unlock(folio);

        /*
         * Preserve the original error if there was one; catch
         * submission errors here and propagate into subsequent ioend
         * submissions.
         */
        list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
                int error2;

                list_del_init(&ioend->io_list);
                error2 = iomap_submit_ioend(wpc, ioend, error);
                if (error2 && !error)
                        error = error2;
        }

        /*
         * We can end up here with no error and nothing to write only if we
         * race with a partial page truncate on a sub-page block sized
         * filesystem.
         */
        if (!count)
                folio_end_writeback(folio);
done:
        mapping_set_error(inode->i_mapping, error);
        return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int iomap_do_writepage(struct folio *folio,
                struct writeback_control *wbc, void *data)
{
        struct iomap_writepage_ctx *wpc = data;
        struct inode *inode = folio->mapping->host;
        u64 end_pos, isize;

        trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

        /*
         * Refuse to write the folio out if we're called from reclaim context.
         *
         * This avoids stack overflows when called from deeply used stacks in
         * random callers for direct reclaim or memcg reclaim.  We explicitly
         * allow reclaim from kswapd as the stack usage there is relatively
         * low.
         *
         * This should never happen except in the case of a VM regression so
         * warn about it.
         */
        if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
                        PF_MEMALLOC))
                goto redirty;

        /*
         * Is this folio beyond the end of the file?
         *
         * The folio index is less than the end_index, adjust the end_pos
         * to the highest offset that this folio should represent.
         * -----------------------------------------------------
         * |                    file mapping           | <EOF> |
         * -----------------------------------------------------
         * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
         * ^--------------------------------^----------|--------
         * |     desired writeback range    |      see else    |
         * ---------------------------------^------------------|
         */
        isize = i_size_read(inode);
        end_pos = folio_pos(folio) + folio_size(folio);
        if (end_pos > isize) {
                /*
                 * Check whether the page to write out is beyond or straddles
                 * i_size or not.
                 * -------------------------------------------------------
                 * |            file mapping                    | <EOF>  |
                 * -------------------------------------------------------
                 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
                 * ^--------------------------------^-----------|---------
                 * |                                |   Straddles       |
                 * ---------------------------------^-----------|--------|
                 */
                size_t poff = offset_in_folio(folio, isize);
                pgoff_t end_index = isize >> PAGE_SHIFT;

                /*
                 * Skip the page if it's fully outside i_size, e.g.
                 * due to a truncate operation that's in progress.  We've
                 * cleaned this page and truncate will finish things off for
                 * us.
                 *
                 * Note that the end_index is unsigned long.  If the given
                 * offset is greater than 16TB on a 32-bit system then if we
                 * checked if the page is fully outside i_size with
                 * "if (page->index >= end_index + 1)", "end_index + 1" would
                 * overflow and evaluate to 0.  Hence this page would be
                 * redirtied and written out repeatedly, which would result in
                 * an infinite loop; the user program performing this operation
                 * would hang.  Instead, we can detect this situation by
                 * checking if the page is totally beyond i_size or if its
                 * offset is just equal to the EOF.
                 */
                if (folio->index > end_index ||
                    (folio->index == end_index && poff == 0))
                        goto unlock;

                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining
                 * memory is zeroed when mapped, and writes to that region are
                 * not written out to the file."
                 */
                folio_zero_segment(folio, poff, folio_size(folio));
                end_pos = isize;
        }

        return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
        folio_redirty_for_writepage(wbc, folio);
unlock:
        folio_unlock(folio);
        return 0;
}
int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops)
{
        int                     ret;

        wpc->ops = ops;
        ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
        if (!wpc->ioend)
                return ret;
        return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
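/*
 * Filesystems place a struct iomap_writepage_ctx (usually embedded in a
 * larger per-writeback context) on the stack of their ->writepages method and
 * call iomap_writepages() with their iomap_writeback_ops.  A minimal sketch
 * with illustrative names:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&example_writeback_ops);
 *	}
 */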
static int __init iomap_init(void)
{
        return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
                           offsetof(struct iomap_ioend, io_inline_bio),
                           BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);