 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
-static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+void iomap_start_folio_write(struct inode *inode, struct folio *folio,
+		size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+	if (ifs)
+		atomic_add(len, &ifs->write_bytes_pending);
+}
+EXPORT_SYMBOL_GPL(iomap_start_folio_write);
+
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 		size_t len)
 {
 	struct iomap_folio_state *ifs = folio->private;
 	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
 		folio_end_writeback(folio);
 }
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
 /*
  * We're now finished for good with this ioend structure. Update the page
 		loff_t pos, loff_t end_pos, unsigned int dirty_len)
 {
 	struct iomap_ioend *ioend = wpc->wb_ctx;
-	struct iomap_folio_state *ifs = folio->private;
 	size_t poff = offset_in_folio(folio, pos);
 	unsigned int ioend_flags = 0;
 	unsigned int map_len = min_t(u64, dirty_len,
 	if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
 		goto new_ioend;
-	if (ifs)
-		atomic_add(map_len, &ifs->write_bytes_pending);
+	iomap_start_folio_write(wpc->inode, folio, map_len);
 	/*
 	 * Clamp io_offset and io_size to the incore EOF so that ondisk
 	 * all blocks.
 	 */
 		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
-		atomic_inc(&ifs->write_bytes_pending);
+		iomap_start_folio_write(inode, folio, 1);
 	}
 /*
 		loff_t pos, loff_t end_pos, unsigned int dirty_len);
 int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
+void iomap_start_folio_write(struct inode *inode, struct folio *folio,
+		size_t len);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+		size_t len);
+
 int iomap_writepages(struct iomap_writepage_ctx *wpc);
/*
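
For readers picking up these new exports, here is a minimal sketch of how a caller outside fs/iomap/buffered-io.c might pair the two helpers when it writes part of a folio through its own bio. Everything below (the myfs_* names, stashing the length in bi_private) is hypothetical illustration, not part of this patch; only iomap_start_folio_write() and iomap_finish_folio_write() come from the diff above. The invariant is simply that every byte count added to write_bytes_pending before submission is subtracted exactly once on completion, so folio_end_writeback() fires only after the last sub-folio write finishes.

/* Hypothetical example, not part of this patch. */
static void myfs_write_endio(struct bio *bio)
{
	struct folio *folio = page_folio(bio_first_page_all(bio));
	struct inode *inode = folio->mapping->host;
	size_t len = (size_t)(uintptr_t)bio->bi_private;

	/*
	 * Drop the byte count taken before submission; once
	 * write_bytes_pending reaches zero, iomap_finish_folio_write()
	 * calls folio_end_writeback() for us.
	 */
	iomap_finish_folio_write(inode, folio, len);
	bio_put(bio);
}

static void myfs_submit_folio_range(struct inode *inode, struct folio *folio,
		loff_t pos, size_t len, struct block_device *bdev,
		sector_t sector)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOFS);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = myfs_write_endio;
	bio->bi_private = (void *)(uintptr_t)len;	/* stash len for endio */
	bio_add_folio_nofail(bio, folio, len, offset_in_folio(folio, pos));

	/*
	 * Account the in-flight bytes before the bio can complete,
	 * mirroring what iomap_add_to_ioend() now does via the same helper.
	 */
	iomap_start_folio_write(inode, folio, len);
	submit_bio(bio);
}

This sketch assumes the folio is already marked under writeback (folio_start_writeback()) and, for multi-block folios, has its iomap_folio_state attached, as the iomap writeback path guarantees; the helpers themselves only balance the write_bytes_pending counter, with the !ifs single-block case ending writeback directly in iomap_finish_folio_write().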