}
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode, loff_t pos,
- u16 ioend_flags)
+ loff_t pos, u16 ioend_flags)
{
struct bio *bio;
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
- REQ_OP_WRITE | wbc_to_write_flags(wbc),
+ REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
GFP_NOFS, &iomap_ioend_bioset);
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
bio->bi_end_io = iomap_writepage_end_bio;
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
+ bio->bi_write_hint = wpc->inode->i_write_hint;
+ wbc_init_bio(wpc->wbc, bio);
wpc->nr_folios = 0;
- return iomap_init_ioend(inode, bio, pos, ioend_flags);
+ return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
}
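
Note: the hunks above assume the writeback_control and inode have been moved into the writepage context. A rough sketch of the assumed struct layout, based only on the fields referenced in this diff (wpc->iomap, wpc->inode, wpc->wbc, wpc->ops, wpc->ioend, wpc->nr_folios); the exact member order and any other fields are not shown here:

	struct iomap_writepage_ctx {
		struct iomap			iomap;		/* current mapping */
		struct inode			*inode;		/* inode under writeback */
		struct writeback_control	*wbc;		/* writeback control for this pass */
		const struct iomap_writeback_ops *ops;		/* filesystem callbacks */
		struct iomap_ioend		*ioend;		/* ioend currently being built */
		u32				nr_folios;	/* folios added to the ioend */
	};
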
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
* writepage context that the caller will need to submit.
*/
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, loff_t pos, loff_t end_pos,
- unsigned len)
+ struct folio *folio, loff_t pos, loff_t end_pos, unsigned len)
{
struct iomap_folio_state *ifs = folio->private;
size_t poff = offset_in_folio(folio, pos);
error = iomap_submit_ioend(wpc, 0);
if (error)
return error;
- wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos,
- ioend_flags);
+ wpc->ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
}
if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;
- wbc_account_cgroup_owner(wbc, folio, len);
+ wbc_account_cgroup_owner(wpc->wbc, folio, len);
return 0;
}
static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, u64 pos, u64 end_pos,
- unsigned dirty_len, unsigned *count)
+ struct folio *folio, u64 pos, u64 end_pos, unsigned dirty_len,
+ unsigned *count)
{
int error;
do {
unsigned map_len;
- error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
+ error = wpc->ops->map_blocks(wpc, wpc->inode, pos, dirty_len);
if (error)
break;
- trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
+ trace_iomap_writepage_map(wpc->inode, pos, dirty_len,
+ &wpc->iomap);
map_len = min_t(u64, dirty_len,
wpc->iomap.offset + wpc->iomap.length - pos);
case IOMAP_HOLE:
break;
default:
- error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
- end_pos, map_len);
+ error = iomap_add_to_ioend(wpc, folio, pos, end_pos,
+ map_len);
if (!error)
(*count)++;
break;
}
static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio)
+ struct folio *folio)
{
struct iomap_folio_state *ifs = folio->private;
- struct inode *inode = folio->mapping->host;
+ struct inode *inode = wpc->inode;
u64 pos = folio_pos(folio);
u64 end_pos = pos + folio_size(folio);
u64 end_aligned = 0;
*/
end_aligned = round_up(end_pos, i_blocksize(inode));
while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
- error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
- pos, end_pos, rlen, &count);
+ error = iomap_writepage_map_blocks(wpc, folio, pos, end_pos,
+ rlen, &count);
if (error)
break;
pos += rlen;
}
int
-iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
+iomap_writepages(struct iomap_writepage_ctx *wpc)
{
+ struct address_space *mapping = wpc->inode->i_mapping;
struct folio *folio = NULL;
int error;
PF_MEMALLOC))
return -EIO;
- wpc->ops = ops;
- while ((folio = writeback_iter(mapping, wbc, folio, &error)))
- error = iomap_writepage_map(wpc, wbc, folio);
+ while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error)))
+ error = iomap_writepage_map(wpc, folio);
return iomap_submit_ioend(wpc, error);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
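
With iomap_writepages() reduced to a single argument, a filesystem's ->writepages implementation is expected to fill in the context (inode, wbc, ops) itself before calling in, since the `wpc->ops = ops;` assignment is gone. A minimal, hypothetical caller sketch; the filesystem name and its writeback_ops table are placeholders, not taken from this patch:

	static int example_fs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		struct iomap_writepage_ctx wpc = {
			.inode	= mapping->host,
			.wbc	= wbc,
			.ops	= &example_fs_writeback_ops,	/* hypothetical ops table */
		};

		return iomap_writepages(&wpc);
	}
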