};
struct erofs_iomap_iter_ctx iter_ctx = {};
- trace_erofs_read_folio(folio, true);
-
+ trace_erofs_read_folio(folio_inode(folio), folio, true);
iomap_read_folio(&erofs_iomap_ops, &read_ctx, &iter_ctx);
return 0;
}
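(For reference, not introduced by this patch: folio_inode() is an existing helper in include/linux/fs.h, so the new first argument resolves to the same inode the tracepoint previously reached through folio->mapping->host. A minimal sketch of that helper, assuming the current upstream definition:)

static inline struct inode *folio_inode(struct folio *folio)
{
	/* the folio's owning inode, i.e. what the old trace code dereferenced directly */
	return folio->mapping->host;
}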
struct erofs_iomap_iter_ctx iter_ctx = {};
trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
- readahead_count(rac), true);
-
+ readahead_count(rac), true);
iomap_readahead(&erofs_iomap_ops, &read_ctx, &iter_ctx);
}
struct erofs_fileio io = {};
int err;
- trace_erofs_read_folio(folio, true);
+ trace_erofs_read_folio(folio_inode(folio), folio, true);
err = erofs_fileio_scan_folio(&io, folio);
erofs_fileio_rq_submit(io.rq);
return err;
Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
int err;
- trace_erofs_read_folio(folio, false);
+ trace_erofs_read_folio(inode, folio, false);
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_scan_folio(&f, folio, false);
z_erofs_pcluster_readmore(&f, NULL, false);
TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct folio *folio, bool raw),
+ TP_PROTO(struct inode *inode, struct folio *folio, bool raw),
- TP_ARGS(folio, raw),
+ TP_ARGS(inode, folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev)
),
TP_fast_assign(
- __entry->dev = folio->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(folio->mapping->host)->nid;
- __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_I(inode)->nid;
+ __entry->dir = S_ISDIR(inode->i_mode);
__entry->index = folio->index;
__entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;