struct erofs_iomap_iter_ctx {
struct page *page;
void *base;
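+ /* real (backing) inode to map blocks against; equals the mapping inode unless page cache sharing is in use */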
+ struct inode *realinode;
};
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
struct erofs_iomap_iter_ctx *ctx = iter->private;
- struct super_block *sb = inode->i_sb;
+ struct inode *realinode = ctx ? ctx->realinode : inode;
+ struct super_block *sb = realinode->i_sb;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
int ret;
map.m_la = offset;
map.m_llen = length;
- ret = erofs_map_blocks(inode, &map);
+ ret = erofs_map_blocks(realinode, &map);
if (ret < 0)
return ret;
return 0;
}
- if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(inode)) {
+ if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(realinode)) {
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
void *ptr;
ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
- erofs_inode_in_metabox(inode));
+ erofs_inode_in_metabox(realinode));
if (IS_ERR(ptr))
return PTR_ERR(ptr);
iomap->inline_data = ptr;
.ops = &iomap_bio_read_ops,
.cur_folio = folio,
};
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ bool need_iput;
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = erofs_real_inode(folio_inode(folio), &need_iput),
+ };
- trace_erofs_read_folio(folio_inode(folio), folio, true);
+ trace_erofs_read_folio(iter_ctx.realinode, folio, true);
iomap_read_folio(&erofs_iomap_ops, &read_ctx, &iter_ctx);
+ if (need_iput)
+ iput(iter_ctx.realinode);
return 0;
}
.ops = &iomap_bio_read_ops,
.rac = rac,
};
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ bool need_iput;
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = erofs_real_inode(rac->mapping->host, &need_iput),
+ };
- trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
+ trace_erofs_readahead(iter_ctx.realinode, readahead_index(rac),
readahead_count(rac), true);
iomap_readahead(&erofs_iomap_ops, &read_ctx, &iter_ctx);
+ if (need_iput)
+ iput(iter_ctx.realinode);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) {
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = inode,
+ };
return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
NULL, 0, &iter_ctx, 0);
bio));
}
-static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+static int erofs_fileio_scan_folio(struct erofs_fileio *io,
+ struct inode *inode, struct folio *folio)
{
- struct inode *inode = folio_inode(folio);
struct erofs_map_blocks *map = &io->map;
unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
loff_t pos = folio_pos(folio), ofs;
static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
{
+ bool need_iput;
+ struct inode *realinode = erofs_real_inode(folio_inode(folio), &need_iput);
struct erofs_fileio io = {};
int err;
- trace_erofs_read_folio(folio_inode(folio), folio, true);
- err = erofs_fileio_scan_folio(&io, folio);
+ trace_erofs_read_folio(realinode, folio, true);
+ err = erofs_fileio_scan_folio(&io, realinode, folio);
erofs_fileio_rq_submit(io.rq);
+ if (need_iput)
+ iput(realinode);
return err;
}
static void erofs_fileio_readahead(struct readahead_control *rac)
{
- struct inode *inode = rac->mapping->host;
+ bool need_iput;
+ struct inode *realinode = erofs_real_inode(rac->mapping->host, &need_iput);
struct erofs_fileio io = {};
struct folio *folio;
int err;
- trace_erofs_readahead(inode, readahead_index(rac),
+ trace_erofs_readahead(realinode, readahead_index(rac),
readahead_count(rac), true);
while ((folio = readahead_folio(rac))) {
- err = erofs_fileio_scan_folio(&io, folio);
+ err = erofs_fileio_scan_folio(&io, realinode, folio);
if (err && err != -EINTR)
- erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
- folio->index, EROFS_I(inode)->nid);
+ erofs_err(realinode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(realinode)->nid);
}
erofs_fileio_rq_submit(io.rq);
+ if (need_iput)
+ iput(realinode);
}
const struct address_space_operations erofs_fileio_aops = {
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &erofs_generic_iops;
- inode->i_fop = &erofs_file_fops;
+ inode->i_fop = erofs_ishare_fill_inode(inode) ?
+ &erofs_ishare_fops : &erofs_file_fops;
break;
case S_IFDIR:
inode->i_op = &erofs_dir_iops;
void erofs_exit_ishare(void);
bool erofs_ishare_fill_inode(struct inode *inode);
void erofs_ishare_free_inode(struct inode *inode);
+struct inode *erofs_real_inode(struct inode *inode, bool *need_iput);
#else
static inline int erofs_init_ishare(void) { return 0; }
static inline void erofs_exit_ishare(void) {}
static inline bool erofs_ishare_fill_inode(struct inode *inode) { return false; }
static inline void erofs_ishare_free_inode(struct inode *inode) {}
+static inline struct inode *erofs_real_inode(struct inode *inode, bool *need_iput)
+{
+ *need_iput = false;
+ return inode;
+}
#endif
long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static struct vfsmount *erofs_ishare_mnt;
+static inline bool erofs_is_ishare_inode(struct inode *inode)
+{
+ /* FS_ONDEMAND mode is assumed to be mutually exclusive with the FS_PAGE_CACHE_SHARE feature */
+ return inode->i_sb->s_type == &erofs_anon_fs_type;
+}
+
static int erofs_ishare_iget5_eq(struct inode *inode, void *data)
{
struct erofs_inode_fingerprint *fp1 = &EROFS_I(inode)->fingerprint;
struct inode *sharedinode;
unsigned long hash;
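+ /* only uncompressed inodes are considered for page cache sharing */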
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ return false;
if (erofs_xattr_fill_inode_fingerprint(&fp, inode, sbi->domain_id))
return false;
hash = xxh32(fp.opaque, fp.size, 0);
.splice_read = filemap_splice_read,
};
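+ /*
+  * Pick one of the real (backing) inodes behind a page-cache-sharing
+  * anonymous inode.  *need_iput tells the caller whether a reference was
+  * taken via igrab() and must be dropped with iput() later; non-shared
+  * inodes are returned as-is without an extra reference.
+  */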
+struct inode *erofs_real_inode(struct inode *inode, bool *need_iput)
+{
+ struct erofs_inode *vi, *vi_share;
+ struct inode *realinode = NULL;
+
+ *need_iput = false;
+ if (!erofs_is_ishare_inode(inode))
+ return inode;
+
+ vi_share = EROFS_I(inode);
+ spin_lock(&vi_share->ishare_lock);
+ /* grab any inode on the sharing list to serve as the real (backing) inode */
+ DBG_BUGON(list_empty(&vi_share->ishare_list));
+ list_for_each_entry(vi, &vi_share->ishare_list, ishare_list) {
+ realinode = igrab(&vi->vfs_inode);
+ if (realinode) {
+ *need_iput = true;
+ break;
+ }
+ }
+ spin_unlock(&vi_share->ishare_lock);
+
+ DBG_BUGON(!realinode);
+ return realinode;
+}
+
int __init erofs_init_ishare(void)
{
erofs_ishare_mnt = kern_mount(&erofs_anon_fs_type);