.read = generic_read_dir,
.iterate_shared = udf_readdir,
.unlocked_ioctl = udf_ioctl,
- .fsync = generic_buffers_fsync,
+ .fsync = udf_fsync,
.setlease = generic_setlease,
};
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(iter->dir);
} else {
- mark_buffer_dirty_inode(iter->bh[0], iter->dir);
+ mmb_mark_buffer_dirty(iter->bh[0], &iinfo->i_metadata_bhs);
if (iter->bh[1])
- mark_buffer_dirty_inode(iter->bh[1], iter->dir);
+ mmb_mark_buffer_dirty(iter->bh[1],
+ &iinfo->i_metadata_bhs);
}
inode_inc_iversion(iter->dir);
}
return 0;
}
+/*
+ * udf_fsync - fsync callback for UDF files and directories.
+ *
+ * Thin wrapper around mmb_fsync(), passing the per-inode metadata
+ * buffer list (UDF_I()->i_metadata_bhs) so that, in addition to the
+ * [start, end] range of the file's page cache, the inode's tracked
+ * metadata buffers are written out.  Replaces generic_buffers_fsync,
+ * which relied on the mapping's private buffer list.
+ *
+ * NOTE(review): exact write-out/wait ordering is defined by mmb_fsync(),
+ * which is not visible in this hunk — confirm against its definition.
+ * Returns 0 on success or a negative errno from mmb_fsync().
+ */
+int udf_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return mmb_fsync(file,
+ &UDF_I(file->f_mapping->host)->i_metadata_bhs,
+ start, end, datasync);
+}
+
const struct file_operations udf_file_operations = {
.read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.mmap = udf_file_mmap,
.write_iter = udf_file_write_iter,
.release = udf_release_file,
- .fsync = generic_buffers_fsync,
+ .fsync = udf_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = generic_file_llseek,
}
truncate_inode_pages_final(&inode->i_data);
if (!want_delete)
- sync_mapping_buffers(&inode->i_data);
- invalidate_inode_buffers(inode);
+ mmb_sync(&iinfo->i_metadata_bhs);
+ mmb_invalidate(&iinfo->i_metadata_bhs);
clear_inode(inode);
kfree(iinfo->i_data);
iinfo->i_data = NULL;
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
unlock_buffer(bh);
- mark_buffer_dirty_inode(bh, inode);
+ mmb_mark_buffer_dirty(bh, &UDF_I(inode)->i_metadata_bhs);
return bh;
}
memset(bh->b_data, 0x00, sb->s_blocksize);
set_buffer_uptodate(bh);
unlock_buffer(bh);
- mark_buffer_dirty_inode(bh, inode);
+ mmb_mark_buffer_dirty(bh, &UDF_I(inode)->i_metadata_bhs);
aed = (struct allocExtDesc *)(bh->b_data);
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
else
udf_update_tag(epos->bh->b_data,
sizeof(struct allocExtDesc));
- mark_buffer_dirty_inode(epos->bh, inode);
+ mmb_mark_buffer_dirty(epos->bh, &iinfo->i_metadata_bhs);
}
return 0;
le32_to_cpu(aed->lengthAllocDescs) +
sizeof(struct allocExtDesc));
}
- mark_buffer_dirty_inode(epos->bh, inode);
+ mmb_mark_buffer_dirty(epos->bh, &iinfo->i_metadata_bhs);
} else {
mark_inode_dirty(inode);
}
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
- mark_buffer_dirty_inode(oepos.bh, inode);
+ mmb_mark_buffer_dirty(oepos.bh, &iinfo->i_metadata_bhs);
}
} else {
udf_write_aext(inode, &oepos, &eloc, elen, 1);
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
- mark_buffer_dirty_inode(oepos.bh, inode);
+ mmb_mark_buffer_dirty(oepos.bh, &iinfo->i_metadata_bhs);
}
}
memset(epos.bh->b_data, 0x00, bsize);
set_buffer_uptodate(epos.bh);
unlock_buffer(epos.bh);
- mark_buffer_dirty_inode(epos.bh, inode);
+ mmb_mark_buffer_dirty(epos.bh, &iinfo->i_metadata_bhs);
ea = epos.bh->b_data + udf_ext0_offset(inode);
} else
ea = iinfo->i_data + iinfo->i_lenEAttr;
ei->cached_extent.lstart = -1;
spin_lock_init(&ei->i_extent_cache_lock);
inode_set_iversion(&ei->vfs_inode, 1);
+ mmb_init(&ei->i_metadata_bhs, &ei->vfs_inode.i_data);
return &ei->vfs_inode;
}
len += lenalloc;
udf_update_tag(epos->bh->b_data, len);
- mark_buffer_dirty_inode(epos->bh, inode);
+ mmb_mark_buffer_dirty(epos->bh, &UDF_I(inode)->i_metadata_bhs);
}
/*
struct kernel_lb_addr i_locStreamdir;
__u64 i_lenStreams;
struct rw_semaphore i_data_sem;
+ struct mapping_metadata_bhs i_metadata_bhs;
struct udf_ext_cache cached_extent;
/* Spinlock for protecting extent cache */
spinlock_t i_extent_cache_lock;
/* file.c */
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
+int udf_fsync(struct file *file, loff_t start, loff_t end, int datasync);
/* inode.c */
extern struct inode *__udf_iget(struct super_block *, struct kernel_lb_addr *,