f2fs: use f2fs_{down,up}_read_trace() for cp_rwsem to trace lock elapsed time

Convert f2fs_lock_op(), f2fs_trylock_op() and f2fs_unlock_op() from static
inlines in f2fs.h into functions in checkpoint.c that take a struct
f2fs_lock_context, and make every caller pass a per-call context so that
the time spent holding cp_rwsem can be traced. cp_rwsem is now initialized
with init_f2fs_rwsem_trace() under the new LOCK_NAME_CP_RWSEM lock name,
f2fs_lock_all()/f2fs_unlock_all() become local to checkpoint.c, and
f2fs_handle_failed_inode() takes the caller's lock context so it can still
drop cp_rwsem on the error path.
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
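
The f2fs_lock_context plumbing and the f2fs_{down,up}_read_trace() wrappers
come from earlier patches in this series and are only partially visible in
the hunks below. As a reading aid, here is a minimal sketch of one plausible
shape of the read-side wrappers, assuming the context only records the
acquisition timestamp; the field layout, the timing scheme and the meaning
of the final boolean passed to trace_lock_elapsed_time_end() are assumptions,
not this series' actual implementation:

	/* sketch only -- assumes <linux/ktime.h>; not part of this patch */
	struct f2fs_lock_context {
		ktime_t start;		/* assumed: when the rwsem was taken */
	};

	static inline void f2fs_down_read_trace(struct f2fs_rwsem *sem,
						struct f2fs_lock_context *lc)
	{
		f2fs_down_read(sem);
		lc->start = ktime_get();	/* assumed: start timing on acquisition */
	}

	static inline void f2fs_up_read_trace(struct f2fs_rwsem *sem,
						struct f2fs_lock_context *lc)
	{
		/* assumed: emit elapsed time since lc->start; the boolean is
		 * taken here to mean "read-side" */
		trace_lock_elapsed_time_end(sem, lc, true);
		f2fs_up_read(sem);
	}

With that shape in mind, each caller below allocates an on-stack
struct f2fs_lock_context and passes the same context to the matching
f2fs_lock_op()/f2fs_trylock_op() and f2fs_unlock_op() pair.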
trace_lock_elapsed_time_end(sem, lc, true);
}
+void f2fs_lock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
+{
+ f2fs_down_read_trace(&sbi->cp_rwsem, lc);
+}
+
+int f2fs_trylock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
+{
+ if (time_to_inject(sbi, FAULT_LOCK_OP))
+ return 0;
+
+ return f2fs_down_read_trylock_trace(&sbi->cp_rwsem, lc);
+}
+
+void f2fs_unlock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
+{
+ f2fs_up_read_trace(&sbi->cp_rwsem, lc);
+}
+
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
+{
+ f2fs_down_write(&sbi->cp_rwsem);
+}
+
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
+{
+ f2fs_up_write(&sbi->cp_rwsem);
+}
+
#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))
static struct kmem_cache *ino_entry_slab;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
+ struct f2fs_lock_context lc;
pgoff_t start_idx = start_idx_of_cluster(cc);
unsigned int last_index = cc->cluster_size - 1;
loff_t psize;
* the below discard race condition.
*/
f2fs_down_read(&sbi->node_write);
- } else if (!f2fs_trylock_op(sbi)) {
+ } else if (!f2fs_trylock_op(sbi, &lc)) {
goto out_free;
}
if (quota_inode)
f2fs_up_read(&sbi->node_write);
else
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
spin_lock(&fi->i_size_lock);
if (fi->last_disk_size < psize)
if (quota_inode)
f2fs_up_read(&sbi->node_write);
else
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
out_free:
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_compress_free_page(cc->cpages[i]);
{
struct address_space *mapping = cc->inode->i_mapping;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct f2fs_lock_context lc;
int submitted, compr_blocks, i;
int ret = 0;
/* overwrite compressed cluster w/ normal cluster */
if (compr_blocks > 0)
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
for (i = 0; i < cc->cluster_size; i++) {
struct folio *folio;
out:
if (compr_blocks > 0)
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_balance_fs(sbi, true);
return ret;
return 0;
}
-static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
+static void f2fs_map_lock(struct f2fs_sb_info *sbi,
+ struct f2fs_lock_context *lc,
+ int flag)
{
f2fs_down_read(&sbi->cp_enable_rwsem);
if (flag == F2FS_GET_BLOCK_PRE_AIO)
f2fs_down_read(&sbi->node_change);
else
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, lc);
}
-static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
+static void f2fs_map_unlock(struct f2fs_sb_info *sbi,
+ struct f2fs_lock_context *lc,
+ int flag)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO)
f2fs_up_read(&sbi->node_change);
else
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, lc);
f2fs_up_read(&sbi->cp_enable_rwsem);
}
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ struct f2fs_lock_context lc;
int err = 0;
- f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
&dn->data_blkaddr))
err = f2fs_reserve_block(dn, index);
- f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ f2fs_map_unlock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_lock_context lc;
int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
if (map->m_may_create) {
if (f2fs_lfs_mode(sbi))
f2fs_balance_fs(sbi, true);
- f2fs_map_lock(sbi, flag);
+ f2fs_map_lock(sbi, &lc, flag);
}
/* When reading holes, we need its node page */
f2fs_put_dnode(&dn);
if (map->m_may_create) {
- f2fs_map_unlock(sbi, flag);
+ f2fs_map_unlock(sbi, &lc, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
f2fs_put_dnode(&dn);
unlock_out:
if (map->m_may_create) {
- f2fs_map_unlock(sbi, flag);
+ f2fs_map_unlock(sbi, &lc, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
+ struct f2fs_lock_context lc;
bool ipu_force = false;
bool atomic_commit;
int err = 0;
}
/* Deadlock due to between page->lock and f2fs_lock_op */
- if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
+ if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi, &lc))
return -EAGAIN;
err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
folio_start_writeback(folio);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
- f2fs_unlock_op(fio->sbi);
+ f2fs_unlock_op(fio->sbi, &lc);
err = f2fs_inplace_write_data(fio);
if (err) {
if (fscrypt_inode_uses_fs_layer_crypto(inode))
}
if (fio->need_lock == LOCK_RETRY) {
- if (!f2fs_trylock_op(fio->sbi)) {
+ if (!f2fs_trylock_op(fio->sbi, &lc)) {
err = -EAGAIN;
goto out_writepage;
}
f2fs_put_dnode(&dn);
out:
if (fio->need_lock == LOCK_REQ)
- f2fs_unlock_op(fio->sbi);
+ f2fs_unlock_op(fio->sbi, &lc);
return err;
}
struct inode *inode = folio->mapping->host;
pgoff_t index = folio->index;
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
struct folio *ifolio;
bool locked = false;
int flag = F2FS_GET_BLOCK_PRE_AIO;
if (f2fs_has_inline_data(inode)) {
if (pos + len > MAX_INLINE_DATA(inode))
flag = F2FS_GET_BLOCK_DEFAULT;
- f2fs_map_lock(sbi, flag);
+ f2fs_map_lock(sbi, &lc, flag);
locked = true;
} else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
- f2fs_map_lock(sbi, flag);
+ f2fs_map_lock(sbi, &lc, flag);
locked = true;
}
if (!err && dn.data_blkaddr != NULL_ADDR)
goto out;
f2fs_put_dnode(&dn);
- f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
locked = true;
goto restart;
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- f2fs_map_unlock(sbi, flag);
+ f2fs_map_unlock(sbi, &lc, flag);
return err;
}
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
struct folio *ifolio;
int err = 0;
- f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
if (IS_ERR(ifolio)) {
f2fs_put_dnode(&dn);
unlock_out:
- f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ f2fs_map_unlock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
enum f2fs_lock_name {
LOCK_NAME_NONE,
+ LOCK_NAME_CP_RWSEM,
};
/*
return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}
-static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
-{
- f2fs_down_read(&sbi->cp_rwsem);
-}
-
-static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
-{
- if (time_to_inject(sbi, FAULT_LOCK_OP))
- return 0;
- return f2fs_down_read_trylock(&sbi->cp_rwsem);
-}
-
-static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
-{
- f2fs_up_read(&sbi->cp_rwsem);
-}
-
-static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
-{
- f2fs_down_write(&sbi->cp_rwsem);
-}
-
-static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
-{
- f2fs_up_write(&sbi->cp_rwsem);
-}
-
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
int reason = CP_SYNC;
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_remove_donate_inode(struct inode *inode);
void f2fs_evict_inode(struct inode *inode);
-void f2fs_handle_failed_inode(struct inode *inode);
+void f2fs_handle_failed_inode(struct inode *inode, struct f2fs_lock_context *lc);
/*
* namei.c
/*
* checkpoint.c
*/
+void f2fs_lock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc);
+int f2fs_trylock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc);
+void f2fs_unlock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc);
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
pgoff_t free_from;
int count = 0, err = 0;
struct folio *ifolio;
goto free_partial;
if (lock)
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
if (IS_ERR(ifolio)) {
err = f2fs_truncate_inode_blocks(inode, free_from);
out:
if (lock)
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
free_partial:
/* lastly zero out the first data page */
if (!err)
}
if (i_uid_needs_update(idmap, attr, inode) ||
i_gid_needs_update(idmap, attr, inode)) {
- f2fs_lock_op(sbi);
+ struct f2fs_lock_context lc;
+
+ f2fs_lock_op(sbi, &lc);
err = dquot_transfer(idmap, inode, attr);
if (err) {
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
return err;
}
/*
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
}
if (attr->ia_valid & ATTR_SIZE) {
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct folio *folio;
+ struct f2fs_lock_context lc;
if (!len)
return 0;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
folio = f2fs_get_new_data_folio(inode, NULL, index, false);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (IS_ERR(folio))
return PTR_ERR(folio);
if (pg_start < pg_end) {
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_lock_context lc;
f2fs_balance_fs(sbi, true);
truncate_pagecache_range(inode, blk_start, blk_end - 1);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
filemap_invalidate_unlock(inode->i_mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_lock_context lc;
pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
pgoff_t start = offset >> PAGE_SHIFT;
pgoff_t end = (offset + len) >> PAGE_SHIFT;
f2fs_zero_post_eof_page(inode, offset + len, false);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
f2fs_drop_extent_tree(inode);
truncate_pagecache(inode, offset);
ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
filemap_invalidate_unlock(inode->i_mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
for (index = pg_start; index < pg_end;) {
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
unsigned int end_offset;
pgoff_t end;
(loff_t)index << PAGE_SHIFT,
((loff_t)pg_end << PAGE_SHIFT) - 1);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
filemap_invalidate_unlock(mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
filemap_invalidate_unlock(mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
truncate_pagecache(inode, offset);
while (!ret && idx > pg_start) {
+ struct f2fs_lock_context lc;
+
nr = idx - pg_start;
if (nr > delta)
nr = delta;
idx -= nr;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
f2fs_drop_extent_tree(inode);
ret = __exchange_data_block(inode, inode, idx,
idx + delta, nr, false);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
}
filemap_invalidate_unlock(mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
struct inode *src = file_inode(file_in);
struct inode *dst = file_inode(file_out);
struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+ struct f2fs_lock_context lc;
size_t olen = len, dst_max_i_size = 0;
size_t dst_osize;
int ret;
goto out_src;
}
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
F2FS_BYTES_TO_BLK(pos_out),
F2FS_BYTES_TO_BLK(len), false);
else if (dst_osize != dst->i_size)
f2fs_i_size_write(dst, dst_osize);
}
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (src != dst)
f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode *ri = NULL;
+ struct f2fs_lock_context lc;
kprojid_t kprojid;
int err;
if (err)
return err;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_transfer_project_quota(inode, kprojid);
if (err)
goto out_unlock;
inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
return err;
}
#else
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_lock_context lc;
pgoff_t page_idx = 0, last_idx;
unsigned int released_blocks = 0;
int ret;
struct dnode_of_data dn;
pgoff_t end_offset, count;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (ret < 0)
break;
while (page_idx < last_idx) {
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
pgoff_t end_offset, count;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (ret < 0)
break;
struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
__u64 old_block_count, shrunk_blocks;
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+ struct f2fs_lock_context lc;
unsigned int secs;
int err = 0;
__u32 rem;
}
/* stop CP to protect MAIN_SEC in free_segment_range */
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
spin_lock(&sbi->stat_lock);
if (shrunk_blocks + valid_user_blocks(sbi) +
err = free_segment_range(sbi, secs, true);
out_unlock:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_up_write(&sbi->gc_lock);
out_drop_write:
mnt_drop_write_file(filp);
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
+ struct f2fs_lock_context lc;
struct folio *ifolio, *folio;
int err = 0;
if (IS_ERR(folio))
return PTR_ERR(folio);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
if (IS_ERR(ifolio)) {
f2fs_put_dnode(&dn);
out:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_folio_put(folio, true);
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct folio *ifolio;
struct f2fs_filename fname;
+ struct f2fs_lock_context lc;
void *inline_dentry = NULL;
int err = 0;
if (!f2fs_has_inline_dentry(dir))
return 0;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
if (err)
out_fname:
f2fs_free_filename(&fname);
out:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
return err;
}
err = -EIO;
if (!err) {
- f2fs_lock_op(sbi);
+ struct f2fs_lock_context lc;
+
+ f2fs_lock_op(sbi, &lc);
err = f2fs_remove_inode_page(inode);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (err == -ENOENT) {
err = 0;
}
- /* caller should call f2fs_lock_op() */
+ /* caller should call f2fs_lock_op() and pass in the same lock context */
-void f2fs_handle_failed_inode(struct inode *inode)
+void f2fs_handle_failed_inode(struct inode *inode, struct f2fs_lock_context *lc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct node_info ni;
}
out:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, lc);
/* iput will drop the inode object */
iput(inode);
struct dentry *dentry, umode_t mode, bool excl)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
struct inode *inode;
nid_t ino = 0;
int err;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(dentry, inode);
if (err)
goto out;
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_alloc_nid_done(sbi, ino);
f2fs_balance_fs(sbi, true);
return 0;
out:
- f2fs_handle_failed_inode(inode);
+ f2fs_handle_failed_inode(inode, &lc);
return err;
}
{
struct inode *inode = d_inode(old_dentry);
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
int err;
if (unlikely(f2fs_cp_error(sbi)))
ihold(inode);
set_inode_flag(inode, FI_INC_LINK);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(dentry, inode);
if (err)
goto out;
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
d_instantiate(dentry, inode);
out:
clear_inode_flag(inode, FI_INC_LINK);
iput(inode);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
return err;
}
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
+ struct f2fs_lock_context lc;
struct folio *folio;
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_acquire_orphan_inode(sbi);
if (err) {
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_folio_put(folio, false);
goto out;
}
f2fs_delete_entry(de, folio, dir, inode);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
struct dentry *dentry, const char *symname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
struct inode *inode;
size_t len = strlen(symname);
struct fscrypt_str disk_link;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(dentry, inode);
if (err)
goto out_f2fs_handle_failed_inode;
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_alloc_nid_done(sbi, inode->i_ino);
err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link);
goto out_free_encrypted_link;
out_f2fs_handle_failed_inode:
- f2fs_handle_failed_inode(inode);
+ f2fs_handle_failed_inode(inode, &lc);
out_free_encrypted_link:
if (disk_link.name != (unsigned char *)symname)
kfree(disk_link.name);
struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
struct inode *inode;
int err;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
set_inode_flag(inode, FI_INC_LINK);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(dentry, inode);
if (err)
goto out_fail;
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_alloc_nid_done(sbi, inode->i_ino);
out_fail:
clear_inode_flag(inode, FI_INC_LINK);
- f2fs_handle_failed_inode(inode);
+ f2fs_handle_failed_inode(inode, &lc);
return ERR_PTR(err);
}
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
struct inode *inode;
int err = 0;
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(dentry, inode);
if (err)
goto out;
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_alloc_nid_done(sbi, inode->i_ino);
f2fs_balance_fs(sbi, true);
return 0;
out:
- f2fs_handle_failed_inode(inode);
+ f2fs_handle_failed_inode(inode, &lc);
return err;
}
struct inode **new_inode, struct f2fs_filename *fname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_lock_context lc;
struct inode *inode;
int err;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
}
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_acquire_orphan_inode(sbi);
if (err)
goto out;
f2fs_i_links_write(inode, false);
}
/* link_count was changed by d_tmpfile as well. */
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
unlock_new_inode(inode);
if (new_inode)
release_out:
f2fs_release_orphan_inode(sbi);
out:
- f2fs_handle_failed_inode(inode);
+ f2fs_handle_failed_inode(inode, &lc);
return err;
}
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
+ struct f2fs_lock_context lc;
bool old_is_dir = S_ISDIR(old_inode->i_mode);
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_acquire_orphan_inode(sbi);
if (err)
} else {
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_add_link(new_dentry, old_inode);
if (err) {
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
goto out_dir;
}
TRANS_DIR_INO);
}
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
return 0;
put_out_dir:
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_folio_put(new_folio, false);
out_dir:
if (old_dir_entry)
struct folio *old_folio, *new_folio;
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
struct f2fs_dir_entry *old_entry, *new_entry;
+ struct f2fs_lock_context lc;
int old_nlink = 0, new_nlink = 0;
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
/* update ".." directory entry info of old dentry */
if (old_dir_entry)
f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
}
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_lock_context lc;
int err;
err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
return err;
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = __f2fs_commit_atomic_write(inode);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
return err;
int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
{
+ struct f2fs_lock_context lc;
int err;
bool gc_required = true;
retry:
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
f2fs_down_write(&sbi->gc_lock);
* that userspace sees the changes.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ struct f2fs_lock_context lc;
if (type != -1 && cnt != type)
continue;
* block_operation
* f2fs_down_read(quota_sem)
*/
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
f2fs_down_read(&sbi->quota_sem);
ret = f2fs_quota_sync_file(sbi, cnt);
f2fs_up_read(&sbi->quota_sem);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
if (!f2fs_sb_has_quota_ino(sbi))
inode_unlock(dqopt->files[cnt]);
init_f2fs_rwsem(&sbi->node_write);
init_f2fs_rwsem(&sbi->node_change);
spin_lock_init(&sbi->stat_lock);
- init_f2fs_rwsem(&sbi->cp_rwsem);
+ init_f2fs_rwsem_trace(&sbi->cp_rwsem, sbi, LOCK_NAME_CP_RWSEM);
init_f2fs_rwsem(&sbi->cp_enable_rwsem);
init_f2fs_rwsem(&sbi->quota_sem);
init_waitqueue_head(&sbi->cp_wait);
struct folio *ifolio, int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_lock_context lc;
int err;
if (unlikely(f2fs_cp_error(sbi)))
size, ifolio, flags);
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ f2fs_lock_op(sbi, &lc);
f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
err = __f2fs_setxattr(inode, index, name, value, size, NULL, flags);
f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
- f2fs_unlock_op(sbi);
+ f2fs_unlock_op(sbi, &lc);
f2fs_update_time(sbi, REQ_TIME);
return err;
#define show_lock_name(lock) \
__print_symbolic(lock, \
- { LOCK_NAME_NONE, "none" })
+ { LOCK_NAME_NONE, "none" }, \
+ { LOCK_NAME_CP_RWSEM, "cp_rwsem" })
struct f2fs_sb_info;
struct f2fs_io_info;