void ext4_fc_del(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_fc_dentry_update *fc_dentry;
wait_queue_head_t *wq;
+ int alloc_ctx;
if (ext4_fc_disabled(inode->i_sb))
return;
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(inode->i_sb);
if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(inode->i_sb, alloc_ctx);
return;
}
#endif
prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
if (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(inode->i_sb, alloc_ctx);
schedule();
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(inode->i_sb);
}
finish_wait(wq, &wait.wq_entry);
}
	/*
	 * This inode is being removed, so also drop its tracked fast commit
	 * dentry create references; there is no need to log them anyway.
*/
if (list_empty(&ei->i_fc_dilist)) {
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(inode->i_sb, alloc_ctx);
return;
}
list_del_init(&fc_dentry->fcd_dilist);
WARN_ON(!list_empty(&ei->i_fc_dilist));
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(inode->i_sb, alloc_ctx);
release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
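The wait loop above pairs with the committer side, which clears EXT4_STATE_FC_FLUSHING_DATA and then executes the smp_mb() seen later in this patch. A minimal sketch of that waker, assuming a bit-waitqueue to match the DEFINE_WAIT usage here; the helper name and field layout are illustrative, not the actual kernel code:

	/*
	 * Illustrative waker side (simplified: uses the 32-bit layout where
	 * the state bits live in ei->i_state_flags; the #if/#endif in
	 * ext4_fc_del() hints that 64-bit builds store them elsewhere).
	 */
	static void ext4_fc_wake_flush_waiters(struct inode *inode)
	{
		struct ext4_inode_info *ei = EXT4_I(inode);

		ext4_clear_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA);
		/*
		 * Order the flag clear before the waitqueue check inside
		 * wake_up_bit(); pairs with the barrier implied by
		 * prepare_to_wait() on the sleeper side.
		 */
		smp_mb();
		wake_up_bit(&ei->i_state_flags, EXT4_STATE_FC_FLUSHING_DATA);
	}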
tid_t tid;
bool has_transaction = true;
bool is_ineligible;
+ int alloc_ctx;
if (ext4_fc_disabled(sb))
return;
has_transaction = false;
read_unlock(&sbi->s_journal->j_state_lock);
}
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
if (has_transaction && (!is_ineligible || tid_gt(tid, sbi->s_fc_ineligible_tid)))
sbi->s_fc_ineligible_tid = tid;
ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
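Every hunk in this patch applies the same conversion: a bare mutex_lock()/mutex_unlock() on s_fc_lock becomes ext4_fc_lock()/ext4_fc_unlock(), threading an alloc_ctx value through each critical section. The helpers themselves are defined outside these hunks; a plausible sketch, assuming they bundle the mutex with a NOFS allocation scope, which is what the saved and restored alloc_ctx suggests:

	/* Plausible definitions; the real ones are outside this diff. */
	static inline int ext4_fc_lock(struct super_block *sb)
	{
		mutex_lock(&EXT4_SB(sb)->s_fc_lock);
		/*
		 * Enter a NOFS allocation scope so that allocations made
		 * while holding s_fc_lock cannot recurse into the
		 * filesystem and deadlock on the same lock.
		 */
		return memalloc_nofs_save();
	}

	static inline void ext4_fc_unlock(struct super_block *sb, int alloc_ctx)
	{
		memalloc_nofs_restore(alloc_ctx);
		mutex_unlock(&EXT4_SB(sb)->s_fc_lock);
	}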
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
tid_t tid = 0;
+ int alloc_ctx;
int ret;
tid = handle->h_transaction->t_tid;
if (!enqueue)
return ret;
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(inode->i_sb);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
(sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(inode->i_sb, alloc_ctx);
return ret;
}
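The ternary above is the point of the JBD2 flag test: while a full or fast commit is in flight, a newly tracked inode is parked on s_fc_q[FC_Q_STAGING] rather than on the main queue the committer is walking, keeping that queue stable; the staging entries are presumably spliced back onto FC_Q_MAIN once the commit cleans up (that splice is outside these hunks).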
struct inode *dir = dentry->d_parent->d_inode;
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int alloc_ctx;
spin_unlock(&ei->i_fc_lock);
take_dentry_name_snapshot(&node->fcd_name, dentry);
INIT_LIST_HEAD(&node->fcd_dilist);
INIT_LIST_HEAD(&node->fcd_list);
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
WARN_ON(!list_empty(&ei->i_fc_dilist));
list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
}
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
spin_lock(&ei->i_fc_lock);
return 0;
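Note the ordering dance in this function: i_fc_lock is a spinlock (see the spin_unlock()/spin_lock() bracketing above), while s_fc_lock is a sleeping mutex, so the spinlock must be released before ext4_fc_lock() and re-taken afterwards. Schematically:

	spin_unlock(&ei->i_fc_lock);	/* cannot sleep while holding a spinlock */
	alloc_ctx = ext4_fc_lock(sb);	/* mutex + NOFS scope */
	/* ... queue the dentry update node ... */
	ext4_fc_unlock(sb, alloc_ctx);
	spin_lock(&ei->i_fc_lock);	/* caller expects i_fc_lock held on return */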
struct blk_plug plug;
int ret = 0;
u32 crc = 0;
+ int alloc_ctx;
/*
* Step 1: Mark all inodes on s_fc_q[MAIN] with
* EXT4_STATE_FC_FLUSHING_DATA. This prevents these inodes from being
* freed until the data flush is over.
*/
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_FLUSHING_DATA);
}
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
/* Step 2: Flush data for all the eligible inodes. */
ret = ext4_fc_flush_data(journal);

	/*
	 * Step 3: Clear EXT4_STATE_FC_FLUSHING_DATA, regardless of
	 * any error from step 2. This ensures that waiters waiting on
* EXT4_STATE_FC_FLUSHING_DATA can resume.
*/
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_FLUSHING_DATA);
	}
	/*
	 * The smp_mb() below pairs with the implicit barrier in
	 * prepare_to_wait() in ext4_fc_del().
*/
smp_mb();
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
	/*
	 * If we encountered an error in Step 2, return it now after clearing
	 * the EXT4_STATE_FC_FLUSHING_DATA state.
	 */
	if (ret)
		return ret;

	/* Step 4: Drain running handles and mark inodes as committing. */
	jbd2_journal_lock_updates(journal);
	/*
	 * The journal is now locked; no more handles can start, and all
	 * previous handles are now drained. We now mark the inodes on the
* commit queue as being committed.
*/
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
}
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
jbd2_journal_unlock_updates(journal);
	/*
	 * Step 5: Issue a cache flush on the filesystem device so the data
	 * flushed in Step 2 is durable before the fast commit blocks that
	 * reference it are written.
	 */
blkdev_issue_flush(journal->j_fs_dev);
blk_start_plug(&plug);
+ alloc_ctx = ext4_fc_lock(sb);
/* Step 6: Write fast commit blocks to disk. */
if (sbi->s_fc_bytes == 0) {
		/*
		 * Add a head tag only if this is the first fast commit
		 * in this TID.
		 */
}
/* Step 6.2: Now write all the dentry updates. */
- mutex_lock(&sbi->s_fc_lock);
ret = ext4_fc_commit_dentry_updates(journal, &crc);
if (ret)
goto out;
ret = ext4_fc_write_tail(sb, crc);
out:
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
blk_finish_plug(&plug);
return ret;
}
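Net effect of the Step 6 hunks: the lock is now taken once, before the head-tag check, instead of only around the dentry updates, so the s_fc_bytes test, the dentry writes, and the tail write all share one critical section and one NOFS scope. Condensed, with unchanged code elided:

	alloc_ctx = ext4_fc_lock(sb);
	if (sbi->s_fc_bytes == 0) {
		/* ... head tag write, elided in the hunks above ... */
	}
	ret = ext4_fc_commit_dentry_updates(journal, &crc);
	if (ret)
		goto out;
	ret = ext4_fc_write_tail(sb, crc);
out:
	ext4_fc_unlock(sb, alloc_ctx);
	blk_finish_plug(&plug);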
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *ei;
struct ext4_fc_dentry_update *fc_dentry;
+ int alloc_ctx;
if (full && sbi->s_fc_bh)
sbi->s_fc_bh = NULL;
trace_ext4_fc_cleanup(journal, full, tid);
jbd2_fc_release_bufs(journal);
- mutex_lock(&sbi->s_fc_lock);
+ alloc_ctx = ext4_fc_lock(sb);
while (!list_empty(&sbi->s_fc_q[FC_Q_MAIN])) {
ei = list_first_entry(&sbi->s_fc_q[FC_Q_MAIN],
struct ext4_inode_info,
if (full)
sbi->s_fc_bytes = 0;
- mutex_unlock(&sbi->s_fc_lock);
+ ext4_fc_unlock(sb, alloc_ctx);
trace_ext4_fc_stats(sb);
}
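A closing observation on the cleanup path: the full flag distinguishes cleanup after a full journal commit from cleanup after a fast commit. Only the full case drops the cached fast commit buffer (s_fc_bh) and resets s_fc_bytes, since a full commit supersedes everything the fast commit area had staged.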