ext4: convert s_fc_lock to mutex type
Author:     Harshad Shirwadkar <harshadshirwadkar@gmail.com>
AuthorDate: Thu, 8 May 2025 17:59:07 +0000 (17:59 +0000)
Commit:     Theodore Ts'o <tytso@mit.edu>
CommitDate: Fri, 9 May 2025 01:56:17 +0000 (21:56 -0400)
This allows us to hold s_fc_lock across kmem_cache_* calls, which is
needed by the following patch.

Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20250508175908.1004880-9-harshadshirwadkar@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
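
A brief illustration (not part of this patch): with a spinlock, the fast-commit
code has to drop s_fc_lock before calling anything that may sleep, as the hunks
below do around kmem_cache_free(). With a mutex, a sleeping call such as
kmem_cache_alloc(..., GFP_KERNEL) can be made while the lock is held. The sketch
below uses hypothetical names (example_cachep, example_lock) standing in for the
fast-commit dentry cache and sbi->s_fc_lock; it only shows the general pattern
the conversion enables.

/*
 * Minimal sketch, not taken from the patch.  GFP_KERNEL allocations may
 * sleep, which is illegal while holding a spinlock (atomic context) but
 * allowed while holding a mutex.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;   /* hypothetical cache */
static DEFINE_MUTEX(example_lock);          /* stands in for sbi->s_fc_lock */

static void *example_alloc_under_lock(void)
{
	void *obj;

	mutex_lock(&example_lock);
	/* May sleep; safe only because example_lock is a mutex, not a spinlock. */
	obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
	mutex_unlock(&example_lock);
	return obj;
}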
fs/ext4/ext4.h
fs/ext4/fast_commit.c
fs/ext4/super.c

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3987c5bf2ff9741f0bf33f235f545e596ff7b016..052d7afeefaf49615d97ed45f480cf3642989b4f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1754,7 +1754,7 @@ struct ext4_sb_info {
         * following fields:
         * ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh.
         */
-       spinlock_t s_fc_lock;
+       struct mutex s_fc_lock;
        struct buffer_head *s_fc_bh;
        struct ext4_fc_stats s_fc_stats;
        tid_t s_fc_ineligible_tid;
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 5f6a8ec249b97b33d6c7fffc48acd2f6d59057e7..eb888e52261f79a1070078b880bdc8847a15d554 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -238,9 +238,9 @@ void ext4_fc_del(struct inode *inode)
        if (ext4_fc_disabled(inode->i_sb))
                return;
 
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
                return;
        }
 
@@ -275,9 +275,9 @@ void ext4_fc_del(struct inode *inode)
 #endif
                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                if (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
-                       spin_unlock(&sbi->s_fc_lock);
+                       mutex_unlock(&sbi->s_fc_lock);
                        schedule();
-                       spin_lock(&sbi->s_fc_lock);
+                       mutex_lock(&sbi->s_fc_lock);
                }
                finish_wait(wq, &wait.wq_entry);
        }
@@ -288,7 +288,7 @@ void ext4_fc_del(struct inode *inode)
         * dentry create references, since it is not needed to log it anyways.
         */
        if (list_empty(&ei->i_fc_dilist)) {
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
                return;
        }
 
@@ -298,7 +298,7 @@ void ext4_fc_del(struct inode *inode)
        list_del_init(&fc_dentry->fcd_dilist);
 
        WARN_ON(!list_empty(&ei->i_fc_dilist));
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
 
        release_dentry_name_snapshot(&fc_dentry->fcd_name);
        kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
@@ -329,12 +329,12 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
                        has_transaction = false;
                read_unlock(&sbi->s_journal->j_state_lock);
        }
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
        if (has_transaction && (!is_ineligible || tid_gt(tid, sbi->s_fc_ineligible_tid)))
                sbi->s_fc_ineligible_tid = tid;
        ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
        WARN_ON(reason >= EXT4_FC_REASON_MAX);
        sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
 }
@@ -373,14 +373,14 @@ static int ext4_fc_track_template(
        if (!enqueue)
                return ret;
 
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        if (list_empty(&EXT4_I(inode)->i_fc_list))
                list_add_tail(&EXT4_I(inode)->i_fc_list,
                                (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
                                 sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
                                &sbi->s_fc_q[FC_Q_STAGING] :
                                &sbi->s_fc_q[FC_Q_MAIN]);
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
 
        return ret;
 }
@@ -424,7 +424,7 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode,
        node->fcd_ino = inode->i_ino;
        take_dentry_name_snapshot(&node->fcd_name, dentry);
        INIT_LIST_HEAD(&node->fcd_dilist);
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
                sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
                list_add_tail(&node->fcd_list,
@@ -445,7 +445,7 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode,
                WARN_ON(!list_empty(&ei->i_fc_dilist));
                list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
        }
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
        spin_lock(&ei->i_fc_lock);
 
        return 0;
@@ -1000,12 +1000,12 @@ __releases(&sbi->s_fc_lock)
        list_for_each_entry_safe(fc_dentry, fc_dentry_n,
                                 &sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
                if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
-                       spin_unlock(&sbi->s_fc_lock);
+                       mutex_unlock(&sbi->s_fc_lock);
                        if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
                                ret = -ENOSPC;
                                goto lock_and_exit;
                        }
-                       spin_lock(&sbi->s_fc_lock);
+                       mutex_lock(&sbi->s_fc_lock);
                        continue;
                }
                /*
@@ -1018,7 +1018,7 @@ __releases(&sbi->s_fc_lock)
                inode = &ei->vfs_inode;
                WARN_ON(inode->i_ino != fc_dentry->fcd_ino);
 
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
 
                /*
                 * We first write the inode and then the create dirent. This
@@ -1040,11 +1040,11 @@ __releases(&sbi->s_fc_lock)
                        goto lock_and_exit;
                }
 
-               spin_lock(&sbi->s_fc_lock);
+               mutex_lock(&sbi->s_fc_lock);
        }
        return 0;
 lock_and_exit:
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        return ret;
 }
 
@@ -1064,12 +1064,12 @@ static int ext4_fc_perform_commit(journal_t *journal)
         * EXT4_STATE_FC_FLUSHING_DATA. This prevents these inodes from being
         * freed until the data flush is over.
         */
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
                ext4_set_inode_state(&iter->vfs_inode,
                                     EXT4_STATE_FC_FLUSHING_DATA);
        }
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
 
        /* Step 2: Flush data for all the eligible inodes. */
        ret = ext4_fc_flush_data(journal);
@@ -1079,7 +1079,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
         * any error from step 2. This ensures that waiters waiting on
         * EXT4_STATE_FC_FLUSHING_DATA can resume.
         */
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
                ext4_clear_inode_state(&iter->vfs_inode,
                                       EXT4_STATE_FC_FLUSHING_DATA);
@@ -1096,7 +1096,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
         * prepare_to_wait() in ext4_fc_del().
         */
        smp_mb();
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
 
        /*
         * If we encountered error in Step 2, return it now after clearing
@@ -1113,12 +1113,12 @@ static int ext4_fc_perform_commit(journal_t *journal)
         * previous handles are now drained. We now mark the inodes on the
         * commit queue as being committed.
         */
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
                ext4_set_inode_state(&iter->vfs_inode,
                                     EXT4_STATE_FC_COMMITTING);
        }
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
        jbd2_journal_unlock_updates(journal);
 
        /*
@@ -1146,10 +1146,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
        }
 
        /* Step 6.2: Now write all the dentry updates. */
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        ret = ext4_fc_commit_dentry_updates(journal, &crc);
        if (ret) {
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
                goto out;
        }
 
@@ -1159,7 +1159,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
                if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
                        continue;
 
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
                ret = ext4_fc_write_inode_data(inode, &crc);
                if (ret)
                        goto out;
@@ -1311,7 +1311,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
        trace_ext4_fc_cleanup(journal, full, tid);
        jbd2_fc_release_bufs(journal);
 
-       spin_lock(&sbi->s_fc_lock);
+       mutex_lock(&sbi->s_fc_lock);
        while (!list_empty(&sbi->s_fc_q[FC_Q_MAIN])) {
                ei = list_first_entry(&sbi->s_fc_q[FC_Q_MAIN],
                                        struct ext4_inode_info,
@@ -1353,11 +1353,11 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
                                             fcd_list);
                list_del_init(&fc_dentry->fcd_list);
                list_del_init(&fc_dentry->fcd_dilist);
-               spin_unlock(&sbi->s_fc_lock);
+               mutex_unlock(&sbi->s_fc_lock);
 
                release_dentry_name_snapshot(&fc_dentry->fcd_name);
                kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
-               spin_lock(&sbi->s_fc_lock);
+               mutex_lock(&sbi->s_fc_lock);
        }
 
        list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
@@ -1372,7 +1372,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
 
        if (full)
                sbi->s_fc_bytes = 0;
-       spin_unlock(&sbi->s_fc_lock);
+       mutex_unlock(&sbi->s_fc_lock);
        trace_ext4_fc_stats(sb);
 }
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 356a96269a2187006f5c74da440cb6fdf56e41b3..5bd81dd9751c2675848098dd79eb5f28b2ab8542 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4481,7 +4481,7 @@ static void ext4_fast_commit_init(struct super_block *sb)
        sbi->s_fc_bytes = 0;
        ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
        sbi->s_fc_ineligible_tid = 0;
-       spin_lock_init(&sbi->s_fc_lock);
+       mutex_init(&sbi->s_fc_lock);
        memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
        sbi->s_fc_replay_state.fc_regions = NULL;
        sbi->s_fc_replay_state.fc_regions_size = 0;