ext4: fix a potential assertion failure due to improperly dirtied buffer
author Shida Zhang <zhangshida@kylinos.cn>
Fri, 30 Aug 2024 05:37:38 +0000 (13:37 +0800)
committer Theodore Ts'o <tytso@mit.edu>
Wed, 4 Sep 2024 02:14:17 +0000 (22:14 -0400)
On an old kernel version (4.19, ext3, data=journal, pagesize=64k),
an assertion failure will occasionally be triggered by the line below:
-----------
jbd2_journal_commit_transaction
{
...
J_ASSERT_BH(bh, !buffer_dirty(bh));
/*
* The buffer on BJ_Forget list and not jbddirty means
...
}
-----------

The same condition also applies to the latest kernel version.

When blocksize < pagesize and we truncate a file, there can be buffers in
the tail page of the mapping beyond i_size. These buffers will be filed to
the transaction's BJ_Forget list by ext4_journalled_invalidatepage() during
truncation. When the transaction doing the truncate starts committing, we
can grow the file again. This calls __block_write_begin(), which allocates
new blocks under these buffers in the tail page, and we go through the
branch:

                        if (buffer_new(bh)) {
                                clean_bdev_bh_alias(bh);
                                if (folio_test_uptodate(folio)) {
                                        clear_buffer_new(bh);
                                        set_buffer_uptodate(bh);
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
                                ...
                        }

Hence buffers on BJ_Forget list of the committing transaction get marked
dirty and this triggers the jbd2 assertion.
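
Putting the two paths together, a rough interleaving looks like this
(simplified from the description above; exact call chains vary between
kernel versions):

        truncate path                         jbd2 commit thread
        -------------                         ------------------
        ext4_journalled_invalidatepage()
          tail-page buffers beyond i_size
          filed on BJ_Forget of transaction T
                                              jbd2_journal_commit_transaction(T)
        write() grows the file again
        __block_write_begin()
          buffer_new(bh) && folio uptodate:
            mark_buffer_dirty(bh)
                                              J_ASSERT_BH(bh, !buffer_dirty(bh))
                                              -> assertion failure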

Teach ext4_block_write_begin() to properly handle files with data
journalling by avoiding dirtying the buffers directly. Instead of
folio_zero_new_buffers() we use ext4_journalled_zero_new_buffers(), which
takes care of the journalling. We also don't need to mark new uptodate
buffers as dirty in ext4_block_write_begin(). That will be done either by
block_commit_write() in case of success or by folio_zero_new_buffers() in
case of failure.
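
For reference, ext4_journalled_zero_new_buffers() only gains a forward
declaration in the diff below; its body is not touched by this patch. A
loose sketch of what such a helper has to do (illustration only, not the
kernel's actual implementation; folio_zero_range() and
ext4_handle_dirty_metadata() are used here merely as plausible building
blocks):

        /* Sketch only -- not the real ext4_journalled_zero_new_buffers(). */
        static void journalled_zero_new_buffers_sketch(handle_t *handle,
                                                       struct inode *inode,
                                                       struct folio *folio,
                                                       unsigned from, unsigned to)
        {
                struct buffer_head *bh, *head = folio_buffers(folio);
                unsigned block_start = 0, block_end;

                bh = head;
                do {
                        block_end = block_start + bh->b_size;
                        /* Only new buffers overlapping [from, to) need zeroing. */
                        if (buffer_new(bh) && block_start < to && block_end > from) {
                                folio_zero_range(folio, block_start,
                                                 block_end - block_start);
                                set_buffer_uptodate(bh);
                                clear_buffer_new(bh);
                                /*
                                 * Record the buffer with the running handle
                                 * instead of calling mark_buffer_dirty(), so
                                 * jbd2 never sees a plain-dirty buffer on a
                                 * data=journal inode.
                                 */
                                ext4_handle_dirty_metadata(handle, inode, bh);
                        }
                        block_start = block_end;
                        bh = bh->b_this_page;
                } while (bh != head);
        }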

Reported-by: Baolin Liu <liubaolin@kylinos.cn>
Suggested-by: Jan Kara <jack@suse.cz>
Signed-off-by: Shida Zhang <zhangshida@kylinos.cn>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20240830053739.3588573-4-zhangshida@kylinos.cn
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
fs/ext4/ext4.h
fs/ext4/inline.c
fs/ext4/inode.c

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8bd08637fe14e1cc6b8225dc87392500170bbccb..8cc15d00e5c8e4e4b225861fdfcb14684a098baa 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3853,7 +3853,8 @@ static inline int ext4_buffer_uptodate(struct buffer_head *bh)
        return buffer_uptodate(bh);
 }
 
-extern int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
+extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+                                 loff_t pos, unsigned len,
                                  get_block_t *get_block);
 #endif /* __KERNEL__ */
 
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index b018a80fec7a73613005252990e69767365a7e5a..7ca4aca531625a078021999ef0f45097a4018351 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -601,10 +601,11 @@ retry:
                goto out;
 
        if (ext4_should_dioread_nolock(inode)) {
-               ret = ext4_block_write_begin(folio, from, to,
+               ret = ext4_block_write_begin(handle, folio, from, to,
                                             ext4_get_block_unwritten);
        } else
-               ret = ext4_block_write_begin(folio, from, to, ext4_get_block);
+               ret = ext4_block_write_begin(handle, folio, from, to,
+                                            ext4_get_block);
 
        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, inode,
@@ -856,7 +857,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
                        goto out;
        }
 
-       ret = ext4_block_write_begin(folio, 0, inline_size,
+       ret = ext4_block_write_begin(NULL, folio, 0, inline_size,
                                     ext4_da_get_block_prep);
        if (ret) {
                up_read(&EXT4_I(inode)->xattr_sem);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0010f7004688c9a23cea453eb64fd311abdca69f..a11e2626b5f68ad78dcaa4955e19cf8d4313d603 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
 
 #include <trace/events/ext4.h>
 
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+                                           struct inode *inode,
+                                           struct folio *folio,
+                                           unsigned from, unsigned to);
+
 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
                              struct ext4_inode_info *ei)
 {
@@ -1023,7 +1028,8 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode,
        return ret;
 }
 
-int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
+int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+                          loff_t pos, unsigned len,
                           get_block_t *get_block)
 {
        unsigned from = pos & (PAGE_SIZE - 1);
@@ -1037,6 +1043,7 @@ int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
        struct buffer_head *bh, *head, *wait[2];
        int nr_wait = 0;
        int i;
+       bool should_journal_data = ext4_should_journal_data(inode);
 
        BUG_ON(!folio_test_locked(folio));
        BUG_ON(from > PAGE_SIZE);
@@ -1066,10 +1073,22 @@ int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                        if (err)
                                break;
                        if (buffer_new(bh)) {
+                               /*
+                                * We may be zeroing partial buffers or all new
+                                * buffers in case of failure. Prepare JBD2 for
+                                * that.
+                                */
+                               if (should_journal_data)
+                                       do_journal_get_write_access(handle,
+                                                                   inode, bh);
                                if (folio_test_uptodate(folio)) {
-                                       clear_buffer_new(bh);
+                                       /*
+                                        * Unlike __block_write_begin() we leave
+                                        * dirtying of new uptodate buffers to
+                                        * ->write_end() time or
+                                        * folio_zero_new_buffers().
+                                        */
                                        set_buffer_uptodate(bh);
-                                       mark_buffer_dirty(bh);
                                        continue;
                                }
                                if (block_end > to || block_start < from)
@@ -1099,7 +1118,11 @@ int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                        err = -EIO;
        }
        if (unlikely(err)) {
-               folio_zero_new_buffers(folio, from, to);
+               if (should_journal_data)
+                       ext4_journalled_zero_new_buffers(handle, inode, folio,
+                                                        from, to);
+               else
+                       folio_zero_new_buffers(folio, from, to);
        } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                for (i = 0; i < nr_wait; i++) {
                        int err2;
@@ -1197,10 +1220,11 @@ retry_journal:
        folio_wait_stable(folio);
 
        if (ext4_should_dioread_nolock(inode))
-               ret = ext4_block_write_begin(folio, pos, len,
+               ret = ext4_block_write_begin(handle, folio, pos, len,
                                             ext4_get_block_unwritten);
        else
-               ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
+               ret = ext4_block_write_begin(handle, folio, pos, len,
+                                            ext4_get_block);
        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, inode,
                                             folio_buffers(folio), from, to,
@@ -2926,7 +2950,8 @@ retry:
        if (IS_ERR(folio))
                return PTR_ERR(folio);
 
-       ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
+       ret = ext4_block_write_begin(NULL, folio, pos, len,
+                                    ext4_da_get_block_prep);
        if (ret < 0) {
                folio_unlock(folio);
                folio_put(folio);
@@ -6183,7 +6208,8 @@ retry_alloc:
                if (folio_pos(folio) + len > size)
                        len = size - folio_pos(folio);
 
-               err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
+               err = ext4_block_write_begin(handle, folio, 0, len,
+                                            ext4_get_block);
                if (!err) {
                        ret = VM_FAULT_SIGBUS;
                        if (ext4_journal_folio_buffers(handle, folio, len))