ext4: factor out journalled block zeroing range
author     Zhang Yi <yi.zhang@huawei.com>
           Fri, 27 Mar 2026 10:29:29 +0000 (18:29 +0800)
committer  Theodore Ts'o <tytso@mit.edu>
           Fri, 10 Apr 2026 01:57:51 +0000 (21:57 -0400)
Refactor __ext4_block_zero_page_range() by separating the block zeroing
operations for ordered data mode and journal data mode into two distinct
functions:

  - ext4_block_do_zero_range(): handles non-journal data mode with
    ordered data support
  - ext4_block_journalled_zero_range(): handles journal data mode

Also extract a common helper, ext4_load_tail_bh(), to handle buffer head
and folio retrieval, along with the associated error handling. This
prepares for converting the partial-block zeroing path to the iomap
infrastructure.
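
With this split, the top-level dispatch in ext4_block_zero_page_range()
reduces to the shape below (a simplified paraphrase of the final hunk in
this patch, with the unrelated declarations elided; not the literal
kernel source):

  static int ext4_block_zero_page_range(handle_t *handle,
  		struct address_space *mapping, loff_t from, loff_t length,
  		bool *did_zero)
  {
  	struct inode *inode = mapping->host;
  	...
  	/* DAX bypasses the page cache entirely. */
  	if (IS_DAX(inode))
  		return dax_zero_range(inode, from, length, did_zero,
  				      &ext4_iomap_ops);
  	/* data=journal: the zeroed buffer must go through the journal. */
  	if (ext4_should_journal_data(inode))
  		return ext4_block_journalled_zero_range(handle, inode, from,
  							length, did_zero);
  	/* Non-journal modes, including data=ordered. */
  	return ext4_block_do_zero_range(handle, inode, from, length, did_zero);
  }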

Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20260327102939.1095257-4-yi.zhang@huaweicloud.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
fs/ext4/inode.c

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5eade0040e536b3c5965cb8c655099c93f6702d7..3d4650cfc3e0dc1438cbef16e65a0dd9b4a539e4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4032,13 +4032,11 @@ void ext4_set_aops(struct inode *inode)
  * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
  * racing writeback can come later and flush the stale pagecache to disk.
  */
-static int __ext4_block_zero_page_range(handle_t *handle,
-               struct address_space *mapping, loff_t from, loff_t length,
-               bool *did_zero)
+static struct buffer_head *ext4_load_tail_bh(struct inode *inode, loff_t from)
 {
        unsigned int offset, blocksize, pos;
        ext4_lblk_t iblock;
-       struct inode *inode = mapping->host;
+       struct address_space *mapping = inode->i_mapping;
        struct buffer_head *bh;
        struct folio *folio;
        int err = 0;
@@ -4047,7 +4045,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                                    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                    mapping_gfp_constraint(mapping, ~__GFP_FS));
        if (IS_ERR(folio))
-               return PTR_ERR(folio);
+               return ERR_CAST(folio);
 
        blocksize = inode->i_sb->s_blocksize;
 
@@ -4099,33 +4097,73 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                        }
                }
        }
-       if (ext4_should_journal_data(inode)) {
-               BUFFER_TRACE(bh, "get write access");
-               err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
-                                                   EXT4_JTR_NONE);
-               if (err)
-                       goto unlock;
-       }
-       folio_zero_range(folio, offset, length);
+       return bh;
+
+unlock:
+       folio_unlock(folio);
+       folio_put(folio);
+       return err ? ERR_PTR(err) : NULL;
+}
+
+static int ext4_block_do_zero_range(handle_t *handle, struct inode *inode,
+                                   loff_t from, loff_t length, bool *did_zero)
+{
+       struct buffer_head *bh;
+       struct folio *folio;
+       int err = 0;
+
+       bh = ext4_load_tail_bh(inode, from);
+       if (IS_ERR_OR_NULL(bh))
+               return PTR_ERR_OR_ZERO(bh);
+
+       folio = bh->b_folio;
+       folio_zero_range(folio, offset_in_folio(folio, from), length);
        BUFFER_TRACE(bh, "zeroed end of block");
 
-       if (ext4_should_journal_data(inode)) {
-               err = ext4_dirty_journalled_data(handle, bh);
-       } else {
-               mark_buffer_dirty(bh);
-               /*
-                * Only the written block requires ordered data to prevent
-                * exposing stale data.
-                */
-               if (!buffer_unwritten(bh) && !buffer_delay(bh) &&
-                   ext4_should_order_data(inode))
-                       err = ext4_jbd2_inode_add_write(handle, inode, from,
-                                       length);
-       }
+       mark_buffer_dirty(bh);
+       /*
+        * Only the written block requires ordered data to prevent exposing
+        * stale data.
+        */
+       if (ext4_should_order_data(inode) &&
+           !buffer_unwritten(bh) && !buffer_delay(bh))
+               err = ext4_jbd2_inode_add_write(handle, inode, from, length);
        if (!err && did_zero)
                *did_zero = true;
 
-unlock:
+       folio_unlock(folio);
+       folio_put(folio);
+       return err;
+}
+
+static int ext4_block_journalled_zero_range(handle_t *handle,
+               struct inode *inode, loff_t from, loff_t length, bool *did_zero)
+{
+       struct buffer_head *bh;
+       struct folio *folio;
+       int err;
+
+       bh = ext4_load_tail_bh(inode, from);
+       if (IS_ERR_OR_NULL(bh))
+               return PTR_ERR_OR_ZERO(bh);
+       folio = bh->b_folio;
+
+       BUFFER_TRACE(bh, "get write access");
+       err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
+                                           EXT4_JTR_NONE);
+       if (err)
+               goto out;
+
+       folio_zero_range(folio, offset_in_folio(folio, from), length);
+       BUFFER_TRACE(bh, "zeroed end of block");
+
+       err = ext4_dirty_journalled_data(handle, bh);
+       if (err)
+               goto out;
+
+       if (did_zero)
+               *did_zero = true;
+out:
        folio_unlock(folio);
        folio_put(folio);
        return err;
@@ -4156,9 +4194,11 @@ static int ext4_block_zero_page_range(handle_t *handle,
        if (IS_DAX(inode)) {
                return dax_zero_range(inode, from, length, did_zero,
                                      &ext4_iomap_ops);
+       } else if (ext4_should_journal_data(inode)) {
+               return ext4_block_journalled_zero_range(handle, inode, from,
+                                                       length, did_zero);
        }
-       return __ext4_block_zero_page_range(handle, mapping, from, length,
-                                           did_zero);
+       return ext4_block_do_zero_range(handle, inode, from, length, did_zero);
 }
 
 /*
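
Note the tri-state return convention of ext4_load_tail_bh() above: a valid
buffer_head on success, NULL when it bails out early without an error
(nothing left for the caller to do), and an ERR_PTR()-encoded errno on
failure. Both new callers collapse the last two cases with
IS_ERR_OR_NULL() and PTR_ERR_OR_ZERO(). The userspace sketch below mirrors
that pattern; the ERR_PTR() family is re-implemented here along the lines
of the kernel's include/linux/err.h, and resource_load()/use_resource()
are hypothetical names used only for illustration:

  /*
   * Minimal userspace sketch of the ERR_PTR()/NULL tri-state return
   * pattern. MAX_ERRNO and the helpers mirror include/linux/err.h;
   * resource_load() and use_resource() are hypothetical.
   */
  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MAX_ERRNO 4095

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

  static inline int IS_ERR(const void *ptr)
  {
  	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
  }

  static inline int IS_ERR_OR_NULL(const void *ptr)
  {
  	return !ptr || IS_ERR(ptr);
  }

  static inline long PTR_ERR_OR_ZERO(const void *ptr)
  {
  	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
  }

  /* Hypothetical loader: a negative key fails, a zero key means
   * "nothing to do", anything else yields a usable pointer. */
  static int *resource_load(int key)
  {
  	static int value;

  	if (key < 0)
  		return ERR_PTR(-EINVAL);	/* hard failure */
  	if (key == 0)
  		return NULL;			/* early bail-out, no error */
  	value = key;
  	return &value;				/* success */
  }

  /* Same shape as ext4_block_do_zero_range(): one test covers both the
   * NULL and the ERR_PTR() cases, returning 0 or the errno. */
  static int use_resource(int key)
  {
  	int *res = resource_load(key);

  	if (IS_ERR_OR_NULL(res))
  		return (int)PTR_ERR_OR_ZERO(res);

  	printf("loaded %d\n", *res);
  	return 0;
  }

  int main(void)
  {
  	printf("key  1 -> %d\n", use_resource(1));   /* 0, prints "loaded 1" */
  	printf("key  0 -> %d\n", use_resource(0));   /* 0, nothing to do */
  	printf("key -1 -> %d\n", use_resource(-1));  /* -EINVAL (-22) */
  	return 0;
  }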