ext4: pipeline buffer reads in mext_page_mkuptodate()
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Thu, 18 Jul 2024 22:30:00 +0000 (23:30 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Thu, 5 Dec 2024 12:52:48 +0000 (13:52 +0100)
[ Upstream commit 368a83cebbb949adbcc20877c35367178497d9cc ]

Instead of synchronously reading one buffer at a time, submit reads
as we walk the buffers in the first loop, then wait for them in the
second loop.  This should be significantly more efficient, particularly
on HDDs, although I have not measured it.
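
In other words, the old code issued one read and slept until it completed
before touching the next buffer, while the new code queues every read first
and only then waits.  A minimal sketch of that shape (simplified from the
hunks below; the real patch submits the reads while it is still collecting
arr[] rather than in a separate pass):

	/* Before: submit one read, sleep until it completes, then move on. */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (!bh_uptodate_or_lock(bh)) {
			err = ext4_read_bh(bh, 0, NULL);	/* synchronous: waits here */
			if (err)
				return err;
		}
	}

	/* After: queue every read first so the device sees them all at once... */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}
		ext4_read_bh_nowait(bh, 0, NULL);	/* async: unlocked by the end_io handler */
	}

	/* ...then wait for each one and check the result. */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;
	}

The lock_buffer()/buffer_uptodate() re-check before submitting plays the
same role bh_uptodate_or_lock() did in the old code, so a buffer that became
uptodate in the meantime is not read again.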

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20240718223005.568869-2-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Stable-dep-of: 2f3d93e210b9 ("ext4: fix race in buffer_head read fault injection")
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/ext4/move_extent.c

index c95e3e526390d79bf95f3adc5f31ce516d57888f..7a80c32fd7326ecf1852e2f7af698cae5d43abd5 100644
@@ -173,7 +173,9 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
        sector_t block;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        unsigned int blocksize, block_start, block_end;
-       int i, err,  nr = 0, partial = 0;
+       int i, nr = 0;
+       bool partial = false;
+
        BUG_ON(!folio_test_locked(folio));
        BUG_ON(folio_test_writeback(folio));
 
@@ -191,13 +193,13 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
-                               partial = 1;
+                               partial = true;
                        continue;
                }
                if (buffer_uptodate(bh))
                        continue;
                if (!buffer_mapped(bh)) {
-                       err = ext4_get_block(inode, block, bh, 0);
+                       int err = ext4_get_block(inode, block, bh, 0);
                        if (err)
                                return err;
                        if (!buffer_mapped(bh)) {
@@ -206,6 +208,12 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
                                continue;
                        }
                }
+               lock_buffer(bh);
+               if (buffer_uptodate(bh)) {
+                       unlock_buffer(bh);
+                       continue;
+               }
+               ext4_read_bh_nowait(bh, 0, NULL);
                BUG_ON(nr >= MAX_BUF_PER_PAGE);
                arr[nr++] = bh;
        }
@@ -215,11 +223,10 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
 
        for (i = 0; i < nr; i++) {
                bh = arr[i];
-               if (!bh_uptodate_or_lock(bh)) {
-                       err = ext4_read_bh(bh, 0, NULL);
-                       if (err)
-                               return err;
-               }
+               wait_on_buffer(bh);
+               if (buffer_uptodate(bh))
+                       continue;
+               return -EIO;
        }
 out:
        if (!partial)