git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
fs/mpage: use blocks_per_folio instead of blocks_per_page
authorLuis Chamberlain <mcgrof@kernel.org>
Fri, 21 Feb 2025 22:38:19 +0000 (14:38 -0800)
committerChristian Brauner <brauner@kernel.org>
Mon, 24 Feb 2025 10:44:43 +0000 (11:44 +0100)
Convert mpage to folios and adjust accounting for the number of blocks
within a folio instead of a single page. This also adjusts the number
of pages we should process to be the size of the folio to ensure we
always read a full folio.

Note that the page cache code already ensures do_mpage_readpage() will
work with folios respecting the address space min order, this ensures
that so long as folio_size() is used for our requirements mpage will
also now be able to process block sizes larger than the page size.

Originally-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
Link: https://lore.kernel.org/r/20250221223823.1680616-5-mcgrof@kernel.org
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/mpage.c

index a3c82206977f6c2bb6d6fc0393f7306e2b541e97..9c8cf40152384c50b35089d26a9f21d916c855d9 100644 (file)
@@ -107,7 +107,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
                 * don't make any buffers if there is only one buffer on
                 * the folio and the folio just needs to be set up to date
                 */
-               if (inode->i_blkbits == PAGE_SHIFT &&
+               if (inode->i_blkbits == folio_shift(folio) &&
                    buffer_uptodate(bh)) {
                        folio_mark_uptodate(folio);
                        return;
@@ -153,7 +153,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
        struct folio *folio = args->folio;
        struct inode *inode = folio->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
-       const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+       const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        struct buffer_head *map_bh = &args->map_bh;
        sector_t block_in_file;
@@ -161,7 +161,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
        sector_t last_block_in_file;
        sector_t first_block;
        unsigned page_block;
-       unsigned first_hole = blocks_per_page;
+       unsigned first_hole = blocks_per_folio;
        struct block_device *bdev = NULL;
        int length;
        int fully_mapped = 1;
@@ -182,7 +182,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                goto confused;
 
        block_in_file = folio_pos(folio) >> blkbits;
-       last_block = block_in_file + args->nr_pages * blocks_per_page;
+       last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
                last_block = last_block_in_file;
@@ -204,7 +204,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                                clear_buffer_mapped(map_bh);
                                break;
                        }
-                       if (page_block == blocks_per_page)
+                       if (page_block == blocks_per_folio)
                                break;
                        page_block++;
                        block_in_file++;
@@ -216,7 +216,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
         * Then do more get_blocks calls until we are done with this folio.
         */
        map_bh->b_folio = folio;
-       while (page_block < blocks_per_page) {
+       while (page_block < blocks_per_folio) {
                map_bh->b_state = 0;
                map_bh->b_size = 0;
 
@@ -229,7 +229,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 
                if (!buffer_mapped(map_bh)) {
                        fully_mapped = 0;
-                       if (first_hole == blocks_per_page)
+                       if (first_hole == blocks_per_folio)
                                first_hole = page_block;
                        page_block++;
                        block_in_file++;
@@ -247,7 +247,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                        goto confused;
                }
        
-               if (first_hole != blocks_per_page)
+               if (first_hole != blocks_per_folio)
                        goto confused;          /* hole -> non-hole */
 
                /* Contiguous blocks? */
@@ -260,7 +260,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                        if (relative_block == nblocks) {
                                clear_buffer_mapped(map_bh);
                                break;
-                       } else if (page_block == blocks_per_page)
+                       } else if (page_block == blocks_per_folio)
                                break;
                        page_block++;
                        block_in_file++;
@@ -268,8 +268,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                bdev = map_bh->b_bdev;
        }
 
-       if (first_hole != blocks_per_page) {
-               folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
+       if (first_hole != blocks_per_folio) {
+               folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
                if (first_hole == 0) {
                        folio_mark_uptodate(folio);
                        folio_unlock(folio);
@@ -303,10 +303,10 @@ alloc_new:
        relative_block = block_in_file - args->first_logical_block;
        nblocks = map_bh->b_size >> blkbits;
        if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
-           (first_hole != blocks_per_page))
+           (first_hole != blocks_per_folio))
                args->bio = mpage_bio_submit_read(args->bio);
        else
-               args->last_block_in_bio = first_block + blocks_per_page - 1;
+               args->last_block_in_bio = first_block + blocks_per_folio - 1;
 out:
        return args->bio;
 
@@ -385,7 +385,7 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
 {
        struct mpage_readpage_args args = {
                .folio = folio,
-               .nr_pages = 1,
+               .nr_pages = folio_nr_pages(folio),
                .get_block = get_block,
        };
 
@@ -456,12 +456,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
        struct address_space *mapping = folio->mapping;
        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
-       const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+       const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t first_block;
        unsigned page_block;
-       unsigned first_unmapped = blocks_per_page;
+       unsigned first_unmapped = blocks_per_folio;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
@@ -486,12 +486,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
-                               if (first_unmapped == blocks_per_page)
+                               if (first_unmapped == blocks_per_folio)
                                        first_unmapped = page_block;
                                continue;
                        }
 
-                       if (first_unmapped != blocks_per_page)
+                       if (first_unmapped != blocks_per_folio)
                                goto confused;  /* hole -> non-hole */
 
                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
@@ -536,7 +536,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
                goto page_is_mapped;
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_folio = folio;
-       for (page_block = 0; page_block < blocks_per_page; ) {
+       for (page_block = 0; page_block < blocks_per_folio; ) {
 
                map_bh.b_state = 0;
                map_bh.b_size = 1 << blkbits;
@@ -618,14 +618,14 @@ alloc_new:
        BUG_ON(folio_test_writeback(folio));
        folio_start_writeback(folio);
        folio_unlock(folio);
-       if (boundary || (first_unmapped != blocks_per_page)) {
+       if (boundary || (first_unmapped != blocks_per_folio)) {
                bio = mpage_bio_submit_write(bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
                }
        } else {
-               mpd->last_block_in_bio = first_block + blocks_per_page - 1;
+               mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
        }
        goto out;