git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
hfs: Convert to release_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 1 May 2022 03:53:28 +0000 (23:53 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 10 May 2022 03:12:33 +0000 (23:12 -0400)
Use a folio throughout hfs_release_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
fs/hfs/inode.c

index ba3ff9cd7cfce0e20f4e40357b82817e0e3c16ad..86fd50e5fccbea1408f8505b39dffb3ad2c47e36 100644 (file)
@@ -69,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, hfs_get_block);
 }
 
-static int hfs_releasepage(struct page *page, gfp_t mask)
+static bool hfs_release_folio(struct folio *folio, gfp_t mask)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
-       int i, res = 1;
+       int i;
+       bool res = true;
 
        switch (inode->i_ino) {
        case HFS_EXT_CNID:
@@ -87,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
                break;
        default:
                BUG();
-               return 0;
+               return false;
        }
 
        if (!tree)
-               return 0;
+               return false;
 
        if (tree->node_size >= PAGE_SIZE) {
-               nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
+               nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
                        ;
                else if (atomic_read(&node->refcnt))
-                       res = 0;
+                       res = false;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
-               nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+               nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
@@ -115,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
-                               res = 0;
+                               res = false;
                                break;
                        }
                        hfs_bnode_unhash(node);
@@ -123,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
-       return res ? try_to_free_buffers(page) : 0;
+       return res ? try_to_free_buffers(&folio->page) : false;
 }
 
 static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -165,7 +166,7 @@ const struct address_space_operations hfs_btree_aops = {
        .write_begin    = hfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfs_bmap,
-       .releasepage    = hfs_releasepage,
+       .release_folio  = hfs_release_folio,
 };
 
 const struct address_space_operations hfs_aops = {