git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ceph: Convert writepage_nounlock() to write_folio_nounlock()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 17 Feb 2025 18:51:13 +0000 (18:51 +0000)
committer: Christian Brauner <brauner@kernel.org>
Fri, 28 Feb 2025 10:21:30 +0000 (11:21 +0100)
Remove references to page->index, page->mapping, thp_size(),
page_offset() and other page APIs in favour of their more efficient
folio replacements.

Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Link: https://lore.kernel.org/r/20250217185119.430193-6-willy@infradead.org
Tested-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/ceph/addr.c

index 30355a28c4ba52dc949bbca33a8f1567bd741c84..d5c43492e92cef5d0b06bfcc40e6a6d6e96ee349 100644 (file)
@@ -698,22 +698,23 @@ static u64 get_writepages_data_length(struct inode *inode,
 }
 
 /*
- * Write a single page, but leave the page locked.
+ * Write a folio, but leave it locked.
  *
  * If we get a write error, mark the mapping for error, but still adjust the
- * dirty page accounting (i.e., page is no longer dirty).
+ * dirty page accounting (i.e., folio is no longer dirty).
  */
-static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+static int write_folio_nounlock(struct folio *folio,
+               struct writeback_control *wbc)
 {
-       struct folio *folio = page_folio(page);
-       struct inode *inode = page->mapping->host;
+       struct page *page = &folio->page;
+       struct inode *inode = folio->mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_client *cl = fsc->client;
        struct ceph_snap_context *snapc, *oldest;
-       loff_t page_off = page_offset(page);
+       loff_t page_off = folio_pos(folio);
        int err;
-       loff_t len = thp_size(page);
+       loff_t len = folio_size(folio);
        loff_t wlen;
        struct ceph_writeback_ctl ceph_wbc;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
@@ -721,27 +722,27 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        bool caching = ceph_is_cache_enabled(inode);
        struct page *bounce_page = NULL;
 
-       doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
-             page->index);
+       doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
+             folio->index);
 
        if (ceph_inode_is_shutdown(inode))
                return -EIO;
 
        /* verify this is a writeable snap context */
-       snapc = page_snap_context(page);
+       snapc = page_snap_context(&folio->page);
        if (!snapc) {
-               doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
-                     page);
+               doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
+                     folio);
                return 0;
        }
        oldest = get_oldest_context(inode, &ceph_wbc, snapc);
        if (snapc->seq > oldest->seq) {
-               doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
-                     ceph_vinop(inode), page, snapc);
+               doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
+                     ceph_vinop(inode), folio, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON(!(current->flags & PF_MEMALLOC));
                ceph_put_snap_context(oldest);
-               redirty_page_for_writepage(wbc, page);
+               folio_redirty_for_writepage(wbc, folio);
                return 0;
        }
        ceph_put_snap_context(oldest);
@@ -758,8 +759,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                len = ceph_wbc.i_size - page_off;
 
        wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
-       doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
-             ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
+       doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
+             ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
              snapc->seq);
 
        if (atomic_long_inc_return(&fsc->writeback_count) >
@@ -772,32 +773,32 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                                    ceph_wbc.truncate_seq,
                                    ceph_wbc.truncate_size, true);
        if (IS_ERR(req)) {
-               redirty_page_for_writepage(wbc, page);
+               folio_redirty_for_writepage(wbc, folio);
                return PTR_ERR(req);
        }
 
        if (wlen < len)
                len = wlen;
 
-       set_page_writeback(page);
+       folio_start_writeback(folio);
        if (caching)
-               ceph_set_page_fscache(page);
+               ceph_set_page_fscache(&folio->page);
        ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
        if (IS_ENCRYPTED(inode)) {
-               bounce_page = fscrypt_encrypt_pagecache_blocks(page,
+               bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
                                                    CEPH_FSCRYPT_BLOCK_SIZE, 0,
                                                    GFP_NOFS);
                if (IS_ERR(bounce_page)) {
-                       redirty_page_for_writepage(wbc, page);
-                       end_page_writeback(page);
+                       folio_redirty_for_writepage(wbc, folio);
+                       folio_end_writeback(folio);
                        ceph_osdc_put_request(req);
                        return PTR_ERR(bounce_page);
                }
        }
 
        /* it may be a short write due to an object boundary */
-       WARN_ON_ONCE(len > thp_size(page));
+       WARN_ON_ONCE(len > folio_size(folio));
        osd_req_op_extent_osd_data_pages(req, 0,
                        bounce_page ? &bounce_page : &page, wlen, 0,
                        false, false);
@@ -823,25 +824,25 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                if (err == -ERESTARTSYS) {
                        /* killed by SIGKILL */
                        doutc(cl, "%llx.%llx interrupted page %p\n",
-                             ceph_vinop(inode), page);
-                       redirty_page_for_writepage(wbc, page);
-                       end_page_writeback(page);
+                             ceph_vinop(inode), folio);
+                       folio_redirty_for_writepage(wbc, folio);
+                       folio_end_writeback(folio);
                        return err;
                }
                if (err == -EBLOCKLISTED)
                        fsc->blocklisted = true;
-               doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
-                     ceph_vinop(inode), err, page);
+               doutc(cl, "%llx.%llx setting mapping error %d %p\n",
+                     ceph_vinop(inode), err, folio);
                mapping_set_error(&inode->i_data, err);
                wbc->pages_skipped++;
        } else {
                doutc(cl, "%llx.%llx cleaned page %p\n",
-                     ceph_vinop(inode), page);
+                     ceph_vinop(inode), folio);
                err = 0;  /* vfs expects us to return 0 */
        }
-       oldest = detach_page_private(page);
+       oldest = folio_detach_private(folio);
        WARN_ON_ONCE(oldest != snapc);
-       end_page_writeback(page);
+       folio_end_writeback(folio);
        ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
        ceph_put_snap_context(snapc);  /* page's reference */
 
@@ -1820,7 +1821,7 @@ ceph_find_incompatible(struct folio *folio)
                doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
                      ceph_vinop(inode), folio, snapc);
                if (folio_clear_dirty_for_io(folio)) {
-                       int r = writepage_nounlock(&folio->page, NULL);
+                       int r = write_folio_nounlock(folio, NULL);
                        if (r < 0)
                                return ERR_PTR(r);
                }