vboxsf: Convert to writepages
author Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 2 Apr 2025 14:59:56 +0000 (15:59 +0100)
committer Christian Brauner <brauner@kernel.org>
Mon, 7 Apr 2025 07:36:48 +0000 (09:36 +0200)
If we add a migrate_folio operation, we can convert the writepage
operation to writepages (without ->migrate_folio, the page migration
fallback path would still rely on ->writepage).  Further, this lets us
optimise by using the same write handle for multiple folios.  The large
folio support here is illusory; we would need to kmap each page in turn
for proper support.  But we do remove a few hidden calls to
compound_head().
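A minimal sketch (not part of this patch) of what kmapping each page in
turn might look like inside the new writeback_iter() loop, reusing the
patch's local names (folio, off, nwrite, sf_handle, error) and assuming
vboxsf_write() keeps its current signature:

	size_t poff;

	for (poff = 0; poff < nwrite; poff += PAGE_SIZE) {
		/* bytes of this folio's page that still need writing */
		u32 chunk = min_t(size_t, PAGE_SIZE, nwrite - poff);
		/* map only the page covering offset poff in the folio */
		u8 *buf = kmap_local_folio(folio, poff);

		error = vboxsf_write(sf_handle->root, sf_handle->handle,
				     off + poff, &chunk, buf);
		kunmap_local(buf);
		if (error)
			break;
	}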

Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Link: https://lore.kernel.org/r/20250402150005.2309458-3-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/vboxsf/file.c

index b780deb81b022c692ef8b95ff5b94f1c2955381e..b492794f8e9a72f8a93efbc35512b1dda9f8e11a 100644 (file)
@@ -262,40 +262,42 @@ static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
        return sf_handle;
 }
 
-static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
+static int vboxsf_writepages(struct address_space *mapping,
+               struct writeback_control *wbc)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = mapping->host;
+       struct folio *folio = NULL;
        struct vboxsf_inode *sf_i = VBOXSF_I(inode);
        struct vboxsf_handle *sf_handle;
-       loff_t off = page_offset(page);
        loff_t size = i_size_read(inode);
-       u32 nwrite = PAGE_SIZE;
-       u8 *buf;
-       int err;
-
-       if (off + PAGE_SIZE > size)
-               nwrite = size & ~PAGE_MASK;
+       int error;
 
        sf_handle = vboxsf_get_write_handle(sf_i);
        if (!sf_handle)
                return -EBADF;
 
-       buf = kmap(page);
-       err = vboxsf_write(sf_handle->root, sf_handle->handle,
-                          off, &nwrite, buf);
-       kunmap(page);
+       while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
+               loff_t off = folio_pos(folio);
+               u32 nwrite = folio_size(folio);
+               u8 *buf;
 
-       kref_put(&sf_handle->refcount, vboxsf_handle_release);
+               if (nwrite > size - off)
+                       nwrite = size - off;
 
-       if (err == 0) {
-               /* mtime changed */
-               sf_i->force_restat = 1;
-       } else {
-               ClearPageUptodate(page);
+               buf = kmap_local_folio(folio, 0);
+               error = vboxsf_write(sf_handle->root, sf_handle->handle,
+                               off, &nwrite, buf);
+               kunmap_local(buf);
+
+               folio_unlock(folio);
        }
 
-       unlock_page(page);
-       return err;
+       kref_put(&sf_handle->refcount, vboxsf_handle_release);
+
+       /* mtime changed */
+       if (error == 0)
+               sf_i->force_restat = 1;
+       return error;
 }
 
 static int vboxsf_write_end(struct file *file, struct address_space *mapping,
@@ -347,10 +349,11 @@ out:
  */
 const struct address_space_operations vboxsf_reg_aops = {
        .read_folio = vboxsf_read_folio,
-       .writepage = vboxsf_writepage,
+       .writepages = vboxsf_writepages,
        .dirty_folio = filemap_dirty_folio,
        .write_begin = simple_write_begin,
        .write_end = vboxsf_write_end,
+       .migrate_folio = filemap_migrate_folio,
 };
 
 static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,