From: Christoph Hellwig
Date: Tue, 10 Jun 2025 05:49:38 +0000 (+0200)
Subject: mm: stop passing a writeback_control structure to shmem_writeout
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=44b1b073eb36143ec65a918c0fbaa582f3ec2aa1;p=thirdparty%2Fkernel%2Flinux.git

mm: stop passing a writeback_control structure to shmem_writeout

shmem_writeout only needs the swap_iocb cookie and the split folio list.
Pass those explicitly and remove the now unused list member from struct
writeback_control.

Link: https://lkml.kernel.org/r/20250610054959.2057526-3-hch@lst.de
Signed-off-by: Christoph Hellwig
Cc: Baolin Wang
Cc: Chengming Zhou
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Matthew Wilcox (Oracle)
Cc: Nhat Pham
Signed-off-by: Andrew Morton
---

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19a3eb82dc6a6..24d8daa4fdb35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -317,7 +317,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
 		if (folio_mapped(folio))
 			folio_redirty_for_writepage(&wbc, folio);
 		else
-			error = shmem_writeout(folio, &wbc);
+			error = shmem_writeout(folio, NULL, NULL);
 	}
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index ffaab68bd5dd2..6f2e58be4f3e3 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -112,15 +112,8 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
 
 	if (writeback && !folio_mapped(to_folio) &&
 	    folio_clear_dirty_for_io(to_folio)) {
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-		};
 		folio_set_reclaim(to_folio);
-		ret = shmem_writeout(to_folio, &wbc);
+		ret = shmem_writeout(to_folio, NULL, NULL);
 		if (!folio_test_writeback(to_folio))
 			folio_clear_reclaim(to_folio);
 		/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 5f03a39a26f71..6d0f9c599ff7e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -11,6 +11,8 @@
 #include 
 #include 
 
+struct swap_iocb;
+
 /* inode in-kernel data */
 
 #ifdef CONFIG_TMPFS_QUOTA
@@ -107,7 +109,8 @@ static inline bool shmem_mapping(struct address_space *mapping)
 void shmem_unlock_mapping(struct address_space *mapping);
 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					 pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list);
 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index eda4b62511f70..82f217970092a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,9 +79,6 @@ struct writeback_control {
 	 */
 	struct swap_iocb **swap_plug;
 
-	/* Target list for splitting a large folio */
-	struct list_head *list;
-
 	/* internal fields used by the ->writepages implementation: */
 	struct folio_batch fbatch;
 	pgoff_t index;
diff --git a/mm/shmem.c b/mm/shmem.c
index 3a5a65b1f41a3..ad8db487e7217 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1540,11 +1540,13 @@ start_over:
 /**
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
- * @wbc: How writeback is to be done
+ * @plug: swap plug
+ * @folio_list: list to put back folios on split
  *
  * Move the folio from the page cache to the swap cache.
  */
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list)
 {
 	struct address_space *mapping = folio->mapping;
 	struct inode *inode = mapping->host;
@@ -1554,9 +1556,6 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 	int nr_pages;
 	bool split = false;
 
-	if (WARN_ON_ONCE(!wbc->for_reclaim))
-		goto redirty;
-
 	if ((info->flags & VM_LOCKED) || sbinfo->noswap)
 		goto redirty;
 
@@ -1583,7 +1582,7 @@ try_split:
 		/* Ensure the subpages are still dirty */
 		folio_test_set_dirty(folio);
-		if (split_folio_to_list(folio, wbc->list))
+		if (split_folio_to_list(folio, folio_list))
 			goto redirty;
 		folio_clear_dirty(folio);
 	}
@@ -1636,13 +1635,21 @@ try_split:
 	list_add(&info->swaplist, &shmem_swaplist);
 
 	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+		struct writeback_control wbc = {
+			.sync_mode = WB_SYNC_NONE,
+			.nr_to_write = SWAP_CLUSTER_MAX,
+			.range_start = 0,
+			.range_end = LLONG_MAX,
+			.for_reclaim = 1,
+			.swap_plug = plug,
+		};
 		shmem_recalc_inode(inode, 0, nr_pages);
 		swap_shmem_alloc(folio->swap, nr_pages);
 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
 
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(folio_mapped(folio));
-		return swap_writeout(folio, wbc);
+		return swap_writeout(folio, &wbc);
 	}
 	if (!info->swapped)
 		list_del_init(&info->swaplist);
@@ -1651,10 +1658,7 @@ try_split:
 	goto try_split;
 redirty:
 	folio_mark_dirty(folio);
-	if (wbc->for_reclaim)
-		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
-	folio_unlock(folio);
-	return 0;
+	return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
 }
 EXPORT_SYMBOL_GPL(shmem_writeout);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b6dd1708fe82e..3cceb619a8539 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -669,15 +669,13 @@ static pageout_t writeout(struct folio *folio, struct address_space *mapping,
 
 	/*
 	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
-	 * or we failed to allocate contiguous swap entries.
+	 * or we failed to allocate contiguous swap entries, in which case
+	 * the split out folios get added back to folio_list.
 	 */
-	if (shmem_mapping(mapping)) {
-		if (folio_test_large(folio))
-			wbc.list = folio_list;
-		res = shmem_writeout(folio, &wbc);
-	} else {
+	if (shmem_mapping(mapping))
+		res = shmem_writeout(folio, plug, folio_list);
+	else
 		res = swap_writeout(folio, &wbc);
-	}
 
 	if (res < 0)
 		handle_write_error(mapping, folio, res);
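
A minimal sketch of the calling convention after this change; it is not part of the patch, and the variable names simply mirror the callers shown in the hunks above:

	/*
	 * Callers with no swap plug and no list for split-out folios, such as
	 * the i915 and ttm shrinkers, pass NULL for both new arguments:
	 */
	error = shmem_writeout(folio, NULL, NULL);

	/*
	 * Reclaim (mm/vmscan.c) passes its swap_iocb plug and its local folio
	 * list, so folios produced by splitting a large folio are put back on
	 * the list being reclaimed:
	 */
	res = shmem_writeout(folio, plug, folio_list);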