mm/huge_memory: refactor after-split (page) cache code
author	Zi Yan <ziy@nvidia.com>
	Fri, 18 Jul 2025 18:37:20 +0000 (14:37 -0400)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 25 Jul 2025 02:12:39 +0000 (19:12 -0700)
Smatch/Coverity checkers report NULL mapping dereference issues[1][2][3]
every time this code is modified, because they do not understand that
mapping cannot be NULL when a folio is in the page cache here.  Refactor
the code to make that invariant explicit.

Remove "end = -1" for anonymous folios, since after code refactoring, end
is no longer used by anonymous folio handling code.

No functional change is intended.

Link: https://lkml.kernel.org/r/20250718023000.4044406-7-ziy@nvidia.com
Link: https://lore.kernel.org/linux-mm/2afe3d59-aca5-40f7-82a3-a6d976fb0f4f@stanley.mountain/
Link: https://lore.kernel.org/oe-kbuild/64b54034-f311-4e7d-b935-c16775dbb642@suswa.mountain/
Link: https://lore.kernel.org/linux-mm/20250716145804.4836-1-antonio@mandelbit.com/
Link: https://lkml.kernel.org/r/20250718183720.4054515-7-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <k.shutemov@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 19e69704fcff2d180ef23cd4845bba2d395acd08..9c38a95e9f091bd4ac28fc7446495a5b8a597041 100644 (file)
@@ -3640,7 +3640,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                        ret = -EBUSY;
                        goto out;
                }
-               end = -1;
                mapping = NULL;
                anon_vma_lock_write(anon_vma);
        } else {
@@ -3793,6 +3792,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                 */
                for (new_folio = folio_next(folio); new_folio != end_folio;
                     new_folio = next) {
+                       unsigned long nr_pages = folio_nr_pages(new_folio);
+
                        next = folio_next(new_folio);
 
                        expected_refs = folio_expected_ref_count(new_folio) + 1;
@@ -3800,25 +3801,36 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
                        lru_add_split_folio(folio, new_folio, lruvec, list);
 
-                       /* Some pages can be beyond EOF: drop them from cache */
-                       if (new_folio->index >= end) {
-                               if (shmem_mapping(mapping))
-                                       nr_shmem_dropped += folio_nr_pages(new_folio);
-                               else if (folio_test_clear_dirty(new_folio))
-                                       folio_account_cleaned(
-                                               new_folio,
-                                               inode_to_wb(mapping->host));
-                               __filemap_remove_folio(new_folio, NULL);
-                               folio_put_refs(new_folio,
-                                              folio_nr_pages(new_folio));
-                       } else if (mapping) {
-                               __xa_store(&mapping->i_pages, new_folio->index,
-                                          new_folio, 0);
-                       } else if (swap_cache) {
+                       /*
+                        * Anonymous folio with swap cache.
+                        * NOTE: shmem in swap cache is not supported yet.
+                        */
+                       if (swap_cache) {
                                __xa_store(&swap_cache->i_pages,
                                           swap_cache_index(new_folio->swap),
                                           new_folio, 0);
+                               continue;
+                       }
+
+                       /* Anonymous folio without swap cache */
+                       if (!mapping)
+                               continue;
+
+                       /* Add the new folio to the page cache. */
+                       if (new_folio->index < end) {
+                               __xa_store(&mapping->i_pages, new_folio->index,
+                                          new_folio, 0);
+                               continue;
                        }
+
+                       /* Drop folio beyond EOF: ->index >= end */
+                       if (shmem_mapping(mapping))
+                               nr_shmem_dropped += nr_pages;
+                       else if (folio_test_clear_dirty(new_folio))
+                               folio_account_cleaned(
+                                       new_folio, inode_to_wb(mapping->host));
+                       __filemap_remove_folio(new_folio, NULL);
+                       folio_put_refs(new_folio, nr_pages);
                }
                /*
                 * Unfreeze @folio only after all page cache entries, which