]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ksm: replace function unmerge_ksm_pages with break_ksm
authorPedro Demarchi Gomes <pedrodemargomes@gmail.com>
Wed, 5 Nov 2025 18:49:12 +0000 (15:49 -0300)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 17 Nov 2025 01:28:28 +0000 (17:28 -0800)
Function unmerge_ksm_pages() is unnecessary now that break_ksm() walks an
address range, so replace it with a direct call to break_ksm().

Link: https://lkml.kernel.org/r/20251105184912.186329-4-pedrodemargomes@gmail.com
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/ksm.c

index 43be57a6a3fd6514eec24715401628c121bf4f6e..f9a1a3658eaddac785f58420b7234371a1c82dea 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -665,6 +665,18 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
 };
 
 /*
+ * Though it's very tempting to unmerge rmap_items from stable tree rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ *
  * We use break_ksm to break COW on a ksm page by triggering unsharing,
  * such that the ksm page will get replaced by an exclusive anonymous page.
  *
@@ -1071,25 +1083,6 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
        }
 }
 
-/*
- * Though it's very tempting to unmerge rmap_items from stable tree rather
- * than check every pte of a given vma, the locking doesn't quite work for
- * that - an rmap_item is assigned to the stable tree after inserting ksm
- * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
- * rmap_items from parent to child at fork time (so as not to waste time
- * if exit comes before the next scan reaches it).
- *
- * Similarly, although we'd like to remove rmap_items (so updating counts
- * and freeing memory) when unmerging an area, it's easier to leave that
- * to the next pass of ksmd - consider, for example, how ksmd might be
- * in cmp_and_merge_page on one of the rmap_items we would be removing.
- */
-static int unmerge_ksm_pages(struct vm_area_struct *vma,
-                            unsigned long start, unsigned long end, bool lock_vma)
-{
-       return break_ksm(vma, start, end, lock_vma);
-}
-
 static inline
 struct ksm_stable_node *folio_stable_node(const struct folio *folio)
 {
@@ -1227,8 +1220,7 @@ static int unmerge_and_remove_all_rmap_items(void)
                for_each_vma(vmi, vma) {
                        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                                continue;
-                       err = unmerge_ksm_pages(vma,
-                                               vma->vm_start, vma->vm_end, false);
+                       err = break_ksm(vma, vma->vm_start, vma->vm_end, false);
                        if (err)
                                goto error;
                }
@@ -2855,7 +2847,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma)
                return 0;
 
        if (vma->anon_vma) {
-               err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
+               err = break_ksm(vma, vma->vm_start, vma->vm_end, true);
                if (err)
                        return err;
        }
@@ -3007,7 +2999,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                        return 0;               /* just ignore the advice */
 
                if (vma->anon_vma) {
-                       err = unmerge_ksm_pages(vma, start, end, true);
+                       err = break_ksm(vma, start, end, true);
                        if (err)
                                return err;
                }
@@ -3389,7 +3381,7 @@ static int ksm_memory_callback(struct notifier_block *self,
                 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
                 * and remove_all_stable_nodes() while memory is going offline:
                 * it is unsafe for them to touch the stable tree at this time.
-                * But unmerge_ksm_pages(), rmap lookups and other entry points
+                * But break_ksm(), rmap lookups and other entry points
                 * which do not need the ksm_thread_mutex are all safe.
                 */
                mutex_lock(&ksm_thread_mutex);