mm/ksm: use folio in write_protect_page
author     Alex Shi (tencent) <alexs@kernel.org>
           Thu, 11 Apr 2024 06:17:08 +0000 (14:17 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 6 May 2024 00:53:34 +0000 (17:53 -0700)
Compound pages are checked for and skipped before write_protect_page() is
called, so pass a folio and save a few compound_head() checks (a sketch of
the pattern follows the tags below).

Link: https://lkml.kernel.org/r/20240411061713.1847574-8-alexs@kernel.org
Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
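For context on where the savings come from: every legacy struct page
accessor re-derives the head page via compound_head(), while the folio
accessors test flags on the folio directly. A simplified, illustrative
sketch of the pattern, loosely modeled on include/linux/page-flags.h (the
kernel generates these accessors with macros, and the real swapcache test
also checks PG_swapbacked):

	static __always_inline bool folio_test_swapcache(const struct folio *folio)
	{
		/* Folio accessors test the flag on the folio directly. */
		return test_bit(PG_swapcache, folio_flags(folio, 0));
	}

	static __always_inline bool PageSwapCache(const struct page *page)
	{
		/* Legacy accessors pay a page_folio()/compound_head() per call. */
		return folio_test_swapcache(page_folio(page));
	}

Since the caller already guarantees the page is not compound, resolving the
folio once with page_folio() and passing it down hoists that lookup out of
every accessor used in write_protect_page().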
mm/ksm.c

index b127d39c9af0010257c26454e6fff18b41c877c4..2fdd6586a3a76970b6cfe64d0cf8b2b2298dfe44 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1289,23 +1289,24 @@ static u32 calc_checksum(struct page *page)
        return checksum;
 }
 
-static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
                              pte_t *orig_pte)
 {
        struct mm_struct *mm = vma->vm_mm;
-       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
+       DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
        int swapped;
        int err = -EFAULT;
        struct mmu_notifier_range range;
        bool anon_exclusive;
        pte_t entry;
 
-       pvmw.address = page_address_in_vma(page, vma);
+       if (WARN_ON_ONCE(folio_test_large(folio)))
+               return err;
+
+       pvmw.address = page_address_in_vma(&folio->page, vma);
        if (pvmw.address == -EFAULT)
                goto out;
 
-       BUG_ON(PageTransCompound(page));
-
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
                                pvmw.address + PAGE_SIZE);
        mmu_notifier_invalidate_range_start(&range);
@@ -1315,12 +1316,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
                goto out_unlock;
 
-       anon_exclusive = PageAnonExclusive(page);
+       anon_exclusive = PageAnonExclusive(&folio->page);
        entry = ptep_get(pvmw.pte);
        if (pte_write(entry) || pte_dirty(entry) ||
            anon_exclusive || mm_tlb_flush_pending(mm)) {
-               swapped = PageSwapCache(page);
-               flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+               swapped = folio_test_swapcache(folio);
+               flush_cache_page(vma, pvmw.address, folio_pfn(folio));
                /*
                 * Ok this is tricky, when get_user_pages_fast() run it doesn't
                 * take any lock, therefore the check that we are going to make
@@ -1340,20 +1341,20 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                 * Check that no O_DIRECT or similar I/O is in progress on the
                 * page
                 */
-               if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+               if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
                        set_pte_at(mm, pvmw.address, pvmw.pte, entry);
                        goto out_unlock;
                }
 
                /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
                if (anon_exclusive &&
-                   folio_try_share_anon_rmap_pte(page_folio(page), page)) {
+                   folio_try_share_anon_rmap_pte(folio, &folio->page)) {
                        set_pte_at(mm, pvmw.address, pvmw.pte, entry);
                        goto out_unlock;
                }
 
                if (pte_dirty(entry))
-                       set_page_dirty(page);
+                       folio_mark_dirty(folio);
                entry = pte_mkclean(entry);
 
                if (pte_write(entry))
@@ -1519,7 +1520,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
         * ptes are necessarily already write-protected.  But in either
         * case, we need to lock and check page_count is not raised.
         */
-       if (write_protect_page(vma, page, &orig_pte) == 0) {
+       if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
                if (!kpage) {
                        /*
                         * While we hold page lock, upgrade page from
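As an aside on the reference-count check converted in the second hunk
above: before write-protecting, KSM must be sure no one else can still
write to the folio. A minimal restatement of that invariant, assuming an
order-0 anonymous folio (which the new WARN_ON_ONCE guarantees) and using a
hypothetical helper name, not code from the tree:

	/*
	 * Hedged sketch, not kernel code: the stability invariant that
	 * write_protect_page() checks.  The helper name is hypothetical.
	 */
	static bool ksm_folio_is_stable(struct folio *folio, int swapped)
	{
		/*
		 * folio_mapcount() counts PTE mappings, +1 is the caller's
		 * own scan-time reference, +swapped covers a swap cache
		 * reference.  Any extra reference (e.g. O_DIRECT via
		 * get_user_pages) means the contents may still change, so
		 * KSM must not merge the page.
		 */
		return folio_mapcount(folio) + 1 + swapped ==
		       folio_ref_count(folio);
	}

For an order-0 folio the old page_mapcount()/page_count() pair and the new
folio_mapcount()/folio_ref_count() pair return the same values; the folio
calls simply skip the hidden compound_head() lookup.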