mm/rmap: inline folio_test_large_maybe_mapped_shared() into callers
author     Lance Yang <lance.yang@linux.dev>
           Thu, 24 Apr 2025 15:56:06 +0000 (23:56 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 13 May 2025 06:50:45 +0000 (23:50 -0700)
To prevent folio_test_large_maybe_mapped_shared() from being used when
CONFIG_MM_ID is disabled, inline it into its few callers.  This also
helps keep the code where it is expected to live.
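
For context, a minimal userspace sketch of the motivation follows (the
struct layout, names, and bit position are illustrative assumptions, not
the kernel's actual definitions).  The folio field that the helper reads
exists only when CONFIG_MM_ID is enabled, so a standalone helper in
page-flags.h is reachable from configurations in which the bit has no
meaning; open-coding the test at each CONFIG_MM_ID-only call site keeps
the check next to the code that owns the bit:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_MM_ID                /* comment this out: the helper vanishes */
#define SHARED_BITNUM 0             /* illustrative bit position only */

struct folio_sketch {
#ifdef CONFIG_MM_ID
	unsigned long mm_ids;       /* stand-in for folio->_mm_ids, which
	                               exists only under CONFIG_MM_ID */
#endif
	unsigned long flags;
};

#ifdef CONFIG_MM_ID
/*
 * After the patch, this test is open-coded at each caller, and every
 * caller is itself CONFIG_MM_ID-only code, so no !CONFIG_MM_ID code
 * can reference the helper by accident.
 */
static bool maybe_mapped_shared(const struct folio_sketch *f)
{
	return (f->mm_ids >> SHARED_BITNUM) & 1;
}
#endif

int main(void)
{
#ifdef CONFIG_MM_ID
	struct folio_sketch f = { .mm_ids = 1UL << SHARED_BITNUM };
	printf("maybe shared: %d\n", maybe_mapped_shared(&f));
#endif
	return 0;
}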

Link: https://lkml.kernel.org/r/20250424155606.57488-1-lance.yang@linux.dev
Signed-off-by: Lance Yang <lance.yang@linux.dev>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Mingzhe Yang <mingzhe.yang@ly.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/page-flags.h
include/linux/rmap.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9b701cfbef22375826f2386ff74ff7a5112751ee..21dd110b6655e4c6ef0d889c3e8a78b43a8cdca7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2111,7 +2111,7 @@ static inline bool folio_maybe_mapped_shared(struct folio *folio)
         */
        if (mapcount <= 1)
                return false;
-       return folio_test_large_maybe_mapped_shared(folio);
+       return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
 }
 
 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d3909cb1e5766cc88f816f4807ef5cc43f4181d8..37b11f15dbd9f91dae473b68dd44dc34953a8eaa 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1230,10 +1230,6 @@ static inline int folio_has_private(const struct folio *folio)
        return !!(folio->flags & PAGE_FLAGS_PRIVATE);
 }
 
-static inline bool folio_test_large_maybe_mapped_shared(const struct folio *folio)
-{
-       return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
-}
 #undef PF_ANY
 #undef PF_HEAD
 #undef PF_NO_TAIL
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6b82b618846eebaec2acf3fa4edd6913e23db99b..c4f4903b108815c6b8d7b089b24e6ff416e7392d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -223,7 +223,7 @@ static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio)
        VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
                        folio->_mm_id_mapcount[1] < 0);
        VM_WARN_ON_ONCE(!folio_mapped(folio) &&
-                       folio_test_large_maybe_mapped_shared(folio));
+                       test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids));
 }
 
 static __always_inline void folio_set_large_mapcount(struct folio *folio,
diff --git a/mm/memory.c b/mm/memory.c
index be124dadec9e2593eb2416ff7f091586a15327d2..68c1d962d0ad2fafafc0b1c715f90e5aa81a8ade 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3768,7 +3768,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
         * If all folio references are from mappings, and all mappings are in
         * the page tables of this MM, then this folio is exclusive to this MM.
         */
-       if (folio_test_large_maybe_mapped_shared(folio))
+       if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
                return false;
 
        VM_WARN_ON_ONCE(folio_test_ksm(folio));
@@ -3791,7 +3791,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
        folio_lock_large_mapcount(folio);
        VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio);
 
-       if (folio_test_large_maybe_mapped_shared(folio))
+       if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
                goto unlock;
        if (folio_large_mapcount(folio) != folio_ref_count(folio))
                goto unlock;
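
The two memory.c hunks above implement the exclusivity reasoning spelled
out in the comment: a large anon folio can be reused on a write fault
only if its "maybe mapped shared" bit is clear and every reference is
accounted for by a mapping in this MM, i.e. the large mapcount equals
the refcount.  A minimal userspace sketch of that check (the struct and
helper below are stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define SHARED_BITNUM 0             /* illustrative bit position only */

struct folio_sketch {
	unsigned long mm_ids;       /* stand-in for folio->_mm_ids */
	int mapcount;               /* stand-in for folio_large_mapcount() */
	int refcount;               /* stand-in for folio_ref_count() */
};

static bool wp_can_reuse(const struct folio_sketch *f)
{
	if ((f->mm_ids >> SHARED_BITNUM) & 1)
		return false;       /* possibly mapped by another MM */
	if (f->mapcount != f->refcount)
		return false;       /* extra pins (e.g. GUP) hold references */
	return true;                /* exclusive: reuse instead of copying */
}

int main(void)
{
	struct folio_sketch exclusive = { .mm_ids = 0, .mapcount = 4, .refcount = 4 };
	struct folio_sketch pinned    = { .mm_ids = 0, .mapcount = 4, .refcount = 5 };

	printf("exclusive reusable: %d\n", wp_can_reuse(&exclusive));
	printf("pinned reusable:    %d\n", wp_can_reuse(&pinned));
	return 0;
}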