git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: move _entire_mapcount in folio to page[2] on 32bit
authorDavid Hildenbrand <david@redhat.com>
Mon, 3 Mar 2025 16:29:59 +0000 (17:29 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 18 Mar 2025 05:06:44 +0000 (22:06 -0700)
Let's free up some space on 32bit in page[1] by moving the _entire_mapcount
to page[2].

Ordinary folios only use the entire mapcount with PMD mappings, so order-1
folios don't apply.  Similarly, hugetlb folios are always larger than
order-1, making the entire mapcount essentially unused for all order-1
folios.  Moving it to page[2] will therefore not change anything for them.

On 32bit, simply check in folio_entire_mapcount() whether we have an
order-1 folio, and return 0 in that case.

Note that THPs on 32bit are not particularly common (and we don't care too
much about performance), but we want to keep it working reliably, because
likely we want to use large folios there as well in the future,
independent of PMD leaf support.

Once we dynamically allocate "struct folio", the 32bit specifics will go
away again; even small folios could then have an entire mapcount.

Link: https://lkml.kernel.org/r/20250303163014.1128035-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirks^H^Hski <luto@kernel.org>
Cc: Borislav Betkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcow (Oracle) <willy@infradead.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: tejun heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zefan Li <lizefan.x@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
mm/internal.h
mm/page_alloc.c

index 860082ba89781bc5ed292b2cc1fdcca9d3f6e1a0..f366c180f2b67e0b1cf5a0bde051948327e5a572 100644 (file)
@@ -1333,6 +1333,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline int folio_entire_mapcount(const struct folio *folio)
 {
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+       if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
+               return 0;
        return atomic_read(&folio->_entire_mapcount) + 1;
 }
 
index 3ea2019a1aacf8a79cc5218f51ccc05f52ab216e..9499eb8e8e66eea8f1ea98a6c83f7dbd6a552089 100644 (file)
@@ -385,9 +385,9 @@ struct folio {
                                struct {
        /* public: */
                                        atomic_t _large_mapcount;
-                                       atomic_t _entire_mapcount;
                                        atomic_t _nr_pages_mapped;
 #ifdef CONFIG_64BIT
+                                       atomic_t _entire_mapcount;
                                        atomic_t _pincount;
 #endif /* CONFIG_64BIT */
        /* private: the union with struct page is transitional */
@@ -411,6 +411,7 @@ struct folio {
        /* public: */
                        struct list_head _deferred_list;
 #ifndef CONFIG_64BIT
+                       atomic_t _entire_mapcount;
                        atomic_t _pincount;
 #endif /* !CONFIG_64BIT */
        /* private: the union with struct page is transitional */
index 2d44a4c9d2820a7be4e36dca7539e9d413c69da0..fcf0aeae3934f071b50602e47582f5705091690f 100644 (file)
@@ -762,10 +762,11 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 
        folio_set_order(folio, order);
        atomic_set(&folio->_large_mapcount, -1);
-       atomic_set(&folio->_entire_mapcount, -1);
        atomic_set(&folio->_nr_pages_mapped, 0);
-       if (IS_ENABLED(CONFIG_64BIT) || order > 1)
+       if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
                atomic_set(&folio->_pincount, 0);
+               atomic_set(&folio->_entire_mapcount, -1);
+       }
        if (order > 1)
                INIT_LIST_HEAD(&folio->_deferred_list);
 }
index 2a9aa4439a662550e31564d1a735739fd6fa1e39..e456a43811fd8898144dad563d25513f8837eb60 100644 (file)
@@ -947,10 +947,6 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
        switch (page - head_page) {
        case 1:
                /* the first tail page: these may be in place of ->mapping */
-               if (unlikely(folio_entire_mapcount(folio))) {
-                       bad_page(page, "nonzero entire_mapcount");
-                       goto out;
-               }
                if (unlikely(folio_large_mapcount(folio))) {
                        bad_page(page, "nonzero large_mapcount");
                        goto out;
@@ -960,6 +956,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        goto out;
                }
                if (IS_ENABLED(CONFIG_64BIT)) {
+                       if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+                               bad_page(page, "nonzero entire_mapcount");
+                               goto out;
+                       }
                        if (unlikely(atomic_read(&folio->_pincount))) {
                                bad_page(page, "nonzero pincount");
                                goto out;
@@ -973,6 +973,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        goto out;
                }
                if (!IS_ENABLED(CONFIG_64BIT)) {
+                       if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+                               bad_page(page, "nonzero entire_mapcount");
+                               goto out;
+                       }
                        if (unlikely(atomic_read(&folio->_pincount))) {
                                bad_page(page, "nonzero pincount");
                                goto out;