x86/mm: Clear _PAGE_DIRTY for kernel mappings when we clear _PAGE_RW
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Tue, 25 Feb 2025 19:37:32 +0000 (19:37 +0000)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 25 Apr 2025 08:45:10 +0000 (10:45 +0200)
[ Upstream commit c1fcf41cf37f7a3fd3bbf6f0c04aba3ea4258888 ]

The bit pattern of _PAGE_DIRTY set and _PAGE_RW clear is used to mark
shadow stacks.  This is currently checked for in mk_pte() but not
pfn_pte().  If we add the check to pfn_pte(), it catches vfree()
calling set_direct_map_invalid_noflush(), which calls
__change_page_attr(), which loads the old protection bits from the
PTE, clears the specified bits and uses pfn_pte() to construct the
new PTE.
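
To make the trap concrete, here is a minimal sketch of the pattern
check (prot_looks_like_shstk() is a hypothetical name, not the
upstream helper; _PAGE_RW and _PAGE_DIRTY are the standard x86 PTE
bits):

	/*
	 * A PTE with Dirty=1 and Write=0 is interpreted by the
	 * hardware as a shadow-stack entry, so constructing one by
	 * merely clearing _PAGE_RW is a bug on non-shadow-stack
	 * mappings.
	 */
	static inline bool prot_looks_like_shstk(pteval_t prot)
	{
		return (prot & (_PAGE_DIRTY | _PAGE_RW)) == _PAGE_DIRTY;
	}

A vfree() of a mapping that has been written to hits exactly this:
the old PTE has _PAGE_DIRTY set, set_direct_map_invalid_noflush()
clears _PAGE_RW, and the resulting protection bits match the
shadow-stack pattern even though no shadow stack is involved.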

We should, therefore, for kernel mappings, clear the _PAGE_DIRTY bit
consistently whenever we clear _PAGE_RW.  I opted to do it in the
callers in case we want to use __change_page_attr() to create shadow
stacks inside the kernel at some point in the future.  Arguably, we
might also want to clear _PAGE_ACCESSED here.

Note that the three functions involved:

  __set_pages_np()
  kernel_map_pages_in_pgd()
  kernel_unmap_pages_in_pgd()

only ever manipulate non-swappable kernel mappings, so the special
DIRTY:1|RW:0 pattern for shadow stacks and the DIRTY:0 pattern for
non-shadow-stack entries can be maintained consistently, without
unintentionally clearing a live dirty bit and thereby destroying
dirty-bit information for user mappings.
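
Spelling out the kernel-mapping states these functions can now
produce (an illustrative summary, not an exhaustive list of PTE
states):

	RW:1          writable kernel mapping; the dirty bit may be
	              set by hardware but carries no information here
	RW:0 DIRTY:0  read-only or not-present kernel mapping
	RW:0 DIRTY:1  reserved exclusively for shadow stacks; never
	              produced by these three functions after this patch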

Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/174051422675.10177.13226545170101706336.tip-bot2@tip-bot2
Closes: https://lore.kernel.org/oe-lkp/202502241646.719f4651-lkp@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/mm/pat/set_memory.c

index 2d850f6bae701cf34fc2c2c68d9a87b80bcd7d09..525794f1eefb3f74e94a33b7a75cf8f942b37ac6 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2374,7 +2374,7 @@ static int __set_pages_np(struct page *page, int numpages)
                                .pgd = NULL,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
-                               .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+                               .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
                                .flags = CPA_NO_CHECK_ALIAS };
 
        /*
@@ -2453,7 +2453,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                .pgd = pgd,
                .numpages = numpages,
                .mask_set = __pgprot(0),
-               .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
+               .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
                .flags = CPA_NO_CHECK_ALIAS,
        };
 
@@ -2496,7 +2496,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
                .pgd            = pgd,
                .numpages       = numpages,
                .mask_set       = __pgprot(0),
-               .mask_clr       = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+               .mask_clr       = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
                .flags          = CPA_NO_CHECK_ALIAS,
        };
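
For context, __change_page_attr() combines these masks with the old
PTE roughly as follows (a simplified sketch; the local variable names
are invented for illustration, but pte_val(), pgprot_val(), pte_pfn()
and pfn_pte() are the real primitives):

	pteval_t val = pte_val(old_pte);

	val &= ~pgprot_val(cpa->mask_clr);	/* now also drops _PAGE_DIRTY */
	val |= pgprot_val(cpa->mask_set);	/* __pgprot(0) for these callers */

	new_pte = pfn_pte(pte_pfn(old_pte), __pgprot(val));

With _PAGE_DIRTY included in mask_clr, the protection bits handed to
pfn_pte() can no longer combine DIRTY:1 with RW:0, so the
shadow-stack check is never tripped by these kernel-mapping updates.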