From: Ashish Kalra Date: Tue, 16 Sep 2025 21:29:04 +0000 (+0000) Subject: x86/sev: Add new dump_rmp parameter to snp_leak_pages() API X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e4c00c4ce2aafe61dc7436e763a78d6d112d9e2f;p=thirdparty%2Fkernel%2Fstable.git x86/sev: Add new dump_rmp parameter to snp_leak_pages() API When leaking certain page types, such as Hypervisor Fixed (HV_FIXED) pages, it does not make sense to dump RMP contents for the 2MB range of the page(s) being leaked. In the case of HV_FIXED pages, this is not an error situation where the surrounding 2MB page RMP entries can provide debug information. Add a new __snp_leak_pages() API with a dump_rmp bool parameter to support continuing to add pages to the snp_leaked_pages_list without issuing dump_rmpentry(). Make snp_leak_pages() a wrapper for the common case which also allows existing users to continue to dump RMP entries. Suggested-by: Thomas Lendacky Suggested-by: Sean Christopherson Signed-off-by: Ashish Kalra Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Tom Lendacky Acked-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/cover.1758057691.git.ashish.kalra@amd.com --- diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 44dae74722467..f9046c4b9a2b0 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -655,9 +655,13 @@ void snp_dump_hva_rmpentry(unsigned long address); int psmash(u64 pfn); int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable); int rmp_make_shared(u64 pfn, enum pg_level level); -void snp_leak_pages(u64 pfn, unsigned int npages); +void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp); void kdump_sev_callback(void); void snp_fixup_e820_tables(void); +static inline void snp_leak_pages(u64 pfn, unsigned int pages) +{ + __snp_leak_pages(pfn, pages, true); +} #else static inline bool snp_probe_rmptable_info(void) { return false; } static inline int snp_rmptable_init(void) { return -ENOSYS; } 
@@ -670,6 +674,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as return -ENODEV; } static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; } +static inline void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp) {} static inline void snp_leak_pages(u64 pfn, unsigned int npages) {} static inline void kdump_sev_callback(void) { } static inline void snp_fixup_e820_tables(void) {} diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c index 942372e69b4dd..ee643a6cd6916 100644 --- a/arch/x86/virt/svm/sev.c +++ b/arch/x86/virt/svm/sev.c @@ -1029,7 +1029,7 @@ int rmp_make_shared(u64 pfn, enum pg_level level) } EXPORT_SYMBOL_GPL(rmp_make_shared); -void snp_leak_pages(u64 pfn, unsigned int npages) +void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp) { struct page *page = pfn_to_page(pfn); @@ -1052,14 +1052,15 @@ void snp_leak_pages(u64 pfn, unsigned int npages) (PageHead(page) && compound_nr(page) <= npages)) list_add_tail(&page->buddy_list, &snp_leaked_pages_list); - dump_rmpentry(pfn); + if (dump_rmp) + dump_rmpentry(pfn); snp_nr_leaked_pages++; pfn++; page++; } spin_unlock(&snp_leaked_pages_list_lock); } -EXPORT_SYMBOL_GPL(snp_leak_pages); +EXPORT_SYMBOL_GPL(__snp_leak_pages); void kdump_sev_callback(void) {