From edbd3fd522536a81d4c3acf9620275b3efaf4f04 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Wed, 30 Nov 2022 13:32:14 +0100
Subject: [PATCH] 5.10-stable patches

added patches:
	x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch

---
 queue-5.10/series                             |  1 +
 ...size-calculation-in-__ioremap_caller.patch | 49 +++++++++++++++++++
 2 files changed, 50 insertions(+)
 create mode 100644 queue-5.10/x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch

diff --git a/queue-5.10/series b/queue-5.10/series
index 5780bc8ffe8..95172f4143a 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -121,3 +121,4 @@ gcov-clang-fix-the-buffer-overflow-issue.patch
 mm-vmscan-fix-extreme-overreclaim-and-swap-floods.patch
 kvm-x86-nsvm-leave-nested-mode-on-vcpu-free.patch
 kvm-x86-remove-exit_int_info-warning-in-svm_handle_exit.patch
+x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch
diff --git a/queue-5.10/x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch b/queue-5.10/x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch
new file mode 100644
index 00000000000..d1f64ba2ae3
--- /dev/null
+++ b/queue-5.10/x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch
@@ -0,0 +1,49 @@
+From 4dbd6a3e90e03130973688fd79e19425f720d999 Mon Sep 17 00:00:00 2001
+From: Michael Kelley <mikelley@microsoft.com>
+Date: Wed, 16 Nov 2022 10:41:24 -0800
+Subject: x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
+
+From: Michael Kelley <mikelley@microsoft.com>
+
+commit 4dbd6a3e90e03130973688fd79e19425f720d999 upstream.
+
+Current code re-calculates the size after aligning the starting and
+ending physical addresses on a page boundary. But the re-calculation
+also embeds the masking of high order bits that exceed the size of
+the physical address space (via PHYSICAL_PAGE_MASK). If the masking
+removes any high order bits, the size calculation results in a huge
+value that is likely to immediately fail.
+
+Fix this by re-calculating the page-aligned size first. Then mask any
+high order bits using PHYSICAL_PAGE_MASK.
+
+Fixes: ffa71f33a820 ("x86, ioremap: Fix incorrect physical address handling in PAE mode")
+Signed-off-by: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/1668624097-14884-2-git-send-email-mikelley@microsoft.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ioremap.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_ad
+ 	 * Mappings have to be page-aligned
+ 	 */
+ 	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PHYSICAL_PAGE_MASK;
++	phys_addr &= PAGE_MASK;
+ 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+ 
++	/*
++	 * Mask out any bits not part of the actual physical
++	 * address, like memory encryption bits.
++	 */
++	phys_addr &= PHYSICAL_PAGE_MASK;
++
+ 	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
+ 				 pcm, &new_pcm);
+ 	if (retval) {
--
2.47.3
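
For reference, the arithmetic the fix corrects can be reproduced in a
minimal user-space sketch. This is an illustration only, not kernel
code: PHYS_BITS (a stand-in for the machine's physical address width)
and the use of bit 47 as a memory-encryption-style high bit are
assumptions chosen so that PHYSICAL_PAGE_MASK visibly drops a bit; the
real values depend on the architecture and configuration.

/* sketch.c - why masking before the size calculation blows up */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	((uint64_t)1 << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Hypothetical 40-bit physical address space, purely for demonstration. */
#define PHYS_BITS		40
#define PHYSICAL_PAGE_MASK	(PAGE_MASK & (((uint64_t)1 << PHYS_BITS) - 1))

int main(void)
{
	/* Page-aligned address with an assumed high "encryption" bit set. */
	uint64_t phys_addr = ((uint64_t)1 << 47) | 0x1234567000ULL;
	uint64_t last_addr = phys_addr + 2 * PAGE_SIZE - 1;	/* two pages */

	/*
	 * Old order: mask high bits first, then compute the size. The
	 * subtrahend loses bit 47, but PAGE_ALIGN(last_addr + 1) keeps
	 * it, so the size explodes.
	 */
	uint64_t masked = phys_addr & PHYSICAL_PAGE_MASK;
	uint64_t old_size = PAGE_ALIGN(last_addr + 1) - masked;

	/* Fixed order: page-align, compute the size, then mask. */
	uint64_t aligned = phys_addr & PAGE_MASK;
	uint64_t new_size = PAGE_ALIGN(last_addr + 1) - aligned;
	aligned &= PHYSICAL_PAGE_MASK;

	printf("old size: %#llx\n", (unsigned long long)old_size);
	printf("new size: %#llx\n", (unsigned long long)new_size);
	return 0;
}

Building with "cc sketch.c && ./a.out" should show the old ordering
producing a size of 0x800000002000 (about 128 TiB) for what is really
a two-page (0x2000-byte) mapping, matching the "huge value that is
likely to immediately fail" described in the commit message.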