git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.11
author Sasha Levin <sashal@kernel.org>
Tue, 30 Mar 2021 20:51:39 +0000 (16:51 -0400)
committer Sasha Levin <sashal@kernel.org>
Tue, 30 Mar 2021 20:51:39 +0000 (16:51 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.11/arm64-mm-correct-the-inside-linear-map-range-during-.patch [new file with mode: 0644]
queue-5.11/series [new file with mode: 0644]

diff --git a/queue-5.11/arm64-mm-correct-the-inside-linear-map-range-during-.patch b/queue-5.11/arm64-mm-correct-the-inside-linear-map-range-during-.patch
new file mode 100644 (file)
index 0000000..811b5fb
--- /dev/null
@@ -0,0 +1,73 @@
+From 6ea83a1b80ab8bec15205e34a746cac5a0bcaf72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Mar 2021 10:28:47 -0400
+Subject: arm64: mm: correct the inside linear map range during hotplug check
+
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+
+[ Upstream commit ee7febce051945be28ad86d16a15886f878204de ]
+
+Memory hotplug may fail on systems with CONFIG_RANDOMIZE_BASE because the
+linear map range is not checked correctly.
+
+Because of randomization, the physical address at which the linear map
+starts can actually be higher than the physical address of its end. Check
+for that and, if so, reduce the start to 0.
+
+This can be verified on QEMU by setting kaslr-seed to ~0ul:
+
+memstart_offset_seed = 0xffff
+START: __pa(_PAGE_OFFSET(vabits_actual)) = ffff9000c0000000
+END:   __pa(PAGE_END - 1) =  1000bfffffff
+
+Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Fixes: 58284a901b42 ("arm64/mm: Validate hotplug range before creating linear mapping")
+Tested-by: Tyler Hicks <tyhicks@linux.microsoft.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Link: https://lore.kernel.org/r/20210216150351.129018-2-pasha.tatashin@soleen.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/mm/mmu.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 6f0648777d34..ee01f421e1e4 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1445,14 +1445,30 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+ static bool inside_linear_region(u64 start, u64 size)
+ {
++      u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
++      u64 end_linear_pa = __pa(PAGE_END - 1);
++
++      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
++              /*
++               * Check for a wrap: with a randomized linear mapping the
++               * start physical address can actually be bigger than the
++               * end physical address. In that case set the start to zero,
++               * because the [0, end_linear_pa] range must still be able
++               * to cover all addressable physical addresses.
++               */
++              if (start_linear_pa > end_linear_pa)
++                      start_linear_pa = 0;
++      }
++
++      WARN_ON(start_linear_pa > end_linear_pa);
++
+       /*
+        * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+        * accommodating both its ends but excluding PAGE_END. Max physical
+        * range which can be mapped inside this linear mapping range, must
+        * also be derived from its end points.
+        */
+-      return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+-             (start + size - 1) <= __pa(PAGE_END - 1);
++      return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
+ }
+ int arch_add_memory(int nid, u64 start, u64 size,
+-- 
+2.30.1
+
diff --git a/queue-5.11/series b/queue-5.11/series
new file mode 100644 (file)
index 0000000..f3e7230
--- /dev/null
@@ -0,0 +1 @@
+arm64-mm-correct-the-inside-linear-map-range-during-.patch
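
For illustration, below is a minimal user-space C sketch of the range check that
the patch adds to inside_linear_region(). It hard-codes the physical-address
values from the QEMU example in the commit message; the two constants, the
hotplug request in main() and the standalone structure are assumptions for
demonstration only, not kernel code (__pa(), _PAGE_OFFSET() and PAGE_END are
kernel internals and are not available here).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values taken from the QEMU example in the commit message (illustrative). */
#define START_LINEAR_PA 0xffff9000c0000000ULL  /* __pa(_PAGE_OFFSET(vabits_actual)) */
#define END_LINEAR_PA   0x00001000bfffffffULL  /* __pa(PAGE_END - 1) */

static bool inside_linear_region(uint64_t start, uint64_t size)
{
        uint64_t start_linear_pa = START_LINEAR_PA;
        uint64_t end_linear_pa = END_LINEAR_PA;

        /*
         * With a randomized linear map the start physical address can wrap
         * past the end; clamp it to 0 so that [0, end_linear_pa] still
         * covers all addressable physical addresses.
         */
        if (start_linear_pa > end_linear_pa)
                start_linear_pa = 0;

        return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
}

int main(void)
{
        /* Hypothetical hotplug request: 1 GiB starting at 0x880000000. */
        uint64_t start = 0x880000000ULL;
        uint64_t size = 1ULL << 30;

        printf("hotplug range accepted: %s\n",
               inside_linear_region(start, size) ? "yes" : "no");
        return 0;
}

With the wrapped start clamped to 0, a request such as the 1 GiB range above is
accepted, whereas the unclamped check would have rejected every possible
hotplug range on a kernel randomized this way.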