git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
patches for 4.9
author     Sasha Levin (Microsoft) <sashal@kernel.org>
           Sun, 14 Apr 2019 00:32:41 +0000 (20:32 -0400)
committer  Sasha Levin (Microsoft) <sashal@kernel.org>
           Sun, 14 Apr 2019 00:32:41 +0000 (20:32 -0400)
Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org>
queue-4.9/arm64-kaslr-reserve-size-of-arm64_memstart_align-in-.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/arm64-kaslr-reserve-size-of-arm64_memstart_align-in-.patch b/queue-4.9/arm64-kaslr-reserve-size-of-arm64_memstart_align-in-.patch
new file mode 100644 (file)
index 0000000..8b1ac61
--- /dev/null
@@ -0,0 +1,39 @@
+From 082c6695928dcfef44674d8e658832123c9fc906 Mon Sep 17 00:00:00 2001
+From: Yueyi Li <liyueyi@live.com>
+Date: Mon, 24 Dec 2018 07:40:07 +0000
+Subject: arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
+
+[ Upstream commit c8a43c18a97845e7f94ed7d181c11f41964976a2 ]
+
+When KASLR is enabled (CONFIG_RANDOMIZE_BASE=y), the top 4K of kernel
+virtual address space may be mapped to physical addresses despite being
+reserved for ERR_PTR values.
+
+Fix the randomization of the linear region so that we avoid mapping the
+last page of the virtual address space.
+
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: liyueyi <liyueyi@live.com>
+[will: rewrote commit message; merged in suggestion from Ard]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org>
+---
+ arch/arm64/mm/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index fa6b2fad7a3d..5d3df68272f5 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -272,7 +272,7 @@ void __init arm64_memblock_init(void)
+                * memory spans, randomize the linear region as well.
+                */
+               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+-                      range = range / ARM64_MEMSTART_ALIGN + 1;
++                      range /= ARM64_MEMSTART_ALIGN;
+                       memstart_addr -= ARM64_MEMSTART_ALIGN *
+                                        ((range * memstart_offset_seed) >> 16);
+               }
+-- 
+2.19.1
+
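[Editor's note, not part of the queued patch: the one-line change matters because memstart_offset_seed is a 16-bit value, so the randomization offset is ARM64_MEMSTART_ALIGN * ((range * seed) >> 16). With the old "range / ARM64_MEMSTART_ALIGN + 1", a worst-case seed of 0xffff lets the offset consume the entire slack between the linear region and the RAM span, shifting the linear map all the way to the top of the address space and onto the page reserved for ERR_PTR. The sketch below is a minimal user-space model of that arithmetic only; the 1 GiB stand-in for ARM64_MEMSTART_ALIGN and the 4 GiB slack are illustrative assumptions, not the kernel's actual configuration-dependent values.]

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative values; the real constants depend on the kernel config. */
  #define ALIGN  (1ULL << 30)   /* stand-in for ARM64_MEMSTART_ALIGN */
  #define RANGE  (4 * ALIGN)    /* assumed slack: linear region minus RAM span */

  static uint64_t max_offset(uint64_t range, int old_behaviour)
  {
          /* old code: range / ALIGN + 1; fixed code: range / ALIGN */
          uint64_t r = old_behaviour ? range / ALIGN + 1 : range / ALIGN;
          uint16_t seed = 0xffff;  /* worst-case 16-bit memstart_offset_seed */

          /* same multiply-and-shift the kernel applies to memstart_addr */
          return ALIGN * ((r * seed) >> 16);
  }

  int main(void)
  {
          printf("slack available:  %#llx\n", (unsigned long long)RANGE);
          printf("old max offset:   %#llx\n",
                 (unsigned long long)max_offset(RANGE, 1));
          printf("fixed max offset: %#llx\n",
                 (unsigned long long)max_offset(RANGE, 0));
          return 0;
  }

[With these numbers the old computation yields a worst-case offset equal to the full 4 GiB slack, while the fixed computation tops out one ARM64_MEMSTART_ALIGN lower, leaving headroom that keeps the last 4K page of the virtual address space unmapped.]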
diff --git a/queue-4.9/series b/queue-4.9/series
index 335cc3b66044b403b7952c6f3bbe2f3c64eafaa7..15df165958f5eb1b2599dffa8c718dfc1dfb017b 100644 (file)
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -45,3 +45,4 @@ powerpc-fsl-update-spectre-v2-reporting.patch
 powerpc-fsl-fixed-warning-orphan-section-__btb_flush.patch
 powerpc-fsl-fix-the-flush-of-branch-predictor.patch
 powerpc-security-fix-spectre_v2-reporting.patch
+arm64-kaslr-reserve-size-of-arm64_memstart_align-in-.patch