From 555b9962472818fba44eb42f31cfd1e118d20478 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Jun 2023 01:39:41 +0200
Subject: init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()

From: Thomas Gleixner <tglx@linutronix.de>

commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream

Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
remove the weak fallback from the core code.

No functional change.
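
In outline, the change swaps a link-time weak fallback in generic code
for a compile-time stub in the arch header. A minimal sketch of the two
patterns (illustrative simplification, drawn from the diff below; the
real header wraps the stub in its existing CONFIG_AMD_MEM_ENCRYPT
ifdef/else structure):

    /*
     * Before: init/main.c provides a weak no-op that the x86
     * implementation overrides at link time when
     * CONFIG_AMD_MEM_ENCRYPT=y.
     */
    void __init __weak mem_encrypt_init(void) { }

    /*
     * After: the !CONFIG_AMD_MEM_ENCRYPT stub is a static inline in
     * asm/mem_encrypt.h and the call site moves into x86's
     * arch_cpu_finalize_init(), so generic init code no longer
     * references mem_encrypt_init() at all.
     */
    #ifndef CONFIG_AMD_MEM_ENCRYPT
    static inline void mem_encrypt_init(void) { }
    #endif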

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/mem_encrypt.h |    2 ++
 arch/x86/kernel/cpu/common.c       |   11 +++++++++++
 init/main.c                        |   10 ----------
 3 files changed, 13 insertions(+), 10 deletions(-)

--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -64,6 +64,8 @@ static inline void __init sme_early_init
 static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
 /*
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -2049,4 +2050,14 @@ void __init arch_cpu_finalize_init(void)
 	} else {
 		fpu__init_check_bugs();
 	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
 }
--- a/init/main.c
+++ b/init/main.c
@@ -485,8 +485,6 @@ void __init __weak thread_stack_cache_in
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 /*
  * Set up kernel memory allocators
  */
@@ -648,14 +646,6 @@ asmlinkage __visible void __init start_k
 	 */
 	locking_selftest();
 
-	/*
-	 * This needs to be called before any devices perform DMA
-	 * operations that might use the SWIOTLB bounce buffers. It will
-	 * mark the bounce buffers as decrypted so that their usage will
-	 * not cause "plain-text" data to be decrypted when accessed.
-	 */
-	mem_encrypt_init();
-
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {