--- /dev/null
+From 29956748339aa8757a7e2f927a8679dd08f24bb6 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Fri, 2 Feb 2024 17:29:32 +0100
+Subject: x86/Kconfig: Remove CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 29956748339aa8757a7e2f927a8679dd08f24bb6 upstream.
+
+It was meant well at the time but nothing's using it so get rid of it.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20240202163510.GDZb0Zvj8qOndvFOiZ@fat_crate.local
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 4 +---
+ Documentation/arch/x86/amd-memory-encryption.rst | 16 ++++++++--------
+ arch/x86/Kconfig | 13 -------------
+ arch/x86/mm/mem_encrypt_identity.c | 11 +----------
+ 4 files changed, 10 insertions(+), 34 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3327,9 +3327,7 @@
+
+ mem_encrypt= [X86-64] AMD Secure Memory Encryption (SME) control
+ Valid arguments: on, off
+- Default (depends on kernel configuration option):
+- on (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
+- off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
++ Default: off
+ mem_encrypt=on: Activate SME
+ mem_encrypt=off: Do not activate SME
+
+--- a/Documentation/arch/x86/amd-memory-encryption.rst
++++ b/Documentation/arch/x86/amd-memory-encryption.rst
+@@ -87,14 +87,14 @@ The state of SME in the Linux kernel can
+ kernel is non-zero).
+
+ SME can also be enabled and activated in the BIOS. If SME is enabled and
+-activated in the BIOS, then all memory accesses will be encrypted and it will
+-not be necessary to activate the Linux memory encryption support. If the BIOS
+-merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
+-memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
+-by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
+-not enable SME, then Linux will not be able to activate memory encryption, even
+-if configured to do so by default or the mem_encrypt=on command line parameter
+-is specified.
++activated in the BIOS, then all memory accesses will be encrypted and it
++will not be necessary to activate the Linux memory encryption support.
++
++If the BIOS merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG),
++then memory encryption can be enabled by supplying mem_encrypt=on on the
++kernel command line. However, if BIOS does not enable SME, then Linux
++will not be able to activate memory encryption, even if configured to do
++so by default or the mem_encrypt=on command line parameter is specified.
+
+ Secure Nested Paging (SNP)
+ ==========================
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1539,19 +1539,6 @@ config AMD_MEM_ENCRYPT
+ This requires an AMD processor that supports Secure Memory
+ Encryption (SME).
+
+-config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+- bool "Activate AMD Secure Memory Encryption (SME) by default"
+- depends on AMD_MEM_ENCRYPT
+- help
+- Say yes to have system memory encrypted by default if running on
+- an AMD processor that supports Secure Memory Encryption (SME).
+-
+- If set to Y, then the encryption of system memory can be
+- deactivated with the mem_encrypt=off command line option.
+-
+- If set to N, then the encryption of system memory can be
+- activated with the mem_encrypt=on command line option.
+-
+ # Common NUMA Features
+ config NUMA
+ bool "NUMA Memory Allocation and Scheduler Support"
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -97,7 +97,6 @@ static char sme_workarea[2 * PMD_SIZE] _
+
+ static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+ static char sme_cmdline_on[] __initdata = "on";
+-static char sme_cmdline_off[] __initdata = "off";
+
+ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+@@ -504,7 +503,7 @@ void __init sme_encrypt_kernel(struct bo
+
+ void __init sme_enable(struct boot_params *bp)
+ {
+- const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
++ const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned long feature_mask;
+ unsigned long me_mask;
+@@ -587,12 +586,6 @@ void __init sme_enable(struct boot_param
+ asm ("lea sme_cmdline_on(%%rip), %0"
+ : "=r" (cmdline_on)
+ : "p" (sme_cmdline_on));
+- asm ("lea sme_cmdline_off(%%rip), %0"
+- : "=r" (cmdline_off)
+- : "p" (sme_cmdline_off));
+-
+- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+- sme_me_mask = me_mask;
+
+ cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ ((u64)bp->ext_cmd_line_ptr << 32));
+@@ -602,8 +595,6 @@ void __init sme_enable(struct boot_param
+
+ if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ sme_me_mask = me_mask;
+- else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+- sme_me_mask = 0;
+
+ out:
+ if (sme_me_mask) {
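A minimal sketch, assuming the patch above has been applied, of what the
remaining mem_encrypt= handling in sme_enable() boils down to: with the
Kconfig default gone there is no mem_encrypt=off branch left, and SME stays
off unless mem_encrypt=on is given on the kernel command line. The helper
name sme_cmdline_sketch and the condensed flow are illustrative only; the
real logic (including the RIP-relative string accesses) lives in
arch/x86/mm/mem_encrypt_identity.c.

	/*
	 * Illustrative only: condensed view of the command-line handling
	 * left in sme_enable() after this patch.  No Kconfig default, no
	 * mem_encrypt=off branch; only an explicit mem_encrypt=on enables
	 * SME.
	 */
	static void __init sme_cmdline_sketch(struct boot_params *bp, u64 me_mask)
	{
		char buffer[16];
		const char *cmdline_ptr;

		cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
					     ((u64)bp->ext_cmd_line_ptr << 32));

		/* Option absent: SME stays off. */
		if (cmdline_find_option(cmdline_ptr, "mem_encrypt", buffer,
					sizeof(buffer)) < 0)
			return;

		/* Only "on" activates SME. */
		if (!strncmp(buffer, "on", sizeof(buffer)))
			sme_me_mask = me_mask;
	}
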
--- /dev/null
+From 1c811d403afd73f04bde82b83b24c754011bd0e8 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Sat, 3 Feb 2024 13:53:06 +0100
+Subject: x86/sev: Fix position dependent variable references in startup code
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 1c811d403afd73f04bde82b83b24c754011bd0e8 upstream.
+
+The early startup code executes from a 1:1 mapping of memory, which
+differs from the mapping that the code was linked and/or relocated to
+run at. The latter mapping is not active yet at this point, and so
+symbol references that rely on it will fault.
+
+Given that the core kernel is built without -fPIC, symbol references are
+typically emitted as absolute, and so any such references occurring in
+the early startup code will therefore crash the kernel.
+
+While an attempt was made to work around this for the early SEV/SME
+startup code, by forcing RIP-relative addressing for certain global
+SEV/SME variables via inline assembly (see snp_cpuid_get_table() for
+example), RIP-relative addressing must be pervasively enforced for
+SEV/SME global variables when accessed prior to page table fixups.
+
+__startup_64() already handles this issue for select non-SEV/SME global
+variables using fixup_pointer(), which adjusts the pointer relative to a
+`physaddr` argument. To avoid having to pass around this `physaddr`
+argument across all functions needing to apply pointer fixups, introduce
+a macro RIP_RELATIVE_REF() which generates a RIP-relative reference to
+a given global variable. It is used where necessary to force
+RIP-relative accesses to global variables.
+
+For backporting purposes, this patch makes no attempt at cleaning up
+other occurrences of this pattern, involving either inline asm or
+fixup_pointer(). Those will be addressed later.
+
+ [ bp: Call it "rip_rel_ref" everywhere like other code shortens
+ "rIP-relative reference" and make the asm wrapper __always_inline. ]
+
+Co-developed-by: Kevin Loughlin <kevinloughlin@google.com>
+Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/coco/core.c | 7 +------
+ arch/x86/include/asm/asm.h | 14 ++++++++++++++
+ arch/x86/include/asm/coco.h | 8 +++++++-
+ arch/x86/include/asm/mem_encrypt.h | 15 +++++++++------
+ arch/x86/kernel/sev-shared.c | 12 ++++++------
+ arch/x86/kernel/sev.c | 4 ++--
+ arch/x86/mm/mem_encrypt_identity.c | 27 ++++++++++++---------------
+ 7 files changed, 51 insertions(+), 36 deletions(-)
+
+--- a/arch/x86/coco/core.c
++++ b/arch/x86/coco/core.c
+@@ -14,7 +14,7 @@
+ #include <asm/processor.h>
+
+ enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
+-static u64 cc_mask __ro_after_init;
++u64 cc_mask __ro_after_init;
+
+ static bool noinstr intel_cc_platform_has(enum cc_attr attr)
+ {
+@@ -148,8 +148,3 @@ u64 cc_mkdec(u64 val)
+ }
+ }
+ EXPORT_SYMBOL_GPL(cc_mkdec);
+-
+-__init void cc_set_mask(u64 mask)
+-{
+- cc_mask = mask;
+-}
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -113,6 +113,20 @@
+
+ #endif
+
++#ifndef __ASSEMBLY__
++#ifndef __pic__
++static __always_inline __pure void *rip_rel_ptr(void *p)
++{
++ asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
++
++ return p;
++}
++#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
++#else
++#define RIP_REL_REF(var) (var)
++#endif
++#endif
++
+ /*
+ * Macros to generate condition code outputs from inline assembly,
+ * The output operand must be type "bool".
+--- a/arch/x86/include/asm/coco.h
++++ b/arch/x86/include/asm/coco.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_COCO_H
+ #define _ASM_X86_COCO_H
+
++#include <asm/asm.h>
+ #include <asm/types.h>
+
+ enum cc_vendor {
+@@ -11,9 +12,14 @@ enum cc_vendor {
+ };
+
+ extern enum cc_vendor cc_vendor;
++extern u64 cc_mask;
+
+ #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+-void cc_set_mask(u64 mask);
++static inline void cc_set_mask(u64 mask)
++{
++ RIP_REL_REF(cc_mask) = mask;
++}
++
+ u64 cc_mkenc(u64 val);
+ u64 cc_mkdec(u64 val);
+ #else
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -15,7 +15,8 @@
+ #include <linux/init.h>
+ #include <linux/cc_platform.h>
+
+-#include <asm/bootparam.h>
++#include <asm/asm.h>
++struct boot_params;
+
+ #ifdef CONFIG_X86_MEM_ENCRYPT
+ void __init mem_encrypt_init(void);
+@@ -58,6 +59,11 @@ void __init mem_encrypt_free_decrypted_m
+
+ void __init sev_es_init_vc_handling(void);
+
++static inline u64 sme_get_me_mask(void)
++{
++ return RIP_REL_REF(sme_me_mask);
++}
++
+ #define __bss_decrypted __section(".bss..decrypted")
+
+ #else /* !CONFIG_AMD_MEM_ENCRYPT */
+@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned
+
+ static inline void mem_encrypt_free_decrypted_mem(void) { }
+
++static inline u64 sme_get_me_mask(void) { return 0; }
++
+ #define __bss_decrypted
+
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);
+
+ extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
+-static inline u64 sme_get_me_mask(void)
+-{
+- return sme_me_mask;
+-}
+-
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __X86_MEM_ENCRYPT_H__ */
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -556,9 +556,9 @@ static int snp_cpuid(struct ghcb *ghcb,
+ leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
+
+ /* Skip post-processing for out-of-range zero leafs. */
+- if (!(leaf->fn <= cpuid_std_range_max ||
+- (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+- (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
++ if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
++ (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
++ (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ return 0;
+ }
+
+@@ -1063,11 +1063,11 @@ static void __init setup_cpuid_table(con
+ const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
+
+ if (fn->eax_in == 0x0)
+- cpuid_std_range_max = fn->eax;
++ RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ else if (fn->eax_in == 0x40000000)
+- cpuid_hyp_range_max = fn->eax;
++ RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ else if (fn->eax_in == 0x80000000)
+- cpuid_ext_range_max = fn->eax;
++ RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ }
+ }
+
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -748,7 +748,7 @@ void __init early_snp_set_memory_private
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /*
+@@ -767,7 +767,7 @@ void __init early_snp_set_memory_shared(
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /* Ask hypervisor to mark the memory pages shared in the RMP table. */
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -304,7 +304,8 @@ void __init sme_encrypt_kernel(struct bo
+ * instrumentation or checking boot_cpu_data in the cc_platform_has()
+ * function.
+ */
+- if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
++ if (!sme_get_me_mask() ||
++ RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ return;
+
+ /*
+@@ -541,11 +542,11 @@ void __init sme_enable(struct boot_param
+ me_mask = 1UL << (ebx & 0x3f);
+
+ /* Check the SEV MSR whether SEV or SME is enabled */
+- sev_status = __rdmsr(MSR_AMD64_SEV);
+- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++ RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
++ feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
+ /* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
+- if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
+ snp_abort();
+
+ /* Check if memory encryption is enabled */
+@@ -571,7 +572,6 @@ void __init sme_enable(struct boot_param
+ return;
+ } else {
+ /* SEV state cannot be controlled by a command line option */
+- sme_me_mask = me_mask;
+ goto out;
+ }
+
+@@ -590,16 +590,13 @@ void __init sme_enable(struct boot_param
+ cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ ((u64)bp->ext_cmd_line_ptr << 32));
+
+- if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+- goto out;
+-
+- if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+- sme_me_mask = me_mask;
++ if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
++ strncmp(buffer, cmdline_on, sizeof(buffer)))
++ return;
+
+ out:
+- if (sme_me_mask) {
+- physical_mask &= ~sme_me_mask;
+- cc_vendor = CC_VENDOR_AMD;
+- cc_set_mask(sme_me_mask);
+- }
++ RIP_REL_REF(sme_me_mask) = me_mask;
++ physical_mask &= ~me_mask;
++ cc_vendor = CC_VENDOR_AMD;
++ cc_set_mask(me_mask);
+ }
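
A minimal usage sketch of the RIP_REL_REF() wrapper added by the patch
above, assuming the <asm/asm.h> definition from the diff is in place. The
variable early_flag and the function early_example() are made up for this
illustration; the point is only that both writes and reads of a global from
the early 1:1-mapped startup path go through the wrapper, so the compiler
emits RIP-relative accesses instead of absolute ones.

	#include <asm/asm.h>		/* RIP_REL_REF(), rip_rel_ptr() */

	static unsigned long early_flag;	/* made-up example variable */

	static void __init early_example(void)
	{
		/*
		 * A plain "early_flag = 1" may be emitted as an absolute
		 * reference, which faults while running from the identity
		 * mapping; RIP_REL_REF() forces a RIP-relative access.
		 */
		RIP_REL_REF(early_flag) = 1;

		/* Reads go through the same wrapper. */
		if (RIP_REL_REF(early_flag))
			return;
	}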