6.8-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  Mon, 8 Apr 2024 12:37:53 +0000 (14:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  Mon, 8 Apr 2024 12:37:53 +0000 (14:37 +0200)
added patches:
x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch
x86-sev-move-early-startup-code-into-.head.text-section.patch
x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch

queue-6.8/series
queue-6.8/x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch [new file with mode: 0644]
queue-6.8/x86-sev-move-early-startup-code-into-.head.text-section.patch [new file with mode: 0644]
queue-6.8/x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch [new file with mode: 0644]

diff --git a/queue-6.8/series b/queue-6.8/series
index 97953c7028ce4e9b041d779c495f0022e0b3481d..0875397b65df750b4bbb2e7fd2e58a8ff56f2a4d 100644
--- a/queue-6.8/series
+++ b/queue-6.8/series
@@ -268,3 +268,6 @@ bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch
 bpf-support-deferring-bpf_link-dealloc-to-after-rcu-grace-period.patch
 efi-libstub-add-generic-support-for-parsing-mem_encrypt.patch
 x86-boot-move-mem_encrypt-parsing-to-the-decompressor.patch
+x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch
+x86-sev-move-early-startup-code-into-.head.text-section.patch
+x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch
diff --git a/queue-6.8/x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch b/queue-6.8/x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch
new file mode 100644
index 0000000..224d2d9
--- /dev/null
+++ b/queue-6.8/x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch
@@ -0,0 +1,93 @@
+From 9c55461040a9264b7e44444c53d26480b438eda6 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Thu, 25 Jan 2024 14:32:07 +0100
+Subject: x86/efistub: Remap kernel text read-only before dropping NX attribute
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 9c55461040a9264b7e44444c53d26480b438eda6 upstream.
+
+Currently, the EFI stub invokes the EFI memory attributes protocol to
+strip any NX restrictions from the entire loaded kernel, resulting in
+all code and data being mapped read-write-execute.
+
+The point of the EFI memory attributes protocol is to remove the need
+for all memory allocations to be mapped with both write and execute
+permissions by default, and make it the OS loader's responsibility to
+transition data mappings to code mappings where appropriate.
+
+Even though the UEFI specification does not appear to leave room for
+denying memory attribute changes based on security policy, let's be
+cautious and avoid relying on the ability to create read-write-execute
+mappings. This is trivially achievable, given that the amount of kernel
+code executing via the firmware's 1:1 mapping is rather small and
+limited to the .head.text region. So let's drop the NX restrictions only
+on that subregion, but not before remapping it as read-only first.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/Makefile       |    2 +-
+ arch/x86/boot/compressed/misc.c         |    1 +
+ arch/x86/include/asm/boot.h             |    1 +
+ drivers/firmware/efi/libstub/x86-stub.c |   11 ++++++++++-
+ 4 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -84,7 +84,7 @@ LDFLAGS_vmlinux += -T
+ hostprogs     := mkpiggy
+ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
++sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+ quiet_cmd_voffset = VOFFSET $@
+       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -330,6 +330,7 @@ static size_t parse_elf(void *output)
+       return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
+ }
++const unsigned long kernel_text_size = VO___start_rodata - VO__text;
+ const unsigned long kernel_total_size = VO__end - VO__text;
+ static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -81,6 +81,7 @@
+ #ifndef __ASSEMBLY__
+ extern unsigned int output_len;
++extern const unsigned long kernel_text_size;
+ extern const unsigned long kernel_total_size;
+ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -238,6 +238,15 @@ efi_status_t efi_adjust_memory_range_pro
+       rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+       if (memattr != NULL) {
++              status = efi_call_proto(memattr, set_memory_attributes,
++                                      rounded_start,
++                                      rounded_end - rounded_start,
++                                      EFI_MEMORY_RO);
++              if (status != EFI_SUCCESS) {
++                      efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
++                      return status;
++              }
++
+               status = efi_call_proto(memattr, clear_memory_attributes,
+                                       rounded_start,
+                                       rounded_end - rounded_start,
+@@ -818,7 +827,7 @@ static efi_status_t efi_decompress_kerne
+       *kernel_entry = addr + entry;
+-      return efi_adjust_memory_range_protection(addr, kernel_total_size);
++      return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ }
+ static void __noreturn enter_kernel(unsigned long kernel_addr,
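
The ordering in the x86-stub.c hunk above is the heart of the fix: the kernel
text range is remapped read-only before its NX attribute is cleared, so the
region is never writable and executable at the same time. As a rough sketch of
that two-step sequence (remap_text_rx is a hypothetical helper invented here
for illustration; the real logic lives in efi_adjust_memory_range_protection()
as patched above):

	/*
	 * Sketch only: make [start, start + size) executable without ever
	 * leaving it mapped read-write-execute.
	 */
	static efi_status_t remap_text_rx(efi_memory_attribute_protocol_t *memattr,
					  unsigned long start, unsigned long size)
	{
		efi_status_t status;

		/* Step 1: add the read-only attribute first. */
		status = efi_call_proto(memattr, set_memory_attributes,
					start, size, EFI_MEMORY_RO);
		if (status != EFI_SUCCESS)
			return status;

		/* Step 2: only now drop the no-execute restriction. */
		return efi_call_proto(memattr, clear_memory_attributes,
				      start, size, EFI_MEMORY_XP);
	}

This is also why the decompressor now exports kernel_text_size (_text up to
__start_rodata) and the stub passes it instead of kernel_total_size: only the
text region ever needs to be executable from the firmware's 1:1 mapping.
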
diff --git a/queue-6.8/x86-sev-move-early-startup-code-into-.head.text-section.patch b/queue-6.8/x86-sev-move-early-startup-code-into-.head.text-section.patch
new file mode 100644
index 0000000..fbfdf8a
--- /dev/null
+++ b/queue-6.8/x86-sev-move-early-startup-code-into-.head.text-section.patch
@@ -0,0 +1,194 @@
+From 428080c9b19bfda37c478cd626dbd3851db1aff9 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Tue, 27 Feb 2024 16:19:16 +0100
+Subject: x86/sev: Move early startup code into .head.text section
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 428080c9b19bfda37c478cd626dbd3851db1aff9 upstream.
+
+In preparation for implementing rigorous build time checks to enforce
+that only code that can support it will be called from the early 1:1
+mapping of memory, move SEV init code that is called in this manner to
+the .head.text section.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lore.kernel.org/r/20240227151907.387873-19-ardb+git@google.com
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/sev.c |    3 +++
+ arch/x86/include/asm/sev.h     |   10 +++++-----
+ arch/x86/kernel/sev-shared.c   |   23 ++++++++++-------------
+ arch/x86/kernel/sev.c          |   14 ++++++++------
+ 4 files changed, 26 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -116,6 +116,9 @@ static bool fault_in_kernel_space(unsign
+ #undef __init
+ #define __init
++#undef __head
++#define __head
++
+ #define __BOOT_COMPRESSED
+ /* Basic instruction decoding support needed */
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -199,15 +199,15 @@ static inline int pvalidate(unsigned lon
+ struct snp_guest_request_ioctl;
+ void setup_ghcb(void);
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+-                                       unsigned long npages);
+-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+-                                      unsigned long npages);
++void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++                                unsigned long npages);
++void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
++                               unsigned long npages);
+ void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
+ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+-void __init __noreturn snp_abort(void);
++void __noreturn snp_abort(void);
+ void snp_dmi_setup(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -89,7 +89,8 @@ static bool __init sev_es_check_cpu_feat
+       return true;
+ }
+-static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
++static void __head __noreturn
++sev_es_terminate(unsigned int set, unsigned int reason)
+ {
+       u64 val = GHCB_MSR_TERM_REQ;
+@@ -326,13 +327,7 @@ static int sev_cpuid_hv(struct ghcb *ghc
+  */
+ static const struct snp_cpuid_table *snp_cpuid_get_table(void)
+ {
+-      void *ptr;
+-
+-      asm ("lea cpuid_table_copy(%%rip), %0"
+-           : "=r" (ptr)
+-           : "p" (&cpuid_table_copy));
+-
+-      return ptr;
++      return &RIP_REL_REF(cpuid_table_copy);
+ }
+ /*
+@@ -391,7 +386,7 @@ static u32 snp_cpuid_calc_xsave_size(u64
+       return xsave_size;
+ }
+-static bool
++static bool __head
+ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
+ {
+       const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+@@ -528,7 +523,8 @@ static int snp_cpuid_postprocess(struct
+  * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+  * should be treated as fatal by caller.
+  */
+-static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++static int __head
++snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+       const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+@@ -570,7 +566,7 @@ static int snp_cpuid(struct ghcb *ghcb,
+  * page yet, so it only supports the MSR based communication with the
+  * hypervisor and only the CPUID exit-code.
+  */
+-void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
++void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ {
+       unsigned int subfn = lower_bits(regs->cx, 32);
+       unsigned int fn = lower_bits(regs->ax, 32);
+@@ -1016,7 +1012,8 @@ struct cc_setup_data {
+  * Search for a Confidential Computing blob passed in as a setup_data entry
+  * via the Linux Boot Protocol.
+  */
+-static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
++static __head
++struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+ {
+       struct cc_setup_data *sd = NULL;
+       struct setup_data *hdr;
+@@ -1043,7 +1040,7 @@ static struct cc_blob_sev_info *find_cc_
+  * mapping needs to be updated in sync with all the changes to virtual memory
+  * layout and related mapping facilities throughout the boot process.
+  */
+-static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
++static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+ {
+       const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
+       int i;
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -26,6 +26,7 @@
+ #include <linux/dmi.h>
+ #include <uapi/linux/sev-guest.h>
++#include <asm/init.h>
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+ #include <asm/sev.h>
+@@ -683,8 +684,9 @@ static u64 __init get_jump_table_addr(vo
+       return ret;
+ }
+-static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+-                                unsigned long npages, enum psc_op op)
++static void __head
++early_set_pages_state(unsigned long vaddr, unsigned long paddr,
++                    unsigned long npages, enum psc_op op)
+ {
+       unsigned long paddr_end;
+       u64 val;
+@@ -740,7 +742,7 @@ e_term:
+       sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ }
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+                                        unsigned long npages)
+ {
+       /*
+@@ -2045,7 +2047,7 @@ fail:
+  *
+  * Scan for the blob in that order.
+  */
+-static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
++static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+ {
+       struct cc_blob_sev_info *cc_info;
+@@ -2071,7 +2073,7 @@ found_cc_info:
+       return cc_info;
+ }
+-bool __init snp_init(struct boot_params *bp)
++bool __head snp_init(struct boot_params *bp)
+ {
+       struct cc_blob_sev_info *cc_info;
+@@ -2093,7 +2095,7 @@ bool __init snp_init(struct boot_params
+       return true;
+ }
+-void __init __noreturn snp_abort(void)
++void __head __noreturn snp_abort(void)
+ {
+       sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+ }
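
A note on the __head marker that the hunks above apply throughout the SEV init
paths: it is simply a section attribute, which as of this series lives in
arch/x86/include/asm/init.h (hence the new #include <asm/init.h> in sev.c).
Modulo surrounding ifdefs, it amounts to:

	/*
	 * Place the function in .head.text, the code that runs from the
	 * early 1:1 mapping before the kernel virtual mapping is live.
	 */
	#define __head	__section(".head.text")

The #undef/#define pair added to arch/x86/boot/compressed/sev.c at the top of
the patch makes __head a no-op in the decompressor, which includes
sev-shared.c directly and has no .head.text section of its own; it is the same
trick the file already plays with __init a few lines earlier.
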
diff --git a/queue-6.8/x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch b/queue-6.8/x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch
new file mode 100644
index 0000000..2686c34
--- /dev/null
+++ b/queue-6.8/x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch
@@ -0,0 +1,213 @@
+From 48204aba801f1b512b3abed10b8e1a63e03f3dd1 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Tue, 27 Feb 2024 16:19:15 +0100
+Subject: x86/sme: Move early SME kernel encryption handling into .head.text
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 48204aba801f1b512b3abed10b8e1a63e03f3dd1 upstream.
+
+The .head.text section is the initial primary entrypoint of the core
+kernel, and is entered with the CPU executing from a 1:1 mapping of
+memory. Such code must never access global variables using absolute
+references, as these are based on the kernel virtual mapping which is
+not active yet at this point.
+
+Given that the SME startup code is also called from this early execution
+context, move it into .head.text as well. This will allow more thorough
+build time checks in the future to ensure that early startup code only
+uses RIP-relative references to global variables.
+
+Also replace some occurrences of __pa_symbol() [which relies on the
+compiler generating an absolute reference, which is not guaranteed] and
+an open coded RIP-relative access with RIP_REL_REF().
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lore.kernel.org/r/20240227151907.387873-18-ardb+git@google.com
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/mem_encrypt.h |    8 +++----
+ arch/x86/mm/mem_encrypt_identity.c |   42 ++++++++++++++-----------------------
+ 2 files changed, 21 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -47,8 +47,8 @@ void __init sme_unmap_bootdata(char *rea
+ void __init sme_early_init(void);
+-void __init sme_encrypt_kernel(struct boot_params *bp);
+-void __init sme_enable(struct boot_params *bp);
++void sme_encrypt_kernel(struct boot_params *bp);
++void sme_enable(struct boot_params *bp);
+ int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+@@ -81,8 +81,8 @@ static inline void __init sme_unmap_boot
+ static inline void __init sme_early_init(void) { }
+-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+-static inline void __init sme_enable(struct boot_params *bp) { }
++static inline void sme_encrypt_kernel(struct boot_params *bp) { }
++static inline void sme_enable(struct boot_params *bp) { }
+ static inline void sev_es_init_vc_handling(void) { }
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -41,6 +41,7 @@
+ #include <linux/mem_encrypt.h>
+ #include <linux/cc_platform.h>
++#include <asm/init.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
+ #include <asm/coco.h>
+@@ -94,7 +95,7 @@ struct sme_populate_pgd_data {
+  */
+ static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
+-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+       unsigned long pgd_start, pgd_end, pgd_size;
+       pgd_t *pgd_p;
+@@ -109,7 +110,7 @@ static void __init sme_clear_pgd(struct
+       memset(pgd_p, 0, pgd_size);
+ }
+-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
++static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ {
+       pgd_t *pgd;
+       p4d_t *p4d;
+@@ -146,7 +147,7 @@ static pud_t __init *sme_prepare_pgd(str
+       return pud;
+ }
+-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+ {
+       pud_t *pud;
+       pmd_t *pmd;
+@@ -162,7 +163,7 @@ static void __init sme_populate_pgd_larg
+       set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
+ }
+-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+ {
+       pud_t *pud;
+       pmd_t *pmd;
+@@ -188,7 +189,7 @@ static void __init sme_populate_pgd(stru
+               set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
+ }
+-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+ {
+       while (ppd->vaddr < ppd->vaddr_end) {
+               sme_populate_pgd_large(ppd);
+@@ -198,7 +199,7 @@ static void __init __sme_map_range_pmd(s
+       }
+ }
+-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+ {
+       while (ppd->vaddr < ppd->vaddr_end) {
+               sme_populate_pgd(ppd);
+@@ -208,7 +209,7 @@ static void __init __sme_map_range_pte(s
+       }
+ }
+-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
++static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
+                                  pmdval_t pmd_flags, pteval_t pte_flags)
+ {
+       unsigned long vaddr_end;
+@@ -232,22 +233,22 @@ static void __init __sme_map_range(struc
+       __sme_map_range_pte(ppd);
+ }
+-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+ {
+       __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+ }
+-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+ {
+       __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+ }
+-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+ {
+       __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
+ }
+-static unsigned long __init sme_pgtable_calc(unsigned long len)
++static unsigned long __head sme_pgtable_calc(unsigned long len)
+ {
+       unsigned long entries = 0, tables = 0;
+@@ -284,7 +285,7 @@ static unsigned long __init sme_pgtable_
+       return entries + tables;
+ }
+-void __init sme_encrypt_kernel(struct boot_params *bp)
++void __head sme_encrypt_kernel(struct boot_params *bp)
+ {
+       unsigned long workarea_start, workarea_end, workarea_len;
+       unsigned long execute_start, execute_end, execute_len;
+@@ -319,9 +320,8 @@ void __init sme_encrypt_kernel(struct bo
+        *     memory from being cached.
+        */
+-      /* Physical addresses gives us the identity mapped virtual addresses */
+-      kernel_start = __pa_symbol(_text);
+-      kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
++      kernel_start = (unsigned long)RIP_REL_REF(_text);
++      kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
+       kernel_len = kernel_end - kernel_start;
+       initrd_start = 0;
+@@ -339,14 +339,6 @@ void __init sme_encrypt_kernel(struct bo
+ #endif
+       /*
+-       * We're running identity mapped, so we must obtain the address to the
+-       * SME encryption workarea using rip-relative addressing.
+-       */
+-      asm ("lea sme_workarea(%%rip), %0"
+-           : "=r" (workarea_start)
+-           : "p" (sme_workarea));
+-
+-      /*
+        * Calculate required number of workarea bytes needed:
+        *   executable encryption area size:
+        *     stack page (PAGE_SIZE)
+@@ -355,7 +347,7 @@ void __init sme_encrypt_kernel(struct bo
+        *   pagetable structures for the encryption of the kernel
+        *   pagetable structures for workarea (in case not currently mapped)
+        */
+-      execute_start = workarea_start;
++      execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
+       execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
+       execute_len = execute_end - execute_start;
+@@ -498,7 +490,7 @@ void __init sme_encrypt_kernel(struct bo
+       native_write_cr3(__native_read_cr3());
+ }
+-void __init sme_enable(struct boot_params *bp)
++void __head sme_enable(struct boot_params *bp)
+ {
+       unsigned int eax, ebx, ecx, edx;
+       unsigned long feature_mask;
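
Both this patch and the SEV one lean on RIP_REL_REF() to replace __pa_symbol()
and the open-coded "lea ...(%rip)" asm. For reference, the accessor is defined
in arch/x86/include/asm/asm.h at the time of this series roughly as follows
(paraphrased from upstream, non-PIC case shown):

	static __always_inline __pure void *rip_rel_ptr(void *p)
	{
		/* Force a RIP-relative computation of the symbol address. */
		asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));

		return p;
	}

	/* Access a global via an explicitly RIP-relative reference. */
	#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

Unlike __pa_symbol(), which depends on the compiler emitting an absolute
reference, this construct guarantees the address is computed relative to the
current instruction pointer. While executing from the early 1:1 mapping, that
address is simultaneously the virtual and the physical one, which is exactly
what sme_encrypt_kernel() and sme_enable() need.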