git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/boot: Create a confined code area for startup code
author: Ard Biesheuvel <ardb@kernel.org>
Thu, 28 Aug 2025 10:22:22 +0000 (12:22 +0200)
committer: Borislav Petkov (AMD) <bp@alien8.de>
Wed, 3 Sep 2025 16:00:01 +0000 (18:00 +0200)
In order to be able to have tight control over which code may execute
from the early 1:1 mapping of memory, but still link vmlinux as a single
executable, prefix all symbol references in startup code with __pi_, and
invoke it from outside using the __pi_ prefix.

Use objtool to check that no absolute symbol references are present in
the startup code, as these cannot be used from code running from the 1:1
mapping.

Note that this also requires disabling the latent-entropy GCC plugin, as
the global symbol references that it injects would require explicit
exports, and given that the startup code rarely executes more than once,
it is not a useful source of entropy anyway.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/20250828102202.1849035-43-ardb+git@google.com
arch/x86/boot/startup/Makefile
arch/x86/boot/startup/sev-shared.c
arch/x86/boot/startup/sme.c
arch/x86/coco/sev/core.c
arch/x86/include/asm/setup.h
arch/x86/include/asm/sev.h
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/mm/mem_encrypt_boot.S
tools/objtool/check.c

index 32737f4ab5a8075f89f5e4f976092733f79c129c..e8fdf020b4223eb1a02f753aabd4a0c225bab65c 100644 (file)
@@ -4,6 +4,7 @@ KBUILD_AFLAGS           += -D__DISABLE_EXPORTS
 KBUILD_CFLAGS          += -D__DISABLE_EXPORTS -mcmodel=small -fPIC \
                           -Os -DDISABLE_BRANCH_PROFILING \
                           $(DISABLE_STACKLEAK_PLUGIN) \
+                          $(DISABLE_LATENT_ENTROPY_PLUGIN) \
                           -fno-stack-protector -D__NO_FORTIFY \
                           -fno-jump-tables \
                           -include $(srctree)/include/linux/hidden.h
@@ -36,3 +37,16 @@ $(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y
 #
 $(pi-objs): objtool-enabled    = 1
 $(pi-objs): objtool-args       = $(if $(delay-objtool),,$(objtool-args-y)) --noabs
+
+#
+# Confine the startup code by prefixing all symbols with __pi_ (for position
+# independent). This ensures that startup code can only call other startup
+# code, or code that has explicitly been made accessible to it via a symbol
+# alias.
+#
+$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_
+$(obj)/%.pi.o: $(obj)/%.o FORCE
+       $(call if_changed,objcopy)
+
+targets        += $(obj-y)
+obj-y  := $(patsubst %.o,%.pi.o,$(obj-y))
index 2a28463edd9960f5c1262f8bca5551c1fc9cfa06..e09c66845e43eae524ee4d6c1c861bfafede2358 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/setup_data.h>
 
 #ifndef __BOOT_COMPRESSED
-#define error(v)                       pr_err(v)
 #define has_cpuflag(f)                 boot_cpu_has(f)
 #else
 #undef WARN
index bf9153b9a3d9f233558b661504b7a3f4401747cf..52b98e7624fe81dea429dc9af5d76fd54c2f3a2c 100644 (file)
@@ -568,7 +568,6 @@ void __head sme_enable(struct boot_params *bp)
 
 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
 /* Local version for startup code, which never operates on user page tables */
-__weak
 pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 {
        return pgd;
index b9133c825f90c955bcbf406f75ffa266bf826845..cf9a511b47e09449caad80fb1e9b049d87300895 100644 (file)
@@ -272,7 +272,7 @@ static int svsm_perform_call_protocol(struct svsm_call *call)
 
        do {
                ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
-                          : svsm_perform_msr_protocol(call);
+                          : __pi_svsm_perform_msr_protocol(call);
        } while (ret == -EAGAIN);
 
        if (sev_cfg.ghcbs_initialized)
index 692af46603a175f8086d33f31fb78582cacc7228..914eb32581c73d181c809d8c25580e7d531d39a6 100644 (file)
@@ -53,6 +53,7 @@ extern void i386_reserve_resources(void);
 extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
 extern void startup_64_setup_gdt_idt(void);
 extern void startup_64_load_idt(void *vc_handler);
+extern void __pi_startup_64_load_idt(void *vc_handler);
 extern void early_setup_idt(void);
 extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
 
index 0030c7125b291c4d2fc9e317576c41115076ba89..f222bef9dca88c518f85dfef2531d6e4f6a57b51 100644 (file)
@@ -551,6 +551,7 @@ struct cpuid_leaf {
 };
 
 int svsm_perform_msr_protocol(struct svsm_call *call);
+int __pi_svsm_perform_msr_protocol(struct svsm_call *call);
 int snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
              void *ctx, struct cpuid_leaf *leaf);
 
index 1bc40d0785ee3e035409233de06a727dd6d44920..fd28b53dbac51fe6283fb79975aa4e6acbe220ce 100644 (file)
@@ -319,5 +319,5 @@ void early_setup_idt(void)
                handler = vc_boot_ghcb;
        }
 
-       startup_64_load_idt(handler);
+       __pi_startup_64_load_idt(handler);
 }
index 3e9b3a3bd039610d3759b629bb921ca041bcaa59..d219963ecb605cdb918075ec3e5c425434fa25a6 100644 (file)
@@ -71,7 +71,7 @@ SYM_CODE_START_NOALIGN(startup_64)
        xorl    %edx, %edx
        wrmsr
 
-       call    startup_64_setup_gdt_idt
+       call    __pi_startup_64_setup_gdt_idt
 
        /* Now switch to __KERNEL_CS so IRET works reliably */
        pushq   $__KERNEL_CS
@@ -91,7 +91,7 @@ SYM_CODE_START_NOALIGN(startup_64)
         * subsequent code. Pass the boot_params pointer as the first argument.
         */
        movq    %r15, %rdi
-       call    sme_enable
+       call    __pi_sme_enable
 #endif
 
        /* Sanitize CPU configuration */
@@ -111,7 +111,7 @@ SYM_CODE_START_NOALIGN(startup_64)
         * programmed into CR3.
         */
        movq    %r15, %rsi
-       call    __startup_64
+       call    __pi___startup_64
 
        /* Form the CR3 value being sure to include the CR3 modifier */
        leaq    early_top_pgt(%rip), %rcx
@@ -562,7 +562,7 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
        /* Call C handler */
        movq    %rsp, %rdi
        movq    ORIG_RAX(%rsp), %rsi
-       call    do_vc_no_ghcb
+       call    __pi_do_vc_no_ghcb
 
        /* Unwind pt_regs */
        POP_REGS
index f8a33b25ae869e3be9f841e33ab0e289213033e5..edbf9c99884846f4ef9206385ec78632cb3d0759 100644 (file)
@@ -16,7 +16,7 @@
 
        .text
        .code64
-SYM_FUNC_START(sme_encrypt_execute)
+SYM_FUNC_START(__pi_sme_encrypt_execute)
 
        /*
         * Entry parameters:
@@ -69,9 +69,9 @@ SYM_FUNC_START(sme_encrypt_execute)
        ANNOTATE_UNRET_SAFE
        ret
        int3
-SYM_FUNC_END(sme_encrypt_execute)
+SYM_FUNC_END(__pi_sme_encrypt_execute)
 
-SYM_FUNC_START(__enc_copy)
+SYM_FUNC_START_LOCAL(__enc_copy)
        ANNOTATE_NOENDBR
 /*
  * Routine used to encrypt memory in place.
index fb47327075fbaff8fd94feecf472b9278fddc0c4..d0d20666e872752760fc0b764ffdf58b5339b882 100644 (file)
@@ -3564,7 +3564,8 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
                if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
                        /* Ignore KCFI type preambles, which always fall through */
                        if (!strncmp(func->name, "__cfi_", 6) ||
-                           !strncmp(func->name, "__pfx_", 6))
+                           !strncmp(func->name, "__pfx_", 6) ||
+                           !strncmp(func->name, "__pi___pfx_", 11))
                                return 0;
 
                        if (file->ignore_unreachables)