git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/boot: Drop RIP_REL_REF() uses from early mapping code
author: Ard Biesheuvel <ardb@kernel.org>
Thu, 10 Apr 2025 13:41:22 +0000 (15:41 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Sat, 12 Apr 2025 09:13:05 +0000 (11:13 +0200)
Now that __startup_64() is built using -fPIC, RIP_REL_REF() has become a
NOP and can be removed. Only some occurrences of rip_rel_ptr() will
remain, to explicitly take the address of certain global structures in
the 1:1 mapping of memory.

While at it, update the code comment to describe why this is needed.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kevin Loughlin <kevinloughlin@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: linux-efi@vger.kernel.org
Link: https://lore.kernel.org/r/20250410134117.3713574-17-ardb+git@google.com
arch/x86/boot/startup/map_kernel.c

index 5f1b7e0ba26eaeef20b2dcd6894328e0385b66e0..0eac3f17dbd3f93c724277c487b0d57e5b482b31 100644 (file)
@@ -26,12 +26,12 @@ static inline bool check_la57_support(void)
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;
 
-       RIP_REL_REF(__pgtable_l5_enabled)       = 1;
-       RIP_REL_REF(pgdir_shift)                = 48;
-       RIP_REL_REF(ptrs_per_p4d)               = 512;
-       RIP_REL_REF(page_offset_base)           = __PAGE_OFFSET_BASE_L5;
-       RIP_REL_REF(vmalloc_base)               = __VMALLOC_BASE_L5;
-       RIP_REL_REF(vmemmap_base)               = __VMEMMAP_BASE_L5;
+       __pgtable_l5_enabled    = 1;
+       pgdir_shift             = 48;
+       ptrs_per_p4d            = 512;
+       page_offset_base        = __PAGE_OFFSET_BASE_L5;
+       vmalloc_base            = __VMALLOC_BASE_L5;
+       vmemmap_base            = __VMEMMAP_BASE_L5;
 
        return true;
 }
@@ -81,12 +81,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
        return sme_get_me_mask();
 }
 
-/* Code in __startup_64() can be relocated during execution, but the compiler
- * doesn't have to generate PC-relative relocations when accessing globals from
- * that function. Clang actually does not generate them, which leads to
- * boot-time crashes. To work around this problem, every global pointer must
- * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined
- * by subtracting p2v_offset from the RIP-relative address.
+/*
+ * This code is compiled using PIC codegen because it will execute from the
+ * early 1:1 mapping of memory, which deviates from the mapping expected by the
+ * linker. Due to this deviation, taking the address of a global variable will
+ * produce an ambiguous result when using the plain & operator.  Instead,
+ * rip_rel_ptr() must be used, which will return the RIP-relative address in
+ * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
+ * subtracting p2v_offset from the RIP-relative address.
  */
 unsigned long __head __startup_64(unsigned long p2v_offset,
                                  struct boot_params *bp)
@@ -113,8 +115,7 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
-       load_delta = __START_KERNEL_map + p2v_offset;
-       RIP_REL_REF(phys_base) = load_delta;
+       phys_base = load_delta = __START_KERNEL_map + p2v_offset;
 
        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_MASK)
@@ -138,11 +139,11 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
                pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
        }
 
-       RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
-       RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;
+       level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
+       level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
 
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
-               RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;
+               level2_fixmap_pgt[i].pmd += load_delta;
 
        /*
         * Set up the identity mapping for the switchover.  These
@@ -153,12 +154,12 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 
        pud = &early_pgts[0]->pmd;
        pmd = &early_pgts[1]->pmd;
-       RIP_REL_REF(next_early_pgt) = 2;
+       next_early_pgt = 2;
 
        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
        if (la57) {
-               p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;
+               p4d = &early_pgts[next_early_pgt++]->pmd;
 
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
@@ -179,7 +180,7 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 
        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
-       pmd_entry &= RIP_REL_REF(__supported_pte_mask);
+       pmd_entry &= __supported_pte_mask;
        pmd_entry += sme_get_me_mask();
        pmd_entry +=  physaddr;