From: Greg Kroah-Hartman
Date: Sun, 9 Mar 2025 08:42:25 +0000 (+0100)
Subject: 6.6-stable patches
X-Git-Tag: v6.6.82~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b1324c17f83f2760db1d041a85dabf9f88eb7600;p=thirdparty%2Fkernel%2Fstable-queue.git

6.6-stable patches

added patches:
	series
	x86-boot-32-de-uglify-the-2-3-level-paging-difference-in-mk_early_pgtbl_32.patch
	x86-boot-32-disable-stackprotector-and-tracing-for-mk_early_pgtbl_32.patch
	x86-boot-32-restructure-mk_early_pgtbl_32.patch
	x86-boot-32-temporarily-map-initrd-for-microcode-loading.patch
	x86-boot-use-__pa_nodebug-in-mk_early_pgtbl_32.patch
	x86-microcode-provide-config_microcode_initrd32.patch
---

diff --git a/queue-6.6/series b/queue-6.6/series
new file mode 100644
index 0000000000..e619fa197e
--- /dev/null
+++ b/queue-6.6/series
@@ -0,0 +1,6 @@
+x86-boot-32-disable-stackprotector-and-tracing-for-mk_early_pgtbl_32.patch
+x86-boot-use-__pa_nodebug-in-mk_early_pgtbl_32.patch
+x86-boot-32-de-uglify-the-2-3-level-paging-difference-in-mk_early_pgtbl_32.patch
+x86-boot-32-restructure-mk_early_pgtbl_32.patch
+x86-microcode-provide-config_microcode_initrd32.patch
+x86-boot-32-temporarily-map-initrd-for-microcode-loading.patch
diff --git a/queue-6.6/x86-boot-32-de-uglify-the-2-3-level-paging-difference-in-mk_early_pgtbl_32.patch b/queue-6.6/x86-boot-32-de-uglify-the-2-3-level-paging-difference-in-mk_early_pgtbl_32.patch
new file mode 100644
index 0000000000..b581a69473
--- /dev/null
+++ b/queue-6.6/x86-boot-32-de-uglify-the-2-3-level-paging-difference-in-mk_early_pgtbl_32.patch
@@ -0,0 +1,99 @@
+From 03154ffcc109c8ee2f9a6b39ce26e0dbc9b6ff5c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 17 Oct 2023 23:23:26 +0200
+Subject: x86/boot/32: De-uglify the 2/3 level paging difference in mk_early_pgtbl_32()
+
+From: Thomas Gleixner
+
+commit a62f4ca106fd250e9247decd100f3905131fc1fe upstream
+
+Move the ifdeffery out of the function and use proper typedefs to make it
+work for both 2 and 3 level paging.
+
+No functional change.
+
+  [ bp: Move mk_early_pgtbl_32() declaration into a header. ]
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231017211722.111059491@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/setup.h |    1 +
+ arch/x86/kernel/head32.c     |   38 ++++++++++++++++++++------------------
+ 2 files changed, 21 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -126,6 +126,7 @@ void clear_bss(void);
+ #ifdef __i386__
+ 
+ asmlinkage void __init __noreturn i386_start_kernel(void);
++void __init mk_early_pgtbl_32(void);
+ 
+ #else
+ asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode);
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -72,41 +72,43 @@ asmlinkage __visible void __init __noret
+  * to the first kernel PMD. Note the upper half of each PMD or PTE are
+  * always zero at this stage.
+  */
+-void __init mk_early_pgtbl_32(void);
++#ifdef CONFIG_X86_PAE
++typedef pmd_t pl2_t;
++#define pl2_base initial_pg_pmd
++#define SET_PL2(val) { .pmd = (val), }
++#else
++typedef pgd_t pl2_t;
++#define pl2_base initial_page_table
++#define SET_PL2(val) { .pgd = (val), }
++#endif
+ 
+ void __init __no_stack_protector mk_early_pgtbl_32(void)
+ {
+-	pte_t pte, *ptep;
+-	int i;
+-	unsigned long *ptr;
+ 	/* Enough space to fit pagetables for the low memory linear map */
+ 	const unsigned long limit = __pa_nodebug(_end) +
+ 		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+-#ifdef CONFIG_X86_PAE
+-	pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd);
+-#define SET_PL2(pl2, val)	{ (pl2).pmd = (val); }
+-#else
+-	pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table);
+-#define SET_PL2(pl2, val)	{ (pl2).pgd = (val); }
+-#endif
++	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
++	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
++	unsigned long *ptr;
++	int i;
+ 
+-	ptep = (pte_t *)__pa_nodebug(__brk_base);
+ 	pte.pte = PTE_IDENT_ATTR;
+ 
+ 	while ((pte.pte & PTE_PFN_MASK) < limit) {
++		pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR);
+ 
+-		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
+ 		*pl2p = pl2;
+-#ifndef CONFIG_X86_PAE
+-		/* Kernel PDE entry */
+-		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+-#endif
++
++		if (!IS_ENABLED(CONFIG_X86_PAE)) {
++			/* Kernel PDE entry */
++			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
++		}
++
+ 		for (i = 0; i < PTRS_PER_PTE; i++) {
+ 			*ptep = pte;
+ 			pte.pte += PAGE_SIZE;
+ 			ptep++;
+ 		}
+-
+ 		pl2p++;
+ 	}
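The pl2_t/SET_PL2() trick above can be exercised outside the kernel. The following is a minimal user-space sketch, with the kernel's pgd_t/pmd_t stubbed as plain structs and invented entry values; it illustrates the pattern only, it is not the kernel code. One typedef plus one designated-initializer macro absorb the whole 2-level vs. 3-level difference, so the function body stays ifdef-free:

#include <stdio.h>

/* Stand-ins for the kernel's page table entry types. */
typedef struct { unsigned long pgd; } pgd_t;	/* 2 levels: entry lives in the PGD */
typedef struct { unsigned long pmd; } pmd_t;	/* 3 levels (PAE): entry lives in a PMD */

#define CONFIG_X86_PAE 1	/* set to 0 to exercise the 2-level variant */

#if CONFIG_X86_PAE
typedef pmd_t pl2_t;
#define SET_PL2(val)	{ .pmd = (val), }
#else
typedef pgd_t pl2_t;
#define SET_PL2(val)	{ .pgd = (val), }
#endif

int main(void)
{
	/* One initializer expression serves both configurations. */
	pl2_t pl2 = SET_PL2(0x1000 | 0x63);	/* invented frame and attribute bits */

#if CONFIG_X86_PAE
	printf("pmd entry: %#lx\n", pl2.pmd);
#else
	printf("pgd entry: %#lx\n", pl2.pgd);
#endif
	return 0;
}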
diff --git a/queue-6.6/x86-boot-32-disable-stackprotector-and-tracing-for-mk_early_pgtbl_32.patch b/queue-6.6/x86-boot-32-disable-stackprotector-and-tracing-for-mk_early_pgtbl_32.patch
new file mode 100644
index 0000000000..5dba6dff73
--- /dev/null
+++ b/queue-6.6/x86-boot-32-disable-stackprotector-and-tracing-for-mk_early_pgtbl_32.patch
@@ -0,0 +1,58 @@
+From 4f2f269722e191887363b027ca8402bd53b51973 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Mon, 2 Oct 2023 13:59:36 +0200
+Subject: x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32()
+
+From: Thomas Gleixner
+
+commit 242db7589460ca94e28c51ffbddd621756f97e11 upstream
+
+Stackprotector cannot work before paging is enabled. The read from the per
+CPU variable __stack_chk_guard is always accessing the virtual address
+either directly on UP or via FS on SMP. In physical address mode this
+results in an access to memory above 3GB.
+
+So this works by chance as the hardware returns the same value when there
+is no RAM at this physical address. When there is RAM populated above 3G
+then the read is by chance the same as nothing changes that memory during
+the very early boot stage.
+
+Stop relying on pure luck and disable the stack protector for the only C
+function which is called during early boot before paging is enabled.
+
+Remove function tracing from the whole source file as there is no way to
+trace this at all, but in case of CONFIG_DYNAMIC_FTRACE=n
+mk_early_pgtbl_32() would access global function tracer variables in
+physical address mode which again might work by chance.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231002115902.156063939@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/Makefile |    1 +
+ arch/x86/kernel/head32.c |    3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
+ CFLAGS_REMOVE_ftrace.o = -pg
+ CFLAGS_REMOVE_early_printk.o = -pg
+ CFLAGS_REMOVE_head64.o = -pg
++CFLAGS_REMOVE_head32.o = -pg
+ CFLAGS_REMOVE_sev.o = -pg
+ CFLAGS_REMOVE_rethook.o = -pg
+ endif
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -73,7 +73,8 @@ asmlinkage __visible void __init __noret
+  * always zero at this stage.
+  */
+ void __init mk_early_pgtbl_32(void);
+-void __init mk_early_pgtbl_32(void)
++
++void __init __no_stack_protector mk_early_pgtbl_32(void)
+ {
+ #ifdef __pa
+ #undef __pa
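The __no_stack_protector annotation applied here is a per-function compiler attribute, which recent GCC (11+) and Clang also accept in user space. A small sketch with invented function names, assuming a new-enough compiler: building with gcc -S -O2 -fstack-protector-all shows the __stack_chk_guard load and check in the first function and no canary code in the second, which is exactly what a function running before paging needs.

#include <string.h>

/* The kernel spells this __no_stack_protector; same mechanism. */
#define no_canary __attribute__((no_stack_protector))

char out[64];

/* Gets a canary: prologue reads __stack_chk_guard, epilogue verifies it. */
void with_canary(const char *s)
{
	char tmp[32];

	strncpy(tmp, s, sizeof(tmp) - 1);
	tmp[sizeof(tmp) - 1] = '\0';
	memcpy(out, tmp, sizeof(tmp));
}

/* No canary emitted: safe to run before the guard variable is reachable. */
void no_canary without_canary(const char *s)
{
	char tmp[32];

	strncpy(tmp, s, sizeof(tmp) - 1);
	tmp[sizeof(tmp) - 1] = '\0';
	memcpy(out, tmp, sizeof(tmp));
}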
diff --git a/queue-6.6/x86-boot-32-restructure-mk_early_pgtbl_32.patch b/queue-6.6/x86-boot-32-restructure-mk_early_pgtbl_32.patch
new file mode 100644
index 0000000000..603bd7cb85
--- /dev/null
+++ b/queue-6.6/x86-boot-32-restructure-mk_early_pgtbl_32.patch
@@ -0,0 +1,88 @@
+From 5535426ec91e8294cc70bfa5d6fcaa16e240473e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 17 Oct 2023 23:23:28 +0200
+Subject: x86/boot/32: Restructure mk_early_pgtbl_32()
+
+From: Thomas Gleixner
+
+commit 69ba866db281c768d5ecca909361ea4c4e71d57e upstream
+
+Prepare it for adding a temporary initrd mapping by splitting out the
+actual map loop.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231017211722.175910753@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/head32.c |   42 +++++++++++++++++++++++-------------------
+ 1 file changed, 23 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -82,35 +82,40 @@ typedef pgd_t pl2_t;
+ #define SET_PL2(val) { .pgd = (val), }
+ #endif
+ 
+-void __init __no_stack_protector mk_early_pgtbl_32(void)
++static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p,
++						  const unsigned long limit)
+ {
+-	/* Enough space to fit pagetables for the low memory linear map */
+-	const unsigned long limit = __pa_nodebug(_end) +
+-		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+-	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
+-	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
+-	unsigned long *ptr;
+-	int i;
+-
+-	pte.pte = PTE_IDENT_ATTR;
+-
+ 	while ((pte.pte & PTE_PFN_MASK) < limit) {
+-		pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR);
+-
+-		*pl2p = pl2;
++		pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR);
++		int i;
+ 
++		**pl2p = pl2;
+ 		if (!IS_ENABLED(CONFIG_X86_PAE)) {
+ 			/* Kernel PDE entry */
+-			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
++			*(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+ 		}
+ 
+ 		for (i = 0; i < PTRS_PER_PTE; i++) {
+-			*ptep = pte;
++			**ptep = pte;
+ 			pte.pte += PAGE_SIZE;
+-			ptep++;
++			(*ptep)++;
+ 		}
+-		pl2p++;
++		(*pl2p)++;
+ 	}
++	return pte;
++}
++
++void __init __no_stack_protector mk_early_pgtbl_32(void)
++{
++	/* Enough space to fit pagetables for the low memory linear map */
++	const unsigned long limit = __pa_nodebug(_end) +
++		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
++	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
++	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
++	unsigned long *ptr;
++
++	pte.pte = PTE_IDENT_ATTR;
++	pte = init_map(pte, &ptep, &pl2p, limit);
+ 
+ 	ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
+ 	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
+@@ -119,4 +124,3 @@ void __init __no_stack_protector mk_earl
+ 	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
+ 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
+ }
+-
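The calling convention that init_map() adopts above is a resumable cursor: the pte value is returned so a later caller can continue mapping where the previous call stopped, and the pte/pl2 cursors are passed as pointer-to-pointer so the helper advances the caller's copies in place. A toy sketch of the same pattern, with invented names and values rather than the kernel types:

#include <stdio.h>

/* Toy stand-in for init_map(): writes val..limit-1 through the caller's
 * cursor, advancing both the cursor and the value in place. */
static unsigned long fill(unsigned long val, unsigned long **slotp,
			  unsigned long limit)
{
	while (val < limit) {
		**slotp = val;	/* write through the caller's cursor */
		(*slotp)++;	/* and advance it in place */
		val++;
	}
	return val;		/* the caller can resume from here */
}

int main(void)
{
	unsigned long table[16] = { 0 };
	unsigned long *slot = table;
	unsigned long val = 0;

	val = fill(val, &slot, 4);	/* first range: values 0..3 */
	val = fill(val + 4, &slot, 12);	/* disjoint second range: 8..11 */

	printf("wrote %td entries, next value %lu\n", slot - table, val);
	return 0;
}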
diff --git a/queue-6.6/x86-boot-32-temporarily-map-initrd-for-microcode-loading.patch b/queue-6.6/x86-boot-32-temporarily-map-initrd-for-microcode-loading.patch
new file mode 100644
index 0000000000..969bff8b23
--- /dev/null
+++ b/queue-6.6/x86-boot-32-temporarily-map-initrd-for-microcode-loading.patch
@@ -0,0 +1,127 @@
+From 9d1151cf6bfe9184951bd9b8da5002769a49b55f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 17 Oct 2023 23:23:31 +0200
+Subject: x86/boot/32: Temporarily map initrd for microcode loading
+
+From: Thomas Gleixner
+
+commit 4c585af7180c147062c636a927a2fc2b6a7072f5 upstream
+
+Early microcode loading on 32-bit runs in physical address mode because
+the initrd is not covered by the initial page tables. That results in
+a horrible mess all over the microcode loader code.
+
+Provide a temporary mapping for the initrd in the initial page tables by
+appending it to the actual initial mapping starting with a new PGD or
+PMD depending on the configured page table levels ([non-]PAE).
+
+The page table entries are located after _brk_end so they are not
+permanently using memory space. The mapping is invalidated right away in
+i386_start_kernel() after the early microcode loader has run.
+
+This prepares for removing the physical address mode oddities from all
+over the microcode loader code, which in turn allows further cleanups.
+
+Provide the map and unmap code and document the place where the
+microcode loader needs to be invoked with a comment.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231017211722.292291436@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/microcode.h |    2 +
+ arch/x86/kernel/head32.c         |   52 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 52 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) {
+ static inline void microcode_bsp_resume(void) { }
+ #endif
+ 
++extern unsigned long initrd_start_early;
++
+ #ifdef CONFIG_CPU_SUP_INTEL
+ /* Intel specific microcode defines. Public for IFS */
+ struct microcode_header_intel {
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -30,12 +30,32 @@ static void __init i386_default_early_se
+ 	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
+ }
+ 
++#ifdef CONFIG_MICROCODE_INITRD32
++unsigned long __initdata initrd_start_early;
++static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;
++
++static void zap_early_initrd_mapping(void)
++{
++	pte_t *pl2p = initrd_pl2p_start;
++
++	for (; pl2p < initrd_pl2p_end; pl2p++) {
++		*pl2p = (pte_t){ .pte = 0 };
++
++		if (!IS_ENABLED(CONFIG_X86_PAE))
++			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
++	}
++}
++#else
++static inline void zap_early_initrd_mapping(void) { }
++#endif
++
+ asmlinkage __visible void __init __noreturn i386_start_kernel(void)
+ {
+ 	/* Make sure IDT is set up before any exception happens */
+ 	idt_setup_early_handler();
+ 
+ 	load_ucode_bsp();
++	zap_early_initrd_mapping();
+ 
+ 	cr4_init_shadow();
+ 
+@@ -108,9 +128,9 @@ static __init __no_stack_protector pte_t
+ void __init __no_stack_protector mk_early_pgtbl_32(void)
+ {
+ 	/* Enough space to fit pagetables for the low memory linear map */
+-	const unsigned long limit = __pa_nodebug(_end) +
+-		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
++	unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+ 	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
++	struct boot_params __maybe_unused *params;
+ 	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
+ 	unsigned long *ptr;
+ 
+@@ -123,4 +143,32 @@ void __init __no_stack_protector mk_earl
+ 
+ 	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
+ 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
++
++#ifdef CONFIG_MICROCODE_INITRD32
++	/* Running on a hypervisor? */
++	if (native_cpuid_ecx(1) & BIT(31))
++		return;
++
++	params = (struct boot_params *)__pa_nodebug(&boot_params);
++	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
++		return;
++
++	/* Save the virtual start address */
++	ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
++	*ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
++	*ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;
++
++	/* Save PLP2 for cleanup */
++	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
++	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
++
++	limit = (unsigned long)params->hdr.ramdisk_image;
++	pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
++	limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;
++
++	init_map(pte, &ptep, &pl2p, limit);
++
++	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
++	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
++#endif
+ }
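The initrd_start_early arithmetic above is worth spelling out: the temporary mapping is appended at the first physical address past the linear map (pte.pte & PTE_PFN_MASK at that point), so its early virtual address is that address plus PAGE_OFFSET, plus the initrd's offset within its first page, since ramdisk_image need not be page aligned. A sketch with invented addresses; the constants only mirror the usual 32-bit lowmem layout, this is illustration rather than kernel code:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_OFFSET	0xc0000000UL	/* 32-bit lowmem base */

int main(void)
{
	/* Invented: physical end of the early linear map (page aligned). */
	unsigned long map_end_phys = 0x02000000UL;
	/* Invented: physical initrd start, deliberately not page aligned. */
	unsigned long ramdisk_image = 0x35fff800UL;

	/* Virtual address where the appended mapping will appear ... */
	unsigned long initrd_start_early = map_end_phys + PAGE_OFFSET;

	/* ... plus the initrd's offset within its first page. */
	initrd_start_early += ramdisk_image & ~PAGE_MASK;

	printf("early initrd VA: %#lx\n", initrd_start_early);
	return 0;
}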
diff --git a/queue-6.6/x86-boot-use-__pa_nodebug-in-mk_early_pgtbl_32.patch b/queue-6.6/x86-boot-use-__pa_nodebug-in-mk_early_pgtbl_32.patch
new file mode 100644
index 0000000000..2ad29a2f90
--- /dev/null
+++ b/queue-6.6/x86-boot-use-__pa_nodebug-in-mk_early_pgtbl_32.patch
@@ -0,0 +1,67 @@
+From 146338adb29e98f4c41d47025e38d5a84b600e62 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 17 Oct 2023 23:23:25 +0200
+Subject: x86/boot: Use __pa_nodebug() in mk_early_pgtbl_32()
+
+From: Thomas Gleixner
+
+commit 1e2dd572d2b773b5b8882aae66e5f0328d562aa9 upstream
+
+Use the existing macro instead of undefining and redefining __pa().
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231017211722.051625827@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/head32.c |   16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -76,25 +76,21 @@ void __init mk_early_pgtbl_32(void);
+ 
+ void __init __no_stack_protector mk_early_pgtbl_32(void)
+ {
+-#ifdef __pa
+-#undef __pa
+-#endif
+-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+ 	pte_t pte, *ptep;
+ 	int i;
+ 	unsigned long *ptr;
+ 	/* Enough space to fit pagetables for the low memory linear map */
+-	const unsigned long limit = __pa(_end) +
++	const unsigned long limit = __pa_nodebug(_end) +
+ 		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+ #ifdef CONFIG_X86_PAE
+-	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
++	pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd);
+ #define SET_PL2(pl2, val)	{ (pl2).pmd = (val); }
+ #else
+-	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
++	pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table);
+ #define SET_PL2(pl2, val)	{ (pl2).pgd = (val); }
+ #endif
+ 
+-	ptep = (pte_t *)__pa(__brk_base);
++	ptep = (pte_t *)__pa_nodebug(__brk_base);
+ 	pte.pte = PTE_IDENT_ATTR;
+ 
+ 	while ((pte.pte & PTE_PFN_MASK) < limit) {
+@@ -114,11 +110,11 @@ void __init __no_stack_protector mk_earl
+ 		pl2p++;
+ 	}
+ 
+-	ptr = (unsigned long *)__pa(&max_pfn_mapped);
++	ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
+ 	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
+ 	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ 
+-	ptr = (unsigned long *)__pa(&_brk_end);
++	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
+ 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
+ }
+ 
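For reference, on 32-bit both the removed local #define and the real macro come down to the same arithmetic, a linear-map virtual address minus PAGE_OFFSET; __pa_nodebug() additionally skips the CONFIG_DEBUG_VIRTUAL sanity checking of __pa(), which cannot run this early. A one-line sketch with an invented address:

#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL
/* Same arithmetic as the open-coded #define the patch removes. */
#define __pa_nodebug(x)	((unsigned long)(x) - PAGE_OFFSET)

int main(void)
{
	unsigned long end_va = 0xc1a00000UL;	/* invented link address of _end */

	printf("phys of _end: %#lx\n", __pa_nodebug(end_va));
	return 0;
}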
diff --git a/queue-6.6/x86-microcode-provide-config_microcode_initrd32.patch b/queue-6.6/x86-microcode-provide-config_microcode_initrd32.patch
new file mode 100644
index 0000000000..e8b03a7f7c
--- /dev/null
+++ b/queue-6.6/x86-microcode-provide-config_microcode_initrd32.patch
@@ -0,0 +1,33 @@
+From 5ad4ca05f4ea57a75cc50cfc368f74923478a116 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 17 Oct 2023 23:23:29 +0200
+Subject: x86/microcode: Provide CONFIG_MICROCODE_INITRD32
+
+From: Thomas Gleixner
+
+commit fdbd43819400e74c1c20a646969ea8f71706eb2b upstream
+
+Create an aggregate config switch which covers X86_32, MICROCODE and
+BLK_DEV_INITRD to avoid lengthy #ifdeffery in upcoming code.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20231017211722.236208250@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/Kconfig |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1315,6 +1315,10 @@ config MICROCODE
+ 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+ 	select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
+ 
++config MICROCODE_INITRD32
++	def_bool y
++	depends on MICROCODE && X86_32 && BLK_DEV_INITRD
++
+ config MICROCODE_LATE_LOADING
+ 	bool "Late microcode loading (DANGEROUS)"
+ 	default n
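What the aggregate symbol buys at the C level, sketched with invented function names (the "before" and "after" forms are alternatives, shown together only for comparison): every use site shrinks from a three-way preprocessor test to a single #ifdef, which is the "#ifdeffery" the commit message wants to avoid in the follow-up patches.

/* Before: every use site must spell out the full dependency chain. */
#if defined(CONFIG_MICROCODE) && defined(CONFIG_X86_32) && \
    defined(CONFIG_BLK_DEV_INITRD)
void load_ucode_initrd_old(void);	/* invented name */
#else
static inline void load_ucode_initrd_old(void) { }
#endif

/* After: the aggregate symbol carries the whole condition. */
#ifdef CONFIG_MICROCODE_INITRD32
void load_ucode_initrd_new(void);	/* invented name */
#else
static inline void load_ucode_initrd_new(void) { }
#endif

int main(void)
{
	load_ucode_initrd_old();
	load_ucode_initrd_new();
	return 0;
}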