--- /dev/null
+From stable+bounces-25894-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:47 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:39 +0100
+Subject: arm64: efi: Limit allocations to 48-bit addressable physical region
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20240304111937.2556102-21-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit a37dac5c5dcfe0f1fd58513c16cdbc280a47f628 upstream ]
+
+The UEFI spec does not mention or reason about the configured size of
+the virtual address space at all, but it does mention that all memory
+should be identity mapped using a page size of 4 KiB.
+
+This means that a LPA2 capable system that has any system memory outside
+of the 48-bit addressable physical range and follows the spec to the
+letter may serve page allocation requests from regions of memory that
+the kernel cannot access unless it was built with LPA2 support and
+enables it at runtime.
+
+So let's ensure that all page allocations are limited to the 48-bit
+range.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/efi.h | 1 +
+ drivers/firmware/efi/libstub/alignedmem.c | 2 ++
+ drivers/firmware/efi/libstub/arm64-stub.c | 5 +++--
+ drivers/firmware/efi/libstub/efistub.h | 4 ++++
+ drivers/firmware/efi/libstub/mem.c | 2 ++
+ drivers/firmware/efi/libstub/randomalloc.c | 2 +-
+ 6 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -103,6 +103,7 @@ static inline void free_screen_info(stru
+ }
+
+ #define EFI_ALLOC_ALIGN SZ_64K
++#define EFI_ALLOC_LIMIT ((1UL << 48) - 1)
+
+ /*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
+--- a/drivers/firmware/efi/libstub/alignedmem.c
++++ b/drivers/firmware/efi/libstub/alignedmem.c
+@@ -29,6 +29,8 @@ efi_status_t efi_allocate_pages_aligned(
+ efi_status_t status;
+ int slack;
+
++ max = min(max, EFI_ALLOC_LIMIT);
++
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -191,10 +191,11 @@ efi_status_t handle_kernel_image(unsigne
+ if (status != EFI_SUCCESS) {
+ if (!check_image_region((u64)_text, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+- } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
++ } else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
++ (u64)_end < EFI_ALLOC_LIMIT) {
+ /*
+ * Just execute from wherever we were loaded by the
+- * UEFI PE/COFF loader if the alignment is suitable.
++ * UEFI PE/COFF loader if the placement is suitable.
+ */
+ *image_addr = (u64)_text;
+ *reserve_size = 0;
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -29,6 +29,10 @@
+ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+ #endif
+
++#ifndef EFI_ALLOC_LIMIT
++#define EFI_ALLOC_LIMIT ULONG_MAX
++#endif
++
+ extern bool efi_nochunk;
+ extern bool efi_nokaslr;
+ extern int efi_loglevel;
+--- a/drivers/firmware/efi/libstub/mem.c
++++ b/drivers/firmware/efi/libstub/mem.c
+@@ -89,6 +89,8 @@ efi_status_t efi_allocate_pages(unsigned
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+
++ max = min(max, EFI_ALLOC_LIMIT);
++
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+ EFI_ALLOC_ALIGN,
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -29,7 +29,7 @@ static unsigned long get_entry_num_slots
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+- (u64)ULONG_MAX);
++ (u64)EFI_ALLOC_LIMIT);
+ if (region_end < size)
+ return 0;
+
--- /dev/null
+From stable+bounces-25895-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:46 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:40 +0100
+Subject: efi: efivars: prevent double registration
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Johan Hovold <johan+linaro@kernel.org>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20240304111937.2556102-22-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Commit 0217a40d7ba6e71d7f3422fbe89b436e8ee7ece7 upstream ]
+
+Add the missing sanity check to efivars_register() so that it is no
+longer possible to override an already registered set of efivar ops
+(without first deregistering them).
+
+This can help debug initialisation ordering issues where drivers have so
+far unknowingly been relying on overriding the generic ops.
+
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/vars.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -66,19 +66,28 @@ int efivars_register(struct efivars *efi
+ const struct efivar_operations *ops,
+ struct kobject *kobject)
+ {
++ int rv;
++
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
++ if (__efivars) {
++ pr_warn("efivars already registered\n");
++ rv = -EBUSY;
++ goto out;
++ }
++
+ efivars->ops = ops;
+ efivars->kobject = kobject;
+
+ __efivars = efivars;
+
+ pr_info("Registered efivars operations\n");
+-
++ rv = 0;
++out:
+ up(&efivars_lock);
+
+- return 0;
++ return rv;
+ }
+ EXPORT_SYMBOL_GPL(efivars_register);
+
--- /dev/null
+From stable+bounces-25901-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:00 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:46 +0100
+Subject: efi/libstub: Add limit argument to efi_random_alloc()
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-28-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit bc5ddceff4c14494d83449ad45c985e6cd353fce upstream ]
+
+x86 will need to limit the kernel memory allocation to the lowest 512
+MiB of memory, to match the behavior of the existing bare metal KASLR
+physical randomization logic. So in preparation for that, add a limit
+parameter to efi_random_alloc() and wire it up.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-22-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/arm64-stub.c | 2 +-
+ drivers/firmware/efi/libstub/efistub.h | 2 +-
+ drivers/firmware/efi/libstub/randomalloc.c | 10 ++++++----
+ 3 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -181,7 +181,7 @@ efi_status_t handle_kernel_image(unsigne
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+- EFI_LOADER_CODE);
++ EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -905,7 +905,7 @@ efi_status_t efi_get_random_bytes(unsign
+
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed,
+- int memory_type);
++ int memory_type, unsigned long alloc_limit);
+
+ efi_status_t efi_random_get_seed(void);
+
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -16,7 +16,8 @@
+ */
+ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+- unsigned long align_shift)
++ unsigned long align_shift,
++ u64 alloc_limit)
+ {
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+@@ -29,7 +30,7 @@ static unsigned long get_entry_num_slots
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+- (u64)EFI_ALLOC_LIMIT);
++ alloc_limit);
+ if (region_end < size)
+ return 0;
+
+@@ -54,7 +55,8 @@ efi_status_t efi_random_alloc(unsigned l
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed,
+- int memory_type)
++ int memory_type,
++ unsigned long alloc_limit)
+ {
+ unsigned long total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
+@@ -76,7 +78,7 @@ efi_status_t efi_random_alloc(unsigned l
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ unsigned long slots;
+
+- slots = get_entry_num_slots(md, size, ilog2(align));
++ slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
--- /dev/null
+From stable+bounces-25900-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:56 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:45 +0100
+Subject: efi/libstub: Add memory attribute protocol definitions
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Evgeniy Baskov <baskov@ispras.ru>, Mario Limonciello <mario.limonciello@amd.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20240304111937.2556102-27-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Evgeniy Baskov <baskov@ispras.ru>
+
+[ Commit 79729f26b074a5d2722c27fa76cc45ef721e65cd upstream ]
+
+EFI_MEMORY_ATTRIBUTE_PROTOCOL serves as a better alternative to
+DXE services for setting memory attributes in EFI Boot Services
+environment. This protocol is better since it is a part of UEFI
+specification itself and not UEFI PI specification like DXE
+services.
+
+Add EFI_MEMORY_ATTRIBUTE_PROTOCOL definitions.
+Support mixed mode properly for its calls.
+
+Tested-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Evgeniy Baskov <baskov@ispras.ru>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/efi.h | 7 +++++++
+ drivers/firmware/efi/libstub/efistub.h | 20 ++++++++++++++++++++
+ include/linux/efi.h | 1 +
+ 3 files changed, 28 insertions(+)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -325,6 +325,13 @@ static inline u32 efi64_convert_status(e
+ #define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
+ (__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
+
++/* Memory Attribute Protocol */
++#define __efi64_argmap_set_memory_attributes(protocol, phys, size, flags) \
++ ((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
++
++#define __efi64_argmap_clear_memory_attributes(protocol, phys, size, flags) \
++ ((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
++
+ /*
+ * The macros below handle the plumbing for the argument mapping. To add a
+ * mapping for a specific EFI method, simply define a macro
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -419,6 +419,26 @@ union efi_dxe_services_table {
+ } mixed_mode;
+ };
+
++typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
++
++union efi_memory_attribute_protocol {
++ struct {
++ efi_status_t (__efiapi *get_memory_attributes)(
++ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
++
++ efi_status_t (__efiapi *set_memory_attributes)(
++ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
++
++ efi_status_t (__efiapi *clear_memory_attributes)(
++ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
++ };
++ struct {
++ u32 get_memory_attributes;
++ u32 set_memory_attributes;
++ u32 clear_memory_attributes;
++ } mixed_mode;
++};
++
+ typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
+
+ union efi_uga_draw_protocol {
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -390,6 +390,7 @@ void efi_native_runtime_setup(void);
+ #define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+ #define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+ #define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
++#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
+
+ #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
+ #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
--- /dev/null
+From stable+bounces-25907-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:09 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:52 +0100
+Subject: efi/x86: Avoid physical KASLR on older Dell systems
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20240304111937.2556102-34-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 50d7cdf7a9b1ab6f4f74a69c84e974d5dc0c1bf1 upstream ]
+
+River reports boot hangs with v6.6 and v6.7, and the bisect points to
+commit
+
+ a1b87d54f4e4 ("x86/efistub: Avoid legacy decompressor when doing EFI boot")
+
+which moves the memory allocation and kernel decompression from the
+legacy decompressor (which executes *after* ExitBootServices()) to the
+EFI stub, using boot services for allocating the memory. The memory
+allocation succeeds but the subsequent call to decompress_kernel() never
+returns, resulting in a failed boot and a hanging system.
+
+As it turns out, this issue only occurs when physical address
+randomization (KASLR) is enabled, and given that this is a feature we
+can live without (virtual KASLR is much more important), let's disable
+the physical part of KASLR when booting on AMI UEFI firmware claiming to
+implement revision v2.0 of the specification (which was released in
+2006), as this is the version these systems advertise.
+
+Fixes: a1b87d54f4e4 ("x86/efistub: Avoid legacy decompressor when doing EFI boot")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218173
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/x86-stub.c | 31 ++++++++++++++++++++++++-------
+ 1 file changed, 24 insertions(+), 7 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -273,17 +273,20 @@ void efi_adjust_memory_range_protection(
+ }
+ }
+
++static efi_char16_t *efistub_fw_vendor(void)
++{
++ unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
++
++ return (efi_char16_t *)vendor;
++}
++
+ static const efi_char16_t apple[] = L"Apple";
+
+ static void setup_quirks(struct boot_params *boot_params)
+ {
+- efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+- efi_table_attr(efi_system_table, fw_vendor);
+-
+- if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+- if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+- retrieve_apple_device_properties(boot_params);
+- }
++ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
++ !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
++ retrieve_apple_device_properties(boot_params);
+ }
+
+ /*
+@@ -759,11 +762,25 @@ static efi_status_t efi_decompress_kerne
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
++ static const efi_char16_t ami[] = L"American Megatrends";
+
+ efi_get_seed(seed, sizeof(seed));
+
+ virt_addr += (range * seed[1]) >> 32;
+ virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
++
++ /*
++ * Older Dell systems with AMI UEFI firmware v2.0 may hang
++ * while decompressing the kernel if physical address
++ * randomization is enabled.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=218173
++ */
++ if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
++ !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
++ efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
++ seed[0] = 0;
++ }
+ }
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
--- /dev/null
+From stable+bounces-25911-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:17 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:56 +0100
+Subject: efi/x86: Fix the missing KASLR_FLAG bit in boot_params->hdr.loadflags
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Yuntao Wang <ytcoode@gmail.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20240304111937.2556102-38-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Yuntao Wang <ytcoode@gmail.com>
+
+[ Commit 01638431c465741e071ab34acf3bef3c2570f878 upstream ]
+
+When KASLR is enabled, the KASLR_FLAG bit in boot_params->hdr.loadflags
+should be set to 1 to propagate KASLR status from compressed kernel to
+kernel, just as the choose_random_location() function does.
+
+Currently, when the kernel is booted via the EFI stub, the KASLR_FLAG
+bit in boot_params->hdr.loadflags is not set, even though it should be.
+This causes some functions, such as kernel_randomize_memory(), not to
+execute as expected. Fix it.
+
+Fixes: a1b87d54f4e4 ("x86/efistub: Avoid legacy decompressor when doing EFI boot")
+Signed-off-by: Yuntao Wang <ytcoode@gmail.com>
+[ardb: drop 'else' branch clearing KASLR_FLAG]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/x86-stub.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -781,6 +781,8 @@ static efi_status_t efi_decompress_kerne
+ efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+ seed[0] = 0;
+ }
++
++ boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+ }
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
nfsd-register-unregister-of-nfsd-client-shrinker-at-nfsd-startup-shutdown-time.patch
nfsd-replace-delayed_work-with-work_struct-for-nfsd_client_shrinker.patch
nfsd-don-t-destroy-global-nfs4_file-table-in-per-net-shutdown.patch
+arm64-efi-limit-allocations-to-48-bit-addressable-physical-region.patch
+efi-efivars-prevent-double-registration.patch
+x86-efistub-simplify-and-clean-up-handover-entry-code.patch
+x86-decompressor-avoid-magic-offsets-for-efi-handover-entrypoint.patch
+x86-efistub-clear-bss-in-efi-handover-protocol-entrypoint.patch
+efi-libstub-add-memory-attribute-protocol-definitions.patch
+efi-libstub-add-limit-argument-to-efi_random_alloc.patch
+x86-efistub-perform-4-5-level-paging-switch-from-the-stub.patch
+x86-decompressor-factor-out-kernel-decompression-and-relocation.patch
+x86-efistub-prefer-efi-memory-attributes-protocol-over-dxe-services.patch
+x86-efistub-perform-snp-feature-test-while-running-in-the-firmware.patch
+x86-efistub-avoid-legacy-decompressor-when-doing-efi-boot.patch
+efi-x86-avoid-physical-kaslr-on-older-dell-systems.patch
+x86-efistub-avoid-placing-the-kernel-below-load_physical_addr.patch
+x86-boot-rename-conflicting-boot_params-pointer-to-boot_params_ptr.patch
+x86-boot-efistub-assign-global-boot_params-variable.patch
+efi-x86-fix-the-missing-kaslr_flag-bit-in-boot_params-hdr.loadflags.patch
--- /dev/null
+From stable+bounces-25910-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:16 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:55 +0100
+Subject: x86/boot: efistub: Assign global boot_params variable
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Ingo Molnar <mingo@kernel.org>
+Message-ID: <20240304111937.2556102-37-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 50dcc2e0d62e3c4a54f39673c4dc3dcde7c74d52 upstream ]
+
+Now that the x86 EFI stub calls into some APIs exposed by the
+decompressor (e.g., kaslr_get_random_long()), it is necessary to ensure
+that the global boot_params variable is set correctly before doing so.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/x86-stub.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -827,6 +827,8 @@ void __noreturn efi_stub_entry(efi_handl
+ unsigned long kernel_entry;
+ efi_status_t status;
+
++ boot_params_ptr = boot_params;
++
+ efi_system_table = sys_table_arg;
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
--- /dev/null
+From stable+bounces-25909-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:13 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:54 +0100
+Subject: x86/boot: Rename conflicting 'boot_params' pointer to 'boot_params_ptr'
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Ingo Molnar <mingo@kernel.org>
+Message-ID: <20240304111937.2556102-36-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit b9e909f78e7e4b826f318cfe7bedf3ce229920e6 upstream ]
+
+The x86 decompressor is built and linked as a separate executable, but
+it shares components with the kernel proper, which are either #include'd
+as C files, or linked into the decompressor as a static library (e.g.,
+EFI stub)
+
+Both the kernel itself and the decompressor define a global symbol
+'boot_params' to refer to the boot_params struct, but in the former
+case, it refers to the struct directly, whereas in the decompressor, it
+refers to a global pointer variable referring to the struct boot_params
+passed by the bootloader or constructed from scratch.
+
+This ambiguity is unfortunate, and makes it impossible to assign this
+decompressor variable from the x86 EFI stub, given that declaring it as
+extern results in a clash. So rename the decompressor version (whose
+scope is limited) to boot_params_ptr.
+
+[ mingo: Renamed 'boot_params_p' to 'boot_params_ptr' for clarity ]
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/acpi.c | 14 +++++++-------
+ arch/x86/boot/compressed/cmdline.c | 4 ++--
+ arch/x86/boot/compressed/ident_map_64.c | 7 ++++---
+ arch/x86/boot/compressed/kaslr.c | 26 +++++++++++++-------------
+ arch/x86/boot/compressed/misc.c | 24 ++++++++++++------------
+ arch/x86/boot/compressed/misc.h | 1 -
+ arch/x86/boot/compressed/pgtable_64.c | 9 ++++-----
+ arch/x86/boot/compressed/sev.c | 2 +-
+ arch/x86/include/asm/boot.h | 2 ++
+ 9 files changed, 45 insertions(+), 44 deletions(-)
+
+--- a/arch/x86/boot/compressed/acpi.c
++++ b/arch/x86/boot/compressed/acpi.c
+@@ -30,13 +30,13 @@ __efi_get_rsdp_addr(unsigned long cfg_tb
+ * Search EFI system tables for RSDP. Preferred is ACPI_20_TABLE_GUID to
+ * ACPI_TABLE_GUID because it has more features.
+ */
+- rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
++ rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
+ ACPI_20_TABLE_GUID);
+ if (rsdp_addr)
+ return (acpi_physical_address)rsdp_addr;
+
+ /* No ACPI_20_TABLE_GUID found, fallback to ACPI_TABLE_GUID. */
+- rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
++ rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
+ ACPI_TABLE_GUID);
+ if (rsdp_addr)
+ return (acpi_physical_address)rsdp_addr;
+@@ -56,15 +56,15 @@ static acpi_physical_address efi_get_rsd
+ enum efi_type et;
+ int ret;
+
+- et = efi_get_type(boot_params);
++ et = efi_get_type(boot_params_ptr);
+ if (et == EFI_TYPE_NONE)
+ return 0;
+
+- systab_pa = efi_get_system_table(boot_params);
++ systab_pa = efi_get_system_table(boot_params_ptr);
+ if (!systab_pa)
+ error("EFI support advertised, but unable to locate system table.");
+
+- ret = efi_get_conf_table(boot_params, &cfg_tbl_pa, &cfg_tbl_len);
++ ret = efi_get_conf_table(boot_params_ptr, &cfg_tbl_pa, &cfg_tbl_len);
+ if (ret || !cfg_tbl_pa)
+ error("EFI config table not found.");
+
+@@ -156,7 +156,7 @@ acpi_physical_address get_rsdp_addr(void
+ {
+ acpi_physical_address pa;
+
+- pa = boot_params->acpi_rsdp_addr;
++ pa = boot_params_ptr->acpi_rsdp_addr;
+
+ if (!pa)
+ pa = efi_get_rsdp_addr();
+@@ -210,7 +210,7 @@ static unsigned long get_acpi_srat_table
+ rsdp = (struct acpi_table_rsdp *)get_cmdline_acpi_rsdp();
+ if (!rsdp)
+ rsdp = (struct acpi_table_rsdp *)(long)
+- boot_params->acpi_rsdp_addr;
++ boot_params_ptr->acpi_rsdp_addr;
+
+ if (!rsdp)
+ return 0;
+--- a/arch/x86/boot/compressed/cmdline.c
++++ b/arch/x86/boot/compressed/cmdline.c
+@@ -14,9 +14,9 @@ static inline char rdfs8(addr_t addr)
+ #include "../cmdline.c"
+ unsigned long get_cmd_line_ptr(void)
+ {
+- unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
++ unsigned long cmd_line_ptr = boot_params_ptr->hdr.cmd_line_ptr;
+
+- cmd_line_ptr |= (u64)boot_params->ext_cmd_line_ptr << 32;
++ cmd_line_ptr |= (u64)boot_params_ptr->ext_cmd_line_ptr << 32;
+
+ return cmd_line_ptr;
+ }
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -167,8 +167,9 @@ void initialize_identity_maps(void *rmod
+ * or does not touch all the pages covering them.
+ */
+ kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
+- boot_params = rmode;
+- kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
++ boot_params_ptr = rmode;
++ kernel_add_identity_map((unsigned long)boot_params_ptr,
++ (unsigned long)(boot_params_ptr + 1));
+ cmdline = get_cmd_line_ptr();
+ kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
+
+@@ -176,7 +177,7 @@ void initialize_identity_maps(void *rmod
+ * Also map the setup_data entries passed via boot_params in case they
+ * need to be accessed by uncompressed kernel via the identity mapping.
+ */
+- sd = (struct setup_data *)boot_params->hdr.setup_data;
++ sd = (struct setup_data *)boot_params_ptr->hdr.setup_data;
+ while (sd) {
+ unsigned long sd_addr = (unsigned long)sd;
+
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -63,7 +63,7 @@ static unsigned long get_boot_seed(void)
+ unsigned long hash = 0;
+
+ hash = rotate_xor(hash, build_str, sizeof(build_str));
+- hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
++ hash = rotate_xor(hash, boot_params_ptr, sizeof(*boot_params_ptr));
+
+ return hash;
+ }
+@@ -383,7 +383,7 @@ static void handle_mem_options(void)
+ static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ unsigned long output)
+ {
+- unsigned long init_size = boot_params->hdr.init_size;
++ unsigned long init_size = boot_params_ptr->hdr.init_size;
+ u64 initrd_start, initrd_size;
+ unsigned long cmd_line, cmd_line_size;
+
+@@ -395,10 +395,10 @@ static void mem_avoid_init(unsigned long
+ mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
+
+ /* Avoid initrd. */
+- initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
+- initrd_start |= boot_params->hdr.ramdisk_image;
+- initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
+- initrd_size |= boot_params->hdr.ramdisk_size;
++ initrd_start = (u64)boot_params_ptr->ext_ramdisk_image << 32;
++ initrd_start |= boot_params_ptr->hdr.ramdisk_image;
++ initrd_size = (u64)boot_params_ptr->ext_ramdisk_size << 32;
++ initrd_size |= boot_params_ptr->hdr.ramdisk_size;
+ mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
+ mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
+ /* No need to set mapping for initrd, it will be handled in VO. */
+@@ -413,8 +413,8 @@ static void mem_avoid_init(unsigned long
+ }
+
+ /* Avoid boot parameters. */
+- mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
+- mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
++ mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params_ptr;
++ mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params_ptr);
+
+ /* We don't need to set a mapping for setup_data. */
+
+@@ -447,7 +447,7 @@ static bool mem_avoid_overlap(struct mem
+ }
+
+ /* Avoid all entries in the setup_data linked list. */
+- ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
++ ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
+ while (ptr) {
+ struct mem_vector avoid;
+
+@@ -679,7 +679,7 @@ static bool process_mem_region(struct me
+ static bool
+ process_efi_entries(unsigned long minimum, unsigned long image_size)
+ {
+- struct efi_info *e = &boot_params->efi_info;
++ struct efi_info *e = &boot_params_ptr->efi_info;
+ bool efi_mirror_found = false;
+ struct mem_vector region;
+ efi_memory_desc_t *md;
+@@ -761,8 +761,8 @@ static void process_e820_entries(unsigne
+ struct boot_e820_entry *entry;
+
+ /* Verify potential e820 positions, appending to slots list. */
+- for (i = 0; i < boot_params->e820_entries; i++) {
+- entry = &boot_params->e820_table[i];
++ for (i = 0; i < boot_params_ptr->e820_entries; i++) {
++ entry = &boot_params_ptr->e820_table[i];
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_TYPE_RAM)
+ continue;
+@@ -836,7 +836,7 @@ void choose_random_location(unsigned lon
+ return;
+ }
+
+- boot_params->hdr.loadflags |= KASLR_FLAG;
++ boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+
+ if (IS_ENABLED(CONFIG_X86_32))
+ mem_limit = KERNEL_IMAGE_SIZE;
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -46,7 +46,7 @@ void *memmove(void *dest, const void *sr
+ /*
+ * This is set up by the setup-routine at boot-time
+ */
+-struct boot_params *boot_params;
++struct boot_params *boot_params_ptr;
+
+ struct port_io_ops pio_ops;
+
+@@ -132,8 +132,8 @@ void __putstr(const char *s)
+ if (lines == 0 || cols == 0)
+ return;
+
+- x = boot_params->screen_info.orig_x;
+- y = boot_params->screen_info.orig_y;
++ x = boot_params_ptr->screen_info.orig_x;
++ y = boot_params_ptr->screen_info.orig_y;
+
+ while ((c = *s++) != '\0') {
+ if (c == '\n') {
+@@ -154,8 +154,8 @@ void __putstr(const char *s)
+ }
+ }
+
+- boot_params->screen_info.orig_x = x;
+- boot_params->screen_info.orig_y = y;
++ boot_params_ptr->screen_info.orig_x = x;
++ boot_params_ptr->screen_info.orig_y = y;
+
+ pos = (x + cols * y) * 2; /* Update cursor position */
+ outb(14, vidport);
+@@ -382,14 +382,14 @@ asmlinkage __visible void *extract_kerne
+ size_t entry_offset;
+
+ /* Retain x86 boot parameters pointer passed from startup_32/64. */
+- boot_params = rmode;
++ boot_params_ptr = rmode;
+
+ /* Clear flags intended for solely in-kernel use. */
+- boot_params->hdr.loadflags &= ~KASLR_FLAG;
++ boot_params_ptr->hdr.loadflags &= ~KASLR_FLAG;
+
+- sanitize_boot_params(boot_params);
++ sanitize_boot_params(boot_params_ptr);
+
+- if (boot_params->screen_info.orig_video_mode == 7) {
++ if (boot_params_ptr->screen_info.orig_video_mode == 7) {
+ vidmem = (char *) 0xb0000;
+ vidport = 0x3b4;
+ } else {
+@@ -397,8 +397,8 @@ asmlinkage __visible void *extract_kerne
+ vidport = 0x3d4;
+ }
+
+- lines = boot_params->screen_info.orig_video_lines;
+- cols = boot_params->screen_info.orig_video_cols;
++ lines = boot_params_ptr->screen_info.orig_video_lines;
++ cols = boot_params_ptr->screen_info.orig_video_cols;
+
+ init_default_io_ops();
+
+@@ -417,7 +417,7 @@ asmlinkage __visible void *extract_kerne
+ * so that early debugging output from the RSDP parsing code can be
+ * collected.
+ */
+- boot_params->acpi_rsdp_addr = get_rsdp_addr();
++ boot_params_ptr->acpi_rsdp_addr = get_rsdp_addr();
+
+ debug_putstr("early console in extract_kernel\n");
+
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -52,7 +52,6 @@ extern memptr free_mem_ptr;
+ extern memptr free_mem_end_ptr;
+ void *malloc(int size);
+ void free(void *where);
+-extern struct boot_params *boot_params;
+ void __putstr(const char *s);
+ void __puthex(unsigned long value);
+ #define error_putstr(__x) __putstr(__x)
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -28,7 +28,6 @@ static char trampoline_save[TRAMPOLINE_3
+ */
+ unsigned long *trampoline_32bit __section(".data");
+
+-extern struct boot_params *boot_params;
+ int cmdline_find_option_bool(const char *option);
+
+ static unsigned long find_trampoline_placement(void)
+@@ -49,7 +48,7 @@ static unsigned long find_trampoline_pla
+ *
+ * Only look for values in the legacy ROM for non-EFI system.
+ */
+- signature = (char *)&boot_params->efi_info.efi_loader_signature;
++ signature = (char *)&boot_params_ptr->efi_info.efi_loader_signature;
+ if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+ strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+ ebda_start = *(unsigned short *)0x40e << 4;
+@@ -65,10 +64,10 @@ static unsigned long find_trampoline_pla
+ bios_start = round_down(bios_start, PAGE_SIZE);
+
+ /* Find the first usable memory region under bios_start. */
+- for (i = boot_params->e820_entries - 1; i >= 0; i--) {
++ for (i = boot_params_ptr->e820_entries - 1; i >= 0; i--) {
+ unsigned long new = bios_start;
+
+- entry = &boot_params->e820_table[i];
++ entry = &boot_params_ptr->e820_table[i];
+
+ /* Skip all entries above bios_start. */
+ if (bios_start <= entry->addr)
+@@ -107,7 +106,7 @@ asmlinkage void configure_5level_paging(
+ bool l5_required = false;
+
+ /* Initialize boot_params. Required for cmdline_find_option_bool(). */
+- boot_params = bp;
++ boot_params_ptr = bp;
+
+ /*
+ * Check if LA57 is desired and supported.
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -565,7 +565,7 @@ void sev_prep_identity_maps(unsigned lon
+ * accessed after switchover.
+ */
+ if (sev_snp_enabled()) {
+- unsigned long cc_info_pa = boot_params->cc_blob_address;
++ unsigned long cc_info_pa = boot_params_ptr->cc_blob_address;
+ struct cc_blob_sev_info *cc_info;
+
+ kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -85,6 +85,8 @@ extern const unsigned long kernel_total_
+
+ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+ void (*error)(char *x));
++
++extern struct boot_params *boot_params_ptr;
+ #endif
+
+ #endif /* _ASM_X86_BOOT_H */
--- /dev/null
+From stable+bounces-25897-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:51 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:42 +0100
+Subject: x86/decompressor: Avoid magic offsets for EFI handover entrypoint
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-24-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 12792064587623065250069d1df980e2c9ac3e67 upstream ]
+
+The native 32-bit or 64-bit EFI handover protocol entrypoint offset
+relative to the respective startup_32/64 address is described in
+boot_params as handover_offset, so that the special Linux/x86 aware EFI
+loader can find it there.
+
+When mixed mode is enabled, this single field has to describe this
+offset for both the 32-bit and 64-bit entrypoints, so their respective
+relative offsets have to be identical. Given that startup_32 and
+startup_64 are 0x200 bytes apart, and the EFI handover entrypoint
+resides at a fixed offset, the 32-bit and 64-bit versions of those
+entrypoints must be exactly 0x200 bytes apart as well.
+
+Currently, hard-coded fixed offsets are used to ensure this, but it is
+sufficient to emit the 64-bit entrypoint 0x200 bytes after the 32-bit
+one, wherever it happens to reside. This allows this code (which is now
+EFI mixed mode specific) to be moved into efi_mixed.S and out of the
+startup code in head_64.S.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-6-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/efi_mixed.S | 20 +++++++++++++++++++-
+ arch/x86/boot/compressed/head_64.S | 18 ------------------
+ 2 files changed, 19 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -146,6 +146,16 @@ SYM_FUNC_START(__efi64_thunk)
+ SYM_FUNC_END(__efi64_thunk)
+
+ .code32
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++SYM_FUNC_START(efi32_stub_entry)
++ add $0x4, %esp /* Discard return address */
++ popl %ecx
++ popl %edx
++ popl %esi
++ jmp efi32_entry
++SYM_FUNC_END(efi32_stub_entry)
++#endif
++
+ /*
+ * EFI service pointer must be in %edi.
+ *
+@@ -226,7 +236,7 @@ SYM_FUNC_END(efi_enter32)
+ * stub may still exit and return to the firmware using the Exit() EFI boot
+ * service.]
+ */
+-SYM_FUNC_START(efi32_entry)
++SYM_FUNC_START_LOCAL(efi32_entry)
+ call 1f
+ 1: pop %ebx
+
+@@ -326,6 +336,14 @@ SYM_FUNC_START(efi32_pe_entry)
+ RET
+ SYM_FUNC_END(efi32_pe_entry)
+
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++ .org efi32_stub_entry + 0x200
++ .code64
++SYM_FUNC_START_NOALIGN(efi64_stub_entry)
++ jmp efi_stub_entry
++SYM_FUNC_END(efi64_stub_entry)
++#endif
++
+ .section ".rodata"
+ /* EFI loaded image protocol GUID */
+ .balign 4
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -286,17 +286,6 @@ SYM_FUNC_START(startup_32)
+ lret
+ SYM_FUNC_END(startup_32)
+
+-#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
+- .org 0x190
+-SYM_FUNC_START(efi32_stub_entry)
+- add $0x4, %esp /* Discard return address */
+- popl %ecx
+- popl %edx
+- popl %esi
+- jmp efi32_entry
+-SYM_FUNC_END(efi32_stub_entry)
+-#endif
+-
+ .code64
+ .org 0x200
+ SYM_CODE_START(startup_64)
+@@ -474,13 +463,6 @@ SYM_CODE_START(startup_64)
+ jmp *%rax
+ SYM_CODE_END(startup_64)
+
+-#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
+- .org 0x390
+-SYM_FUNC_START(efi64_stub_entry)
+- jmp efi_stub_entry
+-SYM_FUNC_END(efi64_stub_entry)
+-#endif
+-
+ .text
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+
--- /dev/null
+From stable+bounces-25903-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:02 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:48 +0100
+Subject: x86/decompressor: Factor out kernel decompression and relocation
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-30-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 83381519352d6b5b3e429bf72aaab907480cb6b6 upstream ]
+
+Factor out the decompressor sequence that invokes the decompressor,
+parses the ELF and applies the relocations so that it can be called
+directly from the EFI stub.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-21-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/misc.c | 29 ++++++++++++++++++++++++-----
+ arch/x86/include/asm/boot.h | 8 ++++++++
+ 2 files changed, 32 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -330,11 +330,33 @@ static size_t parse_elf(void *output)
+ return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
+ }
+
++const unsigned long kernel_total_size = VO__end - VO__text;
++
+ static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
+
+ extern unsigned char input_data[];
+ extern unsigned int input_len, output_len;
+
++unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
++ void (*error)(char *x))
++{
++ unsigned long entry;
++
++ if (!free_mem_ptr) {
++ free_mem_ptr = (unsigned long)boot_heap;
++ free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap);
++ }
++
++ if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len,
++ NULL, error) < 0)
++ return ULONG_MAX;
++
++ entry = parse_elf(outbuf);
++ handle_relocations(outbuf, output_len, virt_addr);
++
++ return entry;
++}
++
+ /*
+ * The compressed kernel image (ZO), has been moved so that its position
+ * is against the end of the buffer used to hold the uncompressed kernel
+@@ -354,7 +376,6 @@ extern unsigned int input_len, output_le
+ */
+ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
+ {
+- const unsigned long kernel_total_size = VO__end - VO__text;
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ memptr heap = (memptr)boot_heap;
+ unsigned long needed_size;
+@@ -457,10 +478,8 @@ asmlinkage __visible void *extract_kerne
+ #endif
+
+ debug_putstr("\nDecompressing Linux... ");
+- __decompress(input_data, input_len, NULL, NULL, output, output_len,
+- NULL, error);
+- entry_offset = parse_elf(output);
+- handle_relocations(output, output_len, virt_addr);
++
++ entry_offset = decompress_kernel(output, virt_addr, error);
+
+ debug_putstr("done.\nBooting the kernel (entry_offset: 0x");
+ debug_puthex(entry_offset);
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -79,4 +79,12 @@
+ # define BOOT_STACK_SIZE 0x1000
+ #endif
+
++#ifndef __ASSEMBLY__
++extern unsigned int output_len;
++extern const unsigned long kernel_total_size;
++
++unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
++ void (*error)(char *x));
++#endif
++
+ #endif /* _ASM_X86_BOOT_H */
--- /dev/null
+From stable+bounces-25906-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:08 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:51 +0100
+Subject: x86/efistub: Avoid legacy decompressor when doing EFI boot
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-33-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit a1b87d54f4e45ff5e0d081fb1d9db3bf1a8fb39a upstream ]
+
+The bare metal decompressor code was never really intended to run in a
+hosted environment such as the EFI boot services, and does a few things
+that are becoming problematic in the context of EFI boot now that the
+logo requirements are getting tighter: EFI executables will no longer be
+allowed to consist of a single executable section that is mapped with
+read, write and execute permissions if they are intended for use in a
+context where Secure Boot is enabled (and where Microsoft's set of
+certificates is used, i.e., every x86 PC built to run Windows).
+
+To avoid stepping on reserved memory before having inspected the E820
+tables, and to ensure the correct placement when running a kernel build
+that is non-relocatable, the bare metal decompressor moves its own
+executable image to the end of the allocation that was reserved for it,
+in order to perform the decompression in place. This means the region in
+question requires both write and execute permissions, which either need
+to be given upfront (which EFI will no longer permit), or need to be
+applied on demand using the existing page fault handling framework.
+
+However, the physical placement of the kernel is usually randomized
+anyway, and even if it isn't, a dedicated decompression output buffer
+can be allocated anywhere in memory using EFI APIs when still running in
+the boot services, given that EFI support already implies a relocatable
+kernel. This means that decompression in place is never necessary, nor
+is moving the compressed image from one end to the other.
+
+Since EFI already maps all of memory 1:1, it is also unnecessary to
+create new page tables or handle page faults when decompressing the
+kernel. That means there is also no need to replace the special
+exception handlers for SEV. Generally, there is little need to do
+any of the things that the decompressor does beyond
+
+- initialize SEV encryption, if needed,
+- perform the 4/5 level paging switch, if needed,
+- decompress the kernel
+- relocate the kernel
+
+So do all of this from the EFI stub code, and avoid the bare metal
+decompressor altogether.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-24-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/Makefile | 5
+ arch/x86/boot/compressed/efi_mixed.S | 55 ----------
+ arch/x86/boot/compressed/head_32.S | 13 --
+ arch/x86/boot/compressed/head_64.S | 27 -----
+ arch/x86/include/asm/efi.h | 7 -
+ arch/x86/include/asm/sev.h | 2
+ drivers/firmware/efi/libstub/x86-stub.c | 166 +++++++++++++-------------------
+ 7 files changed, 84 insertions(+), 191 deletions(-)
+
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -74,6 +74,11 @@ LDFLAGS_vmlinux += -z noexecstack
+ ifeq ($(CONFIG_LD_IS_BFD),y)
+ LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments)
+ endif
++ifeq ($(CONFIG_EFI_STUB),y)
++# ensure that the static EFI stub library will be pulled in, even if it is
++# never referenced explicitly from the startup code
++LDFLAGS_vmlinux += -u efi_pe_entry
++endif
+ LDFLAGS_vmlinux += -T
+
+ hostprogs := mkpiggy
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -275,10 +275,6 @@ SYM_FUNC_START_LOCAL(efi32_entry)
+ jmp startup_32
+ SYM_FUNC_END(efi32_entry)
+
+-#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
+-#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
+-#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
+-
+ /*
+ * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
+ * efi_system_table_32_t *sys_table)
+@@ -286,8 +282,6 @@ SYM_FUNC_END(efi32_entry)
+ SYM_FUNC_START(efi32_pe_entry)
+ pushl %ebp
+ movl %esp, %ebp
+- pushl %eax // dummy push to allocate loaded_image
+-
+ pushl %ebx // save callee-save registers
+ pushl %edi
+
+@@ -296,48 +290,8 @@ SYM_FUNC_START(efi32_pe_entry)
+ movl $0x80000003, %eax // EFI_UNSUPPORTED
+ jnz 2f
+
+- call 1f
+-1: pop %ebx
+-
+- /* Get the loaded image protocol pointer from the image handle */
+- leal -4(%ebp), %eax
+- pushl %eax // &loaded_image
+- leal (loaded_image_proto - 1b)(%ebx), %eax
+- pushl %eax // pass the GUID address
+- pushl 8(%ebp) // pass the image handle
+-
+- /*
+- * Note the alignment of the stack frame.
+- * sys_table
+- * handle <-- 16-byte aligned on entry by ABI
+- * return address
+- * frame pointer
+- * loaded_image <-- local variable
+- * saved %ebx <-- 16-byte aligned here
+- * saved %edi
+- * &loaded_image
+- * &loaded_image_proto
+- * handle <-- 16-byte aligned for call to handle_protocol
+- */
+-
+- movl 12(%ebp), %eax // sys_table
+- movl ST32_boottime(%eax), %eax // sys_table->boottime
+- call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
+- addl $12, %esp // restore argument space
+- testl %eax, %eax
+- jnz 2f
+-
+ movl 8(%ebp), %ecx // image_handle
+ movl 12(%ebp), %edx // sys_table
+- movl -4(%ebp), %esi // loaded_image
+- movl LI32_image_base(%esi), %esi // loaded_image->image_base
+- leal (startup_32 - 1b)(%ebx), %ebp // runtime address of startup_32
+- /*
+- * We need to set the image_offset variable here since startup_32() will
+- * use it before we get to the 64-bit efi_pe_entry() in C code.
+- */
+- subl %esi, %ebp // calculate image_offset
+- movl %ebp, (image_offset - 1b)(%ebx) // save image_offset
+ xorl %esi, %esi
+ jmp efi32_entry // pass %ecx, %edx, %esi
+ // no other registers remain live
+@@ -356,15 +310,6 @@ SYM_FUNC_START_NOALIGN(efi64_stub_entry)
+ SYM_FUNC_END(efi64_stub_entry)
+ #endif
+
+- .section ".rodata"
+- /* EFI loaded image protocol GUID */
+- .balign 4
+-SYM_DATA_START_LOCAL(loaded_image_proto)
+- .long 0x5b1b31a1
+- .word 0x9562, 0x11d2
+- .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
+-SYM_DATA_END(loaded_image_proto)
+-
+ .data
+ .balign 8
+ SYM_DATA_START_LOCAL(efi32_boot_gdt)
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -84,19 +84,6 @@ SYM_FUNC_START(startup_32)
+
+ #ifdef CONFIG_RELOCATABLE
+ leal startup_32@GOTOFF(%edx), %ebx
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32() will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry() will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- * image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+- subl image_offset@GOTOFF(%edx), %ebx
+-#endif
+-
+ movl BP_kernel_alignment(%esi), %eax
+ decl %eax
+ addl %eax, %ebx
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -138,19 +138,6 @@ SYM_FUNC_START(startup_32)
+
+ #ifdef CONFIG_RELOCATABLE
+ movl %ebp, %ebx
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- * image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+- subl rva(image_offset)(%ebp), %ebx
+-#endif
+-
+ movl BP_kernel_alignment(%esi), %eax
+ decl %eax
+ addl %eax, %ebx
+@@ -327,20 +314,6 @@ SYM_CODE_START(startup_64)
+ /* Start with the delta to where the kernel will run at. */
+ #ifdef CONFIG_RELOCATABLE
+ leaq startup_32(%rip) /* - $startup_32 */, %rbp
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- * image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+- movl image_offset(%rip), %eax
+- subq %rax, %rbp
+-#endif
+-
+ movl BP_kernel_alignment(%rsi), %eax
+ decl %eax
+ addq %rax, %rbp
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -88,6 +88,8 @@ static inline void efi_fpu_end(void)
+ }
+
+ #ifdef CONFIG_X86_32
++#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
++
+ #define arch_efi_call_virt_setup() \
+ ({ \
+ efi_fpu_begin(); \
+@@ -101,8 +103,7 @@ static inline void efi_fpu_end(void)
+ })
+
+ #else /* !CONFIG_X86_32 */
+-
+-#define EFI_LOADER_SIGNATURE "EL64"
++#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
+
+ extern asmlinkage u64 __efi_call(void *fp, ...);
+
+@@ -214,6 +215,8 @@ efi_status_t efi_set_virtual_address_map
+
+ #ifdef CONFIG_EFI_MIXED
+
++#define EFI_ALLOC_LIMIT (efi_is_64bit() ? ULONG_MAX : U32_MAX)
++
+ #define ARCH_HAS_EFISTUB_WRAPPERS
+
+ static inline bool efi_is_64bit(void)
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -157,6 +157,7 @@ static __always_inline void sev_es_nmi_c
+ __sev_es_nmi_complete();
+ }
+ extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
++extern void sev_enable(struct boot_params *bp);
+
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
+ {
+@@ -210,6 +211,7 @@ static inline void sev_es_ist_exit(void)
+ static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
+ static inline void sev_es_nmi_complete(void) { }
+ static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
++static inline void sev_enable(struct boot_params *bp) { }
+ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
+ static inline void setup_ghcb(void) { }
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -15,17 +15,14 @@
+ #include <asm/setup.h>
+ #include <asm/desc.h>
+ #include <asm/boot.h>
++#include <asm/kaslr.h>
+ #include <asm/sev.h>
+
+ #include "efistub.h"
+ #include "x86-stub.h"
+
+-/* Maximum physical address for 64-bit kernel with 4-level paging */
+-#define MAXMEM_X86_64_4LEVEL (1ull << 46)
+-
+ const efi_system_table_t *efi_system_table;
+ const efi_dxe_services_table_t *efi_dxe_table;
+-u32 image_offset __section(".data");
+ static efi_loaded_image_t *image = NULL;
+ static efi_memory_attribute_protocol_t *memattr;
+
+@@ -276,33 +273,9 @@ void efi_adjust_memory_range_protection(
+ }
+ }
+
+-extern const u8 startup_32[], startup_64[];
+-
+-static void
+-setup_memory_protection(unsigned long image_base, unsigned long image_size)
+-{
+-#ifdef CONFIG_64BIT
+- if (image_base != (unsigned long)startup_32)
+- efi_adjust_memory_range_protection(image_base, image_size);
+-#else
+- /*
+- * Clear protection flags on a whole range of possible
+- * addresses used for KASLR. We don't need to do that
+- * on x86_64, since KASLR/extraction is performed after
+- * dedicated identity page tables are built and we only
+- * need to remove possible protection on relocated image
+- * itself disregarding further relocations.
+- */
+- efi_adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
+- KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
+-#endif
+-}
+-
+ static const efi_char16_t apple[] = L"Apple";
+
+-static void setup_quirks(struct boot_params *boot_params,
+- unsigned long image_base,
+- unsigned long image_size)
++static void setup_quirks(struct boot_params *boot_params)
+ {
+ efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+ efi_table_attr(efi_system_table, fw_vendor);
+@@ -311,9 +284,6 @@ static void setup_quirks(struct boot_par
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+ }
+-
+- if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES))
+- setup_memory_protection(image_base, image_size);
+ }
+
+ /*
+@@ -466,7 +436,6 @@ efi_status_t __efiapi efi_pe_entry(efi_h
+ }
+
+ image_base = efi_table_attr(image, image_base);
+- image_offset = (void *)startup_32 - image_base;
+
+ status = efi_allocate_pages(sizeof(struct boot_params),
+ (unsigned long *)&boot_params, ULONG_MAX);
+@@ -761,6 +730,61 @@ static bool have_unsupported_snp_feature
+ return false;
+ }
+
++static void efi_get_seed(void *seed, int size)
++{
++ efi_get_random_bytes(size, seed);
++
++ /*
++ * This only updates seed[0] when running on 32-bit, but in that case,
++ * seed[1] is not used anyway, as there is no virtual KASLR on 32-bit.
++ */
++ *(unsigned long *)seed ^= kaslr_get_random_long("EFI");
++}
++
++static void error(char *str)
++{
++ efi_warn("Decompression failed: %s\n", str);
++}
++
++static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
++{
++ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
++ unsigned long addr, alloc_size, entry;
++ efi_status_t status;
++ u32 seed[2] = {};
++
++ /* determine the required size of the allocation */
++ alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
++ MIN_KERNEL_ALIGN);
++
++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
++ u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
++
++ efi_get_seed(seed, sizeof(seed));
++
++ virt_addr += (range * seed[1]) >> 32;
++ virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
++ }
++
++ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
++ seed[0], EFI_LOADER_CODE,
++ EFI_X86_KERNEL_ALLOC_LIMIT);
++ if (status != EFI_SUCCESS)
++ return status;
++
++ entry = decompress_kernel((void *)addr, virt_addr, error);
++ if (entry == ULONG_MAX) {
++ efi_free(alloc_size, addr);
++ return EFI_LOAD_ERROR;
++ }
++
++ *kernel_entry = addr + entry;
++
++ efi_adjust_memory_range_protection(addr, kernel_total_size);
++
++ return EFI_SUCCESS;
++}
++
+ static void __noreturn enter_kernel(unsigned long kernel_addr,
+ struct boot_params *boot_params)
+ {
+@@ -780,10 +804,9 @@ void __noreturn efi_stub_entry(efi_handl
+ struct boot_params *boot_params)
+ {
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+- unsigned long bzimage_addr = (unsigned long)startup_32;
+- unsigned long buffer_start, buffer_end;
+ struct setup_header *hdr = &boot_params->hdr;
+ const struct linux_efi_initrd *initrd = NULL;
++ unsigned long kernel_entry;
+ efi_status_t status;
+
+ efi_system_table = sys_table_arg;
+@@ -812,60 +835,6 @@ void __noreturn efi_stub_entry(efi_handl
+ goto fail;
+ }
+
+- /*
+- * If the kernel isn't already loaded at a suitable address,
+- * relocate it.
+- *
+- * It must be loaded above LOAD_PHYSICAL_ADDR.
+- *
+- * The maximum address for 64-bit is 1 << 46 for 4-level paging. This
+- * is defined as the macro MAXMEM, but unfortunately that is not a
+- * compile-time constant if 5-level paging is configured, so we instead
+- * define our own macro for use here.
+- *
+- * For 32-bit, the maximum address is complicated to figure out, for
+- * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
+- * KASLR uses.
+- *
+- * Also relocate it if image_offset is zero, i.e. the kernel wasn't
+- * loaded by LoadImage, but rather by a bootloader that called the
+- * handover entry. The reason we must always relocate in this case is
+- * to handle the case of systemd-boot booting a unified kernel image,
+- * which is a PE executable that contains the bzImage and an initrd as
+- * COFF sections. The initrd section is placed after the bzImage
+- * without ensuring that there are at least init_size bytes available
+- * for the bzImage, and thus the compressed kernel's startup code may
+- * overwrite the initrd unless it is moved out of the way.
+- */
+-
+- buffer_start = ALIGN(bzimage_addr - image_offset,
+- hdr->kernel_alignment);
+- buffer_end = buffer_start + hdr->init_size;
+-
+- if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
+- (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
+- (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
+- (image_offset == 0)) {
+- extern char _bss[];
+-
+- status = efi_relocate_kernel(&bzimage_addr,
+- (unsigned long)_bss - bzimage_addr,
+- hdr->init_size,
+- hdr->pref_address,
+- hdr->kernel_alignment,
+- LOAD_PHYSICAL_ADDR);
+- if (status != EFI_SUCCESS) {
+- efi_err("efi_relocate_kernel() failed!\n");
+- goto fail;
+- }
+- /*
+- * Now that we've copied the kernel elsewhere, we no longer
+- * have a set up block before startup_32(), so reset image_offset
+- * to zero in case it was set earlier.
+- */
+- image_offset = 0;
+- }
+-
+ #ifdef CONFIG_CMDLINE_BOOL
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+@@ -883,6 +852,12 @@ void __noreturn efi_stub_entry(efi_handl
+ }
+ }
+
++ status = efi_decompress_kernel(&kernel_entry);
++ if (status != EFI_SUCCESS) {
++ efi_err("Failed to decompress kernel\n");
++ goto fail;
++ }
++
+ /*
+ * At this point, an initrd may already have been loaded by the
+ * bootloader and passed via bootparams. We permit an initrd loaded
+@@ -922,7 +897,7 @@ void __noreturn efi_stub_entry(efi_handl
+
+ setup_efi_pci(boot_params);
+
+- setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
++ setup_quirks(boot_params);
+
+ status = exit_boot(boot_params, handle);
+ if (status != EFI_SUCCESS) {
+@@ -930,12 +905,15 @@ void __noreturn efi_stub_entry(efi_handl
+ goto fail;
+ }
+
+- efi_5level_switch();
++ /*
++ * Call the SEV init code while still running with the firmware's
++ * GDT/IDT, so #VC exceptions will be handled by EFI.
++ */
++ sev_enable(boot_params);
+
+- if (IS_ENABLED(CONFIG_X86_64))
+- bzimage_addr += startup_64 - startup_32;
++ efi_5level_switch();
+
+- enter_kernel(bzimage_addr, boot_params);
++ enter_kernel(kernel_entry, boot_params);
+ fail:
+ efi_err("efi_stub_entry() failed!\n");
+
--- /dev/null
+From stable+bounces-25908-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:12 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:53 +0100
+Subject: x86/efistub: Avoid placing the kernel below LOAD_PHYSICAL_ADDR
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Tom Englund <tomenglund26@gmail.com>
+Message-ID: <20240304111937.2556102-35-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 2f77465b05b1270c832b5e2ee27037672ad2a10a upstream ]
+
+The EFI stub's kernel placement logic randomizes the physical placement
+of the kernel by taking all available memory into account, and picking a
+region at random, based on a random seed.
+
+When KASLR is disabled, this seed is set to 0x0, and this results in the
+lowest available region of memory to be selected for loading the kernel,
+even if this is below LOAD_PHYSICAL_ADDR. Some of this memory is
+typically reserved for the GFP_DMA region, to accommodate masters that
+can only access the first 16 MiB of system memory.
+
+Even if such devices are rare these days, we may still end up with a
+warning in the kernel log, as reported by Tom:
+
+ swapper/0: page allocation failure: order:10, mode:0xcc1(GFP_KERNEL|GFP_DMA), nodemask=(null),cpuset=/,mems_allowed=0
+
+Fix this by tweaking the random allocation logic to accept a low bound
+on the placement, and set it to LOAD_PHYSICAL_ADDR.
+
+Fixes: a1b87d54f4e4 ("x86/efistub: Avoid legacy decompressor when doing EFI boot")
+Reported-by: Tom Englund <tomenglund26@gmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218404
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/arm64-stub.c | 2 +-
+ drivers/firmware/efi/libstub/efistub.h | 3 ++-
+ drivers/firmware/efi/libstub/randomalloc.c | 12 +++++++-----
+ drivers/firmware/efi/libstub/x86-stub.c | 1 +
+ 4 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -181,7 +181,7 @@ efi_status_t handle_kernel_image(unsigne
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
++ EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -906,7 +906,8 @@ efi_status_t efi_get_random_bytes(unsign
+
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed,
+- int memory_type, unsigned long alloc_limit);
++ int memory_type, unsigned long alloc_min,
++ unsigned long alloc_max);
+
+ efi_status_t efi_random_get_seed(void);
+
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -17,7 +17,7 @@
+ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align_shift,
+- u64 alloc_limit)
++ u64 alloc_min, u64 alloc_max)
+ {
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+- alloc_limit);
++ alloc_max);
+ if (region_end < size)
+ return 0;
+
+- first_slot = round_up(md->phys_addr, align);
++ first_slot = round_up(max(md->phys_addr, alloc_min), align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
+@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned l
+ unsigned long *addr,
+ unsigned long random_seed,
+ int memory_type,
+- unsigned long alloc_limit)
++ unsigned long alloc_min,
++ unsigned long alloc_max)
+ {
+ unsigned long total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
+@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned l
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ unsigned long slots;
+
+- slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
++ slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
++ alloc_max);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -785,6 +785,7 @@ static efi_status_t efi_decompress_kerne
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
+ seed[0], EFI_LOADER_CODE,
++ LOAD_PHYSICAL_ADDR,
+ EFI_X86_KERNEL_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ return status;
--- /dev/null
+From stable+bounces-25898-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:52 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:43 +0100
+Subject: x86/efistub: Clear BSS in EFI handover protocol entrypoint
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-25-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit d7156b986d4cc0657fa6dc05c9fcf51c3d55a0fe upstream ]
+
+The so-called EFI handover protocol is value-add from the distros that
+permits a loader to simply copy a PE kernel image into memory and call
+an alternative entrypoint that is described by an embedded boot_params
+structure.
+
+Most implementations of this protocol do not bother to check the PE
+header for minimum alignment, section placement, etc, and therefore also
+don't clear the image's BSS, or even allocate enough memory for it.
+
+Allocating more memory on the fly is rather difficult, but at least
+clear the BSS region explicitly when entering in this manner, so that
+the EFI stub code does not get confused by global variables that were
+not zero-initialized correctly.
+
+When booting in mixed mode, this BSS clearing must occur before any
+global state is created, so clear it in the 32-bit asm entry point.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-7-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/efi_mixed.S | 14 +++++++++++++-
+ drivers/firmware/efi/libstub/x86-stub.c | 13 +++++++++++--
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -148,6 +148,18 @@ SYM_FUNC_END(__efi64_thunk)
+ .code32
+ #ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ SYM_FUNC_START(efi32_stub_entry)
++ call 1f
++1: popl %ecx
++
++ /* Clear BSS */
++ xorl %eax, %eax
++ leal (_bss - 1b)(%ecx), %edi
++ leal (_ebss - 1b)(%ecx), %ecx
++ subl %edi, %ecx
++ shrl $2, %ecx
++ cld
++ rep stosl
++
+ add $0x4, %esp /* Discard return address */
+ popl %ecx
+ popl %edx
+@@ -340,7 +352,7 @@ SYM_FUNC_END(efi32_pe_entry)
+ .org efi32_stub_entry + 0x200
+ .code64
+ SYM_FUNC_START_NOALIGN(efi64_stub_entry)
+- jmp efi_stub_entry
++ jmp efi_handover_entry
+ SYM_FUNC_END(efi64_stub_entry)
+ #endif
+
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -925,12 +925,21 @@ fail:
+ }
+
+ #ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++ struct boot_params *boot_params)
++{
++ extern char _bss[], _ebss[];
++
++ memset(_bss, 0, _ebss - _bss);
++ efi_stub_entry(handle, sys_table_arg, boot_params);
++}
++
+ #ifndef CONFIG_EFI_MIXED
+-extern __alias(efi_stub_entry)
++extern __alias(efi_handover_entry)
+ void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+-extern __alias(efi_stub_entry)
++extern __alias(efi_handover_entry)
+ void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+ #endif
--- /dev/null
+From stable+bounces-25902-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:00 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:47 +0100
+Subject: x86/efistub: Perform 4/5 level paging switch from the stub
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Message-ID: <20240304111937.2556102-29-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit cb380000dd23cbbf8bd7d023b51896804c1f7e68 upstream ]
+
+In preparation for updating the EFI stub boot flow to avoid the bare
+metal decompressor code altogether, implement the support code for
+switching between 4 and 5 levels of paging before jumping to the kernel
+proper.
+
+This reuses the newly refactored trampoline that the bare metal
+decompressor uses, but relies on EFI APIs to allocate 32-bit addressable
+memory and remap it with the appropriate permissions. Given that the
+bare metal decompressor will no longer call into the trampoline if the
+number of paging levels is already set correctly, it is no longer needed
+to remove NX restrictions from the memory range where this trampoline
+may end up.
+
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/Makefile | 1
+ drivers/firmware/efi/libstub/efi-stub-helper.c | 2
+ drivers/firmware/efi/libstub/efistub.h | 1
+ drivers/firmware/efi/libstub/x86-5lvl.c | 95 +++++++++++++++++++++++++
+ drivers/firmware/efi/libstub/x86-stub.c | 40 +++-------
+ drivers/firmware/efi/libstub/x86-stub.h | 17 ++++
+ 6 files changed, 130 insertions(+), 26 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -84,6 +84,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-st
+ lib-$(CONFIG_ARM) += arm32-stub.o
+ lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o
+ lib-$(CONFIG_X86) += x86-stub.o
++lib-$(CONFIG_X86_64) += x86-5lvl.o
+ lib-$(CONFIG_RISCV) += riscv-stub.o
+ lib-$(CONFIG_LOONGARCH) += loongarch-stub.o
+
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -216,6 +216,8 @@ efi_status_t efi_parse_options(char cons
+ efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ } else if (!strcmp(param, "noinitrd")) {
+ efi_noinitrd = true;
++ } else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
++ efi_no5lvl = true;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap |= parse_option_str(val, "novamap");
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -33,6 +33,7 @@
+ #define EFI_ALLOC_LIMIT ULONG_MAX
+ #endif
+
++extern bool efi_no5lvl;
+ extern bool efi_nochunk;
+ extern bool efi_nokaslr;
+ extern int efi_loglevel;
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/x86-5lvl.c
+@@ -0,0 +1,95 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/efi.h>
++
++#include <asm/boot.h>
++#include <asm/desc.h>
++#include <asm/efi.h>
++
++#include "efistub.h"
++#include "x86-stub.h"
++
++bool efi_no5lvl;
++
++static void (*la57_toggle)(void *cr3);
++
++static const struct desc_struct gdt[] = {
++ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
++ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
++};
++
++/*
++ * Enabling (or disabling) 5 level paging is tricky, because it can only be
++ * done from 32-bit mode with paging disabled. This means not only that the
++ * code itself must be running from 32-bit addressable physical memory, but
++ * also that the root page table must be 32-bit addressable, as programming
++ * a 64-bit value into CR3 when running in 32-bit mode is not supported.
++ */
++efi_status_t efi_setup_5level_paging(void)
++{
++ u8 tmpl_size = (u8 *)&trampoline_ljmp_imm_offset - (u8 *)&trampoline_32bit_src;
++ efi_status_t status;
++ u8 *la57_code;
++
++ if (!efi_is_64bit())
++ return EFI_SUCCESS;
++
++ /* check for 5 level paging support */
++ if (native_cpuid_eax(0) < 7 ||
++ !(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
++ return EFI_SUCCESS;
++
++ /* allocate some 32-bit addressable memory for code and a page table */
++ status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
++ U32_MAX);
++ if (status != EFI_SUCCESS)
++ return status;
++
++ la57_toggle = memcpy(la57_code, trampoline_32bit_src, tmpl_size);
++ memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
++
++ /*
++ * To avoid the need to allocate a 32-bit addressable stack, the
++ * trampoline uses a LJMP instruction to switch back to long mode.
++ * LJMP takes an absolute destination address, which needs to be
++ * fixed up at runtime.
++ */
++ *(u32 *)&la57_code[trampoline_ljmp_imm_offset] += (unsigned long)la57_code;
++
++ efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
++
++ return EFI_SUCCESS;
++}
++
++void efi_5level_switch(void)
++{
++ bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
++ bool have_la57 = native_read_cr4() & X86_CR4_LA57;
++ bool need_toggle = want_la57 ^ have_la57;
++ u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
++ u64 *cr3 = (u64 *)__native_read_cr3();
++ u64 *new_cr3;
++
++ if (!la57_toggle || !need_toggle)
++ return;
++
++ if (!have_la57) {
++ /*
++ * 5 level paging will be enabled, so a root level page needs
++ * to be allocated from the 32-bit addressable physical region,
++ * with its first entry referring to the existing hierarchy.
++ */
++ new_cr3 = memset(pgt, 0, PAGE_SIZE);
++ new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
++ } else {
++ /* take the new root table pointer from the current entry #0 */
++ new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
++
++ /* copy the new root table if it is not 32-bit addressable */
++ if ((u64)new_cr3 > U32_MAX)
++ new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
++ }
++
++ native_load_gdt(&(struct desc_ptr){ sizeof(gdt) - 1, (u64)gdt });
++
++ la57_toggle(new_cr3);
++}
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -17,6 +17,7 @@
+ #include <asm/boot.h>
+
+ #include "efistub.h"
++#include "x86-stub.h"
+
+ /* Maximum physical address for 64-bit kernel with 4-level paging */
+ #define MAXMEM_X86_64_4LEVEL (1ull << 46)
+@@ -212,8 +213,8 @@ static void retrieve_apple_device_proper
+ }
+ }
+
+-static void
+-adjust_memory_range_protection(unsigned long start, unsigned long size)
++void efi_adjust_memory_range_protection(unsigned long start,
++ unsigned long size)
+ {
+ efi_status_t status;
+ efi_gcd_memory_space_desc_t desc;
+@@ -267,35 +268,14 @@ adjust_memory_range_protection(unsigned
+ }
+ }
+
+-/*
+- * Trampoline takes 2 pages and can be loaded in first megabyte of memory
+- * with its end placed between 128k and 640k where BIOS might start.
+- * (see arch/x86/boot/compressed/pgtable_64.c)
+- *
+- * We cannot find exact trampoline placement since memory map
+- * can be modified by UEFI, and it can alter the computed address.
+- */
+-
+-#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024)
+-#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024)
+-
+ extern const u8 startup_32[], startup_64[];
+
+ static void
+ setup_memory_protection(unsigned long image_base, unsigned long image_size)
+ {
+- /*
+- * Allow execution of possible trampoline used
+- * for switching between 4- and 5-level page tables
+- * and relocated kernel image.
+- */
+-
+- adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE,
+- TRAMPOLINE_PLACEMENT_SIZE);
+-
+ #ifdef CONFIG_64BIT
+ if (image_base != (unsigned long)startup_32)
+- adjust_memory_range_protection(image_base, image_size);
++ efi_adjust_memory_range_protection(image_base, image_size);
+ #else
+ /*
+ * Clear protection flags on a whole range of possible
+@@ -305,8 +285,8 @@ setup_memory_protection(unsigned long im
+ * need to remove possible protection on relocated image
+ * itself disregarding further relocations.
+ */
+- adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
+- KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
++ efi_adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
++ KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
+ #endif
+ }
+
+@@ -796,6 +776,12 @@ void __noreturn efi_stub_entry(efi_handl
+ efi_dxe_table = NULL;
+ }
+
++ status = efi_setup_5level_paging();
++ if (status != EFI_SUCCESS) {
++ efi_err("efi_setup_5level_paging() failed!\n");
++ goto fail;
++ }
++
+ /*
+ * If the kernel isn't already loaded at a suitable address,
+ * relocate it.
+@@ -914,6 +900,8 @@ void __noreturn efi_stub_entry(efi_handl
+ goto fail;
+ }
+
++ efi_5level_switch();
++
+ if (IS_ENABLED(CONFIG_X86_64))
+ bzimage_addr += startup_64 - startup_32;
+
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/x86-stub.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#include <linux/efi.h>
++
++extern void trampoline_32bit_src(void *, bool);
++extern const u16 trampoline_ljmp_imm_offset;
++
++void efi_adjust_memory_range_protection(unsigned long start,
++ unsigned long size);
++
++#ifdef CONFIG_X86_64
++efi_status_t efi_setup_5level_paging(void);
++void efi_5level_switch(void);
++#else
++static inline efi_status_t efi_setup_5level_paging(void) { return EFI_SUCCESS; }
++static inline void efi_5level_switch(void) {}
++#endif
--- /dev/null
+From stable+bounces-25905-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:08 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:50 +0100
+Subject: x86/efistub: Perform SNP feature test while running in the firmware
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-32-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 31c77a50992e8dd136feed7b67073bb5f1f978cc upstream ]
+
+Before refactoring the EFI stub boot flow to avoid the legacy bare metal
+decompressor, duplicate the SNP feature check in the EFI stub before
+handing over to the kernel proper.
+
+The SNP feature check can be performed while running under the EFI boot
+services, which means it can force the boot to fail gracefully and
+return an error to the bootloader if the loaded kernel does not
+implement support for all the features that the hypervisor enabled.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-23-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/sev.c | 112 ++++++++++++++++++--------------
+ arch/x86/include/asm/sev.h | 5 +
+ drivers/firmware/efi/libstub/x86-stub.c | 17 ++++
+ 3 files changed, 88 insertions(+), 46 deletions(-)
+
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -327,20 +327,25 @@ static void enforce_vmpl0(void)
+ */
+ #define SNP_FEATURES_PRESENT (0)
+
++u64 snp_get_unsupported_features(u64 status)
++{
++ if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
++ return 0;
++
++ return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++}
++
+ void snp_check_features(void)
+ {
+ u64 unsupported;
+
+- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+- return;
+-
+ /*
+ * Terminate the boot if hypervisor has enabled any feature lacking
+ * guest side implementation. Pass on the unsupported features mask through
+ * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
+ * as part of the guest boot failure.
+ */
+- unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++ unsupported = snp_get_unsupported_features(sev_status);
+ if (unsupported) {
+ if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+@@ -350,35 +355,22 @@ void snp_check_features(void)
+ }
+ }
+
+-void sev_enable(struct boot_params *bp)
++/*
++ * sev_check_cpu_support - Check for SEV support in the CPU capabilities
++ *
++ * Returns < 0 if SEV is not supported, otherwise the position of the
++ * encryption bit in the page table descriptors.
++ */
++static int sev_check_cpu_support(void)
+ {
+ unsigned int eax, ebx, ecx, edx;
+- struct msr m;
+- bool snp;
+-
+- /*
+- * bp->cc_blob_address should only be set by boot/compressed kernel.
+- * Initialize it to 0 to ensure that uninitialized values from
+- * buggy bootloaders aren't propagated.
+- */
+- if (bp)
+- bp->cc_blob_address = 0;
+-
+- /*
+- * Do an initial SEV capability check before snp_init() which
+- * loads the CPUID page and the same checks afterwards are done
+- * without the hypervisor and are trustworthy.
+- *
+- * If the HV fakes SEV support, the guest will crash'n'burn
+- * which is good enough.
+- */
+
+ /* Check for the SME/SEV support leaf */
+ eax = 0x80000000;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax < 0x8000001f)
+- return;
++ return -ENODEV;
+
+ /*
+ * Check for the SME/SEV feature:
+@@ -393,6 +385,35 @@ void sev_enable(struct boot_params *bp)
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ /* Check whether SEV is supported */
+ if (!(eax & BIT(1)))
++ return -ENODEV;
++
++ return ebx & 0x3f;
++}
++
++void sev_enable(struct boot_params *bp)
++{
++ struct msr m;
++ int bitpos;
++ bool snp;
++
++ /*
++ * bp->cc_blob_address should only be set by boot/compressed kernel.
++ * Initialize it to 0 to ensure that uninitialized values from
++ * buggy bootloaders aren't propagated.
++ */
++ if (bp)
++ bp->cc_blob_address = 0;
++
++ /*
++ * Do an initial SEV capability check before snp_init() which
++ * loads the CPUID page and the same checks afterwards are done
++ * without the hypervisor and are trustworthy.
++ *
++ * If the HV fakes SEV support, the guest will crash'n'burn
++ * which is good enough.
++ */
++
++ if (sev_check_cpu_support() < 0)
+ return;
+
+ /*
+@@ -403,26 +424,8 @@ void sev_enable(struct boot_params *bp)
+
+ /* Now repeat the checks with the SNP CPUID table. */
+
+- /* Recheck the SME/SEV support leaf */
+- eax = 0x80000000;
+- ecx = 0;
+- native_cpuid(&eax, &ebx, &ecx, &edx);
+- if (eax < 0x8000001f)
+- return;
+-
+- /*
+- * Recheck for the SME/SEV feature:
+- * CPUID Fn8000_001F[EAX]
+- * - Bit 0 - Secure Memory Encryption support
+- * - Bit 1 - Secure Encrypted Virtualization support
+- * CPUID Fn8000_001F[EBX]
+- * - Bits 5:0 - Pagetable bit position used to indicate encryption
+- */
+- eax = 0x8000001f;
+- ecx = 0;
+- native_cpuid(&eax, &ebx, &ecx, &edx);
+- /* Check whether SEV is supported */
+- if (!(eax & BIT(1))) {
++ bitpos = sev_check_cpu_support();
++ if (bitpos < 0) {
+ if (snp)
+ error("SEV-SNP support indicated by CC blob, but not CPUID.");
+ return;
+@@ -454,7 +457,24 @@ void sev_enable(struct boot_params *bp)
+ if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
+
+- sme_me_mask = BIT_ULL(ebx & 0x3f);
++ sme_me_mask = BIT_ULL(bitpos);
++}
++
++/*
++ * sev_get_status - Retrieve the SEV status mask
++ *
++ * Returns 0 if the CPU is not SEV capable, otherwise the value of the
++ * AMD64_SEV MSR.
++ */
++u64 sev_get_status(void)
++{
++ struct msr m;
++
++ if (sev_check_cpu_support() < 0)
++ return 0;
++
++ boot_rdmsr(MSR_AMD64_SEV, &m);
++ return m.q;
+ }
+
+ /* Search for Confidential Computing blob in the EFI config table. */
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -202,6 +202,8 @@ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
++u64 snp_get_unsupported_features(u64 status);
++u64 sev_get_status(void);
+ #else
+ static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+ static inline void sev_es_ist_exit(void) { }
+@@ -225,6 +227,9 @@ static inline int snp_issue_guest_reques
+ {
+ return -ENOTTY;
+ }
++
++static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
++static inline u64 sev_get_status(void) { return 0; }
+ #endif
+
+ #endif
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -15,6 +15,7 @@
+ #include <asm/setup.h>
+ #include <asm/desc.h>
+ #include <asm/boot.h>
++#include <asm/sev.h>
+
+ #include "efistub.h"
+ #include "x86-stub.h"
+@@ -747,6 +748,19 @@ static efi_status_t exit_boot(struct boo
+ return EFI_SUCCESS;
+ }
+
++static bool have_unsupported_snp_features(void)
++{
++ u64 unsupported;
++
++ unsupported = snp_get_unsupported_features(sev_get_status());
++ if (unsupported) {
++ efi_err("Unsupported SEV-SNP features detected: 0x%llx\n",
++ unsupported);
++ return true;
++ }
++ return false;
++}
++
+ static void __noreturn enter_kernel(unsigned long kernel_addr,
+ struct boot_params *boot_params)
+ {
+@@ -777,6 +791,9 @@ void __noreturn efi_stub_entry(efi_handl
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
++ if (have_unsupported_snp_features())
++ efi_exit(handle, EFI_UNSUPPORTED);
++
+ if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
+ efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+ if (efi_dxe_table &&
--- /dev/null
+From stable+bounces-25904-greg=kroah.com@vger.kernel.org Mon Mar 4 12:21:04 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:49 +0100
+Subject: x86/efistub: Prefer EFI memory attributes protocol over DXE services
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-31-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit 11078876b7a6a1b7226344fecab968945c806832 upstream ]
+
+Currently, the EFI stub relies on DXE services in some cases to clear
+non-execute restrictions from page allocations that need to be
+executable. This is dodgy, because DXE services are not specified by
+UEFI but by PI, and they are not intended for consumption by OS loaders.
+However, no alternative existed at the time.
+
+Now, there is a new UEFI protocol that should be used instead, so if it
+exists, prefer it over the DXE services calls.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-18-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/x86-stub.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -26,6 +26,7 @@ const efi_system_table_t *efi_system_tab
+ const efi_dxe_services_table_t *efi_dxe_table;
+ u32 image_offset __section(".data");
+ static efi_loaded_image_t *image = NULL;
++static efi_memory_attribute_protocol_t *memattr;
+
+ static efi_status_t
+ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+@@ -222,12 +223,18 @@ void efi_adjust_memory_range_protection(
+ unsigned long rounded_start, rounded_end;
+ unsigned long unprotect_start, unprotect_size;
+
+- if (efi_dxe_table == NULL)
+- return;
+-
+ rounded_start = rounddown(start, EFI_PAGE_SIZE);
+ rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+
++ if (memattr != NULL) {
++ efi_call_proto(memattr, clear_memory_attributes, rounded_start,
++ rounded_end - rounded_start, EFI_MEMORY_XP);
++ return;
++ }
++
++ if (efi_dxe_table == NULL)
++ return;
++
+ /*
+ * Don't modify memory region attributes, they are
+ * already suitable, to lower the possibility to
+@@ -758,6 +765,7 @@ void __noreturn efi_stub_entry(efi_handl
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+ {
++ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ unsigned long bzimage_addr = (unsigned long)startup_32;
+ unsigned long buffer_start, buffer_end;
+ struct setup_header *hdr = &boot_params->hdr;
+@@ -769,13 +777,18 @@ void __noreturn efi_stub_entry(efi_handl
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+- efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+- if (efi_dxe_table &&
+- efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
+- efi_warn("Ignoring DXE services table: invalid signature\n");
+- efi_dxe_table = NULL;
++ if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
++ efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
++ if (efi_dxe_table &&
++ efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
++ efi_warn("Ignoring DXE services table: invalid signature\n");
++ efi_dxe_table = NULL;
++ }
+ }
+
++ /* grab the memory attributes protocol if it exists */
++ efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
++
+ status = efi_setup_5level_paging();
+ if (status != EFI_SUCCESS) {
+ efi_err("efi_setup_5level_paging() failed!\n");
--- /dev/null
+From stable+bounces-25896-greg=kroah.com@vger.kernel.org Mon Mar 4 12:20:48 2024
+From: Ard Biesheuvel <ardb+git@google.com>
+Date: Mon, 4 Mar 2024 12:19:41 +0100
+Subject: x86/efistub: Simplify and clean up handover entry code
+To: stable@vger.kernel.org
+Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>
+Message-ID: <20240304111937.2556102-23-ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb+git@google.com>
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Commit df9215f15206c2a81909ccf60f21d170801dce38 upstream ]
+
+Now that the EFI entry code in assembler is only used by the optional
+and deprecated EFI handover protocol, and given that the EFI stub C code
+no longer returns to it, most of it can simply be dropped.
+
+While at it, clarify the symbol naming, by merging efi_main() and
+efi_stub_entry(), making the latter the shared entry point for all
+different boot modes that enter via the EFI stub.
+
+The efi32_stub_entry() and efi64_stub_entry() names are referenced
+explicitly by the tooling that populates the setup header, so these must
+be retained, but can be emitted as aliases of efi_stub_entry() where
+appropriate.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230807162720.545787-5-ardb@kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/x86/boot.rst | 2 +-
+ arch/x86/boot/compressed/efi_mixed.S | 22 ++++++++++++----------
+ arch/x86/boot/compressed/head_32.S | 11 -----------
+ arch/x86/boot/compressed/head_64.S | 12 ++----------
+ drivers/firmware/efi/libstub/x86-stub.c | 20 ++++++++++++++++----
+ 5 files changed, 31 insertions(+), 36 deletions(-)
+
+--- a/Documentation/x86/boot.rst
++++ b/Documentation/x86/boot.rst
+@@ -1416,7 +1416,7 @@ execution context provided by the EFI fi
+
+ The function prototype for the handover entry point looks like this::
+
+- efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
++ efi_stub_entry(void *handle, efi_system_table_t *table, struct boot_params *bp)
+
+ 'handle' is the EFI image handle passed to the boot loader by the EFI
+ firmware, 'table' is the EFI system table - these are the first two
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -26,8 +26,8 @@
+ * When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
+ * is the first thing that runs after switching to long mode. Depending on
+ * whether the EFI handover protocol or the compat entry point was used to
+- * enter the kernel, it will either branch to the 64-bit EFI handover
+- * entrypoint at offset 0x390 in the image, or to the 64-bit EFI PE/COFF
++ * enter the kernel, it will either branch to the common 64-bit EFI stub
++ * entrypoint efi_stub_entry() directly, or via the 64-bit EFI PE/COFF
+ * entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
+ * struct bootparams pointer as the third argument, so the presence of such a
+ * pointer is used to disambiguate.
+@@ -37,21 +37,23 @@
+ * | efi32_pe_entry |---->| | | +-----------+--+
+ * +------------------+ | | +------+----------------+ |
+ * | startup_32 |---->| startup_64_mixed_mode | |
+- * +------------------+ | | +------+----------------+ V
+- * | efi32_stub_entry |---->| | | +------------------+
+- * +------------------+ +------------+ +---->| efi64_stub_entry |
+- * +-------------+----+
+- * +------------+ +----------+ |
+- * | startup_64 |<----| efi_main |<--------------+
+- * +------------+ +----------+
++ * +------------------+ | | +------+----------------+ |
++ * | efi32_stub_entry |---->| | | |
++ * +------------------+ +------------+ | |
++ * V |
++ * +------------+ +----------------+ |
++ * | startup_64 |<----| efi_stub_entry |<--------+
++ * +------------+ +----------------+
+ */
+ SYM_FUNC_START(startup_64_mixed_mode)
+ lea efi32_boot_args(%rip), %rdx
+ mov 0(%rdx), %edi
+ mov 4(%rdx), %esi
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ mov 8(%rdx), %edx // saved bootparams pointer
+ test %edx, %edx
+- jnz efi64_stub_entry
++ jnz efi_stub_entry
++#endif
+ /*
+ * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+ * shadow space on the stack even if all arguments are passed in
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -150,17 +150,6 @@ SYM_FUNC_START(startup_32)
+ jmp *%eax
+ SYM_FUNC_END(startup_32)
+
+-#ifdef CONFIG_EFI_STUB
+-SYM_FUNC_START(efi32_stub_entry)
+- add $0x4, %esp
+- movl 8(%esp), %esi /* save boot_params pointer */
+- call efi_main
+- /* efi_main returns the possibly relocated address of startup_32 */
+- jmp *%eax
+-SYM_FUNC_END(efi32_stub_entry)
+-SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry)
+-#endif
+-
+ .text
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -474,19 +474,11 @@ SYM_CODE_START(startup_64)
+ jmp *%rax
+ SYM_CODE_END(startup_64)
+
+-#ifdef CONFIG_EFI_STUB
+-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
+ .org 0x390
+-#endif
+ SYM_FUNC_START(efi64_stub_entry)
+- and $~0xf, %rsp /* realign the stack */
+- movq %rdx, %rbx /* save boot_params pointer */
+- call efi_main
+- movq %rbx,%rsi
+- leaq rva(startup_64)(%rax), %rax
+- jmp *%rax
++ jmp efi_stub_entry
+ SYM_FUNC_END(efi64_stub_entry)
+-SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry)
+ #endif
+
+ .text
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -774,9 +774,9 @@ static void __noreturn enter_kernel(unsi
+ * return. On failure, it will exit to the firmware via efi_exit() instead of
+ * returning.
+ */
+-asmlinkage unsigned long efi_main(efi_handle_t handle,
+- efi_system_table_t *sys_table_arg,
+- struct boot_params *boot_params)
++void __noreturn efi_stub_entry(efi_handle_t handle,
++ efi_system_table_t *sys_table_arg,
++ struct boot_params *boot_params)
+ {
+ unsigned long bzimage_addr = (unsigned long)startup_32;
+ unsigned long buffer_start, buffer_end;
+@@ -919,7 +919,19 @@ asmlinkage unsigned long efi_main(efi_ha
+
+ enter_kernel(bzimage_addr, boot_params);
+ fail:
+- efi_err("efi_main() failed!\n");
++ efi_err("efi_stub_entry() failed!\n");
+
+ efi_exit(handle, status);
+ }
++
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++#ifndef CONFIG_EFI_MIXED
++extern __alias(efi_stub_entry)
++void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++ struct boot_params *boot_params);
++
++extern __alias(efi_stub_entry)
++void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++ struct boot_params *boot_params);
++#endif
++#endif