memblock: add KHO support for reserve_mem
author     Alexander Graf <graf@amazon.com>
           Fri, 9 May 2025 07:46:33 +0000 (00:46 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 13 May 2025 06:50:42 +0000 (23:50 -0700)
Linux has recently gained support for "reserve_mem": a mechanism to
allocate a region of memory early enough in boot that we can cross our
fingers and hope it stays at the same location during most boots, so we
can, for example, store ftrace buffers in it.
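
For reference, a reservation is requested on the kernel command line as
"reserve_mem=size:align:name", the syntax parsed by reserve_mem() in the
final hunk of this patch.  A hypothetical pairing with ramoops (assuming
its mem_name= option; only the reserve_mem= half is defined by this file)
might look like:

        reserve_mem=12M:4096:oops ramoops.mem_name=oops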

Thanks to KASLR, we can never be really sure that "reserve_mem"
allocations are static across kexec.  Let's teach it KHO awareness so that
it serializes its reservations on kexec exit and deserializes them again
on boot, preserving the exact same mapping across kexec.
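
Concretely, the patch serializes each reservation into a small FDT
subtree registered with KHO under the name "memblock".  Sketched in
DTS-like form for illustration (the actual properties carry raw
phys_addr_t bytes, and the node name "trace" is a placeholder for the
reservation's name), the layout produced by prepare_kho_fdt() below is:

        / {
                compatible = "memblock-v1";

                trace {        /* one child per reservation, named after it */
                        compatible = "reserve-mem-v1";
                        start = /* phys_addr_t */;
                        size = /* phys_addr_t */;
                };
        };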

This serves as an example KHO user within the KHO patch set, ensuring we
have at least one (not very controversial) user in the tree before
extending KHO's use to more subsystems.

Link: https://lkml.kernel.org/r/20250509074635.3187114-16-changyuanl@google.com
Signed-off-by: Alexander Graf <graf@amazon.com>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anthony Yznaga <anthony.yznaga@oracle.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ashish Kalra <ashish.kalra@amd.com>
Cc: Ben Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Gowans <jgowans@amazon.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Krzysztof Kozlowski <krzk@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pratyush Yadav <ptyadav@amazon.de>
Cc: Rob Herring <robh@kernel.org>
Cc: Saravana Kannan <saravanak@google.com>
Cc: Stanislav Kinsburskii <skinsburskii@linux.microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <thomas.lendacky@amd.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memblock.c

index 8895b95ffb5baa834d5e602ee4fa245bd6c3fdec..154f1d73b61f2234efe61c5cce5105be160d0041 100644
 #include <linux/memblock.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_KEXEC_HANDOVER
+#include <linux/libfdt.h>
+#include <linux/kexec_handover.h>
+#endif /* CONFIG_KEXEC_HANDOVER */
+
 #include <asm/sections.h>
 #include <linux/io.h>
 
@@ -2492,6 +2497,189 @@ int reserve_mem_release_by_name(const char *name)
        return 1;
 }
 
+#ifdef CONFIG_KEXEC_HANDOVER
+#define MEMBLOCK_KHO_FDT "memblock"
+#define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
+#define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1"
+static struct page *kho_fdt;
+
+static int reserve_mem_kho_finalize(struct kho_serialization *ser)
+{
+       int err = 0, i;
+
+       for (i = 0; i < reserved_mem_count; i++) {
+               struct reserve_mem_table *map = &reserved_mem_table[i];
+
+               err |= kho_preserve_phys(map->start, map->size);
+       }
+
+       err |= kho_preserve_folio(page_folio(kho_fdt));
+       err |= kho_add_subtree(ser, MEMBLOCK_KHO_FDT, page_to_virt(kho_fdt));
+
+       return notifier_from_errno(err);
+}
+
+static int reserve_mem_kho_notifier(struct notifier_block *self,
+                                   unsigned long cmd, void *v)
+{
+       switch (cmd) {
+       case KEXEC_KHO_FINALIZE:
+               return reserve_mem_kho_finalize((struct kho_serialization *)v);
+       case KEXEC_KHO_ABORT:
+               return NOTIFY_DONE;
+       default:
+               return NOTIFY_BAD;
+       }
+}
+
+static struct notifier_block reserve_mem_kho_nb = {
+       .notifier_call = reserve_mem_kho_notifier,
+};
+
+static int __init prepare_kho_fdt(void)
+{
+       int err = 0, i;
+       void *fdt;
+
+       kho_fdt = alloc_page(GFP_KERNEL);
+       if (!kho_fdt)
+               return -ENOMEM;
+
+       fdt = page_to_virt(kho_fdt);
+
+       err |= fdt_create(fdt, PAGE_SIZE);
+       err |= fdt_finish_reservemap(fdt);
+
+       err |= fdt_begin_node(fdt, "");
+       err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);
+       for (i = 0; i < reserved_mem_count; i++) {
+               struct reserve_mem_table *map = &reserved_mem_table[i];
+
+               err |= fdt_begin_node(fdt, map->name);
+               err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
+               err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
+               err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
+               err |= fdt_end_node(fdt);
+       }
+       err |= fdt_end_node(fdt);
+
+       err |= fdt_finish(fdt);
+
+       if (err) {
+               pr_err("failed to prepare memblock FDT for KHO: %d\n", err);
+               put_page(kho_fdt);
+               kho_fdt = NULL;
+       }
+
+       return err;
+}
+
+static int __init reserve_mem_init(void)
+{
+       int err;
+
+       if (!kho_is_enabled() || !reserved_mem_count)
+               return 0;
+
+       err = prepare_kho_fdt();
+       if (err)
+               return err;
+
+       err = register_kho_notifier(&reserve_mem_kho_nb);
+       if (err) {
+               put_page(kho_fdt);
+               kho_fdt = NULL;
+       }
+
+       return err;
+}
+late_initcall(reserve_mem_init);
+
+static void *__init reserve_mem_kho_retrieve_fdt(void)
+{
+       phys_addr_t fdt_phys;
+       static void *fdt;
+       int err;
+
+       if (fdt)
+               return fdt;
+
+       err = kho_retrieve_subtree(MEMBLOCK_KHO_FDT, &fdt_phys);
+       if (err) {
+               if (err != -ENOENT)
+                       pr_warn("failed to retrieve FDT '%s' from KHO: %d\n",
+                               MEMBLOCK_KHO_FDT, err);
+               return NULL;
+       }
+
+       fdt = phys_to_virt(fdt_phys);
+
+       err = fdt_node_check_compatible(fdt, 0, MEMBLOCK_KHO_NODE_COMPATIBLE);
+       if (err) {
+               pr_warn("FDT '%s' is incompatible with '%s': %d\n",
+                       MEMBLOCK_KHO_FDT, MEMBLOCK_KHO_NODE_COMPATIBLE, err);
+               fdt = NULL;
+       }
+
+       return fdt;
+}
+
+static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
+                                         phys_addr_t align)
+{
+       int err, len_start, len_size, offset;
+       const phys_addr_t *p_start, *p_size;
+       const void *fdt;
+
+       fdt = reserve_mem_kho_retrieve_fdt();
+       if (!fdt)
+               return false;
+
+       offset = fdt_subnode_offset(fdt, 0, name);
+       if (offset < 0) {
+               pr_warn("FDT '%s' has no child '%s': %d\n",
+                       MEMBLOCK_KHO_FDT, name, offset);
+               return false;
+       }
+       err = fdt_node_check_compatible(fdt, offset, RESERVE_MEM_KHO_NODE_COMPATIBLE);
+       if (err) {
+               pr_warn("Node '%s' is incompatible with '%s': %d\n",
+                       name, RESERVE_MEM_KHO_NODE_COMPATIBLE, err);
+               return false;
+       }
+
+       p_start = fdt_getprop(fdt, offset, "start", &len_start);
+       p_size = fdt_getprop(fdt, offset, "size", &len_size);
+       if (!p_start || len_start != sizeof(*p_start) || !p_size ||
+           len_size != sizeof(*p_size)) {
+               return false;
+       }
+
+       if (*p_start & (align - 1)) {
+               pr_warn("KHO reserve-mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
+                       name, (long)align, (long)*p_start);
+               return false;
+       }
+
+       if (*p_size != size) {
+               pr_warn("KHO reserve-mem '%s' has wrong size (0x%lx != 0x%lx)\n",
+                       name, (long)*p_size, (long)size);
+               return false;
+       }
+
+       reserved_mem_add(*p_start, size, name);
+       pr_info("Revived memory reservation '%s' from KHO\n", name);
+
+       return true;
+}
+#else
+static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
+                                         phys_addr_t align)
+{
+       return false;
+}
+#endif /* CONFIG_KEXEC_HANDOVER */
+
 /*
  * Parse reserve_mem=nn:align:name
  */
@@ -2547,6 +2735,11 @@ static int __init reserve_mem(char *p)
        if (reserve_mem_find_by_name(name, &start, &tmp))
                return -EBUSY;
 
+       /* Pick previous allocations up from KHO if available */
+       if (reserve_mem_kho_revive(name, size, align))
+               return 1;
+
+       /* TODO: Allocation must be outside of scratch region */
        start = memblock_phys_alloc(size, align);
        if (!start)
                return -ENOMEM;
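
For context, a consumer finds its revived region the same way it would on
a cold boot, by name.  A minimal sketch against the existing
reserve_mem_find_by_name() helper (already in mm/memblock.c; the consumer
hook is hypothetical):

        phys_addr_t start, size;

        if (reserve_mem_find_by_name("trace", &start, &size)) {
                /* With KHO, start and size match the pre-kexec values. */
                void *buf = memremap(start, size, MEMREMAP_WB);

                if (buf)
                        my_setup_persistent_buffer(buf, size); /* hypothetical */
        }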