#include <linux/libfdt.h>
#include <linux/list.h>
#include <linux/memblock.h>
-#include <linux/notifier.h>
#include <linux/page-isolation.h>
#include <linux/vmalloc.h>
struct khoser_mem_chunk;
-struct kho_serialization {
- struct page *fdt;
- struct kho_mem_track track;
- /* First chunk of serialized preserved memory map */
- struct khoser_mem_chunk *preserved_mem_map;
+struct kho_sub_fdt {
+ struct list_head l;
+ const char *name;
+ void *fdt;
};
struct kho_out {
- struct blocking_notifier_head chain_head;
- struct mutex lock; /* protects KHO FDT finalization */
- struct kho_serialization ser;
+ void *fdt;
bool finalized;
+ struct mutex lock; /* protects KHO FDT finalization */
+
+ struct list_head sub_fdts;
+ struct mutex fdts_lock;
+
+ struct kho_mem_track track;
+ /* First chunk of serialized preserved memory map */
+ struct khoser_mem_chunk *preserved_mem_map;
+
struct kho_debugfs dbg;
};
static struct kho_out kho_out = {
- .chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
.lock = __MUTEX_INITIALIZER(kho_out.lock),
- .ser = {
- .track = {
- .orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
- },
+ .track = {
+ .orders = XARRAY_INIT(kho_out.track.orders, 0),
},
+ .sub_fdts = LIST_HEAD_INIT(kho_out.sub_fdts),
+ .fdts_lock = __MUTEX_INITIALIZER(kho_out.fdts_lock),
.finalized = false,
};
}
}
-static int kho_mem_serialize(struct kho_serialization *ser)
+static int kho_mem_serialize(struct kho_out *kho_out)
{
struct khoser_mem_chunk *first_chunk = NULL;
struct khoser_mem_chunk *chunk = NULL;
unsigned long order;
int err = -ENOMEM;
- xa_for_each(&ser->track.orders, order, physxa) {
+ xa_for_each(&kho_out->track.orders, order, physxa) {
struct kho_mem_phys_bits *bits;
unsigned long phys;
}
}
- ser->preserved_mem_map = first_chunk;
+ kho_out->preserved_mem_map = first_chunk;
return 0;
/**
 * kho_add_subtree - record the physical address of a sub FDT in the KHO root tree.
- * @ser: serialization control object passed by KHO notifiers.
* @name: name of the sub tree.
* @fdt: the sub tree blob.
 *
* Return: 0 on success, error code on failure
*/
-int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt)
+int kho_add_subtree(const char *name, void *fdt)
{
- int err = 0;
- u64 phys = (u64)virt_to_phys(fdt);
- void *root = page_to_virt(ser->fdt);
+ struct kho_sub_fdt *sub_fdt;
- err |= fdt_begin_node(root, name);
- err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys));
- err |= fdt_end_node(root);
+ sub_fdt = kmalloc(sizeof(*sub_fdt), GFP_KERNEL);
+ if (!sub_fdt)
+ return -ENOMEM;
- if (err)
- return err;
+ INIT_LIST_HEAD(&sub_fdt->l);
+ sub_fdt->name = name;
+ sub_fdt->fdt = fdt;
- return kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false);
-}
-EXPORT_SYMBOL_GPL(kho_add_subtree);
+ guard(mutex)(&kho_out.fdts_lock);
+ list_add_tail(&sub_fdt->l, &kho_out.sub_fdts);
+ WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));
-int register_kho_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&kho_out.chain_head, nb);
+ return 0;
}
-EXPORT_SYMBOL_GPL(register_kho_notifier);
+EXPORT_SYMBOL_GPL(kho_add_subtree);
-int unregister_kho_notifier(struct notifier_block *nb)
+void kho_remove_subtree(void *fdt)
{
- return blocking_notifier_chain_unregister(&kho_out.chain_head, nb);
+ struct kho_sub_fdt *sub_fdt;
+
+ guard(mutex)(&kho_out.fdts_lock);
+ list_for_each_entry(sub_fdt, &kho_out.sub_fdts, l) {
+ if (sub_fdt->fdt == fdt) {
+ list_del(&sub_fdt->l);
+ kfree(sub_fdt);
+ kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
+ break;
+ }
+ }
}
-EXPORT_SYMBOL_GPL(unregister_kho_notifier);
+EXPORT_SYMBOL_GPL(kho_remove_subtree);
/**
* kho_preserve_folio - preserve a folio across kexec.
{
const unsigned long pfn = folio_pfn(folio);
const unsigned int order = folio_order(folio);
- struct kho_mem_track *track = &kho_out.ser.track;
+ struct kho_mem_track *track = &kho_out.track;
if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
return -EINVAL;
*/
int kho_preserve_pages(struct page *page, unsigned int nr_pages)
{
- struct kho_mem_track *track = &kho_out.ser.track;
+ struct kho_mem_track *track = &kho_out.track;
const unsigned long start_pfn = page_to_pfn(page);
const unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn = start_pfn;
static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
unsigned short order)
{
- struct kho_mem_track *track = &kho_out.ser.track;
+ struct kho_mem_track *track = &kho_out.track;
unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
__kho_unpreserve(track, pfn, pfn + 1);
static int __kho_abort(void)
{
- int err;
+ int err = 0;
unsigned long order;
struct kho_mem_phys *physxa;
- xa_for_each(&kho_out.ser.track.orders, order, physxa) {
+ xa_for_each(&kho_out.track.orders, order, physxa) {
struct kho_mem_phys_bits *bits;
unsigned long phys;
xa_destroy(&physxa->phys_bits);
kfree(physxa);
}
- xa_destroy(&kho_out.ser.track.orders);
+ xa_destroy(&kho_out.track.orders);
- if (kho_out.ser.preserved_mem_map) {
- kho_mem_ser_free(kho_out.ser.preserved_mem_map);
- kho_out.ser.preserved_mem_map = NULL;
+ if (kho_out.preserved_mem_map) {
+ kho_mem_ser_free(kho_out.preserved_mem_map);
+ kho_out.preserved_mem_map = NULL;
}
- err = blocking_notifier_call_chain(&kho_out.chain_head, KEXEC_KHO_ABORT,
- NULL);
- err = notifier_to_errno(err);
-
if (err)
pr_err("Failed to abort KHO finalization: %d\n", err);
return ret;
kho_out.finalized = false;
- kho_debugfs_cleanup(&kho_out.dbg);
+
+ kho_debugfs_fdt_remove(&kho_out.dbg, kho_out.fdt);
return 0;
}
{
int err = 0;
u64 *preserved_mem_map;
- void *fdt = page_to_virt(kho_out.ser.fdt);
+ void *root = kho_out.fdt;
+ struct kho_sub_fdt *fdt;
- err |= fdt_create(fdt, PAGE_SIZE);
- err |= fdt_finish_reservemap(fdt);
- err |= fdt_begin_node(fdt, "");
- err |= fdt_property_string(fdt, "compatible", KHO_FDT_COMPATIBLE);
+ err |= fdt_create(root, PAGE_SIZE);
+ err |= fdt_finish_reservemap(root);
+ err |= fdt_begin_node(root, "");
+ err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
/**
* Reserve the preserved-memory-map property in the root FDT, so
* that all property definitions will precede subnodes created by
* KHO callers.
*/
- err |= fdt_property_placeholder(fdt, PROP_PRESERVED_MEMORY_MAP,
+ err |= fdt_property_placeholder(root, PROP_PRESERVED_MEMORY_MAP,
sizeof(*preserved_mem_map),
(void **)&preserved_mem_map);
if (err)
goto abort;
- err = kho_preserve_folio(page_folio(kho_out.ser.fdt));
+ err = kho_preserve_folio(virt_to_folio(kho_out.fdt));
if (err)
goto abort;
- err = blocking_notifier_call_chain(&kho_out.chain_head,
- KEXEC_KHO_FINALIZE, &kho_out.ser);
- err = notifier_to_errno(err);
+ err = kho_mem_serialize(&kho_out);
if (err)
goto abort;
- err = kho_mem_serialize(&kho_out.ser);
- if (err)
- goto abort;
+ *preserved_mem_map = (u64)virt_to_phys(kho_out.preserved_mem_map);
+
+ mutex_lock(&kho_out.fdts_lock);
+ list_for_each_entry(fdt, &kho_out.sub_fdts, l) {
+ phys_addr_t phys = virt_to_phys(fdt->fdt);
- *preserved_mem_map = (u64)virt_to_phys(kho_out.ser.preserved_mem_map);
+ err |= fdt_begin_node(root, fdt->name);
+ err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys));
+ err |= fdt_end_node(root);
+ }
+ mutex_unlock(&kho_out.fdts_lock);
- err |= fdt_end_node(fdt);
- err |= fdt_finish(fdt);
+ err |= fdt_end_node(root);
+ err |= fdt_finish(root);
abort:
if (err) {
kho_out.finalized = true;
- return kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
- page_to_virt(kho_out.ser.fdt), true);
+ WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
+ kho_out.fdt, true));
+
+ return 0;
}
bool kho_finalized(void)
{
int err = 0;
const void *fdt = kho_get_fdt();
+ struct page *fdt_page;
if (!kho_enable)
return 0;
- kho_out.ser.fdt = alloc_page(GFP_KERNEL);
- if (!kho_out.ser.fdt) {
+ fdt_page = alloc_page(GFP_KERNEL);
+ if (!fdt_page) {
err = -ENOMEM;
goto err_free_scratch;
}
+ kho_out.fdt = page_to_virt(fdt_page);
err = kho_debugfs_init();
if (err)
return 0;
err_free_fdt:
- put_page(kho_out.ser.fdt);
- kho_out.ser.fdt = NULL;
+ put_page(fdt_page);
+ kho_out.fdt = NULL;
err_free_scratch:
for (int i = 0; i < kho_scratch_cnt; i++) {
void *start = __va(kho_scratch[i].addr);
kho_enable = false;
return err;
}
-late_initcall(kho_init);
+fs_initcall(kho_init);
static void __init kho_release_scratch(void)
{
if (!kho_out.finalized)
return 0;
- image->kho.fdt = page_to_phys(kho_out.ser.fdt);
+ image->kho.fdt = virt_to_phys(kho_out.fdt);
scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
scratch = (struct kexec_buf){
#define MEMBLOCK_KHO_FDT "memblock"
#define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
#define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1"
-static struct page *kho_fdt;
-
-static int reserve_mem_kho_finalize(struct kho_serialization *ser)
-{
- int err = 0, i;
-
- for (i = 0; i < reserved_mem_count; i++) {
- struct reserve_mem_table *map = &reserved_mem_table[i];
- struct page *page = phys_to_page(map->start);
- unsigned int nr_pages = map->size >> PAGE_SHIFT;
-
- err |= kho_preserve_pages(page, nr_pages);
- }
-
- err |= kho_preserve_folio(page_folio(kho_fdt));
- err |= kho_add_subtree(ser, MEMBLOCK_KHO_FDT, page_to_virt(kho_fdt));
-
- return notifier_from_errno(err);
-}
-
-static int reserve_mem_kho_notifier(struct notifier_block *self,
- unsigned long cmd, void *v)
-{
- switch (cmd) {
- case KEXEC_KHO_FINALIZE:
- return reserve_mem_kho_finalize((struct kho_serialization *)v);
- case KEXEC_KHO_ABORT:
- return NOTIFY_DONE;
- default:
- return NOTIFY_BAD;
- }
-}
-
-static struct notifier_block reserve_mem_kho_nb = {
- .notifier_call = reserve_mem_kho_notifier,
-};
static int __init prepare_kho_fdt(void)
{
int err = 0, i;
+ struct page *fdt_page;
void *fdt;
- kho_fdt = alloc_page(GFP_KERNEL);
- if (!kho_fdt)
+ fdt_page = alloc_page(GFP_KERNEL);
+ if (!fdt_page)
return -ENOMEM;
- fdt = page_to_virt(kho_fdt);
+ fdt = page_to_virt(fdt_page);
err |= fdt_create(fdt, PAGE_SIZE);
err |= fdt_finish_reservemap(fdt);
err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);
for (i = 0; i < reserved_mem_count; i++) {
struct reserve_mem_table *map = &reserved_mem_table[i];
+ struct page *page = phys_to_page(map->start);
+ unsigned int nr_pages = map->size >> PAGE_SHIFT;
+ err |= kho_preserve_pages(page, nr_pages);
err |= fdt_begin_node(fdt, map->name);
err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
err |= fdt_end_node(fdt);
}
err |= fdt_end_node(fdt);
-
err |= fdt_finish(fdt);
+ err |= kho_preserve_folio(page_folio(fdt_page));
+
+ if (!err)
+ err = kho_add_subtree(MEMBLOCK_KHO_FDT, fdt);
+
if (err) {
pr_err("failed to prepare memblock FDT for KHO: %d\n", err);
- put_page(kho_fdt);
- kho_fdt = NULL;
+ put_page(fdt_page);
}
return err;
err = prepare_kho_fdt();
if (err)
return err;
-
- err = register_kho_notifier(&reserve_mem_kho_nb);
- if (err) {
- put_page(kho_fdt);
- kho_fdt = NULL;
- }
-
return err;
}
late_initcall(reserve_mem_init);