git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
memblock, treewide: make memblock_free() handle late freeing
authorMike Rapoport (Microsoft) <rppt@kernel.org>
Mon, 23 Mar 2026 07:48:35 +0000 (09:48 +0200)
committerMike Rapoport (Microsoft) <rppt@kernel.org>
Wed, 1 Apr 2026 08:20:15 +0000 (11:20 +0300)
It shouldn't be the responsibility of memblock users to detect whether the
memory they free was allocated from memblock late and, in that case, to use
memblock_free_late().

Make memblock_free() and memblock_phys_free() take care of late memory
freeing and drop memblock_free_late().

Link: https://patch.msgid.link/20260323074836.3653702-9-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
arch/sparc/kernel/mdesc.c
arch/x86/kernel/setup.c
arch/x86/platform/efi/memmap.c
arch/x86/platform/efi/quirks.c
drivers/firmware/efi/apple-properties.c
drivers/of/kexec.c
include/linux/memblock.h
kernel/dma/swiotlb.c
lib/bootconfig.c
mm/kfence/core.c
mm/memblock.c

index 30f171b7b00c23aeb887a3c7fa13020e47420f45..ecd6c8ae49c74d6a02c30c314f6453a37d83f71d 100644 (file)
@@ -183,14 +183,12 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
 static void __init mdesc_memblock_free(struct mdesc_handle *hp)
 {
        unsigned int alloc_size;
-       unsigned long start;
 
        BUG_ON(refcount_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));
 
        alloc_size = PAGE_ALIGN(hp->handle_size);
-       start = __pa(hp);
-       memblock_free_late(start, alloc_size);
+       memblock_free(hp, alloc_size);
 }
 
 static struct mdesc_mem_ops memblock_mdesc_ops = {
index eebcc9db1a1bb565afd357eed138fa63e6dfd3ff..46882ce79c3a4d29964ebe646f3f26996314e30c 100644 (file)
@@ -426,7 +426,7 @@ int __init ima_free_kexec_buffer(void)
        if (!ima_kexec_buffer_size)
                return -ENOENT;
 
-       memblock_free_late(ima_kexec_buffer_phys,
+       memblock_phys_free(ima_kexec_buffer_phys,
                           ima_kexec_buffer_size);
 
        ima_kexec_buffer_phys = 0;
index 023697c8891010aecfd4c32bd5dc947c3aad6641..697a9a26a005d7cdea981afc7775d52798ce1c72 100644 (file)
@@ -34,10 +34,7 @@ static
 void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
 {
        if (flags & EFI_MEMMAP_MEMBLOCK) {
-               if (slab_is_available())
-                       memblock_free_late(phys, size);
-               else
-                       memblock_phys_free(phys, size);
+               memblock_phys_free(phys, size);
        } else if (flags & EFI_MEMMAP_SLAB) {
                struct page *p = pfn_to_page(PHYS_PFN(phys));
                unsigned int order = get_order(size);
index 35caa5746115d74c61e8090bf04b5880cf2e5d8f..a560bbcaa006602b2e17e1fe5600f2a32a96b75d 100644 (file)
@@ -372,7 +372,7 @@ void __init efi_reserve_boot_services(void)
                 * doesn't make sense as far as the firmware is
                 * concerned, but it does provide us with a way to tag
                 * those regions that must not be paired with
-                * memblock_free_late().
+                * memblock_phys_free().
                 */
                md->attribute |= EFI_MEMORY_RUNTIME;
        }
index 13ac28754c0368b85af5c900ccd7bf5c89fb3c7e..2e525e17fba7788564b631f9fb1dc645f795b4f1 100644 (file)
@@ -226,7 +226,7 @@ static int __init map_properties(void)
                 */
                data->len = 0;
                memunmap(data);
-               memblock_free_late(pa_data + sizeof(*data), data_len);
+               memblock_phys_free(pa_data + sizeof(*data), data_len);
 
                return ret;
        }
index c4cf3552c0183887583dff6ee04a85f89c424489..512d9be9d513d8e213b7e7b1988b1fabc3459ad5 100644 (file)
@@ -175,7 +175,7 @@ int __init ima_free_kexec_buffer(void)
        if (ret)
                return ret;
 
-       memblock_free_late(addr, size);
+       memblock_phys_free(addr, size);
        return 0;
 }
 #endif
index 6ec5e9ac06992a0dd1412ec2f32c181af36eeabd..6f6c5b5c4a4b39e8fc43ef5f90c1ca4a380f1262 100644 (file)
@@ -172,8 +172,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);
 
-void memblock_free_late(phys_addr_t base, phys_addr_t size);
-
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
index d8e6f1d889d55dcc80bed7a4b5c9d71adc8e94eb..e44e039e00d30e08c85867c184ba217cfec35462 100644 (file)
@@ -546,10 +546,10 @@ void __init swiotlb_exit(void)
                free_pages(tbl_vaddr, get_order(tbl_size));
                free_pages((unsigned long)mem->slots, get_order(slots_size));
        } else {
-               memblock_free_late(__pa(mem->areas),
+               memblock_free(mem->areas,
                        array_size(sizeof(*mem->areas), mem->nareas));
-               memblock_free_late(mem->start, tbl_size);
-               memblock_free_late(__pa(mem->slots), slots_size);
+               memblock_phys_free(mem->start, tbl_size);
+               memblock_free(mem->slots, slots_size);
        }
 
        memset(mem, 0, sizeof(*mem));
index 2da049216fe0e2a211244b9ca7fef8d2e9f8ee70..9225fa057c1e4bc10b94679670b91890c6c0183c 100644 (file)
@@ -64,7 +64,7 @@ static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
        if (early)
                memblock_free(addr, size);
        else if (addr)
-               memblock_free_late(__pa(addr), size);
+               memblock_free(addr, size);
 }
 
 #else /* !__KERNEL__ */
index 7393957f9a202ba1f42f30ea5d3a216a0ae14507..5c8268af533eda7e7db90cb146a44fdd41ff041f 100644 (file)
@@ -731,10 +731,10 @@ static bool __init kfence_init_pool_early(void)
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
-       memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+       memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
 
-       memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
+       memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
        kfence_metadata_init = NULL;
 
        return false;
index dee18c40d9289c5aebce8803e9b3217ad5147d2c..df4e3475fe399d8c3ccef49b713b82d1a1fea545 100644 (file)
@@ -385,26 +385,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
  */
 void __init memblock_discard(void)
 {
-       phys_addr_t addr, size;
+       phys_addr_t size;
+       void *addr;
 
        if (memblock.reserved.regions != memblock_reserved_init_regions) {
-               addr = __pa(memblock.reserved.regions);
+               addr = memblock.reserved.regions;
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                if (memblock_reserved_in_slab)
-                       kfree(memblock.reserved.regions);
+                       kfree(addr);
                else
-                       memblock_free_late(addr, size);
+                       memblock_free(addr, size);
        }
 
        if (memblock.memory.regions != memblock_memory_init_regions) {
-               addr = __pa(memblock.memory.regions);
+               addr = memblock.memory.regions;
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                if (memblock_memory_in_slab)
-                       kfree(memblock.memory.regions);
+                       kfree(addr);
                else
-                       memblock_free_late(addr, size);
+                       memblock_free(addr, size);
        }
 
        memblock_memory = NULL;
@@ -962,7 +963,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 void __init_memblock memblock_free(void *ptr, size_t size)
 {
@@ -976,17 +978,24 @@ void __init_memblock memblock_free(void *ptr, size_t size)
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
 {
        phys_addr_t end = base + size - 1;
+       int ret;
 
        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);
 
        kmemleak_free_part_phys(base, size);
-       return memblock_remove_range(&memblock.reserved, base, size);
+       ret = memblock_remove_range(&memblock.reserved, base, size);
+
+       if (slab_is_available())
+               __free_reserved_area(base, base + size, -1);
+
+       return ret;
 }
 
 int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
@@ -1814,26 +1823,6 @@ void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
        return addr;
 }
 
-/**
- * memblock_free_late - free pages directly to buddy allocator
- * @base: phys starting address of the  boot memory block
- * @size: size of the boot memory block in bytes
- *
- * This is only useful when the memblock allocator has already been torn
- * down, but we are still initializing the system.  Pages are released directly
- * to the buddy allocator.
- */
-void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
-       phys_addr_t end = base + size - 1;
-
-       memblock_dbg("%s: [%pa-%pa] %pS\n",
-                    __func__, &base, &end, (void *)_RET_IP_);
-
-       kmemleak_free_part_phys(base, size);
-       __free_reserved_area(base, base + size, -1);
-}
-
 /*
  * Remaining API functions
  */