From: Mike Rapoport (Microsoft)
Date: Mon, 23 Mar 2026 07:48:31 +0000 (+0200)
Subject: mm: move free_reserved_area() to mm/memblock.c
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0510bdab538e2af07a67bc58a0c6c4547b83f8d5;p=thirdparty%2Fkernel%2Flinux.git

mm: move free_reserved_area() to mm/memblock.c

free_reserved_area() is related to memblock as it frees reserved memory
back to the buddy allocator, similar to what memblock_free_late() does.

Move free_reserved_area() to mm/memblock.c to prepare for further
consolidation of the functions that free reserved memory.

No functional changes.

Link: https://patch.msgid.link/20260323074836.3653702-5-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: Vlastimil Babka (SUSE)
---

diff --git a/mm/memblock.c b/mm/memblock.c
index 134724f5299e0..180b8347458f2 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -894,6 +894,42 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 	return memblock_remove_range(&memblock.memory, base, size);
 }
 
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
+{
+	void *pos;
+	unsigned long pages = 0;
+
+	start = (void *)PAGE_ALIGN((unsigned long)start);
+	end = (void *)((unsigned long)end & PAGE_MASK);
+	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+		struct page *page = virt_to_page(pos);
+		void *direct_map_addr;
+
+		/*
+		 * 'direct_map_addr' might be different from 'pos'
+		 * because some architectures' virt_to_page()
+		 * work with aliases. Getting the direct map
+		 * address ensures that we get a _writeable_
+		 * alias for the memset().
+		 */
+		direct_map_addr = page_address(page);
+		/*
+		 * Perform a kasan-unchecked memset() since this memory
+		 * has not been initialized.
+		 */
+		direct_map_addr = kasan_reset_tag(direct_map_addr);
+		if ((unsigned int)poison <= 0xFF)
+			memset(direct_map_addr, poison, PAGE_SIZE);
+
+		free_reserved_page(page);
+	}
+
+	if (pages && s)
+		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
+
+	return pages;
+}
+
 /**
  * memblock_free - free boot memory allocation
  * @ptr: starting address of the boot memory allocation
@@ -1777,7 +1813,6 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
 		totalram_pages_inc();
 	}
 }
-
 /*
  * Remaining API functions
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d4b6f1a554ed..df3d61253001e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6234,42 +6234,6 @@ void adjust_managed_page_count(struct page *page, long count)
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
 
-unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
-{
-	void *pos;
-	unsigned long pages = 0;
-
-	start = (void *)PAGE_ALIGN((unsigned long)start);
-	end = (void *)((unsigned long)end & PAGE_MASK);
-	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
-		struct page *page = virt_to_page(pos);
-		void *direct_map_addr;
-
-		/*
-		 * 'direct_map_addr' might be different from 'pos'
-		 * because some architectures' virt_to_page()
-		 * work with aliases. Getting the direct map
-		 * address ensures that we get a _writeable_
-		 * alias for the memset().
-		 */
-		direct_map_addr = page_address(page);
-		/*
-		 * Perform a kasan-unchecked memset() since this memory
-		 * has not been initialized.
-		 */
-		direct_map_addr = kasan_reset_tag(direct_map_addr);
-		if ((unsigned int)poison <= 0xFF)
-			memset(direct_map_addr, poison, PAGE_SIZE);
-
-		free_reserved_page(page);
-	}
-
-	if (pages && s)
-		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
-
-	return pages;
-}
-
 void free_reserved_page(struct page *page)
 {
 	clear_page_tag_ref(page);
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index 74cbd51dbea29..84b5954f66c3d 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -17,6 +17,7 @@
 
 #define __va(x) ((void *)((unsigned long)(x)))
 #define __pa(x) ((unsigned long)(x))
+#define __pa_symbol(x) ((unsigned long)(x))
 
 #define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
 
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index eb02d5771f4c8..b6b1d147fd750 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -11,9 +11,22 @@ static int memblock_debug = 1;
 
 #define pr_warn_ratelimited(fmt, ...) \
 	printf(fmt, ##__VA_ARGS__)
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
 bool mirrored_kernelcore = false;
 
 struct page {};
+static inline void *page_address(struct page *page)
+{
+	BUG();
+	return page;
+}
+
+static inline struct page *virt_to_page(void *virt)
+{
+	BUG();
+	return virt;
+}
 
 void memblock_free_pages(unsigned long pfn, unsigned int order)
 {
@@ -23,10 +36,25 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
 {
 }
 
-static inline unsigned long free_reserved_area(void *start, void *end,
-					       int poison, const char *s)
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
+void free_reserved_page(struct page *page);
+
+static inline bool deferred_pages_enabled(void)
+{
+	return false;
+}
+
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+	for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void *kasan_reset_tag(const void *addr)
+{
+	return (void *)addr;
+}
+
+static inline bool __is_kernel(unsigned long addr)
 {
-	return 0;
+	return false;
 }
 
 #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \