um: Replace UML_ROUND_UP() with PAGE_ALIGN()
author    Tiwei Bie <tiwei.btw@antgroup.com>
          Mon, 27 Oct 2025 05:45:18 +0000 (13:45 +0800)
committer Johannes Berg <johannes.berg@intel.com>
          Mon, 27 Oct 2025 15:37:12 +0000 (16:37 +0100)
Although UML_ROUND_UP() is defined in a shared header file, it
depends on the PAGE_SIZE and PAGE_MASK macros, so it can only be
used in kernel code. Since its name is not very descriptive and it
duplicates PAGE_ALIGN(), replace its usages with direct calls to
PAGE_ALIGN() and remove it.

Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
Link: https://patch.msgid.link/20251027054519.1996090-4-tiwei.bie@linux.dev
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
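
For reference, a minimal user-space sketch (not part of the patch) showing
why the two macros compute the same value. The PAGE_SIZE value below is an
illustrative assumption, and PAGE_ALIGN() here is modelled on the kernel's
definition. Note that PAGE_ALIGN(), unlike the removed macro, does not cast
its argument, which is why the patch adds explicit (unsigned long) casts at
the call sites:

#include <assert.h>
#include <stdio.h>

/* Illustrative values; the kernel derives these from the configured
 * page size. */
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* The removed macro: round addr up to the next page boundary,
 * casting the argument itself. */
#define UML_ROUND_UP(addr) \
	((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* Modelled on the kernel's PAGE_ALIGN(): same arithmetic, but the
 * caller must pass an unsigned long. */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long a;

	for (a = 0; a < 3 * PAGE_SIZE; a += 511)
		assert(UML_ROUND_UP(a) == PAGE_ALIGN(a));
	printf("UML_ROUND_UP() and PAGE_ALIGN() agree\n");
	return 0;
}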
arch/um/include/shared/kern_util.h
arch/um/kernel/mem.c
arch/um/kernel/um_arch.c

diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 00ca3e12fd9aeef4f9579fca22f9f16b579aeb15..949a03c7861ecd865543701166d6fa785b55f068 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -15,9 +15,6 @@ extern int uml_exitcode;
 
 extern int kmalloc_ok;
 
-#define UML_ROUND_UP(addr) \
-       ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
 extern unsigned long alloc_stack(int order, int atomic);
 extern void free_stack(unsigned long stack, int order);
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 32e3b1972dc12f57620cf6b2d97d5057619845df..19d40b58eac44bdda15302fd8fbcdd58dfa876ec 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -71,7 +71,7 @@ void __init arch_mm_preinit(void)
        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
-       brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
+       brk_end = PAGE_ALIGN((unsigned long) sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
        memblock_free((void *)brk_end, uml_reserved - brk_end);
        uml_reserved = brk_end;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index fcabef8c72245679485000cb21f3bc178c1365c5..6f9a49e6c6a045f32395c94da55c76d633976b64 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -348,12 +348,11 @@ int __init linux_main(int argc, char **argv, char **envp)
         * so they actually get what they asked for. This should
         * add zero for non-exec shield users
         */
-
-       diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+       diff = PAGE_ALIGN(brk_start) - PAGE_ALIGN((unsigned long) &_end);
        if (diff > 1024 * 1024) {
                os_info("Adding %ld bytes to physical memory to account for "
                        "exec-shield gap\n", diff);
-               physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+               physmem_size += diff;
        }
 
        uml_physmem = (unsigned long) __binary_start & PAGE_MASK;