Although UML_ROUND_UP() is defined in a shared header file, it
depends on the PAGE_SIZE and PAGE_MASK macros, so it can only be
used in kernel code. Since its name is not very descriptive and its
functionality is identical to PAGE_ALIGN(), replace its usages with
direct calls to PAGE_ALIGN() and remove it.
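
For reference, a minimal standalone sketch (not part of this patch;
it assumes a 4 KiB page size and the usual PAGE_MASK = ~(PAGE_SIZE - 1)
definition) showing that the removed macro and an open-coded
PAGE_ALIGN() round an address up to the same page boundary:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* The macro being removed. */
#define UML_ROUND_UP(addr) \
	((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* Open-coded equivalent of PAGE_ALIGN() for this sketch. */
#define PAGE_ALIGN(addr) \
	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long samples[] = { 0, 1, 4095, 4096, 4097, 123456 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* Both round up to the next multiple of PAGE_SIZE. */
		assert(UML_ROUND_UP(samples[i]) == PAGE_ALIGN(samples[i]));
		printf("%lu -> %lu\n", samples[i], PAGE_ALIGN(samples[i]));
	}
	return 0;
}
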
Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
Link: https://patch.msgid.link/20251027054519.1996090-4-tiwei.bie@linux.dev
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
extern int kmalloc_ok;
-#define UML_ROUND_UP(addr) \
- ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
extern unsigned long alloc_stack(int order, int atomic);
extern void free_stack(unsigned long stack, int order);
/* Map in the area just after the brk now that kmalloc is about
* to be turned on.
*/
- brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
+ brk_end = PAGE_ALIGN((unsigned long) sbrk(0));
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
* so they actually get what they asked for. This should
* add zero for non-exec shield users
*/
-
- diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+ diff = PAGE_ALIGN(brk_start) - PAGE_ALIGN((unsigned long) &_end);
if (diff > 1024 * 1024) {
os_info("Adding %ld bytes to physical memory to account for "
"exec-shield gap\n", diff);
- physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+ physmem_size += diff;
}
uml_physmem = (unsigned long) __binary_start & PAGE_MASK;