For the time being, TASK_SIZE can be customized by the user via Kconfig,
but it is not possible to check all constraints in Kconfig. Impossible
setups are detected at compile time with BUILD_BUG(), but that leads
to a build failure when crazy values are set. It is not a problem on its
own because a real user will usually either use the default value or set
a well-thought-out value. However, build robots generate crazy random
configs that lead to build failures, and build robots see it as a
regression every time a patch adds such a constraint.
So instead of failing the build when the custom TASK_SIZE is too
big, just adjust it to the maximum possible value that matches the setup.
Several architectures already calculate TASK_SIZE based on other
parameters and options.
In order to do so, move MODULES_VADDR calculation into task_size_32.h
and ensure that:
- On book3s/32, userspace and module area have their own segments (256M)
- On 8xx, userspace has its own full PGDIR entries (4M)
Then TASK_SIZE is guaranteed to be correct, so remove the related
BUILD_BUG()s.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/6a2575420770d075cd090b5a316730a2ffafdee4.1766574657.git.chleroy@kernel.org
Say N here unless you know what you are doing.
config TASK_SIZE
- hex "Size of user task space" if TASK_SIZE_BOOL
+ hex "Size of maximum user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
- default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM
default "0xc0000000"
config MODULES_SIZE_BOOL
#define VMALLOC_END ioremap_bot
#endif
-#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
-#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
-#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
-
#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <linux/threads.h>
#define mmu_linear_psize MMU_PAGE_8M
-#define MODULES_END PAGE_OFFSET
-#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
-#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
-
#ifndef __ASSEMBLER__
#include <linux/mmdebug.h>
#ifndef _ASM_POWERPC_TASK_SIZE_32_H
#define _ASM_POWERPC_TASK_SIZE_32_H
+#include <linux/sizes.h>
+
#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
#error User TASK_SIZE overlaps with KERNEL_START address
#endif
+#ifdef CONFIG_PPC_8xx
+#define MODULES_END ASM_CONST(CONFIG_PAGE_OFFSET)
+#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
+#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
+#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_4M) - 1))
+#define USER_TOP MODULES_BASE
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MODULES_END (ASM_CONST(CONFIG_PAGE_OFFSET) & ~(UL(SZ_256M) - 1))
+#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
+#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
+#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_256M) - 1))
+#define USER_TOP MODULES_BASE
+#endif
+
+#ifndef USER_TOP
+#define USER_TOP ASM_CONST(CONFIG_PAGE_OFFSET)
+#endif
+
+#if CONFIG_TASK_SIZE < USER_TOP
#define TASK_SIZE ASM_CONST(CONFIG_TASK_SIZE)
+#else
+#define TASK_SIZE USER_TOP
+#endif
/*
* This decides where the kernel will search for a free chunk of vm space during
update_bats();
- BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE);
-
for (i = ALIGN(TASK_SIZE, SZ_256M) >> 28; i < 16; i++) {
/* Do not set NX on VM space for modules */
if (is_module_segment(i << 28))
#ifdef MODULES_VADDR
unsigned long limit = (unsigned long)_etext - SZ_32M;
- BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
-
/* First try within 32M limit from _etext to avoid branch trampolines */
if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
start = limit;
/* 8xx can only access 32MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
-
- BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE);
}
int pud_clear_huge(pud_t *pud)