Since the rework of the kernel virtual address space [1], the module
area and the kernel image are within the same 4GB area. Therefore the
weak per cpu workaround for modules is no longer needed. Remove it.
[1] commit c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")
Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
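
For background, the removed workaround relied on how weak symbol
definitions affect code generation: with a weak definition the compiler
cannot assume the definition it sees is the one that will be used at
link time, so it emits an external (on s390 and alpha: GOT based)
reference instead of a direct access that assumes the symbol is within
reach. A minimal stand-alone sketch of that effect (illustration only,
the variable and function names are made up, this is not kernel code):

  /* A weak definition forces an external reference, which is what
   * the per cpu workaround used for modules that could end up more
   * than 4GB away from the per cpu area. */
  int strong_var;                       /* direct access possible */
  int weak_var __attribute__((weak));   /* external reference forced */

  int read_both(void)
  {
          return strong_var + weak_var;
  }
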
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
- select ARCH_MODULE_NEEDS_WEAK_PER_CPU
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
*/
#define __my_cpu_offset get_lowcore()->percpu_offset
-/*
- * For 64 bit module code, the module may be more than 4G above the
- * per cpu area, use weak definitions to force the compiler to
- * generate external references.
- * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
- * in the Kconfig.
- */
-
/*
* We use a compare-and-swap loop since that uses less cpu cycles than
* disabling and enabling interrupts like the generic variant would do.
__section(".discard") __attribute__((unused))
/*
- * s390 and alpha modules require percpu variables to be defined as
+ * alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
* will be located outside of the usually addressable area.
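
The context above also mentions using a compare-and-swap loop rather
than disabling and enabling interrupts for the per cpu operations. As a
rough stand-alone illustration of that pattern (not the kernel code: it
uses the GCC/Clang __atomic builtins instead of the kernel's cmpxchg(),
and the kernel variant additionally runs with preemption disabled so
the per cpu pointer stays valid):

  #include <stdint.h>

  /* Retry the add until the compare-and-swap succeeds; on failure
   * __atomic_compare_exchange_n reloads 'old' with the current
   * value of *ptr, so the loop recomputes 'new' from fresh data. */
  static inline uint64_t cas_add(uint64_t *ptr, uint64_t val)
  {
          uint64_t old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
          uint64_t new;

          do {
                  new = old + val;
          } while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_RELAXED));
          return new;
  }
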