Replace all S390_lowcore usages in arch/s390/ by get_lowcore().
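For reference, get_lowcore() is assumed here to be a trivial inline accessor for
the per-cpu lowcore, presumably defined alongside struct lowcore in
asm/lowcore.h. A minimal sketch (not part of this patch) of what the diff below
relies on:

	static __always_inline struct lowcore *get_lowcore(void)
	{
		return &S390_lowcore;
	}

Routing every access through a single helper keeps the way the lowcore is
reached in one place, so it can later be changed without touching all callers
again.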
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
struct task_struct;
-#define current ((struct task_struct *const)S390_lowcore.current_task)
+#define current ((struct task_struct *const)get_lowcore()->current_task)
#endif /* !(_S390_CURRENT_H) */
asm volatile(
" stfl 0(0)\n"
- : "=m" (S390_lowcore.stfl_fac_list));
- stfl_fac_list = S390_lowcore.stfl_fac_list;
+ : "=m" (get_lowcore()->stfl_fac_list));
+ stfl_fac_list = get_lowcore()->stfl_fac_list;
memcpy(stfle_fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
if (stfl_fac_list & 0x01000000) {
#include <asm/lowcore.h>
-#define local_softirq_pending() (S390_lowcore.softirq_pending)
-#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x))
-#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
+#define local_softirq_pending() (get_lowcore()->softirq_pending)
+#define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x))
+#define or_softirq_pending(x) (get_lowcore()->softirq_pending |= (x))
#define __ARCH_IRQ_STAT
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
int cpu = smp_processor_id();
if (next == &init_mm)
- S390_lowcore.user_asce = s390_invalid_asce;
+ get_lowcore()->user_asce = s390_invalid_asce;
else
- S390_lowcore.user_asce.val = next->context.asce;
+ get_lowcore()->user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
local_ctl_load(7, &s390_invalid_asce);
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(7, &get_lowcore()->user_asce);
}
#define activate_mm activate_mm
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(7, &get_lowcore()->user_asce);
}
#include <asm-generic/mmu_context.h>
return;
if (!static_branch_unlikely(&pai_key))
return;
- if (!S390_lowcore.ccd)
+ if (!get_lowcore()->ccd)
return;
if (!user_mode(regs))
return;
- WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET);
+ WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd | PAI_CRYPTO_KERNEL_OFFSET);
}
static __always_inline void pai_kernel_exit(struct pt_regs *regs)
return;
if (!static_branch_unlikely(&pai_key))
return;
- if (!S390_lowcore.ccd)
+ if (!get_lowcore()->ccd)
return;
if (!user_mode(regs))
return;
- WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
+ WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
}
#define PAI_SAVE_AREA(x) ((x)->hw.event_base)
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
*/
-#define __my_cpu_offset S390_lowcore.percpu_offset
+#define __my_cpu_offset get_lowcore()->percpu_offset
/*
* For 64 bit module code, the module may be more than 4G above the
static __always_inline int preempt_count(void)
{
- return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+ return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
}
static __always_inline void preempt_count_set(int pc)
int old, new;
do {
- old = READ_ONCE(S390_lowcore.preempt_count);
+ old = READ_ONCE(get_lowcore()->preempt_count);
new = (old & PREEMPT_NEED_RESCHED) |
(pc & ~PREEMPT_NEED_RESCHED);
- } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+ } while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
old, new) != old);
}
static __always_inline void set_preempt_need_resched(void)
{
- __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+ __atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}
static __always_inline void clear_preempt_need_resched(void)
{
- __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+ __atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+ return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
}
static __always_inline void __preempt_count_add(int val)
*/
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
- __atomic_add_const(val, &S390_lowcore.preempt_count);
+ __atomic_add_const(val, &get_lowcore()->preempt_count);
return;
}
}
- __atomic_add(val, &S390_lowcore.preempt_count);
+ __atomic_add(val, &get_lowcore()->preempt_count);
}
static __always_inline void __preempt_count_sub(int val)
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+ return __atomic_add(-1, &get_lowcore()->preempt_count) == 1;
}
static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+ return unlikely(READ_ONCE(get_lowcore()->preempt_count) ==
preempt_offset);
}
static __always_inline int preempt_count(void)
{
- return READ_ONCE(S390_lowcore.preempt_count);
+ return READ_ONCE(get_lowcore()->preempt_count);
}
static __always_inline void preempt_count_set(int pc)
{
- S390_lowcore.preempt_count = pc;
+ get_lowcore()->preempt_count = pc;
}
static __always_inline void set_preempt_need_resched(void)
static __always_inline void __preempt_count_add(int val)
{
- S390_lowcore.preempt_count += val;
+ get_lowcore()->preempt_count += val;
}
static __always_inline void __preempt_count_sub(int val)
{
- S390_lowcore.preempt_count -= val;
+ get_lowcore()->preempt_count -= val;
}
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return !--S390_lowcore.preempt_count && tif_need_resched();
+ return !--get_lowcore()->preempt_count && tif_need_resched();
}
static __always_inline bool should_resched(int preempt_offset)
static __always_inline void set_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags |= (1UL << flag);
+ get_lowcore()->cpu_flags |= (1UL << flag);
}
static __always_inline void clear_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags &= ~(1UL << flag);
+ get_lowcore()->cpu_flags &= ~(1UL << flag);
}
static __always_inline bool test_cpu_flag(int flag)
{
- return S390_lowcore.cpu_flags & (1UL << flag);
+ return get_lowcore()->cpu_flags & (1UL << flag);
}
static __always_inline bool test_and_set_cpu_flag(int flag)
static __always_inline bool on_thread_stack(void)
{
- unsigned long ksp = S390_lowcore.kernel_stack;
+ unsigned long ksp = get_lowcore()->kernel_stack;
return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
-#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
-#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
-#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
-
-#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
-#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
-#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
-#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
-#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
-#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
-#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
-#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
-#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
-#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
-#define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_IS_VM (get_lowcore()->machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM (get_lowcore()->machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_LPAR (get_lowcore()->machine_flags & MACHINE_FLAG_LPAR)
+
+#define MACHINE_HAS_DIAG9C (get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP (get_lowcore()->machine_flags & MACHINE_FLAG_ESOP)
+#define MACHINE_HAS_IDTE (get_lowcore()->machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_EDAT1 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1)
+#define MACHINE_HAS_EDAT2 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2)
+#define MACHINE_HAS_TOPOLOGY (get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_TE (get_lowcore()->machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_TLB_LC (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_TLB_GUEST (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST)
+#define MACHINE_HAS_NX (get_lowcore()->machine_flags & MACHINE_FLAG_NX)
+#define MACHINE_HAS_GS (get_lowcore()->machine_flags & MACHINE_FLAG_GS)
+#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
+#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
/*
* Console mode. Override with conmode=
#include <asm/lowcore.h>
#include <asm/processor.h>
-#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
+#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static inline void do_softirq_own_stack(void)
{
- call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
+ call_on_stack(0, get_lowcore()->async_stack, void, __do_softirq);
}
#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
#include <asm/processor.h>
#include <asm/alternative.h>
-#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
extern int spin_retry;
{
unsigned long old;
- old = S390_lowcore.clock_comparator;
- S390_lowcore.clock_comparator = clock_comparator_max;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ old = get_lowcore()->clock_comparator;
+ get_lowcore()->clock_comparator = clock_comparator_max;
+ set_clock_comparator(get_lowcore()->clock_comparator);
return old;
}
static inline void local_tick_enable(unsigned long comp)
{
- S390_lowcore.clock_comparator = comp;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ get_lowcore()->clock_comparator = comp;
+ set_clock_comparator(get_lowcore()->clock_comparator);
}
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
static inline void update_timer_sys(void)
{
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
- S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer;
- S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+ get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer;
+ get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->sys_enter_timer;
+ get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer;
}
static inline void update_timer_mcck(void)
{
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
- S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer;
- S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer;
+ get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer;
+ get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->mcck_enter_timer;
+ get_lowcore()->last_update_timer = get_lowcore()->mcck_enter_timer;
}
#endif /* _S390_VTIME_H */
static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;
+ unsigned long stack = get_lowcore()->async_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_IRQ, stack);
}
static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;
+ unsigned long stack = get_lowcore()->nodat_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_NODAT, stack);
}
static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
+ unsigned long stack = get_lowcore()->mcck_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_MCCK, stack);
}
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;
+ unsigned long stack = get_lowcore()->restart_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_RESTART, stack);
}
memset(&tod_clock_base, 0, sizeof(tod_clock_base));
tod_clock_base.tod = TOD_UNIX_EPOCH;
- S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
+ get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}
/*
/* Check current-configuration-level */
if (stsi(NULL, 0, 0, 0) <= 2) {
- S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR;
return;
}
/* Get virtual-machine cpu information. */
/* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
- S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_KVM;
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
- S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_VM;
}
/* Remove leading, trailing and double whitespace. */
if (!test_facility(11))
return;
- S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY;
for (max_mnest = 6; max_mnest > 1; max_mnest--) {
if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
break;
psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_KERNEL_BITS;
- S390_lowcore.program_new_psw = psw;
- S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
+ get_lowcore()->program_new_psw = psw;
+ get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
- S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1;
system_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
- S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2;
if (test_facility(3))
- S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
- S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_TE;
system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
}
if (test_facility(51))
- S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
system_ctl_set_bit(0, CR0_VECTOR_BIT);
if (test_facility(130))
- S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_NX;
if (test_facility(133))
- S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_GS;
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enabled signed clock comparator comparisons */
- S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
- S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO;
/* the control bit is set during PCI initialization */
}
if (test_facility(194))
- S390_lowcore.machine_flags |= MACHINE_FLAG_RDP;
+ get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
}
static inline void save_vector_registers(void)
this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
}
- idle_time = S390_lowcore.int_clock - idle->clock_idle_enter;
+ idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
- S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
- S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+ get_lowcore()->steal_timer += idle->clock_idle_enter - get_lowcore()->last_update_clock;
+ get_lowcore()->last_update_clock = get_lowcore()->int_clock;
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
- S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+ get_lowcore()->system_timer += get_lowcore()->last_update_timer - idle->timer_idle_enter;
+ get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer;
/* Account time spent with enabled wait psw loaded as idle time. */
WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
static void do_IRQ(struct pt_regs *regs, int irq)
{
- if (tod_after_eq(S390_lowcore.int_clock,
- S390_lowcore.clock_comparator))
+ if (tod_after_eq(get_lowcore()->int_clock,
+ get_lowcore()->clock_comparator))
/* Serve timer interrupts first. */
clock_comparator_work();
generic_handle_irq(irq);
{
unsigned long frame = current_frame_address();
- return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
+ return ((get_lowcore()->async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
}
static void do_irq_async(struct pt_regs *regs, int irq)
if (on_async_stack()) {
do_IRQ(regs, irq);
} else {
- call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ,
+ call_on_stack(2, get_lowcore()->async_stack, void, do_IRQ,
struct pt_regs *, regs, int, irq);
}
}
set_cpu_flag(CIF_NOHZ_DELAY);
do {
- regs->tpi_info = S390_lowcore.tpi_info;
- if (S390_lowcore.tpi_info.adapter_IO)
+ regs->tpi_info = get_lowcore()->tpi_info;
+ if (get_lowcore()->tpi_info.adapter_IO)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
current->thread.last_break = regs->last_break;
}
- regs->int_code = S390_lowcore.ext_int_code_addr;
- regs->int_parm = S390_lowcore.ext_params;
- regs->int_parm_long = S390_lowcore.ext_params2;
+ regs->int_code = get_lowcore()->ext_int_code_addr;
+ regs->int_parm = get_lowcore()->ext_params;
+ regs->int_parm_long = get_lowcore()->ext_params2;
from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
if (from_idle)
purgatory = (purgatory_t)image->start;
/* store_status() saved the prefix register to lowcore */
- prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
+ prefix = (unsigned long)get_lowcore()->prefixreg_save_area;
/* Now do the reset */
s390_reset_system();
continue;
}
/* Store status of the boot CPU */
- mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+ mcesa = __va(get_lowcore()->mcesad & MCESA_ORIGIN_MASK);
if (cpu_has_vx())
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
smp_emergency_stop();
diag_amode31_ops.diag308_reset();
ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x");
- u64_to_hex(ptr, S390_lowcore.mcck_interruption_code);
+ u64_to_hex(ptr, get_lowcore()->mcck_interruption_code);
/*
* Disable low address protection and make machine check new PSW a
cr0_new = cr0;
cr0_new.lap = 0;
local_ctl_load(0, &cr0_new.reg);
- psw_save = S390_lowcore.mcck_new_psw;
- psw_bits(S390_lowcore.mcck_new_psw).io = 0;
- psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
- psw_bits(S390_lowcore.mcck_new_psw).wait = 1;
+ psw_save = get_lowcore()->mcck_new_psw;
+ psw_bits(get_lowcore()->mcck_new_psw).io = 0;
+ psw_bits(get_lowcore()->mcck_new_psw).ext = 0;
+ psw_bits(get_lowcore()->mcck_new_psw).wait = 1;
sclp_emergency_printk(message);
/*
* Restore machine check new PSW and control register 0 to original
 * values. This makes system dump analysis easier.
*/
- S390_lowcore.mcck_new_psw = psw_save;
+ get_lowcore()->mcck_new_psw = psw_save;
local_ctl_load(0, &cr0.reg);
disabled_wait();
while (1);
/*
* Set the clock comparator register to the next expected value.
*/
- set_clock_comparator(S390_lowcore.clock_comparator);
+ set_clock_comparator(get_lowcore()->clock_comparator);
if (!mci.gr || !mci.fp || !mci.fc)
return false;
/*
* check handling must take care of this. The host values are saved by
* KVM and are not affected.
*/
- cr2.reg = S390_lowcore.cregs_save_area[2];
+ cr2.reg = get_lowcore()->cregs_save_area[2];
if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST))
return false;
if (!mci.ms || !mci.pm || !mci.ia)
sie_page = container_of(sie_block, struct sie_page, sie_block);
mcck_backup = &sie_page->mcck_info;
- mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
+ mcck_backup->mcic = get_lowcore()->mcck_interruption_code &
~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
- mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
- mcck_backup->failing_storage_address
- = S390_lowcore.failing_storage_address;
+ mcck_backup->ext_damage_code = get_lowcore()->external_damage_code;
+ mcck_backup->failing_storage_address = get_lowcore()->failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);
if (user_mode(regs))
update_timer_mcck();
inc_irq_stat(NMI_NMI);
- mci.val = S390_lowcore.mcck_interruption_code;
+ mci.val = get_lowcore()->mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
/*
}
if (mci.ed && mci.ec) {
/* External damage */
- if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
+ if (get_lowcore()->external_damage_code & (1U << ED_STP_SYNC))
mcck->stp_queue |= stp_sync_check();
- if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
+ if (get_lowcore()->external_damage_code & (1U << ED_STP_ISLAND))
mcck->stp_queue |= stp_island_check();
mcck_pending = 1;
}
}
/* Load current program parameter */
- lpp(&S390_lowcore.lpp);
+ lpp(&get_lowcore()->lpp);
debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
"interval %#lx tear %#lx dear %#lx\n", __func__,
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
- WRITE_ONCE(S390_lowcore.ccd, ccd);
+ WRITE_ONCE(get_lowcore()->ccd, ccd);
local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
}
if (flags & PERF_EF_START)
paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
- WRITE_ONCE(S390_lowcore.ccd, 0);
+ WRITE_ONCE(get_lowcore()->ccd, 0);
}
}
struct paiext_cb *pcb = cpump->paiext_cb;
if (++cpump->active_events == 1) {
- S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
+ get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
/* Disable CPU instruction lookup for PAIE1 control block */
local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
pcb->acc = 0;
- S390_lowcore.aicd = 0;
+ get_lowcore()->aicd = 0;
}
}
void arch_setup_new_exec(void)
{
- if (S390_lowcore.current_pid != current->pid) {
- S390_lowcore.current_pid = current->pid;
+ if (get_lowcore()->current_pid != current->pid) {
+ get_lowcore()->current_pid = current->pid;
if (test_facility(40))
- lpp(&S390_lowcore.lpp);
+ lpp(&get_lowcore()->lpp);
}
}
lc->clock_comparator = clock_comparator_max;
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
- lc->machine_flags = S390_lowcore.machine_flags;
- lc->preempt_count = S390_lowcore.preempt_count;
+ lc->machine_flags = get_lowcore()->machine_flags;
+ lc->preempt_count = get_lowcore()->preempt_count;
nmi_alloc_mcesa_early(&lc->mcesad);
- lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
- lc->exit_timer = S390_lowcore.exit_timer;
- lc->user_timer = S390_lowcore.user_timer;
- lc->system_timer = S390_lowcore.system_timer;
- lc->steal_timer = S390_lowcore.steal_timer;
- lc->last_update_timer = S390_lowcore.last_update_timer;
- lc->last_update_clock = S390_lowcore.last_update_clock;
+ lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
+ lc->exit_timer = get_lowcore()->exit_timer;
+ lc->user_timer = get_lowcore()->user_timer;
+ lc->system_timer = get_lowcore()->system_timer;
+ lc->steal_timer = get_lowcore()->steal_timer;
+ lc->last_update_timer = get_lowcore()->last_update_timer;
+ lc->last_update_clock = get_lowcore()->last_update_clock;
/*
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
- lc->kernel_stack = S390_lowcore.kernel_stack;
+ lc->kernel_stack = get_lowcore()->kernel_stack;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
- lc->kernel_asce = S390_lowcore.kernel_asce;
- lc->user_asce = S390_lowcore.user_asce;
+ lc->kernel_asce = get_lowcore()->kernel_asce;
+ lc->user_asce = get_lowcore()->user_asce;
system_ctlreg_init_save_area(lc);
abs_lc = get_abs_lowcore();
mcck_stack = stack_alloc();
if (!lc || !nodat_stack || !async_stack || !mcck_stack)
goto out;
- memcpy(lc, &S390_lowcore, 512);
+ memcpy(lc, get_lowcore(), 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
- lc->kernel_asce = S390_lowcore.kernel_asce;
+ lc->kernel_asce = get_lowcore()->kernel_asce;
lc->user_asce = s390_invalid_asce;
- lc->machine_flags = S390_lowcore.machine_flags;
+ lc->machine_flags = get_lowcore()->machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
abs_lc = get_abs_lowcore();
struct lowcore *lc = lowcore_ptr[0];
if (pcpu_devices[0].address == stap())
- lc = &S390_lowcore;
+ lc = get_lowcore();
pcpu_delegate(&pcpu_devices[0], func, data,
lc->nodat_stack);
{
int cpu = raw_smp_processor_id();
- S390_lowcore.last_update_clock = get_tod_clock();
- S390_lowcore.restart_stack = (unsigned long)restart_stack;
- S390_lowcore.restart_fn = (unsigned long)do_restart;
- S390_lowcore.restart_data = 0;
- S390_lowcore.restart_source = -1U;
- S390_lowcore.restart_flags = 0;
- restore_access_regs(S390_lowcore.access_regs_save_area);
+ get_lowcore()->last_update_clock = get_tod_clock();
+ get_lowcore()->restart_stack = (unsigned long)restart_stack;
+ get_lowcore()->restart_fn = (unsigned long)do_restart;
+ get_lowcore()->restart_data = 0;
+ get_lowcore()->restart_source = -1U;
+ get_lowcore()->restart_flags = 0;
+ restore_access_regs(get_lowcore()->access_regs_save_area);
cpu_init();
rcutree_report_cpu_starting(cpu);
init_cpu_timer();
WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
- S390_lowcore.percpu_offset = __per_cpu_offset[0];
+ get_lowcore()->percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_setup_processor_id(void)
{
pcpu_devices[0].address = stap();
- S390_lowcore.cpu_nr = 0;
- S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
- S390_lowcore.spinlock_index = 0;
+ get_lowcore()->cpu_nr = 0;
+ get_lowcore()->spinlock_lockval = arch_spin_lockval(0);
+ get_lowcore()->spinlock_index = 0;
}
/*
{
add_random_kstack_offset();
enter_from_user_mode(regs);
- regs->psw = S390_lowcore.svc_old_psw;
- regs->int_code = S390_lowcore.svc_int_code;
+ regs->psw = get_lowcore()->svc_old_psw;
+ regs->int_code = get_lowcore()->svc_int_code;
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
current->thread.last_break = regs->last_break;
{
struct clock_event_device *cd;
- S390_lowcore.clock_comparator = clock_comparator_max;
+ get_lowcore()->clock_comparator = clock_comparator_max;
cd = this_cpu_ptr(&comparators);
cd->event_handler(cd);
}
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- S390_lowcore.clock_comparator = get_tod_clock() + delta;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ get_lowcore()->clock_comparator = get_tod_clock() + delta;
+ set_clock_comparator(get_lowcore()->clock_comparator);
return 0;
}
struct clock_event_device *cd;
int cpu;
- S390_lowcore.clock_comparator = clock_comparator_max;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ get_lowcore()->clock_comparator = clock_comparator_max;
+ set_clock_comparator(get_lowcore()->clock_comparator);
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
unsigned long param64)
{
inc_irq_stat(IRQEXT_CLK);
- if (S390_lowcore.clock_comparator == clock_comparator_max)
- set_clock_comparator(S390_lowcore.clock_comparator);
+ if (get_lowcore()->clock_comparator == clock_comparator_max)
+ set_clock_comparator(get_lowcore()->clock_comparator);
}
static void stp_timing_alert(struct stp_irq_parm *);
static void clock_sync_local(long delta)
{
/* Add the delta to the clock comparator. */
- if (S390_lowcore.clock_comparator != clock_comparator_max) {
- S390_lowcore.clock_comparator += delta;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ if (get_lowcore()->clock_comparator != clock_comparator_max) {
+ get_lowcore()->clock_comparator += delta;
+ set_clock_comparator(get_lowcore()->clock_comparator);
}
/* Adjust the last_update_clock time-stamp. */
- S390_lowcore.last_update_clock += delta;
+ get_lowcore()->last_update_clock += delta;
}
/* Single threaded workqueue used for stp sync events */
local_irq_save(flags);
cr0 = local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
- psw_bits(S390_lowcore.external_new_psw).mcheck = 1;
- psw_bits(S390_lowcore.program_new_psw).mcheck = 1;
- psw_bits(S390_lowcore.svc_new_psw).mcheck = 1;
- psw_bits(S390_lowcore.io_new_psw).mcheck = 1;
+ psw_bits(get_lowcore()->external_new_psw).mcheck = 1;
+ psw_bits(get_lowcore()->program_new_psw).mcheck = 1;
+ psw_bits(get_lowcore()->svc_new_psw).mcheck = 1;
+ psw_bits(get_lowcore()->io_new_psw).mcheck = 1;
local_ctl_load(0, &cr0);
local_irq_restore(flags);
local_mcck_enable();
unsigned int trapnr;
irqentry_state_t state;
- regs->int_code = S390_lowcore.pgm_int_code;
- regs->int_parm_long = S390_lowcore.trans_exc_code;
+ regs->int_code = get_lowcore()->pgm_int_code;
+ regs->int_parm_long = get_lowcore()->trans_exc_code;
state = irqentry_enter(regs);
current->thread.last_break = regs->last_break;
}
- if (S390_lowcore.pgm_code & 0x0200) {
+ if (get_lowcore()->pgm_code & 0x0200) {
/* transaction abort */
- current->thread.trap_tdb = S390_lowcore.pgm_tdb;
+ current->thread.trap_tdb = get_lowcore()->pgm_tdb;
}
- if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
+ if (get_lowcore()->pgm_code & PGM_INT_CODE_PER) {
if (user_mode(regs)) {
struct per_event *ev = &current->thread.per_event;
set_thread_flag(TIF_PER_TRAP);
- ev->address = S390_lowcore.per_address;
- ev->cause = S390_lowcore.per_code_combined;
- ev->paid = S390_lowcore.per_access_id;
+ ev->address = get_lowcore()->per_address;
+ ev->cause = get_lowcore()->per_code_combined;
+ ev->paid = get_lowcore()->per_access_id;
} else {
/* PER event in kernel is kprobes */
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
" stpt %0\n" /* Store current cpu timer value */
" spt %1" /* Set new value imm. afterwards */
: "=Q" (timer) : "Q" (expires));
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
- S390_lowcore.last_update_timer = expires;
+ get_lowcore()->system_timer += get_lowcore()->last_update_timer - timer;
+ get_lowcore()->last_update_timer = expires;
}
static inline int virt_timer_forward(u64 elapsed)
{
u64 timer, clock, user, guest, system, hardirq, softirq;
- timer = S390_lowcore.last_update_timer;
- clock = S390_lowcore.last_update_clock;
+ timer = get_lowcore()->last_update_timer;
+ clock = get_lowcore()->last_update_clock;
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
" stckf %1" /* Store current tod clock value */
- : "=Q" (S390_lowcore.last_update_timer),
- "=Q" (S390_lowcore.last_update_clock)
+ : "=Q" (get_lowcore()->last_update_timer),
+ "=Q" (get_lowcore()->last_update_clock)
: : "cc");
- clock = S390_lowcore.last_update_clock - clock;
- timer -= S390_lowcore.last_update_timer;
+ clock = get_lowcore()->last_update_clock - clock;
+ timer -= get_lowcore()->last_update_timer;
if (hardirq_count())
- S390_lowcore.hardirq_timer += timer;
+ get_lowcore()->hardirq_timer += timer;
else
- S390_lowcore.system_timer += timer;
+ get_lowcore()->system_timer += timer;
/* Update MT utilization calculation */
if (smp_cpu_mtid &&
/* Calculate cputime delta */
user = update_tsk_timer(&tsk->thread.user_timer,
- READ_ONCE(S390_lowcore.user_timer));
+ READ_ONCE(get_lowcore()->user_timer));
guest = update_tsk_timer(&tsk->thread.guest_timer,
- READ_ONCE(S390_lowcore.guest_timer));
+ READ_ONCE(get_lowcore()->guest_timer));
system = update_tsk_timer(&tsk->thread.system_timer,
- READ_ONCE(S390_lowcore.system_timer));
+ READ_ONCE(get_lowcore()->system_timer));
hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
- READ_ONCE(S390_lowcore.hardirq_timer));
+ READ_ONCE(get_lowcore()->hardirq_timer));
softirq = update_tsk_timer(&tsk->thread.softirq_timer,
- READ_ONCE(S390_lowcore.softirq_timer));
- S390_lowcore.steal_timer +=
+ READ_ONCE(get_lowcore()->softirq_timer));
+ get_lowcore()->steal_timer +=
clock - user - guest - system - hardirq - softirq;
/* Push account value */
void vtime_task_switch(struct task_struct *prev)
{
do_account_vtime(prev);
- prev->thread.user_timer = S390_lowcore.user_timer;
- prev->thread.guest_timer = S390_lowcore.guest_timer;
- prev->thread.system_timer = S390_lowcore.system_timer;
- prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
- prev->thread.softirq_timer = S390_lowcore.softirq_timer;
- S390_lowcore.user_timer = current->thread.user_timer;
- S390_lowcore.guest_timer = current->thread.guest_timer;
- S390_lowcore.system_timer = current->thread.system_timer;
- S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
- S390_lowcore.softirq_timer = current->thread.softirq_timer;
+ prev->thread.user_timer = get_lowcore()->user_timer;
+ prev->thread.guest_timer = get_lowcore()->guest_timer;
+ prev->thread.system_timer = get_lowcore()->system_timer;
+ prev->thread.hardirq_timer = get_lowcore()->hardirq_timer;
+ prev->thread.softirq_timer = get_lowcore()->softirq_timer;
+ get_lowcore()->user_timer = current->thread.user_timer;
+ get_lowcore()->guest_timer = current->thread.guest_timer;
+ get_lowcore()->system_timer = current->thread.system_timer;
+ get_lowcore()->hardirq_timer = current->thread.hardirq_timer;
+ get_lowcore()->softirq_timer = current->thread.softirq_timer;
}
/*
if (do_account_vtime(tsk))
virt_timer_expire();
- steal = S390_lowcore.steal_timer;
- avg_steal = S390_lowcore.avg_steal_timer;
+ steal = get_lowcore()->steal_timer;
+ avg_steal = get_lowcore()->avg_steal_timer;
if ((s64) steal > 0) {
- S390_lowcore.steal_timer = 0;
+ get_lowcore()->steal_timer = 0;
account_steal_time(cputime_to_nsecs(steal));
avg_steal += steal;
}
- S390_lowcore.avg_steal_timer = avg_steal / 2;
+ get_lowcore()->avg_steal_timer = avg_steal / 2;
}
static u64 vtime_delta(void)
{
- u64 timer = S390_lowcore.last_update_timer;
+ u64 timer = get_lowcore()->last_update_timer;
- S390_lowcore.last_update_timer = get_cpu_timer();
+ get_lowcore()->last_update_timer = get_cpu_timer();
- return timer - S390_lowcore.last_update_timer;
+ return timer - get_lowcore()->last_update_timer;
}
/*
u64 delta = vtime_delta();
if (tsk->flags & PF_VCPU)
- S390_lowcore.guest_timer += delta;
+ get_lowcore()->guest_timer += delta;
else
- S390_lowcore.system_timer += delta;
+ get_lowcore()->system_timer += delta;
virt_timer_forward(delta);
}
{
u64 delta = vtime_delta();
- S390_lowcore.softirq_timer += delta;
+ get_lowcore()->softirq_timer += delta;
virt_timer_forward(delta);
}
{
u64 delta = vtime_delta();
- S390_lowcore.hardirq_timer += delta;
+ get_lowcore()->hardirq_timer += delta;
virt_timer_forward(delta);
}
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
/* do not poll with more than halt_poll_max_steal percent of steal time */
- if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+ if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
READ_ONCE(halt_poll_max_steal)) {
vcpu->stat.halt_no_poll_steal++;
return true;
struct spin_wait *node, *next;
int lockval, ix, node_id, tail_id, old, new, owner, count;
- ix = S390_lowcore.spinlock_index++;
+ ix = get_lowcore()->spinlock_index++;
barrier();
lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
node = this_cpu_ptr(&spin_wait[ix]);
}
out:
- S390_lowcore.spinlock_index--;
+ get_lowcore()->spinlock_index--;
}
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
if (u->flags & UWM_SWITCH_STACK) {
local_irq_save(flags);
local_mcck_save(mflags);
- rc = call_on_stack(1, S390_lowcore.nodat_stack,
+ rc = call_on_stack(1, get_lowcore()->nodat_stack,
int, unwindme_func3, struct unwindme *, u);
local_mcck_restore(mflags);
local_irq_restore(flags);
local_ctl_store(1, &cr1);
local_ctl_store(7, &cr7);
- if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
+ if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016lx user: %016lx\n",
exit ? "exit" : "entry", cr1.val, cr7.val,
- S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
+ get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
- max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
return USER_FAULT;
if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
- gmap = (struct gmap *)S390_lowcore.gmap;
+ gmap = (struct gmap *)get_lowcore()->gmap;
if (gmap && gmap->asce == regs->cr1)
return GMAP_FAULT;
return KERNEL_FAULT;
pr_cont("mode while using ");
switch (get_fault_type(regs)) {
case USER_FAULT:
- asce = S390_lowcore.user_asce.val;
+ asce = get_lowcore()->user_asce.val;
pr_cont("user ");
break;
case GMAP_FAULT:
- asce = ((struct gmap *)S390_lowcore.gmap)->asce;
+ asce = ((struct gmap *)get_lowcore()->gmap)->asce;
pr_cont("gmap ");
break;
case KERNEL_FAULT:
- asce = S390_lowcore.kernel_asce.val;
+ asce = get_lowcore()->kernel_asce.val;
pr_cont("kernel ");
break;
default:
mmap_read_lock(mm);
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
- gmap = (struct gmap *)S390_lowcore.gmap;
+ gmap = (struct gmap *)get_lowcore()->gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
switch (get_fault_type(regs)) {
case GMAP_FAULT:
mm = current->mm;
- gmap = (struct gmap *)S390_lowcore.gmap;
+ gmap = (struct gmap *)get_lowcore()->gmap;
mmap_read_lock(mm);
addr = __gmap_translate(gmap, addr);
mmap_read_unlock(mm);
void do_non_secure_storage_access(struct pt_regs *regs)
{
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
unsigned long gaddr = get_fault_address(regs);
if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
void do_secure_storage_violation(struct pt_regs *regs)
{
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
unsigned long gaddr = get_fault_address(regs);
/*
*/
void gmap_enable(struct gmap *gmap)
{
- S390_lowcore.gmap = (unsigned long) gmap;
+ get_lowcore()->gmap = (unsigned long)gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
*/
void gmap_disable(struct gmap *gmap)
{
- S390_lowcore.gmap = 0UL;
+ get_lowcore()->gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
*/
struct gmap *gmap_get_enabled(void)
{
- return (struct gmap *) S390_lowcore.gmap;
+ return (struct gmap *)get_lowcore()->gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
break;
}
table = (unsigned long *)((unsigned long)old & mask);
- crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
+ crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
} else {
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
- S390_lowcore.user_asce.val = mm->context.asce;
- local_ctl_load(7, &S390_lowcore.user_asce);
+ get_lowcore()->user_asce.val = mm->context.asce;
+ local_ctl_load(7, &get_lowcore()->user_asce);
}
__tlb_flush_local();
}
return NULL;
}
if (!strcmp(str, "nomio")) {
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
+ get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO;
return NULL;
}
if (!strcmp(str, "force_floating")) {