git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
s390: Move CIF flags to struct pcpu
author: Sven Schnelle <svens@linux.ibm.com>
Tue, 16 Jul 2024 07:26:15 +0000 (09:26 +0200)
committer: Vasily Gorbik <gor@linux.ibm.com>
Tue, 23 Jul 2024 14:02:31 +0000 (16:02 +0200)
To allow testing flags for offline CPUs, move the CIF flags
to struct pcpu. To avoid having to calculate the array index
for each access, add a pointer to the pcpu member for the current
cpu to lowcore.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/processor.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c

index c724e71e17852dadf805eb246bb421e6c7ab04ec..bce3a69ab2a3083b1169efb214a06f87410dfd09 100644 (file)
@@ -97,8 +97,7 @@ struct lowcore {
        __u64   save_area_async[8];             /* 0x0240 */
        __u64   save_area_restart[1];           /* 0x0280 */
 
-       /* CPU flags. */
-       __u64   cpu_flags;                      /* 0x0288 */
+       __u64   pcpu;                           /* 0x0288 */
 
        /* Return psws. */
        psw_t   return_psw;                     /* 0x0290 */
index c87cf2b8e81af864f64f9e1657487751c7ff5853..5debb12614adbbe7fc54000f302c821d2b7fe764 100644 (file)
 #include <asm/irqflags.h>
 #include <asm/alternative.h>
 
+struct pcpu {
+       unsigned long ec_mask;          /* bit mask for ec_xxx functions */
+       unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
+       unsigned long flags;            /* per CPU flags */
+       signed char state;              /* physical cpu state */
+       signed char polarization;       /* physical polarization */
+       u16 address;                    /* physical cpu address */
+};
+
+DECLARE_PER_CPU(struct pcpu, pcpu_devices);
+
 typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
 
+static __always_inline struct pcpu *this_pcpu(void)
+{
+       return (struct pcpu *)(get_lowcore()->pcpu);
+}
+
 static __always_inline void set_cpu_flag(int flag)
 {
-       get_lowcore()->cpu_flags |= (1UL << flag);
+       this_pcpu()->flags |= (1UL << flag);
 }
 
 static __always_inline void clear_cpu_flag(int flag)
 {
-       get_lowcore()->cpu_flags &= ~(1UL << flag);
+       this_pcpu()->flags &= ~(1UL << flag);
 }
 
 static __always_inline bool test_cpu_flag(int flag)
 {
-       return get_lowcore()->cpu_flags & (1UL << flag);
+       return this_pcpu()->flags & (1UL << flag);
 }
 
 static __always_inline bool test_and_set_cpu_flag(int flag)
@@ -81,9 +97,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag)
  */
 static __always_inline bool test_cpu_flag_of(int flag, int cpu)
 {
-       struct lowcore *lc = lowcore_ptr[cpu];
-
-       return lc->cpu_flags & (1UL << flag);
+       return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
 }
 
 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
index 26bb45d0e6f13f0177cf523c43ef774dd220dd64..58fc6b93b475f24845d11e6b1adc85b142b6fea2 100644 (file)
@@ -114,7 +114,7 @@ int main(void)
        OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
        OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
        OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
-       OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
+       OFFSET(__LC_PCPU, lowcore, pcpu);
        OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
        OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
        OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
@@ -186,5 +186,7 @@ int main(void)
 #endif
        OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
        DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
+
+       OFFSET(__PCPU_FLAGS, pcpu, flags);
        return 0;
 }
index 454b6b92c7f84294e7afaca7beabb9446b5b40d6..fa58bd2c48c9aab2c2d96fe305794cb92f6a7673 100644 (file)
@@ -480,7 +480,8 @@ SYM_CODE_START(mcck_int_handler)
        clgrjl  %r9,%r14, 4f
        larl    %r14,.Lsie_leave
        clgrjhe %r9,%r14, 4f
-       oi      __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+       lg      %r10,__LC_PCPU
+       oi      __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
 4:     BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
        SIEEXIT __SF_SIE_CONTROL(%r15)
 #endif
index 1faba11d5f0b95210fa723f3ac969be6429521bc..178daf4e356359b2c76d845c0ad3b3790d297253 100644 (file)
@@ -406,6 +406,7 @@ static void __init setup_lowcore(void)
                panic("%s: Failed to allocate %zu bytes align=%zx\n",
                      __func__, sizeof(*lc), sizeof(*lc));
 
+       lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
        lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
        lc->restart_psw.addr = __pa(restart_int_handler);
        lc->external_new_psw.mask = PSW_KERNEL_BITS;
index b36b089b9a268179aaee636067676d16ace5fc0f..fbba37ec53cf7d507c4890c21a02b93a1ad280d5 100644 (file)
@@ -74,16 +74,8 @@ enum {
        CPU_STATE_CONFIGURED,
 };
 
-struct pcpu {
-       unsigned long ec_mask;          /* bit mask for ec_xxx functions */
-       unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
-       signed char state;              /* physical cpu state */
-       signed char polarization;       /* physical polarization */
-       u16 address;                    /* physical cpu address */
-};
-
 static u8 boot_core_type;
-static DEFINE_PER_CPU(struct pcpu, pcpu_devices);
+DEFINE_PER_CPU(struct pcpu, pcpu_devices);
 /*
  * Pointer to the pcpu area of the boot CPU. This is required when a restart
  * interrupt is triggered on an offline CPU. For that case accessing percpu
@@ -264,6 +256,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
+       lc->pcpu = (unsigned long)pcpu;
        lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
@@ -924,6 +917,7 @@ void __cpu_die(unsigned int cpu)
        pcpu_free_lowcore(pcpu, cpu);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
+       pcpu->flags = 0;
 }
 
 void __noreturn cpu_die(void)
@@ -959,10 +953,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
+       struct lowcore *lc = get_lowcore();
+
        WARN_ON(!cpu_present(0) || !cpu_online(0));
+       lc->percpu_offset = __per_cpu_offset[0];
        ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
        ipl_pcpu->state = CPU_STATE_CONFIGURED;
-       get_lowcore()->percpu_offset = __per_cpu_offset[0];
+       lc->pcpu = (unsigned long)ipl_pcpu;
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }