Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 8 Jul 2019 17:39:56 +0000 (10:39 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 8 Jul 2019 17:39:56 +0000 (10:39 -0700)
Pull SMP/hotplug updates from Thomas Gleixner:
 "A small set of updates for SMP and CPU hotplug:

   - Abort disabling secondary CPUs in the freezer when a wakeup is
     pending instead of evaluating it only after all CPUs have been
     offlined.

   - Remove the shared annotation for the strict per CPU cfd_data in the
     smp function call core code.

   - Remove the return values of smp_call_function() and on_each_cpu()
     as they are unconditionally 0. Fixup the few callers which actually
     bothered to check the return value"
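
A minimal sketch, not part of this merge, of what the third change means at a
call site; the names frob_on_cpu() and frob_all_cpus() are illustrative only:

#include <linux/smp.h>

/* Hypothetical IPI handler: runs on each online CPU, interrupts disabled. */
static void frob_on_cpu(void *info)
{
}

static void frob_all_cpus(void)
{
        /*
         * Before this series, on_each_cpu() returned an int that was
         * unconditionally 0, so error handling such as
         *
         *      if (on_each_cpu(frob_on_cpu, NULL, 1))
         *              printk(KERN_CRIT "frob: timed out\n");
         *
         * was dead code.  After it, the function returns void and
         * there is nothing to check.
         */
        on_each_cpu(frob_on_cpu, NULL, 1);
}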

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove smp_call_function() and on_each_cpu() return values
  smp: Do not mark call_function_data as shared
  cpu/hotplug: Abort disabling secondary CPUs if wakeup is pending
  cpu/hotplug: Fix notify_cpu_starting() reference in bringup_wait_for_ap()

12 files changed:
arch/alpha/kernel/smp.c
arch/alpha/oprofile/common.c
arch/arm/common/bL_switcher.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/uncached.c
arch/powerpc/kernel/rtas.c
arch/x86/lib/cache-smp.c
drivers/char/agp/generic.c
include/linux/smp.h
kernel/cpu.c
kernel/smp.c
kernel/up.c

diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index d0dccae53ba9f1ce0e5e15021d84c8cafdbd4bae..5f90df30be20a699d2ffde53234c108c55eb81e9 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -614,8 +614,7 @@ void
 smp_imb(void)
 {
        /* Must wait other processors to flush their icache before continue. */
-       if (on_each_cpu(ipi_imb, NULL, 1))
-               printk(KERN_CRIT "smp_imb: timed out\n");
+       on_each_cpu(ipi_imb, NULL, 1);
 }
 EXPORT_SYMBOL(smp_imb);
 
@@ -630,9 +629,7 @@ flush_tlb_all(void)
 {
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
-       if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
-               printk(KERN_CRIT "flush_tlb_all: timed out\n");
-       }
+       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -667,9 +664,7 @@ flush_tlb_mm(struct mm_struct *mm)
                }
        }
 
-       if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
-               printk(KERN_CRIT "flush_tlb_mm: timed out\n");
-       }
+       smp_call_function(ipi_flush_tlb_mm, mm, 1);
 
        preempt_enable();
 }
@@ -720,9 +715,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
        data.mm = mm;
        data.addr = addr;
 
-       if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
-               printk(KERN_CRIT "flush_tlb_page: timed out\n");
-       }
+       smp_call_function(ipi_flush_tlb_page, &data, 1);
 
        preempt_enable();
 }
@@ -772,9 +765,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                }
        }
 
-       if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
-               printk(KERN_CRIT "flush_icache_page: timed out\n");
-       }
+       smp_call_function(ipi_flush_icache_page, mm, 1);
 
        preempt_enable();
 }
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 310a4ce1dccc0a1da774bcc89acee06b1d018857..1b1259c7d7d132795e0141b1dbd1af229370bdf8 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
        model->reg_setup(&reg, ctr, &sys);
 
        /* Configure the registers on all cpus.  */
-       (void)smp_call_function(model->cpu_setup, &reg, 1);
+       smp_call_function(model->cpu_setup, &reg, 1);
        model->cpu_setup(&reg);
        return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-       (void)smp_call_function(op_axp_cpu_start, NULL, 1);
+       smp_call_function(op_axp_cpu_start, NULL, 1);
        op_axp_cpu_start(NULL);
        return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-       (void)smp_call_function(op_axp_cpu_stop, NULL, 1);
+       smp_call_function(op_axp_cpu_stop, NULL, 1);
        op_axp_cpu_stop(NULL);
 }
 
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 13e561737ca86f43ac4750c77384d663814e54eb..746e1fce777eb2bc1f00a51d7b2f99026457e67e 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -539,16 +539,14 @@ static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 
 int bL_switcher_trace_trigger(void)
 {
-       int ret;
-
        preempt_disable();
 
        bL_switcher_trace_trigger_cpu(NULL);
-       ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+       smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
 
        preempt_enable();
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 58a6337c06905d3c8edefc12acdac5e34b95792f..7c52bd2695a21b57665c5db8b7d5c0eb0cd4c325 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6390,11 +6390,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
        }
 
        /* save the current system wide pmu states */
-       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
-       if (ret) {
-               DPRINT(("on_each_cpu() failed: %d\n", ret));
-               goto cleanup_reserve;
-       }
+       on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 
        /* officially change to the alternate interrupt handler */
        pfm_alt_intr_handler = hdl;
@@ -6421,7 +6417,6 @@ int
 pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 {
        int i;
-       int ret;
 
        if (hdl == NULL) return -EINVAL;
 
@@ -6435,10 +6430,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
        pfm_alt_intr_handler = NULL;
 
-       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
-       if (ret) {
-               DPRINT(("on_each_cpu() failed: %d\n", ret));
-       }
+       on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 
        for_each_online_cpu(i) {
                pfm_unreserve_session(NULL, 1, i);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index edcdfc149311b0145ca223be6b6da49b66212d09..16c6d377c502daf6fb7aa7b6d9143fa5cfd4d4b9 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -121,8 +121,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
                atomic_set(&uc_pool->status, 0);
-               status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
-               if (status || atomic_read(&uc_pool->status))
+               smp_call_function(uncached_ipi_visibility, uc_pool, 1);
+               if (atomic_read(&uc_pool->status))
                        goto failed;
        } else if (status != PAL_VISIBILITY_OK)
                goto failed;
@@ -143,8 +143,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
        if (status != PAL_STATUS_SUCCESS)
                goto failed;
        atomic_set(&uc_pool->status, 0);
-       status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
-       if (status || atomic_read(&uc_pool->status))
+       smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
+       if (atomic_read(&uc_pool->status))
                goto failed;
 
        /*
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index b824f4c69622d8e7a555d133e6b9559698b36720..0ab4c72515c4e19e1f1b7867b86a5d0b3cc61046 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -990,8 +990,7 @@ int rtas_ibm_suspend_me(u64 handle)
        /* Call function on all CPUs.  One of us will make the
         * rtas call
         */
-       if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
-               atomic_set(&data.error, -EINVAL);
+       on_each_cpu(rtas_percpu_suspend_me, &data, 0);
 
        wait_for_completion(&done);
 
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 1811fa4a1b1ae398611a488c5ffa7e2e98cb5af7..7c48ff4ae8d196b651b274dd3a06e1e392e6cee6 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -15,6 +15,7 @@ EXPORT_SYMBOL(wbinvd_on_cpu);
 
 int wbinvd_on_all_cpus(void)
 {
-       return on_each_cpu(__wbinvd, NULL, 1);
+       on_each_cpu(__wbinvd, NULL, 1);
+       return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 658664a5a5aa4f31404584bd283b1d6530e882ba..df1edb5ec0ade81a2bfa9ab8e876b9f781df4169 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1311,8 +1311,7 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-       if (on_each_cpu(ipi_handler, NULL, 1) != 0)
-               panic(PFX "timed out waiting for the other CPUs!\n");
+       on_each_cpu(ipi_handler, NULL, 1);
 }
 EXPORT_SYMBOL(global_cache_flush);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a56f08ff3097b4bfac294ebded29a1a2bfea53c0..bb8b451ab01f2907e5ff3db9dd19e97e2033ff66 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -35,7 +35,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 /*
  * Call a function on all processors
  */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
+void on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
  * Call a function on processors specified by mask, which might include
@@ -101,7 +101,7 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait);
 
@@ -144,9 +144,8 @@ static inline void smp_send_stop(void) { }
  *     These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()                 0
-static inline int up_smp_call_function(smp_call_func_t func, void *info)
+static inline void up_smp_call_function(smp_call_func_t func, void *info)
 {
-       return 0;
 }
 #define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ef1c565edc5da5407e8eb4eb48c9c8250fa8e0b9..e84c0873559ed3f4dff968a6240293bb22f0978b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -522,7 +522,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
        /*
         * SMT soft disabling on X86 requires to bring the CPU out of the
         * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
-        * CPU marked itself as booted_once in cpu_notify_starting() so the
+        * CPU marked itself as booted_once in notify_cpu_starting() so the
         * cpu_smt_allowed() check will now return false if this is not the
         * primary sibling.
         */
@@ -1221,6 +1221,13 @@ int freeze_secondary_cpus(int primary)
        for_each_online_cpu(cpu) {
                if (cpu == primary)
                        continue;
+
+               if (pm_wakeup_pending()) {
+                       pr_info("Wakeup pending. Abort CPU freeze\n");
+                       error = -EBUSY;
+                       break;
+               }
+
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
diff --git a/kernel/smp.c b/kernel/smp.c
index d155374632eba70533081b37fdf7ba0597c46af9..616d4d1148475291a16167aee7e48890b13890ec 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -34,7 +34,7 @@ struct call_function_data {
        cpumask_var_t           cpumask_ipi;
 };
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
@@ -487,13 +487,11 @@ EXPORT_SYMBOL(smp_call_function_many);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait)
+void smp_call_function(smp_call_func_t func, void *info, int wait)
 {
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
-
-       return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
 
@@ -594,18 +592,16 @@ void __init smp_init(void)
  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
  * of local_irq_disable/enable().
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
+void on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
        unsigned long flags;
-       int ret = 0;
 
        preempt_disable();
-       ret = smp_call_function(func, info, wait);
+       smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
-       return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
 
diff --git a/kernel/up.c b/kernel/up.c
index 483c9962c99947d9e5f86e8cf1aba5019b965959..862b460ab97a8e954c3ac292be94d88670a6fb0c 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -35,14 +35,13 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 }
 EXPORT_SYMBOL(smp_call_function_single_async);
 
-int on_each_cpu(smp_call_func_t func, void *info, int wait)
+void on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
        unsigned long flags;
 
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
-       return 0;
 }
 EXPORT_SYMBOL(on_each_cpu);