sched: Add sched_smt_active()
author    Ben Hutchings <ben@decadent.org.uk>
          Thu, 9 May 2019 23:46:25 +0000 (00:46 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 16 May 2019 17:45:11 +0000 (19:45 +0200)
Add the sched_smt_active() function needed for some x86 speculation
mitigations.  This was introduced upstream by commits 1b568f0aabf2
"sched/core: Optimize SCHED_SMT", ba2591a5993e "sched/smt: Update
sched_smt_present at runtime", c5511d03ec09 "sched/smt: Make
sched_smt_present track topology", and 321a874a7ef8 "sched/smt: Expose
sched_smt_present static key".  The upstream implementation uses the
static_key_{disable,enable}_cpuslocked() functions, which aren't
practical to backport.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/sched/smt.h [new file with mode: 0644]
kernel/sched/core.c
kernel/sched/sched.h
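
[Editorial note, not part of the patch: the sketch below shows how a caller might use the new helper. The function name update_example_mitigation() is made up for illustration; x86 speculation-mitigation code of this era typically consults sched_smt_active() to decide whether sibling-aware protections are required.]

#include <linux/printk.h>
#include <linux/sched/smt.h>

/* Hypothetical caller, illustration only: react to whether SMT siblings are online. */
static void update_example_mitigation(void)
{
	if (sched_smt_active())
		pr_info("SMT siblings online: sibling-aware mitigations needed\n");
	else
		pr_info("SMT inactive: per-core mitigations are sufficient\n");
}
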

diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 0000000..5209c26
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+       return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d0618951014b0690699c59a3935efb9a06819444..d35a7d528ea667adee59aac713b85c8d24d2baf7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5610,6 +5610,10 @@ static void set_cpu_rq_start_time(void)
        rq->age_stamp = sched_clock_cpu(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 static int sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
@@ -5626,11 +5630,23 @@ static int sched_cpu_active(struct notifier_block *nfb,
                 * set_cpu_online(). But it might not yet have marked itself
                 * as active, which is essential from here on.
                 */
+#ifdef CONFIG_SCHED_SMT
+               /*
+                * When going up, increment the number of cores with SMT present.
+                */
+               if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+                       atomic_inc(&sched_smt_present);
+#endif
                set_cpu_active(cpu, true);
                stop_machine_unpark(cpu);
                return NOTIFY_OK;
 
        case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+               /* Same as for CPU_ONLINE */
+               if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+                       atomic_inc(&sched_smt_present);
+#endif
                set_cpu_active(cpu, true);
                return NOTIFY_OK;
 
@@ -5645,7 +5661,15 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                set_cpu_active((long)hcpu, false);
+#ifdef CONFIG_SCHED_SMT
+               /*
+                * When going down, decrement the number of cores with SMT present.
+                */
+               if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
+                       atomic_dec(&sched_smt_present);
+#endif
                return NOTIFY_OK;
+
        default:
                return NOTIFY_DONE;
        }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6893ee31df4d995a62cae1e73776263767de4cd0..8b96df04ba7851b32a27a73ac897899514b5394b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>