From b8ef0db49f019de335737e44082f01532d34fe61 Mon Sep 17 00:00:00 2001
From: Ben Hutchings <ben@decadent.org.uk>
Date: Fri, 10 May 2019 00:46:25 +0100
Subject: [PATCH 30/76] sched: Add sched_smt_active()

Add the sched_smt_active() function needed for some x86 speculation
mitigations. This was introduced upstream by commits 1b568f0aabf2
"sched/core: Optimize SCHED_SMT", ba2591a5993e "sched/smt: Update
sched_smt_present at runtime", c5511d03ec09 "sched/smt: Make
sched_smt_present track topology", and 321a874a7ef8 "sched/smt: Expose
sched_smt_present static key". The upstream implementation uses the
static_key_{disable,enable}_cpuslocked() functions, which aren't
practical to backport.

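As a sketch of the intended call pattern (the caller and the
set_mitigation_state() helper below are hypothetical illustrations,
not kernel APIs; in this series the real users are the x86
speculation mitigation updates):

    #include <linux/sched/smt.h>

    /* Hypothetical helper, for illustration only. */
    extern void set_mitigation_state(bool enable);

    static void update_smt_mitigation(void)
    {
            /*
             * sched_smt_active() is true while at least one online
             * core has more than one SMT sibling online.
             */
            set_mitigation_state(sched_smt_active());
    }

Note that this backport tracks SMT-capable cores with an atomic
count rather than upstream's static key, so sched_smt_active()
compiles down to an atomic_read() instead of a patched branch.
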
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/sched/smt.h | 18 ++++++++++++++++++
 kernel/sched/core.c       | 19 +++++++++++++++++++
 kernel/sched/sched.h      |  1 +
 3 files changed, 38 insertions(+)
 create mode 100644 include/linux/sched/smt.h

diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..5209c268c6fd
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+	return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6b3fff6a6437..50e80b1be2c8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7355,11 +7355,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going up, increment the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		atomic_inc(&sched_smt_present);
+#endif
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7408,6 +7419,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	else
 		synchronize_rcu();
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		atomic_dec(&sched_smt_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec6e838e991a..15c08752926b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/kernel_stat.h>
-- 
2.21.0
