git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.4/sched-add-sched_smt_active.patch
4.4-stable patches
[thirdparty/kernel/stable-queue.git] / queue-4.4 / sched-add-sched_smt_active.patch
1 From foo@baz Tue 14 May 2019 08:29:35 PM CEST
2 From: Ben Hutchings <ben@decadent.org.uk>
3 Date: Fri, 10 May 2019 00:46:25 +0100
4 Subject: sched: Add sched_smt_active()
5
6 From: Ben Hutchings <ben@decadent.org.uk>
7
8 Add the sched_smt_active() function needed for some x86 speculation
9 mitigations. This was introduced upstream by commits 1b568f0aabf2
10 "sched/core: Optimize SCHED_SMT", ba2591a5993e "sched/smt: Update
11 sched_smt_present at runtime", c5511d03ec09 "sched/smt: Make
12 sched_smt_present track topology", and 321a874a7ef8 "sched/smt: Expose
13 sched_smt_present static key". The upstream implementation uses the
14 static_key_{disable,enable}_cpuslocked() functions, which aren't
15 practical to backport.
16
17 Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
18 Cc: Thomas Gleixner <tglx@linutronix.de>
19 Cc: Ingo Molnar <mingo@kernel.org>
20 Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
21 Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
22 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
23 ---
24 include/linux/sched/smt.h | 18 ++++++++++++++++++
25 kernel/sched/core.c | 24 ++++++++++++++++++++++++
26 kernel/sched/sched.h | 1 +
27 3 files changed, 43 insertions(+)
28 create mode 100644 include/linux/sched/smt.h
29
30 --- /dev/null
31 +++ b/include/linux/sched/smt.h
32 @@ -0,0 +1,18 @@
33 +/* SPDX-License-Identifier: GPL-2.0 */
34 +#ifndef _LINUX_SCHED_SMT_H
35 +#define _LINUX_SCHED_SMT_H
36 +
37 +#include <linux/atomic.h>
38 +
39 +#ifdef CONFIG_SCHED_SMT
40 +extern atomic_t sched_smt_present;
41 +
42 +static __always_inline bool sched_smt_active(void)
43 +{
44 + return atomic_read(&sched_smt_present);
45 +}
46 +#else
47 +static inline bool sched_smt_active(void) { return false; }
48 +#endif
49 +
50 +#endif
51 --- a/kernel/sched/core.c
52 +++ b/kernel/sched/core.c
53 @@ -5610,6 +5610,10 @@ static void set_cpu_rq_start_time(void)
54 rq->age_stamp = sched_clock_cpu(cpu);
55 }
56
57 +#ifdef CONFIG_SCHED_SMT
58 +atomic_t sched_smt_present = ATOMIC_INIT(0);
59 +#endif
60 +
61 static int sched_cpu_active(struct notifier_block *nfb,
62 unsigned long action, void *hcpu)
63 {
64 @@ -5626,11 +5630,23 @@ static int sched_cpu_active(struct notif
65 * set_cpu_online(). But it might not yet have marked itself
66 * as active, which is essential from here on.
67 */
68 +#ifdef CONFIG_SCHED_SMT
69 + /*
70 + * When going up, increment the number of cores with SMT present.
71 + */
72 + if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
73 + atomic_inc(&sched_smt_present);
74 +#endif
75 set_cpu_active(cpu, true);
76 stop_machine_unpark(cpu);
77 return NOTIFY_OK;
78
79 case CPU_DOWN_FAILED:
80 +#ifdef CONFIG_SCHED_SMT
81 + /* Same as for CPU_ONLINE */
82 + if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
83 + atomic_inc(&sched_smt_present);
84 +#endif
85 set_cpu_active(cpu, true);
86 return NOTIFY_OK;
87
88 @@ -5645,7 +5661,15 @@ static int sched_cpu_inactive(struct not
89 switch (action & ~CPU_TASKS_FROZEN) {
90 case CPU_DOWN_PREPARE:
91 set_cpu_active((long)hcpu, false);
92 +#ifdef CONFIG_SCHED_SMT
93 + /*
94 + * When going down, decrement the number of cores with SMT present.
95 + */
96 + if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
97 + atomic_dec(&sched_smt_present);
98 +#endif
99 return NOTIFY_OK;
100 +
101 default:
102 return NOTIFY_DONE;
103 }
104 --- a/kernel/sched/sched.h
105 +++ b/kernel/sched/sched.h
106 @@ -2,6 +2,7 @@
107 #include <linux/sched.h>
108 #include <linux/sched/sysctl.h>
109 #include <linux/sched/rt.h>
110 +#include <linux/sched/smt.h>
111 #include <linux/sched/deadline.h>
112 #include <linux/mutex.h>
113 #include <linux/spinlock.h>