From foo@baz Tue Mar 12 09:25:52 PDT 2019
From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Date: Tue, 5 Mar 2019 22:23:15 +0100
Subject: perf/x86/intel: Make cpuc allocations consistent

From: "Peter Zijlstra (Intel)" <peterz@infradead.org>

commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream

The cpuc data structure allocation is different between fake and real
cpuc's; use the same code to init/free both.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/events/core.c       |   13 +++++--------
 arch/x86/events/intel/core.c |   29 ++++++++++++++++++-----------
 arch/x86/events/perf_event.h |   11 ++++++++---
 3 files changed, 31 insertions(+), 22 deletions(-)

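[ Note: the sketch below is not part of the applied patch; it is a rough
  call-flow summary reconstructed from the hunks that follow, showing how
  the real per-CPU cpuc and the fake cpuc used for event validation now
  share the same init/free helpers:

	real per-CPU cpuc (hotplug callbacks):
		intel_pmu_cpu_prepare(cpu)  ->  intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu)
		intel_pmu_cpu_dead(cpu)     ->  intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu))

	fake cpuc (event validation):
		allocate_fake_cpuc()        ->  intel_cpuc_prepare(cpuc, cpu)
		free_fake_cpuc(cpuc)        ->  intel_cpuc_finish(cpuc)
]
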
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu
  */
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-	kfree(cpuc->shared_regs);
+	intel_cpuc_finish(cpuc);
 	kfree(cpuc);
 }
 
@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fa
 	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
 	if (!cpuc)
 		return ERR_PTR(-ENOMEM);
-
-	/* only needed, if we have extra_regs */
-	if (x86_pmu.extra_regs) {
-		cpuc->shared_regs = allocate_shared_regs(cpu);
-		if (!cpuc->shared_regs)
-			goto error;
-	}
 	cpuc->is_fake = 1;
+
+	if (intel_cpuc_prepare(cpuc, cpu))
+		goto error;
+
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *pag
 	return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate
 	return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
@@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 		if (!cpuc->constraint_list)
 			goto err_shared_regs;
 
@@ -3468,6 +3467,11 @@ err:
 	return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void flip_smm_bit(void *data)
 {
 	unsigned long set = *(unsigned long *)data;
@@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int c
 	}
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_excl_cntrs *c;
 
 	c = cpuc->excl_cntrs;
@@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu)
 		disable_counter_freeze();
 }
 
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
 
 	pc = cpuc->shared_regs;
@@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu)
 		cpuc->shared_regs = NULL;
 	}
 
-	free_excl_cntrs(cpu);
+	free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void)
 	hardlockup_detector_perf_restart();
 
 	for_each_online_cpu(c)
-		free_excl_cntrs(c);
+		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
 	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -889,7 +889,8 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
@@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void)
 	return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
+{
+	return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
 {
-	return NULL;
 }
 
 static inline int is_ht_workaround_enabled(void)