1 From foo@baz Tue Mar 12 09:25:52 PDT 2019
2 From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
3 Date: Tue, 5 Mar 2019 22:23:15 +0100
4 Subject: perf/x86/intel: Make cpuc allocations consistent
6 From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
8 commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream
10 The cpuc data structure allocation is different between fake and real
11 cpuc's; use the same code to init/free both.
13 Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
14 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
15 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
17 arch/x86/events/core.c | 13 +++++--------
18 arch/x86/events/intel/core.c | 29 ++++++++++++++++++-----------
19 arch/x86/events/perf_event.h | 11 ++++++++---
20 3 files changed, 31 insertions(+), 22 deletions(-)
22 --- a/arch/x86/events/core.c
23 +++ b/arch/x86/events/core.c
24 @@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu
26 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
28 - kfree(cpuc->shared_regs);
29 + intel_cpuc_finish(cpuc);
33 @@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fa
34 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
36 return ERR_PTR(-ENOMEM);
38 - /* only needed, if we have extra_regs */
39 - if (x86_pmu.extra_regs) {
40 - cpuc->shared_regs = allocate_shared_regs(cpu);
41 - if (!cpuc->shared_regs)
46 + if (intel_cpuc_prepare(cpuc, cpu))
52 --- a/arch/x86/events/intel/core.c
53 +++ b/arch/x86/events/intel/core.c
54 @@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *pag
55 return x86_event_sysfs_show(page, config, event);
58 -struct intel_shared_regs *allocate_shared_regs(int cpu)
59 +static struct intel_shared_regs *allocate_shared_regs(int cpu)
61 struct intel_shared_regs *regs;
63 @@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate
67 -static int intel_pmu_cpu_prepare(int cpu)
69 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
71 +int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
73 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
74 cpuc->shared_regs = allocate_shared_regs(cpu);
75 if (!cpuc->shared_regs)
76 @@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu
77 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
78 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
80 - cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
81 + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
82 if (!cpuc->constraint_list)
85 @@ -3468,6 +3467,11 @@ err:
89 +static int intel_pmu_cpu_prepare(int cpu)
91 + return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
94 static void flip_smm_bit(void *data)
96 unsigned long set = *(unsigned long *)data;
97 @@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int c
101 -static void free_excl_cntrs(int cpu)
102 +static void free_excl_cntrs(struct cpu_hw_events *cpuc)
104 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
105 struct intel_excl_cntrs *c;
107 c = cpuc->excl_cntrs;
108 @@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu)
109 disable_counter_freeze();
112 -static void intel_pmu_cpu_dead(int cpu)
113 +void intel_cpuc_finish(struct cpu_hw_events *cpuc)
115 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
116 struct intel_shared_regs *pc;
118 pc = cpuc->shared_regs;
119 @@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu)
120 cpuc->shared_regs = NULL;
123 - free_excl_cntrs(cpu);
124 + free_excl_cntrs(cpuc);
127 +static void intel_pmu_cpu_dead(int cpu)
129 + intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
132 static void intel_pmu_sched_task(struct perf_event_context *ctx,
133 @@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void)
134 hardlockup_detector_perf_restart();
136 for_each_online_cpu(c)
137 - free_excl_cntrs(c);
138 + free_excl_cntrs(&per_cpu(cpu_hw_events, c));
141 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
142 --- a/arch/x86/events/perf_event.h
143 +++ b/arch/x86/events/perf_event.h
144 @@ -889,7 +889,8 @@ struct event_constraint *
145 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
146 struct perf_event *event);
148 -struct intel_shared_regs *allocate_shared_regs(int cpu);
149 +extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
150 +extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
152 int intel_pmu_init(void);
154 @@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void)
158 -static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
159 +static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
164 +static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
169 static inline int is_ht_workaround_enabled(void)