From: Greg Kroah-Hartman
Date: Tue, 12 Mar 2019 17:02:04 +0000 (-0700)
Subject: 4.9-stable patches
X-Git-Tag: v5.0.2~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8fbe2834e33ad32078348c568331c48d927311f1;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
        perf-x86-intel-generalize-dynamic-constraint-creation.patch
        perf-x86-intel-make-cpuc-allocations-consistent.patch
        x86-add-tsx-force-abort-cpuid-msr.patch
---
diff --git a/queue-4.9/perf-x86-intel-generalize-dynamic-constraint-creation.patch b/queue-4.9/perf-x86-intel-generalize-dynamic-constraint-creation.patch
new file mode 100644
index 00000000000..906de9f81d9
--- /dev/null
+++ b/queue-4.9/perf-x86-intel-generalize-dynamic-constraint-creation.patch
@@ -0,0 +1,85 @@
+From foo@baz Tue Mar 12 09:30:28 PDT 2019
+From: "Peter Zijlstra (Intel)"
+Date: Tue, 5 Mar 2019 22:23:16 +0100
+Subject: perf/x86/intel: Generalize dynamic constraint creation
+
+From: "Peter Zijlstra (Intel)"
+
+commit 11f8b2d65ca9029591c8df26bb6bd063c312b7fe upstream
+
+Such that we can re-use it.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/events/intel/core.c |   51 +++++++++++++++++++++++++------------------
+ 1 file changed, 30 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2493,6 +2493,35 @@ intel_stop_scheduling(struct cpu_hw_even
+ }
+ 
+ static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++        WARN_ON_ONCE(!cpuc->constraint_list);
++
++        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++                struct event_constraint *cx;
++
++                /*
++                 * grab pre-allocated constraint entry
++                 */
++                cx = &cpuc->constraint_list[idx];
++
++                /*
++                 * initialize dynamic constraint
++                 * with static constraint
++                 */
++                *cx = *c;
++
++                /*
++                 * mark constraint as dynamic
++                 */
++                cx->flags |= PERF_X86_EVENT_DYNAMIC;
++                c = cx;
++        }
++
++        return c;
++}
++
++static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+                            int idx, struct event_constraint *c)
+ {
+@@ -2522,27 +2551,7 @@ intel_get_excl_constraints(struct cpu_hw
+          * only needed when constraint has not yet
+          * been cloned (marked dynamic)
+          */
+-        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+-                struct event_constraint *cx;
+-
+-                /*
+-                 * grab pre-allocated constraint entry
+-                 */
+-                cx = &cpuc->constraint_list[idx];
+-
+-                /*
+-                 * initialize dynamic constraint
+-                 * with static constraint
+-                 */
+-                *cx = *c;
+-
+-                /*
+-                 * mark constraint as dynamic, so we
+-                 * can free it later on
+-                 */
+-                cx->flags |= PERF_X86_EVENT_DYNAMIC;
+-                c = cx;
+-        }
++        c = dyn_constraint(cpuc, c, idx);
+ 
+         /*
+          * From here on, the constraint is dynamic.
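
A quick illustration of the re-use this sets up: a caller takes a static
constraint, grabs the per-CPU dynamic copy via the new helper, and then
narrows that copy, the same pattern intel_get_excl_constraints() now follows.
This is a minimal sketch only; dyn_constraint(), PERF_X86_EVENT_DYNAMIC and
cpuc->constraint_list are the names from the patch above, while the caller
name and the PMC3 example are illustrative assumptions, not queued code.

        /* hypothetical caller, not part of the queued patch */
        static struct event_constraint *
        example_get_constraints(struct cpu_hw_events *cpuc, int idx,
                                struct event_constraint *c)
        {
                /* clone the static constraint into cpuc->constraint_list[idx] */
                c = dyn_constraint(cpuc, c, idx);

                /* the per-CPU copy may now be modified, e.g. forbid PMC3 */
                c->idxmsk64 &= ~(1ULL << 3);
                c->weight = hweight64(c->idxmsk64);

                return c;
        }
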
diff --git a/queue-4.9/perf-x86-intel-make-cpuc-allocations-consistent.patch b/queue-4.9/perf-x86-intel-make-cpuc-allocations-consistent.patch
new file mode 100644
index 00000000000..37abe2269b0
--- /dev/null
+++ b/queue-4.9/perf-x86-intel-make-cpuc-allocations-consistent.patch
@@ -0,0 +1,169 @@
+From foo@baz Tue Mar 12 09:30:28 PDT 2019
+From: "Peter Zijlstra (Intel)"
+Date: Tue, 5 Mar 2019 22:23:15 +0100
+Subject: perf/x86/intel: Make cpuc allocations consistent
+
+From: "Peter Zijlstra (Intel)"
+
+commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream
+
+The cpuc data structure allocation is different between fake and real
+cpuc's; use the same code to init/free both.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/events/core.c       |   13 +++++--------
+ arch/x86/events/intel/core.c |   29 ++++++++++++++++++-----------
+ arch/x86/events/perf_event.h |   11 ++++++++---
+ 3 files changed, 31 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1942,7 +1942,7 @@ static int x86_pmu_commit_txn(struct pmu
+  */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+-        kfree(cpuc->shared_regs);
++        intel_cpuc_finish(cpuc);
+         kfree(cpuc);
+ }
+ 
+@@ -1954,14 +1954,11 @@ static struct cpu_hw_events *allocate_fa
+         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+         if (!cpuc)
+                 return ERR_PTR(-ENOMEM);
+-
+-        /* only needed, if we have extra_regs */
+-        if (x86_pmu.extra_regs) {
+-                cpuc->shared_regs = allocate_shared_regs(cpu);
+-                if (!cpuc->shared_regs)
+-                        goto error;
+-        }
+         cpuc->is_fake = 1;
++
++        if (intel_cpuc_prepare(cpuc, cpu))
++                goto error;
++
+         return cpuc;
+ error:
+         free_fake_cpuc(cpuc);
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3093,7 +3093,7 @@ ssize_t intel_event_sysfs_show(char *pag
+         return x86_event_sysfs_show(page, config, event);
+ }
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+         struct intel_shared_regs *regs;
+         int i;
+@@ -3125,10 +3125,9 @@ static struct intel_excl_cntrs *allocate
+         return c;
+ }
+ 
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ 
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+         if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+                 cpuc->shared_regs = allocate_shared_regs(cpu);
+                 if (!cpuc->shared_regs)
+@@ -3138,7 +3137,7 @@ static int intel_pmu_cpu_prepare(int cpu
+         if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+                 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+ 
+-                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+                 if (!cpuc->constraint_list)
+                         goto err_shared_regs;
+ 
+@@ -3163,6 +3162,11 @@ err:
+         return -ENOMEM;
+ }
+ 
++static int intel_pmu_cpu_prepare(int cpu)
++{
++        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void intel_pmu_cpu_starting(int cpu)
+ {
+         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+@@ -3218,9 +3222,8 @@ static void intel_pmu_cpu_starting(int c
+         }
+ }
+ 
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+         struct intel_excl_cntrs *c;
+ 
+         c = cpuc->excl_cntrs;
+@@ -3238,9 +3241,8 @@ static void intel_pmu_cpu_dying(int cpu)
+         fini_debug_store_on_cpu(cpu);
+ }
+ 
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+-        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+         struct intel_shared_regs *pc;
+ 
+         pc = cpuc->shared_regs;
+@@ -3250,7 +3252,12 @@ static void intel_pmu_cpu_dead(int cpu)
+                 cpuc->shared_regs = NULL;
+         }
+ 
+-        free_excl_cntrs(cpu);
++        free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++        intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+ 
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -4132,7 +4139,7 @@ static __init int fixup_ht_bug(void)
+         get_online_cpus();
+ 
+         for_each_online_cpu(c) {
+-                free_excl_cntrs(c);
++                free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+         }
+ 
+         put_online_cpus();
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -865,7 +865,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                           struct perf_event *event);
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+ 
+ int intel_pmu_init(void);
+ 
+@@ -995,9 +996,13 @@ static inline int intel_pmu_init(void)
+         return 0;
+ }
+ 
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++{
++        return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+ {
+-        return NULL;
+ }
+ 
+ static inline int is_ht_workaround_enabled(void)
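
The net effect of the patch above is a single init/teardown contract for both
the per-CPU cpuc and the throwaway "fake" cpuc used to validate events. A
minimal sketch of that contract from a caller's point of view, assuming (as
free_fake_cpuc() does) that intel_cpuc_finish() is safe on a partially
prepared cpuc; the function name here is hypothetical, not queued code:

        static struct cpu_hw_events *example_scratch_cpuc(int cpu)
        {
                struct cpu_hw_events *cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);

                if (!cpuc)
                        return ERR_PTR(-ENOMEM);
                cpuc->is_fake = 1;

                /* same allocation path as a real CPU's cpuc */
                if (intel_cpuc_prepare(cpuc, cpu)) {
                        /* and the same teardown on failure */
                        intel_cpuc_finish(cpuc);
                        kfree(cpuc);
                        return ERR_PTR(-ENOMEM);
                }
                return cpuc;
        }
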
diff --git a/queue-4.9/series b/queue-4.9/series
index 481dc6108b2..c7dddb09a6d 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -91,3 +91,6 @@ arm-dts-exynos-add-minimal-clkout-parameters-to-exynos3250-pmu.patch
 drm-disable-uncached-dma-optimization-for-arm-and-ar.patch
 arm-8781-1-fix-thumb-2-syscall-return-for-binutils-2.patch
 arm-dts-exynos-do-not-ignore-real-world-fuse-values-for-thermal-zone-0-on-exynos5420.patch
+perf-x86-intel-make-cpuc-allocations-consistent.patch
+perf-x86-intel-generalize-dynamic-constraint-creation.patch
+x86-add-tsx-force-abort-cpuid-msr.patch
diff --git a/queue-4.9/x86-add-tsx-force-abort-cpuid-msr.patch b/queue-4.9/x86-add-tsx-force-abort-cpuid-msr.patch
new file mode 100644
index 00000000000..f4e2b989fd8
--- /dev/null
+++ b/queue-4.9/x86-add-tsx-force-abort-cpuid-msr.patch
@@ -0,0 +1,51 @@
+From foo@baz Tue Mar 12 09:30:28 PDT 2019
+From: "Peter Zijlstra (Intel)"
+Date: Tue, 5 Mar 2019 22:23:17 +0100
+Subject: x86: Add TSX Force Abort CPUID/MSR
+
+From: "Peter Zijlstra (Intel)"
+
+commit 52f64909409c17adf54fcf5f9751e0544ca3a6b4 upstream
+
+Skylake systems will receive a microcode update to address a TSX
+erratum. This microcode will (by default) clobber PMC3 when TSX
+instructions are (speculatively or not) executed.
+
+It also provides an MSR to cause all TSX transactions to abort and
+preserve PMC3.
+
+Add the CPUID enumeration and MSR definition.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/cpufeatures.h |    1 +
+ arch/x86/include/asm/msr-index.h   |    6 ++++++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -314,6 +314,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW   (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS   (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG         (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL       (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP     (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -575,6 +575,12 @@
+ 
+ #define MSR_IA32_TSC_DEADLINE       0x000006E0
+ 
++
++#define MSR_TSX_FORCE_ABORT         0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
++#define MSR_TFA_RTM_FORCE_ABORT     BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX            0x00000180
+ #define MSR_IA32_MCG_EBX            0x00000181
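
These definitions are plumbing only; nothing in this patch writes the MSR
yet. A minimal sketch of how a later consumer is expected to use the new
bits: probe the CPUID feature, then set the force-abort bit so RTM
transactions abort immediately and PMC3 stays usable. The feature flag, MSR
and bit names are the ones defined above; the function itself is a
hypothetical example, not queued code.

        static void example_set_tfa(bool on)
        {
                /* microcode without the erratum fix lacks this CPUID bit */
                if (!boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT))
                        return;

                /* on: all RTM transactions abort, PMC3 is preserved */
                wrmsrl(MSR_TSX_FORCE_ABORT, on ? MSR_TFA_RTM_FORCE_ABORT : 0);
        }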