From: Greg Kroah-Hartman Date: Tue, 12 Mar 2019 17:02:22 +0000 (-0700) Subject: 4.20-stable patches X-Git-Tag: v5.0.2~8 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d1d3b9c7f041a708b8531ec1075a653960668a8d;p=thirdparty%2Fkernel%2Fstable-queue.git 4.20-stable patches added patches: perf-x86-intel-generalize-dynamic-constraint-creation.patch perf-x86-intel-implement-support-for-tsx-force-abort.patch perf-x86-intel-make-cpuc-allocations-consistent.patch x86-add-tsx-force-abort-cpuid-msr.patch --- diff --git a/queue-4.20/perf-x86-intel-generalize-dynamic-constraint-creation.patch b/queue-4.20/perf-x86-intel-generalize-dynamic-constraint-creation.patch new file mode 100644 index 00000000000..9232b6aa2f1 --- /dev/null +++ b/queue-4.20/perf-x86-intel-generalize-dynamic-constraint-creation.patch @@ -0,0 +1,85 @@ +From foo@baz Tue Mar 12 09:25:52 PDT 2019 +From: "Peter Zijlstra (Intel)" +Date: Tue, 5 Mar 2019 22:23:16 +0100 +Subject: perf/x86/intel: Generalize dynamic constraint creation + +From: "Peter Zijlstra (Intel)" + +commit 11f8b2d65ca9029591c8df26bb6bd063c312b7fe upstream + +Such that we can re-use it. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/events/intel/core.c | 51 +++++++++++++++++++++++++------------------ + 1 file changed, 30 insertions(+), 21 deletions(-) + +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -2769,6 +2769,35 @@ intel_stop_scheduling(struct cpu_hw_even + } + + static struct event_constraint * ++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) ++{ ++ WARN_ON_ONCE(!cpuc->constraint_list); ++ ++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { ++ struct event_constraint *cx; ++ ++ /* ++ * grab pre-allocated constraint entry ++ */ ++ cx = &cpuc->constraint_list[idx]; ++ ++ /* ++ * initialize dynamic constraint ++ * with static constraint ++ */ ++ *cx = *c; ++ ++ /* ++ * mark constraint as dynamic ++ */ ++ cx->flags |= PERF_X86_EVENT_DYNAMIC; ++ c = cx; ++ } ++ ++ return c; ++} ++ ++static struct event_constraint * + intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, + int idx, struct event_constraint *c) + { +@@ -2798,27 +2827,7 @@ intel_get_excl_constraints(struct cpu_hw + * only needed when constraint has not yet + * been cloned (marked dynamic) + */ +- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { +- struct event_constraint *cx; +- +- /* +- * grab pre-allocated constraint entry +- */ +- cx = &cpuc->constraint_list[idx]; +- +- /* +- * initialize dynamic constraint +- * with static constraint +- */ +- *cx = *c; +- +- /* +- * mark constraint as dynamic, so we +- * can free it later on +- */ +- cx->flags |= PERF_X86_EVENT_DYNAMIC; +- c = cx; +- } ++ c = dyn_constraint(cpuc, c, idx); + + /* + * From here on, the constraint is dynamic. diff --git a/queue-4.20/perf-x86-intel-implement-support-for-tsx-force-abort.patch b/queue-4.20/perf-x86-intel-implement-support-for-tsx-force-abort.patch new file mode 100644 index 00000000000..4472b18fa67 --- /dev/null +++ b/queue-4.20/perf-x86-intel-implement-support-for-tsx-force-abort.patch @@ -0,0 +1,182 @@ +From foo@baz Tue Mar 12 09:25:52 PDT 2019 +From: "Peter Zijlstra (Intel)" +Date: Tue, 5 Mar 2019 22:23:18 +0100 +Subject: perf/x86/intel: Implement support for TSX Force Abort + +From: "Peter Zijlstra (Intel)" + +commit 400816f60c543153656ac74eaf7f36f6b7202378 upstream + +Skylake (and later) will receive a microcode update to address a TSX +errata. 
This microcode will, on execution of a TSX instruction +(speculative or not) use (clobber) PMC3. This update will also provide +a new MSR to change this behaviour along with a CPUID bit to enumerate +the presence of this new MSR. + +When the MSR gets set; the microcode will no longer use PMC3 but will +Force Abort every TSX transaction (upon executing COMMIT). + +When TSX Force Abort (TFA) is allowed (default); the MSR gets set when +PMC3 gets scheduled and cleared when, after scheduling, PMC3 is +unused. + +When TFA is not allowed; clear PMC3 from all constraints such that it +will not get used. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/events/intel/core.c | 74 +++++++++++++++++++++++++++++++++++++++++-- + arch/x86/events/perf_event.h | 6 +++ + 2 files changed, 77 insertions(+), 3 deletions(-) + +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int + intel_pmu_enable_all(added); + } + ++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) ++{ ++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; ++ ++ if (cpuc->tfa_shadow != val) { ++ cpuc->tfa_shadow = val; ++ wrmsrl(MSR_TSX_FORCE_ABORT, val); ++ } ++} ++ ++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) ++{ ++ /* ++ * We're going to use PMC3, make sure TFA is set before we touch it. ++ */ ++ if (cntr == 3 && !cpuc->is_fake) ++ intel_set_tfa(cpuc, true); ++} ++ ++static void intel_tfa_pmu_enable_all(int added) ++{ ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); ++ ++ /* ++ * If we find PMC3 is no longer used when we enable the PMU, we can ++ * clear TFA. ++ */ ++ if (!test_bit(3, cpuc->active_mask)) ++ intel_set_tfa(cpuc, false); ++ ++ intel_pmu_enable_all(added); ++} ++ + static void enable_counter_freeze(void) + { + update_debugctlmsr(get_debugctlmsr() | +@@ -3354,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_ + return c; + } + ++static bool allow_tsx_force_abort = true; ++ ++static struct event_constraint * ++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, ++ struct perf_event *event) ++{ ++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); ++ ++ /* ++ * Without TFA we must not use PMC3. 
++ */ ++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { ++ c = dyn_constraint(cpuc, c, idx); ++ c->idxmsk64 &= ~(1ULL << 3); ++ c->weight--; ++ } ++ ++ return c; ++} ++ + /* + * Broadwell: + * +@@ -3448,13 +3501,15 @@ int intel_cpuc_prepare(struct cpu_hw_eve + goto err; + } + +- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { ++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { + size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); + + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); + if (!cpuc->constraint_list) + goto err_shared_regs; ++ } + ++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { + cpuc->excl_cntrs = allocate_excl_cntrs(cpu); + if (!cpuc->excl_cntrs) + goto err_constraint_list; +@@ -3564,9 +3619,10 @@ static void free_excl_cntrs(struct cpu_h + if (c->core_id == -1 || --c->refcnt == 0) + kfree(c); + cpuc->excl_cntrs = NULL; +- kfree(cpuc->constraint_list); +- cpuc->constraint_list = NULL; + } ++ ++ kfree(cpuc->constraint_list); ++ cpuc->constraint_list = NULL; + } + + static void intel_pmu_cpu_dying(int cpu) +@@ -4086,8 +4142,11 @@ static struct attribute *intel_pmu_caps_ + NULL + }; + ++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); ++ + static struct attribute *intel_pmu_attrs[] = { + &dev_attr_freeze_on_smi.attr, ++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */ + NULL, + }; + +@@ -4580,6 +4639,15 @@ __init int intel_pmu_init(void) + tsx_attr = hsw_tsx_events_attrs; + intel_pmu_pebs_data_source_skl( + boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); ++ ++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { ++ x86_pmu.flags |= PMU_FL_TFA; ++ x86_pmu.get_event_constraints = tfa_get_event_constraints; ++ x86_pmu.enable_all = intel_tfa_pmu_enable_all; ++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; ++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr; ++ } ++ + pr_cont("Skylake events, "); + name = "skylake"; + break; +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -243,6 +243,11 @@ struct cpu_hw_events { + int excl_thread_id; /* 0 or 1 */ + + /* ++ * SKL TSX_FORCE_ABORT shadow ++ */ ++ u64 tfa_shadow; ++ ++ /* + * AMD specific bits + */ + struct amd_nb *amd_nb; +@@ -681,6 +686,7 @@ do { \ + #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ + #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ + #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ ++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */ + + #define EVENT_VAR(_id) event_attr_##_id + #define EVENT_PTR(_id) &event_attr_##_id.attr.attr diff --git a/queue-4.20/perf-x86-intel-make-cpuc-allocations-consistent.patch b/queue-4.20/perf-x86-intel-make-cpuc-allocations-consistent.patch new file mode 100644 index 00000000000..bd6283ecb4a --- /dev/null +++ b/queue-4.20/perf-x86-intel-make-cpuc-allocations-consistent.patch @@ -0,0 +1,169 @@ +From foo@baz Tue Mar 12 09:25:52 PDT 2019 +From: "Peter Zijlstra (Intel)" +Date: Tue, 5 Mar 2019 22:23:15 +0100 +Subject: perf/x86/intel: Make cpuc allocations consistent + +From: "Peter Zijlstra (Intel)" + +commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream + +The cpuc data structure allocation is different between fake and real +cpuc's; use the same code to init/free both. 
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/events/core.c | 13 +++++-------- + arch/x86/events/intel/core.c | 29 ++++++++++++++++++----------- + arch/x86/events/perf_event.h | 11 ++++++++--- + 3 files changed, 31 insertions(+), 22 deletions(-) + +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu + */ + static void free_fake_cpuc(struct cpu_hw_events *cpuc) + { +- kfree(cpuc->shared_regs); ++ intel_cpuc_finish(cpuc); + kfree(cpuc); + } + +@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fa + cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); + if (!cpuc) + return ERR_PTR(-ENOMEM); +- +- /* only needed, if we have extra_regs */ +- if (x86_pmu.extra_regs) { +- cpuc->shared_regs = allocate_shared_regs(cpu); +- if (!cpuc->shared_regs) +- goto error; +- } + cpuc->is_fake = 1; ++ ++ if (intel_cpuc_prepare(cpuc, cpu)) ++ goto error; ++ + return cpuc; + error: + free_fake_cpuc(cpuc); +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *pag + return x86_event_sysfs_show(page, config, event); + } + +-struct intel_shared_regs *allocate_shared_regs(int cpu) ++static struct intel_shared_regs *allocate_shared_regs(int cpu) + { + struct intel_shared_regs *regs; + int i; +@@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate + return c; + } + +-static int intel_pmu_cpu_prepare(int cpu) +-{ +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + ++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) ++{ + if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { + cpuc->shared_regs = allocate_shared_regs(cpu); + if (!cpuc->shared_regs) +@@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu + if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { + size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); + +- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); ++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); + if (!cpuc->constraint_list) + goto err_shared_regs; + +@@ -3468,6 +3467,11 @@ err: + return -ENOMEM; + } + ++static int intel_pmu_cpu_prepare(int cpu) ++{ ++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); ++} ++ + static void flip_smm_bit(void *data) + { + unsigned long set = *(unsigned long *)data; +@@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int c + } + } + +-static void free_excl_cntrs(int cpu) ++static void free_excl_cntrs(struct cpu_hw_events *cpuc) + { +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct intel_excl_cntrs *c; + + c = cpuc->excl_cntrs; +@@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu) + disable_counter_freeze(); + } + +-static void intel_pmu_cpu_dead(int cpu) ++void intel_cpuc_finish(struct cpu_hw_events *cpuc) + { +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct intel_shared_regs *pc; + + pc = cpuc->shared_regs; +@@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu) + cpuc->shared_regs = NULL; + } + +- free_excl_cntrs(cpu); ++ free_excl_cntrs(cpuc); ++} ++ ++static void intel_pmu_cpu_dead(int cpu) ++{ ++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); + } + + static void intel_pmu_sched_task(struct perf_event_context *ctx, +@@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void) + hardlockup_detector_perf_restart(); + + for_each_online_cpu(c) +- free_excl_cntrs(c); ++ free_excl_cntrs(&per_cpu(cpu_hw_events, c)); + + 
cpus_read_unlock(); + pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -889,7 +889,8 @@ struct event_constraint * + x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event); + +-struct intel_shared_regs *allocate_shared_regs(int cpu); ++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); ++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); + + int intel_pmu_init(void); + +@@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void) + return 0; + } + +-static inline struct intel_shared_regs *allocate_shared_regs(int cpu) ++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu) ++{ ++ return 0; ++} ++ ++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc) + { +- return NULL; + } + + static inline int is_ht_workaround_enabled(void) diff --git a/queue-4.20/series b/queue-4.20/series index 897ddf4fd40..a483cb96905 100644 --- a/queue-4.20/series +++ b/queue-4.20/series @@ -165,3 +165,7 @@ bpf-stop-the-psock-parser-before-canceling-its-work.patch gfs2-fix-missed-wakeups-in-find_insert_glock.patch staging-erofs-keep-corrupted-fs-from-crashing-kernel-in-erofs_namei.patch ath9k-avoid-of-no-eeprom-quirks-without-qca-no-eeprom.patch +perf-x86-intel-make-cpuc-allocations-consistent.patch +perf-x86-intel-generalize-dynamic-constraint-creation.patch +x86-add-tsx-force-abort-cpuid-msr.patch +perf-x86-intel-implement-support-for-tsx-force-abort.patch diff --git a/queue-4.20/x86-add-tsx-force-abort-cpuid-msr.patch b/queue-4.20/x86-add-tsx-force-abort-cpuid-msr.patch new file mode 100644 index 00000000000..82bd2874930 --- /dev/null +++ b/queue-4.20/x86-add-tsx-force-abort-cpuid-msr.patch @@ -0,0 +1,51 @@ +From foo@baz Tue Mar 12 09:25:52 PDT 2019 +From: "Peter Zijlstra (Intel)" +Date: Tue, 5 Mar 2019 22:23:17 +0100 +Subject: x86: Add TSX Force Abort CPUID/MSR + +From: "Peter Zijlstra (Intel)" + +commit 52f64909409c17adf54fcf5f9751e0544ca3a6b4 upstream + +Skylake systems will receive a microcode update to address a TSX +errata. This microcode will (by default) clobber PMC3 when TSX +instructions are (speculatively or not) executed. + +It also provides an MSR to cause all TSX transaction to abort and +preserve PMC3. + +Add the CPUID enumeration and MSR definition. 
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/include/asm/msr-index.h | 6 ++++++ + 2 files changed, 7 insertions(+) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -342,6 +342,7 @@ + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -630,6 +630,12 @@ + + #define MSR_IA32_TSC_DEADLINE 0x000006E0 + ++ ++#define MSR_TSX_FORCE_ABORT 0x0000010F ++ ++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0 ++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT) ++ + /* P4/Xeon+ specific */ + #define MSR_IA32_MCG_EAX 0x00000180 + #define MSR_IA32_MCG_EBX 0x00000181
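
The definitions queued above (CPUID.(EAX=7,ECX=0):EDX bit 13 enumerating TSX_FORCE_ABORT, and MSR_TSX_FORCE_ABORT at 0x0000010F with RTM_FORCE_ABORT in bit 0) can be probed from user space to check whether the microcode is currently forcing RTM transactions to abort. The standalone C sketch below is illustrative only and is not taken from the queued patches; it assumes root privileges and the msr driver ("modprobe msr" providing /dev/cpu/0/msr), and uses the bit positions defined in x86-add-tsx-force-abort-cpuid-msr.patch.

/*
 * Illustrative sketch, not part of the queued patches: enumerate the
 * TSX_FORCE_ABORT CPUID bit and read MSR_TSX_FORCE_ABORT (0x10F)
 * through the msr driver.  Assumes /dev/cpu/0/msr exists (modprobe msr)
 * and root privileges; bit positions are taken from the patches above.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_TSX_FORCE_ABORT	0x0000010f
#define TFA_RTM_FORCE_ABORT	(1ULL << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t val;
	int fd;

	/* CPUID.(EAX=7,ECX=0):EDX[13] enumerates TSX_FORCE_ABORT */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
	    !(edx & (1u << 13))) {
		puts("TSX_FORCE_ABORT not enumerated on this CPU");
		return 0;
	}

	fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr (needs root and the msr module)");
		return 1;
	}

	/* the msr driver reads 8 bytes at a file offset equal to the MSR number */
	if (pread(fd, &val, sizeof(val), MSR_TSX_FORCE_ABORT) != sizeof(val)) {
		perror("read MSR_TSX_FORCE_ABORT");
		close(fd);
		return 1;
	}
	close(fd);

	printf("MSR_TSX_FORCE_ABORT = 0x%llx (RTM_FORCE_ABORT %s)\n",
	       (unsigned long long)val,
	       (val & TFA_RTM_FORCE_ABORT) ? "set" : "clear");
	return 0;
}

Compiled with "gcc -O2 -o tfa_probe tfa_probe.c" (the file name is arbitrary) and run as root, this reports whether the force-abort bit is currently set; the offset-equals-MSR-number read convention is the same one used by rdmsr(1) from msr-tools.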