--- /dev/null
+From foo@baz Tue Mar 12 09:28:36 PDT 2019
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 5 Mar 2019 22:23:16 +0100
+Subject: perf/x86/intel: Generalize dynamic constraint creation
+
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+
+commit 11f8b2d65ca9029591c8df26bb6bd063c312b7fe upstream
+
+Pull the dynamic constraint creation code out into a dyn_constraint()
+helper, such that we can re-use it.
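+
+For illustration only (not part of the upstream changelog): the
+"Implement support for TSX Force Abort" patch later in this series
+becomes the second user of the helper, roughly along these lines:
+
+	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+	/* Without TFA we must not use PMC3; clone the constraint first. */
+	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
+		c = dyn_constraint(cpuc, c, idx);
+		c->idxmsk64 &= ~(1ULL << 3);
+		c->weight--;
+	}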
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 51 +++++++++++++++++++++++++------------------
+ 1 file changed, 30 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2640,6 +2640,35 @@ intel_stop_scheduling(struct cpu_hw_even
+ }
+
+ static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++ WARN_ON_ONCE(!cpuc->constraint_list);
++
++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++ struct event_constraint *cx;
++
++ /*
++ * grab pre-allocated constraint entry
++ */
++ cx = &cpuc->constraint_list[idx];
++
++ /*
++ * initialize dynamic constraint
++ * with static constraint
++ */
++ *cx = *c;
++
++ /*
++ * mark constraint as dynamic
++ */
++ cx->flags |= PERF_X86_EVENT_DYNAMIC;
++ c = cx;
++ }
++
++ return c;
++}
++
++static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int idx, struct event_constraint *c)
+ {
+@@ -2669,27 +2698,7 @@ intel_get_excl_constraints(struct cpu_hw
+ * only needed when constraint has not yet
+ * been cloned (marked dynamic)
+ */
+- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+- struct event_constraint *cx;
+-
+- /*
+- * grab pre-allocated constraint entry
+- */
+- cx = &cpuc->constraint_list[idx];
+-
+- /*
+- * initialize dynamic constraint
+- * with static constraint
+- */
+- *cx = *c;
+-
+- /*
+- * mark constraint as dynamic, so we
+- * can free it later on
+- */
+- cx->flags |= PERF_X86_EVENT_DYNAMIC;
+- c = cx;
+- }
++ c = dyn_constraint(cpuc, c, idx);
+
+ /*
+ * From here on, the constraint is dynamic.
--- /dev/null
+From foo@baz Tue Mar 12 09:28:36 PDT 2019
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 5 Mar 2019 22:23:18 +0100
+Subject: perf/x86/intel: Implement support for TSX Force Abort
+
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+
+commit 400816f60c543153656ac74eaf7f36f6b7202378 upstream
+
+Skylake (and later) will receive a microcode update to address a TSX
+erratum. This microcode will, on execution of a TSX instruction
+(speculative or not), use (clobber) PMC3. This update will also provide
+a new MSR to change this behaviour, along with a CPUID bit to enumerate
+the presence of this new MSR.
+
+When the MSR gets set, the microcode will no longer use PMC3 but will
+Force Abort every TSX transaction (upon executing COMMIT).
+
+When TSX Force Abort (TFA) is allowed (the default), the MSR gets set
+when PMC3 gets scheduled, and cleared when, after scheduling, PMC3 is
+unused.
+
+When TFA is not allowed, clear PMC3 from all constraints such that it
+will not get used.
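+
+As an aside (not part of this patch): the forced abort is observable
+from ordinary RTM code. Once the MSR is set every transaction aborts
+at COMMIT, its updates are rolled back, and the fallback path runs. A
+minimal user-space sketch (illustrative only; build with -mrtm):
+
+	#include <immintrin.h>
+
+	static int try_transactional_increment(int *val)
+	{
+		if (_xbegin() == _XBEGIN_STARTED) {
+			(*val)++;	/* speculative; rolled back on abort */
+			_xend();	/* COMMIT; with TFA set this aborts */
+			return 0;
+		}
+		/* With TFA in force we always end up here. */
+		return -1;
+	}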
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 74 +++++++++++++++++++++++++++++++++++++++++--
+ arch/x86/events/perf_event.h | 6 +++
+ 2 files changed, 77 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int
+ intel_pmu_enable_all(added);
+ }
+
++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
++{
++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
++
++ if (cpuc->tfa_shadow != val) {
++ cpuc->tfa_shadow = val;
++ wrmsrl(MSR_TSX_FORCE_ABORT, val);
++ }
++}
++
++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
++{
++ /*
++ * We're going to use PMC3, make sure TFA is set before we touch it.
++ */
++ if (cntr == 3 && !cpuc->is_fake)
++ intel_set_tfa(cpuc, true);
++}
++
++static void intel_tfa_pmu_enable_all(int added)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++
++ /*
++ * If we find PMC3 is no longer used when we enable the PMU, we can
++ * clear TFA.
++ */
++ if (!test_bit(3, cpuc->active_mask))
++ intel_set_tfa(cpuc, false);
++
++ intel_pmu_enable_all(added);
++}
++
+ static inline u64 intel_pmu_get_status(void)
+ {
+ u64 status;
+@@ -3218,6 +3251,26 @@ glp_get_event_constraints(struct cpu_hw_
+ return c;
+ }
+
++static bool allow_tsx_force_abort = true;
++
++static struct event_constraint *
++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
++ struct perf_event *event)
++{
++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
++
++ /*
++ * Without TFA we must not use PMC3.
++ */
++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
++ c = dyn_constraint(cpuc, c, idx);
++ c->idxmsk64 &= ~(1ULL << 3);
++ c->weight--;
++ }
++
++ return c;
++}
++
+ /*
+ * Broadwell:
+ *
+@@ -3312,13 +3365,15 @@ int intel_cpuc_prepare(struct cpu_hw_eve
+ goto err;
+ }
+
+- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!cpuc->constraint_list)
+ goto err_shared_regs;
++ }
+
++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+@@ -3425,9 +3480,10 @@ static void free_excl_cntrs(struct cpu_h
+ if (c->core_id == -1 || --c->refcnt == 0)
+ kfree(c);
+ cpuc->excl_cntrs = NULL;
+- kfree(cpuc->constraint_list);
+- cpuc->constraint_list = NULL;
+ }
++
++ kfree(cpuc->constraint_list);
++ cpuc->constraint_list = NULL;
+ }
+
+ static void intel_pmu_cpu_dying(int cpu)
+@@ -3912,8 +3968,11 @@ static struct attribute *intel_pmu_caps_
+ NULL
+ };
+
++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
++
+ static struct attribute *intel_pmu_attrs[] = {
+ &dev_attr_freeze_on_smi.attr,
++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
+ NULL,
+ };
+
+@@ -4369,6 +4428,15 @@ __init int intel_pmu_init(void)
+ x86_pmu.cpu_events = get_hsw_events_attrs();
+ intel_pmu_pebs_data_source_skl(
+ boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
++
++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
++ x86_pmu.flags |= PMU_FL_TFA;
++ x86_pmu.get_event_constraints = tfa_get_event_constraints;
++ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
++ }
++
+ pr_cont("Skylake events, ");
+ name = "skylake";
+ break;
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -239,6 +239,11 @@ struct cpu_hw_events {
+ int excl_thread_id; /* 0 or 1 */
+
+ /*
++ * SKL TSX_FORCE_ABORT shadow
++ */
++ u64 tfa_shadow;
++
++ /*
+ * AMD specific bits
+ */
+ struct amd_nb *amd_nb;
+@@ -672,6 +677,7 @@ do { \
+ #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
+ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
+ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
+
+ #define EVENT_VAR(_id) event_attr_##_id
+ #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
--- /dev/null
+From foo@baz Tue Mar 12 09:28:36 PDT 2019
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 5 Mar 2019 22:23:15 +0100
+Subject: perf/x86/intel: Make cpuc allocations consistent
+
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+
+commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream
+
+The cpuc data structure allocation is different between fake and real
+cpucs; use the same code to init/free both.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/core.c | 13 +++++--------
+ arch/x86/events/intel/core.c | 29 ++++++++++++++++++-----------
+ arch/x86/events/perf_event.h | 11 ++++++++---
+ 3 files changed, 31 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1968,7 +1968,7 @@ static int x86_pmu_commit_txn(struct pmu
+ */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+- kfree(cpuc->shared_regs);
++ intel_cpuc_finish(cpuc);
+ kfree(cpuc);
+ }
+
+@@ -1980,14 +1980,11 @@ static struct cpu_hw_events *allocate_fa
+ cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+ if (!cpuc)
+ return ERR_PTR(-ENOMEM);
+-
+- /* only needed, if we have extra_regs */
+- if (x86_pmu.extra_regs) {
+- cpuc->shared_regs = allocate_shared_regs(cpu);
+- if (!cpuc->shared_regs)
+- goto error;
+- }
+ cpuc->is_fake = 1;
++
++ if (intel_cpuc_prepare(cpuc, cpu))
++ goto error;
++
+ return cpuc;
+ error:
+ free_fake_cpuc(cpuc);
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3262,7 +3262,7 @@ ssize_t intel_event_sysfs_show(char *pag
+ return x86_event_sysfs_show(page, config, event);
+ }
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+ struct intel_shared_regs *regs;
+ int i;
+@@ -3294,10 +3294,9 @@ static struct intel_excl_cntrs *allocate
+ return c;
+ }
+
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+@@ -3307,7 +3306,7 @@ static int intel_pmu_cpu_prepare(int cpu
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!cpuc->constraint_list)
+ goto err_shared_regs;
+
+@@ -3332,6 +3331,11 @@ err:
+ return -ENOMEM;
+ }
+
++static int intel_pmu_cpu_prepare(int cpu)
++{
++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void flip_smm_bit(void *data)
+ {
+ unsigned long set = *(unsigned long *)data;
+@@ -3403,9 +3407,8 @@ static void intel_pmu_cpu_starting(int c
+ }
+ }
+
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_excl_cntrs *c;
+
+ c = cpuc->excl_cntrs;
+@@ -3423,9 +3426,8 @@ static void intel_pmu_cpu_dying(int cpu)
+ fini_debug_store_on_cpu(cpu);
+ }
+
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_shared_regs *pc;
+
+ pc = cpuc->shared_regs;
+@@ -3435,7 +3437,12 @@ static void intel_pmu_cpu_dead(int cpu)
+ cpuc->shared_regs = NULL;
+ }
+
+- free_excl_cntrs(cpu);
++ free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -4494,7 +4501,7 @@ static __init int fixup_ht_bug(void)
+ hardlockup_detector_perf_restart();
+
+ for_each_online_cpu(c)
+- free_excl_cntrs(c);
++ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+
+ cpus_read_unlock();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -880,7 +880,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event);
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+
+ int intel_pmu_init(void);
+
+@@ -1014,9 +1015,13 @@ static inline int intel_pmu_init(void)
+ return 0;
+ }
+
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
++ return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+- return NULL;
+ }
+
+ static inline int is_ht_workaround_enabled(void)
gfs2-fix-missed-wakeups-in-find_insert_glock.patch
ath9k-avoid-of-no-eeprom-quirks-without-qca-no-eeprom.patch
driver-core-postpone-dma-tear-down-until-after-devres-release.patch
+perf-x86-intel-make-cpuc-allocations-consistent.patch
+perf-x86-intel-generalize-dynamic-constraint-creation.patch
+x86-add-tsx-force-abort-cpuid-msr.patch
+perf-x86-intel-implement-support-for-tsx-force-abort.patch
--- /dev/null
+From foo@baz Tue Mar 12 09:28:36 PDT 2019
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Tue, 5 Mar 2019 22:23:17 +0100
+Subject: x86: Add TSX Force Abort CPUID/MSR
+
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+
+commit 52f64909409c17adf54fcf5f9751e0544ca3a6b4 upstream
+
+Skylake systems will receive a microcode update to address a TSX
+erratum. This microcode will (by default) clobber PMC3 when TSX
+instructions are executed (speculatively or not).
+
+It also provides an MSR to cause all TSX transactions to abort and
+preserve PMC3.
+
+Add the CPUID enumeration and MSR definition.
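+
+For illustration only (not part of this patch): the perf change later
+in this series consumes the new CPUID bit and MSR roughly as follows
+(the helper name is made up):
+
+	#include <asm/cpufeature.h>
+	#include <asm/msr.h>
+
+	static void tfa_force_abort_all_rtm(void)
+	{
+		/* The feature bit only appears with the updated microcode. */
+		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT))
+			wrmsrl(MSR_TSX_FORCE_ABORT, MSR_TFA_RTM_FORCE_ABORT);
+	}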
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -339,6 +339,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -610,6 +610,12 @@
+
+ #define MSR_IA32_TSC_DEADLINE 0x000006E0
+
++
++#define MSR_TSX_FORCE_ABORT 0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX 0x00000180
+ #define MSR_IA32_MCG_EBX 0x00000181