--- /dev/null
+From b0380e13502adf7dd8be4c47d622c3522aae6c63 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Thu, 14 Jul 2022 11:26:30 -0700
+Subject: perf/x86/intel/lbr: Fix unchecked MSR access error on HSW
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit b0380e13502adf7dd8be4c47d622c3522aae6c63 upstream.
+
+The fuzzer triggers the below trace.
+
+[ 7763.384369] unchecked MSR access error: WRMSR to 0x689
+(tried to write 0x1fffffff8101349e) at rIP: 0xffffffff810704a4
+(native_write_msr+0x4/0x20)
+[ 7763.397420] Call Trace:
+[ 7763.399881] <TASK>
+[ 7763.401994] intel_pmu_lbr_restore+0x9a/0x1f0
+[ 7763.406363] intel_pmu_lbr_sched_task+0x91/0x1c0
+[ 7763.410992] __perf_event_task_sched_in+0x1cd/0x240
+
+On a machine with the LBR format LBR_FORMAT_EIP_FLAGS2, when TSX is
+disabled, a TSX quirk is required to access LBR from the registers.
+lbr_from_signext_quirk_needed() was introduced to determine whether
+the TSX quirk should be applied. However,
+lbr_from_signext_quirk_needed() is invoked before intel_pmu_lbr_init(),
+which parses the LBR format information. Without the correct LBR
+format information, the TSX quirk is never applied.
+
+Move lbr_from_signext_quirk_needed() into intel_pmu_lbr_init().
+Checking x86_pmu.lbr_has_tsx in lbr_from_signext_quirk_needed() is
+no longer required.
+
+Both LBR_FORMAT_EIP_FLAGS2 and LBR_FORMAT_INFO have the LBR_TSX flag,
+but only LBR_FORMAT_EIP_FLAGS2 requires the quirk. Update the comments
+accordingly.
+
+Fixes: 1ac7fd8159a8 ("perf/x86/intel/lbr: Support LBR format V7")
+Reported-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20220714182630.342107-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/lbr.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -278,9 +278,9 @@ enum {
+ };
+
+ /*
+- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
+- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
+- * TSX is not supported they have no consistent behavior:
++ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
++ * are the TSX flags when TSX is supported, but when TSX is not supported
++ * they have no consistent behavior:
+ *
+ * - For wrmsr(), bits 61:62 are considered part of the sign extension.
+ * - For HW updates (branch captures) bits 61:62 are always OFF and are not
+@@ -288,7 +288,7 @@ enum {
+ *
+ * Therefore, if:
+ *
+- * 1) LBR has TSX format
++ * 1) LBR format LBR_FORMAT_EIP_FLAGS2
+ * 2) CPU has no TSX support enabled
+ *
+ * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
+@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quir
+ bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
+ boot_cpu_has(X86_FEATURE_RTM);
+
+- return !tsx_support && x86_pmu.lbr_has_tsx;
++ return !tsx_support;
+ }
+
+ static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+@@ -1611,9 +1611,6 @@ void intel_pmu_lbr_init_hsw(void)
+ x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
+
+ x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+-
+- if (lbr_from_signext_quirk_needed())
+- static_branch_enable(&lbr_from_quirk_key);
+ }
+
+ /* skylake */
+@@ -1704,7 +1701,11 @@ void intel_pmu_lbr_init(void)
+ switch (x86_pmu.intel_cap.lbr_format) {
+ case LBR_FORMAT_EIP_FLAGS2:
+ x86_pmu.lbr_has_tsx = 1;
+- fallthrough;
++ x86_pmu.lbr_from_flags = 1;
++ if (lbr_from_signext_quirk_needed())
++ static_branch_enable(&lbr_from_quirk_key);
++ break;
++
+ case LBR_FORMAT_EIP_FLAGS:
+ x86_pmu.lbr_from_flags = 1;
+ break;
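
For context, the quirk this patch moves works on the following
arithmetic: with LBR_FORMAT_EIP_FLAGS2 and TSX disabled, bits 62:61 of
MSR_LAST_BRANCH_FROM_x are treated as part of the sign extension by
wrmsr(), so a saved LBR value has to be sign-extended before it is
written back. A minimal sketch of that arithmetic follows; it is not
the kernel's actual helper, and the helper names and the assumption
that bit 60 is the top address bit are illustrative only.

    #include <stdint.h>

    #define LBR_TSX_BITS (3ULL << 61)   /* bits 62:61, the TSX flags */

    /*
     * Before wrmsr(): replicate the (assumed) top address bit 60 into
     * bits 62:61 so the written value is sign-extended to 63 bits.
     */
    static uint64_t lbr_from_quirk_wr(uint64_t val)
    {
        val &= ~LBR_TSX_BITS;           /* drop stale TSX flags    */
        if (val & (1ULL << 60))         /* kernel-space address?   */
            val |= LBR_TSX_BITS;        /* extend the sign upward  */
        return val;
    }

    /*
     * After rdmsr(): bits 62:61 carry TSX flags (or nothing
     * consistent when TSX is off) and must not be read as address
     * bits.
     */
    static uint64_t lbr_from_quirk_rd(uint64_t val)
    {
        return val & ~LBR_TSX_BITS;
    }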
--- /dev/null
+From ddfc710395cccc61247348df9eb18ea50321cbed Mon Sep 17 00:00:00 2001
+From: Juri Lelli <juri.lelli@redhat.com>
+Date: Thu, 14 Jul 2022 17:19:08 +0200
+Subject: sched/deadline: Fix BUG_ON condition for deboosted tasks
+
+From: Juri Lelli <juri.lelli@redhat.com>
+
+commit ddfc710395cccc61247348df9eb18ea50321cbed upstream.
+
+Tasks that are being deboosted from SCHED_DEADLINE might enter
+enqueue_task_dl() one last time and hit an erroneous BUG_ON condition:
+since they are not boosted anymore, the if (is_dl_boosted()) branch is
+not taken, but the else if (!dl_prio) branch is, and inside it we
+BUG_ON(!is_dl_boosted()), which is of course false (so the BUG_ON
+triggers), since otherwise we would have entered the if branch above.
+Long story short, the current condition doesn't make sense and always
+triggers a BUG.
+
+Fix this by checking the enqueue flags properly: ENQUEUE_REPLENISH has
+to be present, but additional flags are not a problem.
+
+Fixes: 64be6f1f5f71 ("sched/deadline: Don't replenish from a !SCHED_DEADLINE entity")
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20220714151908.533052-1-juri.lelli@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/deadline.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1669,7 +1669,10 @@ static void enqueue_task_dl(struct rq *r
+ * the throttle.
+ */
+ p->dl.dl_throttled = 0;
+- BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
++ if (!(flags & ENQUEUE_REPLENISH))
++ printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
++ task_pid_nr(p));
++
+ return;
+ }
+
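
The essence of the fix above is switching from an exact-equality test
on the flags word to a bitmask test. A standalone sketch of the
difference, assuming illustrative flag values (the real ENQUEUE_*
constants live in kernel/sched/sched.h and differ):

    #include <stdio.h>

    #define ENQUEUE_WAKEUP    0x01  /* illustrative value */
    #define ENQUEUE_REPLENISH 0x02  /* illustrative value */

    int main(void)
    {
        unsigned int flags = ENQUEUE_REPLENISH | ENQUEUE_WAKEUP;

        /* Old condition: any extra flag breaks the equality, so the
         * BUG_ON fired even though REPLENISH was present. */
        printf("flags != ENQUEUE_REPLENISH   -> %d\n",
               flags != ENQUEUE_REPLENISH);       /* prints 1 */

        /* New condition: only asks whether REPLENISH itself is set. */
        printf("!(flags & ENQUEUE_REPLENISH) -> %d\n",
               !(flags & ENQUEUE_REPLENISH));     /* prints 0 */

        return 0;
    }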
kvm-don-t-null-dereference-ops-destroy.patch
mm-mempolicy-fix-uninit-value-in-mpol_rebind_policy.patch
bpf-make-sure-mac_header-was-set-before-using-it.patch
+sched-deadline-fix-bug_on-condition-for-deboosted-tasks.patch
+perf-x86-intel-lbr-fix-unchecked-msr-access-error-on-hsw.patch
+x86-bugs-warn-when-ibrs-mitigation-is-selected-on-enhanced-ibrs-parts.patch
--- /dev/null
+From eb23b5ef9131e6d65011de349a4d25ef1b3d4314 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Jul 2022 16:15:35 -0700
+Subject: x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit eb23b5ef9131e6d65011de349a4d25ef1b3d4314 upstream.
+
+The IBRS mitigation for spectre_v2 forces a write to MSR_IA32_SPEC_CTRL
+at every kernel entry/exit. On Enhanced IBRS parts, setting
+MSR_IA32_SPEC_CTRL[IBRS] once at boot is sufficient, so the MSR writes
+at every kernel entry/exit incur unnecessary performance loss.
+
+When the Enhanced IBRS feature is present, print a warning about this
+unnecessary performance loss.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/2a5eaf54583c2bfe0edc4fea64006656256cca17.1657814857.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -968,6 +968,7 @@ static inline const char *spectre_v2_mod
+ #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+ #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+ #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
++#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
+
+ #ifdef CONFIG_BPF_SYSCALL
+ void unpriv_ebpf_notify(int new_state)
+@@ -1408,6 +1409,8 @@ static void __init spectre_v2_select_mit
+
+ case SPECTRE_V2_IBRS:
+ setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
++ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
++ pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
+ break;
+
+ case SPECTRE_V2_LFENCE:
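
The performance point behind the new warning can be sketched as
follows. This is a simplified illustration, not kernel code: the real
per-entry writes happen in the entry assembly, and wrmsrl() here is a
stand-in for the MSR write primitive.

    #include <stdint.h>

    #define MSR_IA32_SPEC_CTRL 0x00000048   /* architectural MSR  */
    #define SPEC_CTRL_IBRS     (1ULL << 0)  /* IBRS enable bit    */

    extern void wrmsrl(uint32_t msr, uint64_t val); /* stand-in */

    /* Legacy IBRS: the bit must be re-asserted on every kernel entry
     * (and dropped on exit), i.e. MSR writes on every syscall and
     * interrupt. */
    void legacy_ibrs_kernel_entry(void)
    {
        wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
    }

    /* Enhanced IBRS: a single write at boot keeps the protection in
     * effect, making the hot-path writes above pure overhead. */
    void eibrs_boot_setup(void)
    {
        wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
    }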