git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.19
author Sasha Levin <sashal@kernel.org>
Mon, 15 Jun 2020 21:02:56 +0000 (17:02 -0400)
committer Sasha Levin <sashal@kernel.org>
Mon, 15 Jun 2020 21:02:56 +0000 (17:02 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.19/kvm-x86-fix-apic-page-invalidation-race.patch [new file with mode: 0644]
queue-4.19/kvm-x86-fix-l1tf-mitigation-for-shadow-mmu.patch [new file with mode: 0644]
queue-4.19/kvm-x86-mmu-consolidate-is-mmio-spte-code.patch [new file with mode: 0644]
queue-4.19/kvm-x86-only-do-l1tf-workaround-on-affected-processo.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-speculation-add-support-for-stibp-always-on-pref.patch [new file with mode: 0644]
queue-4.19/x86-speculation-avoid-force-disabling-ibpb-based-on-.patch [new file with mode: 0644]
queue-4.19/x86-speculation-change-misspelled-stipb-to-stibp.patch [new file with mode: 0644]
queue-4.19/x86-speculation-pr_spec_force_disable-enforcement-fo.patch [new file with mode: 0644]

diff --git a/queue-4.19/kvm-x86-fix-apic-page-invalidation-race.patch b/queue-4.19/kvm-x86-fix-apic-page-invalidation-race.patch
new file mode 100644 (file)
index 0000000..7e9e04b
--- /dev/null
@@ -0,0 +1,169 @@
+From 4b04c05160a61ea8eb2d3604261c57e2ebd404fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Jun 2020 13:26:27 +0900
+Subject: KVM: x86: Fix APIC page invalidation race
+
+From: Eiichi Tsukata <eiichi.tsukata@nutanix.com>
+
+[ Upstream commit e649b3f0188f8fd34dd0dde8d43fd3312b902fb2 ]
+
+Commit b1394e745b94 ("KVM: x86: fix APIC page invalidation") tried
+to fix inappropriate APIC page invalidation by re-introducing arch
+specific kvm_arch_mmu_notifier_invalidate_range() and calling it from
+kvm_mmu_notifier_invalidate_range_start. However, the patch left a
+possible race where the VMCS APIC address cache is updated *before*
+it is unmapped:
+
+  (Invalidator) kvm_mmu_notifier_invalidate_range_start()
+  (Invalidator) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD)
+  (KVM VCPU) vcpu_enter_guest()
+  (KVM VCPU) kvm_vcpu_reload_apic_access_page()
+  (Invalidator) actually unmap page
+
+Because of the above race, there can be a mismatch between the
+host physical address stored in the APIC_ACCESS_PAGE VMCS field and
+the host physical address stored in the EPT entry for the APIC GPA
+(0xfee00000).  When this happens, the processor will not trap APIC
+accesses, and will instead show the raw contents of the APIC-access page.
+Because Windows periodically checks for unexpected modifications to
+the LAPIC registers, this shows up as a BSOD crash with BugCheck
+CRITICAL_STRUCTURE_CORRUPTION (109), as currently reported in
+https://bugzilla.redhat.com/show_bug.cgi?id=1751017.
+
+The root cause of the issue is that kvm_arch_mmu_notifier_invalidate_range()
+cannot guarantee that no additional references are taken to the pages in
+the range before kvm_mmu_notifier_invalidate_range_end().  Fortunately,
+this case is supported by the MMU notifier API, as documented in
+include/linux/mmu_notifier.h:
+
+        * If the subsystem
+         * can't guarantee that no additional references are taken to
+         * the pages in the range, it has to implement the
+         * invalidate_range() notifier to remove any references taken
+         * after invalidate_range_start().
+
+The fix therefore is to reload the APIC-access page field in the VMCS
+from kvm_mmu_notifier_invalidate_range() instead of ..._range_start().
+
+Cc: stable@vger.kernel.org
+Fixes: b1394e745b94 ("KVM: x86: fix APIC page invalidation")
+Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=197951
+Signed-off-by: Eiichi Tsukata <eiichi.tsukata@nutanix.com>
+Message-Id: <20200606042627.61070-1-eiichi.tsukata@nutanix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c       |  7 ++-----
+ include/linux/kvm_host.h |  4 ++--
+ virt/kvm/kvm_main.c      | 24 ++++++++++++++++--------
+ 3 files changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b0fd24ee08d2..c53df0b95385 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7525,9 +7525,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
+       kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ }
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end,
+-              bool blockable)
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                          unsigned long start, unsigned long end)
+ {
+       unsigned long apic_address;
+@@ -7538,8 +7537,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+       apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (start <= apic_address && apic_address < end)
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+-
+-      return 0;
+ }
+ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 92c6f80e6327..a0de4c7dc9d3 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1327,8 +1327,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ }
+ #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end, bool blockable);
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                          unsigned long start, unsigned long end);
+ #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index aca15bd1cc4c..1218ea663c6d 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -141,10 +141,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
+ static unsigned long long kvm_createvm_count;
+ static unsigned long long kvm_active_vms;
+-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end, bool blockable)
++__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                                 unsigned long start, unsigned long end)
+ {
+-      return 0;
+ }
+ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+@@ -366,6 +365,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+       return container_of(mn, struct kvm, mmu_notifier);
+ }
++static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
++                                            struct mm_struct *mm,
++                                            unsigned long start, unsigned long end)
++{
++      struct kvm *kvm = mmu_notifier_to_kvm(mn);
++      int idx;
++
++      idx = srcu_read_lock(&kvm->srcu);
++      kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
++      srcu_read_unlock(&kvm->srcu, idx);
++}
++
+ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+                                       struct mm_struct *mm,
+                                       unsigned long address,
+@@ -390,7 +401,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ {
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       int need_tlb_flush = 0, idx;
+-      int ret;
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+@@ -407,12 +417,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+               kvm_flush_remote_tlbs(kvm);
+       spin_unlock(&kvm->mmu_lock);
+-
+-      ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable);
+-
+       srcu_read_unlock(&kvm->srcu, idx);
+-      return ret;
++      return 0;
+ }
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+@@ -521,6 +528,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
+ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+       .flags                  = MMU_INVALIDATE_DOES_NOT_BLOCK,
++      .invalidate_range       = kvm_mmu_notifier_invalidate_range,
+       .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+       .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
+       .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
+-- 
+2.25.1
+
diff --git a/queue-4.19/kvm-x86-fix-l1tf-mitigation-for-shadow-mmu.patch b/queue-4.19/kvm-x86-fix-l1tf-mitigation-for-shadow-mmu.patch
new file mode 100644 (file)
index 0000000..a23dd76
--- /dev/null
@@ -0,0 +1,74 @@
+From 63147580db2aec2a2ba334de1f92e953314f9e10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 May 2019 01:40:25 -0700
+Subject: kvm: x86: Fix L1TF mitigation for shadow MMU
+
+From: Kai Huang <kai.huang@linux.intel.com>
+
+[ Upstream commit 61455bf26236e7f3d72705382a6437fdfd1bd0af ]
+
+Currently KVM sets the 5 most significant bits of the physical address
+width reported by CPUID (boot_cpu_data.x86_phys_bits) in nonpresent or
+reserved-bit SPTEs to mitigate L1TF attacks from the guest when using the
+shadow MMU. However, on some Intel CPUs the physical address width of the
+internal cache is greater than the physical address width reported by
+CPUID.
+
+Use the kernel's existing boot_cpu_data.x86_cache_bits to determine the
+five most significant bits. Doing so improves KVM's L1TF mitigation in
+the unlikely scenario that system RAM overlaps the high order bits of
+the "real" physical address space as reported by CPUID. This aligns with
+the kernel's warnings regarding L1TF mitigation, e.g. in the above
+scenario the kernel won't warn the user about lack of L1TF mitigation
+if x86_cache_bits is greater than x86_phys_bits.
+
+Also initialize shadow_nonpresent_or_rsvd_mask explicitly to make it
+consistent with other 'shadow_{xxx}_mask', and opportunistically add a
+WARN once if KVM's L1TF mitigation cannot be applied on a system that
+is marked as being susceptible to L1TF.
+
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 62f1e4663bc3..440ffe810e5d 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -479,16 +479,24 @@ static void kvm_mmu_reset_all_pte_masks(void)
+        * If the CPU has 46 or less physical address bits, then set an
+        * appropriate mask to guard against L1TF attacks. Otherwise, it is
+        * assumed that the CPU is not vulnerable to L1TF.
++       *
++       * Some Intel CPUs address the L1 cache using more PA bits than are
++       * reported by CPUID. Use the PA width of the L1 cache when possible
++       * to achieve more effective mitigation, e.g. if system RAM overlaps
++       * the most significant bits of legal physical address space.
+        */
+-      low_phys_bits = boot_cpu_data.x86_phys_bits;
+-      if (boot_cpu_data.x86_phys_bits <
++      shadow_nonpresent_or_rsvd_mask = 0;
++      low_phys_bits = boot_cpu_data.x86_cache_bits;
++      if (boot_cpu_data.x86_cache_bits <
+           52 - shadow_nonpresent_or_rsvd_mask_len) {
+               shadow_nonpresent_or_rsvd_mask =
+-                      rsvd_bits(boot_cpu_data.x86_phys_bits -
++                      rsvd_bits(boot_cpu_data.x86_cache_bits -
+                                 shadow_nonpresent_or_rsvd_mask_len,
+-                                boot_cpu_data.x86_phys_bits - 1);
++                                boot_cpu_data.x86_cache_bits - 1);
+               low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+-      }
++      } else
++              WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
++
+       shadow_nonpresent_or_rsvd_lower_gfn_mask =
+               GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
+ }
+-- 
+2.25.1
+
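
As an illustration of the change above (not part of the patch), here is a small
user-space sketch that mirrors the kernel's rsvd_bits() helper from
arch/x86/kvm/mmu.h and shows how the reserved-bits mask moves when it is derived
from the cache's physical address width instead of the CPUID-reported width.
The bit widths below are hypothetical example values.

#include <stdio.h>
#include <stdint.h>

/* Mirrors rsvd_bits() in arch/x86/kvm/mmu.h: set bits s..e (inclusive). */
static uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        const int mask_len   = 5;  /* shadow_nonpresent_or_rsvd_mask_len        */
        const int phys_bits  = 44; /* hypothetical boot_cpu_data.x86_phys_bits  */
        const int cache_bits = 46; /* hypothetical boot_cpu_data.x86_cache_bits */

        /* Old placement: derived from the CPUID-reported address width. */
        uint64_t old_mask = rsvd_bits(phys_bits - mask_len, phys_bits - 1);
        /* New placement: derived from the L1 cache's address width. */
        uint64_t new_mask = rsvd_bits(cache_bits - mask_len, cache_bits - 1);

        printf("mask from x86_phys_bits : %#llx\n", (unsigned long long)old_mask);
        printf("mask from x86_cache_bits: %#llx\n", (unsigned long long)new_mask);
        return 0;
}

The sketch only prints the two candidate masks; the patch makes the kernel use
the second one, so the mitigation still holds when the cache is indexed with
more address bits than CPUID reports.
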
diff --git a/queue-4.19/kvm-x86-mmu-consolidate-is-mmio-spte-code.patch b/queue-4.19/kvm-x86-mmu-consolidate-is-mmio-spte-code.patch
new file mode 100644 (file)
index 0000000..2cce888
--- /dev/null
@@ -0,0 +1,78 @@
+From 17415c906e3dc56e4d043238dd8251a7b207e404 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Aug 2019 13:35:23 -0700
+Subject: KVM: x86/mmu: Consolidate "is MMIO SPTE" code
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+[ Upstream commit 26c44a63a291893e0a00f01e96b6e1d0310a79a9 ]
+
+Replace the open-coded "is MMIO SPTE" checks in the MMU warnings
+related to software-based access/dirty tracking to make the code
+slightly more self-documenting.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 440ffe810e5d..ac0a794267d4 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -299,6 +299,11 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
++static bool is_mmio_spte(u64 spte)
++{
++      return (spte & shadow_mmio_mask) == shadow_mmio_value;
++}
++
+ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
+ {
+       return sp->role.ad_disabled;
+@@ -306,7 +311,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
+ static inline bool spte_ad_enabled(u64 spte)
+ {
+-      MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
++      MMU_WARN_ON(is_mmio_spte(spte));
+       return !(spte & shadow_acc_track_value);
+ }
+@@ -317,13 +322,13 @@ static bool is_nx_huge_page_enabled(void)
+ static inline u64 spte_shadow_accessed_mask(u64 spte)
+ {
+-      MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
++      MMU_WARN_ON(is_mmio_spte(spte));
+       return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
+ }
+ static inline u64 spte_shadow_dirty_mask(u64 spte)
+ {
+-      MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
++      MMU_WARN_ON(is_mmio_spte(spte));
+       return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
+ }
+@@ -393,11 +398,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+       mmu_spte_set(sptep, mask);
+ }
+-static bool is_mmio_spte(u64 spte)
+-{
+-      return (spte & shadow_mmio_mask) == shadow_mmio_value;
+-}
+-
+ static gfn_t get_mmio_spte_gfn(u64 spte)
+ {
+       u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
+-- 
+2.25.1
+
diff --git a/queue-4.19/kvm-x86-only-do-l1tf-workaround-on-affected-processo.patch b/queue-4.19/kvm-x86-only-do-l1tf-workaround-on-affected-processo.patch
new file mode 100644 (file)
index 0000000..25d3fb8
--- /dev/null
@@ -0,0 +1,80 @@
+From 5e73dba1d09ac6380ab1b8b4df51d823586ee849 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 May 2020 05:34:41 -0400
+Subject: KVM: x86: only do L1TF workaround on affected processors
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit d43e2675e96fc6ae1a633b6a69d296394448cc32 ]
+
+KVM stores the gfn in MMIO SPTEs as a caching optimization.  These are split
+in two parts, as in "[high 11111 low]", to thwart any attempt to use these bits
+in an L1TF attack.  This works as long as there are 5 free bits between
+MAXPHYADDR and bit 50 (inclusive), leaving bit 51 free so that the MMIO
+access triggers a reserved-bit-set page fault.
+
+The bit positions, however, were computed wrongly for AMD processors that have
+encryption support.  In this case, x86_phys_bits is reduced (for example
+from 48 to 43, to account for the C bit at position 47 and four bits used
+internally to store the SEV ASID and other stuff) while x86_cache_bits
+would remain set to 48, and _all_ bits between the reduced MAXPHYADDR
+and bit 51 are set.  Then low_phys_bits would also cover some of the
+bits that are set in the shadow_mmio_value, terribly confusing the gfn
+caching mechanism.
+
+To fix this, avoid splitting gfns as long as the processor does not have
+the L1TF bug (which includes all AMD processors).  When there is no
+splitting, low_phys_bits can be set to the reduced MAXPHYADDR removing
+the overlap.  This fixes "npt=0" operation on EPYC processors.
+
+Thanks to Maxim Levitsky for bisecting this bug.
+
+Cc: stable@vger.kernel.org
+Fixes: 52918ed5fcf0 ("KVM: SVM: Override default MMIO mask if memory encryption is enabled")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index ac0a794267d4..18632f15b29f 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -294,6 +294,8 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
+ {
+       BUG_ON((mmio_mask & mmio_value) != mmio_value);
++      WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
++      WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
+       shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
+       shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
+ }
+@@ -486,16 +488,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
+        * the most significant bits of legal physical address space.
+        */
+       shadow_nonpresent_or_rsvd_mask = 0;
+-      low_phys_bits = boot_cpu_data.x86_cache_bits;
+-      if (boot_cpu_data.x86_cache_bits <
+-          52 - shadow_nonpresent_or_rsvd_mask_len) {
++      low_phys_bits = boot_cpu_data.x86_phys_bits;
++      if (boot_cpu_has_bug(X86_BUG_L1TF) &&
++          !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
++                        52 - shadow_nonpresent_or_rsvd_mask_len)) {
++              low_phys_bits = boot_cpu_data.x86_cache_bits
++                      - shadow_nonpresent_or_rsvd_mask_len;
+               shadow_nonpresent_or_rsvd_mask =
+-                      rsvd_bits(boot_cpu_data.x86_cache_bits -
+-                                shadow_nonpresent_or_rsvd_mask_len,
+-                                boot_cpu_data.x86_cache_bits - 1);
+-              low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+-      } else
+-              WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
++                      rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
++      }
+       shadow_nonpresent_or_rsvd_lower_gfn_mask =
+               GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
+-- 
+2.25.1
+
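
To make the SEV scenario described above concrete, the following user-space
sketch (illustrative only, with hypothetical values: MAXPHYADDR reduced to 43
by memory encryption while x86_cache_bits stays at 48) walks through the
reworked mask setup. Because the CPU is not affected by L1TF, no gfn splitting
is performed and low_phys_bits tracks the reduced MAXPHYADDR.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Mirrors the kernel's GENMASK_ULL() for 64-bit values. */
#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Mirrors rsvd_bits() in arch/x86/kvm/mmu.h: set bits s..e (inclusive). */
static uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        const int mask_len = 5;          /* shadow_nonpresent_or_rsvd_mask_len  */
        const int page_shift = 12;
        const int phys_bits = 43;        /* hypothetical SEV-reduced MAXPHYADDR */
        const int cache_bits = 48;       /* hypothetical x86_cache_bits         */
        const bool has_l1tf_bug = false; /* AMD: not affected by L1TF           */

        uint64_t nonpresent_mask = 0;
        int low_phys_bits = phys_bits;

        /* Same decision as the patched kvm_mmu_reset_all_pte_masks(). */
        if (has_l1tf_bug && cache_bits < 52 - mask_len) {
                low_phys_bits = cache_bits - mask_len;
                nonpresent_mask = rsvd_bits(low_phys_bits, cache_bits - 1);
        }

        printf("nonpresent/rsvd mask: %#llx\n", (unsigned long long)nonpresent_mask);
        printf("lower gfn mask      : %#llx\n",
               (unsigned long long)GENMASK_ULL(low_phys_bits - 1, page_shift));
        return 0;
}

On this hypothetical part the lower-gfn mask now stops at bit 42, just below
the reduced MAXPHYADDR, so it no longer overlaps bits that may be set in
shadow_mmio_value.
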
diff --git a/queue-4.19/series b/queue-4.19/series
index 15dc5ed3511db64b251405da739ab6963e1ba8f2..2c246b0351456d07a37123302ae720a31a8d297f 100644 (file)
@@ -47,3 +47,11 @@ pm-runtime-clk-fix-clk_pm_runtime_get-error-path.patch
 crypto-cavium-nitrox-fix-nitrox_get_first_device-when-ndevlist-is-fully-iterated.patch
 alsa-pcm-disallow-linking-stream-to-itself.patch
 x86-mce-mm-unmap-the-entire-page-if-the-whole-page-is-affected-and-poisoned.patch
+kvm-x86-fix-apic-page-invalidation-race.patch
+kvm-x86-fix-l1tf-mitigation-for-shadow-mmu.patch
+kvm-x86-mmu-consolidate-is-mmio-spte-code.patch
+kvm-x86-only-do-l1tf-workaround-on-affected-processo.patch
+x86-speculation-change-misspelled-stipb-to-stibp.patch
+x86-speculation-add-support-for-stibp-always-on-pref.patch
+x86-speculation-avoid-force-disabling-ibpb-based-on-.patch
+x86-speculation-pr_spec_force_disable-enforcement-fo.patch
diff --git a/queue-4.19/x86-speculation-add-support-for-stibp-always-on-pref.patch b/queue-4.19/x86-speculation-add-support-for-stibp-always-on-pref.patch
new file mode 100644 (file)
index 0000000..878afd4
--- /dev/null
@@ -0,0 +1,145 @@
+From 042fa385c6ad79abcb32c680e8f9a2842c0f2097 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Dec 2018 23:03:54 +0000
+Subject: x86/speculation: Add support for STIBP always-on preferred mode
+
+From: Thomas Lendacky <Thomas.Lendacky@amd.com>
+
+[ Upstream commit 20c3a2c33e9fdc82e9e8e8d2a6445b3256d20191 ]
+
+Different AMD processors may have different implementations of STIBP.
+When STIBP is conditionally enabled, some implementations would benefit
+from having STIBP always on instead of toggling the STIBP bit through MSR
+writes. This preference is advertised through a CPUID feature bit.
+
+When conditional STIBP support is requested at boot and the CPU advertises
+STIBP always-on mode as preferred, switch to STIBP "on" support. To show
+that this transition has occurred, create a new spectre_v2_user_mitigation
+value and a new spectre_v2_user_strings message. The new mitigation value
+is used in spectre_v2_user_select_mitigation() to print the new mitigation
+message as well as to return a new string from stibp_state().
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20181213230352.6937.74943.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/cpufeatures.h   |  1 +
+ arch/x86/include/asm/nospec-branch.h |  1 +
+ arch/x86/kernel/cpu/bugs.c           | 28 ++++++++++++++++++++++------
+ 3 files changed, 24 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 9f03ac233566..f7f9604b10cc 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -291,6 +291,7 @@
+ #define X86_FEATURE_AMD_IBPB          (13*32+12) /* "" Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_AMD_IBRS          (13*32+14) /* "" Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP         (13*32+15) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_STIBP_ALWAYS_ON       (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
+ #define X86_FEATURE_AMD_SSBD          (13*32+24) /* "" Speculative Store Bypass Disable */
+ #define X86_FEATURE_VIRT_SSBD         (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+ #define X86_FEATURE_AMD_SSB_NO                (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 09c7466c4880..e3f70c60e8cc 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -232,6 +232,7 @@ enum spectre_v2_mitigation {
+ enum spectre_v2_user_mitigation {
+       SPECTRE_V2_USER_NONE,
+       SPECTRE_V2_USER_STRICT,
++      SPECTRE_V2_USER_STRICT_PREFERRED,
+       SPECTRE_V2_USER_PRCTL,
+       SPECTRE_V2_USER_SECCOMP,
+ };
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0ea87f9095f0..1f1f342574a2 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -633,10 +633,11 @@ enum spectre_v2_user_cmd {
+ };
+ static const char * const spectre_v2_user_strings[] = {
+-      [SPECTRE_V2_USER_NONE]          = "User space: Vulnerable",
+-      [SPECTRE_V2_USER_STRICT]        = "User space: Mitigation: STIBP protection",
+-      [SPECTRE_V2_USER_PRCTL]         = "User space: Mitigation: STIBP via prctl",
+-      [SPECTRE_V2_USER_SECCOMP]       = "User space: Mitigation: STIBP via seccomp and prctl",
++      [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
++      [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
++      [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
++      [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
++      [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
+ };
+ static const struct {
+@@ -726,6 +727,15 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+               break;
+       }
++      /*
++       * At this point, an STIBP mode other than "off" has been set.
++       * If STIBP support is not being forced, check if STIBP always-on
++       * is preferred.
++       */
++      if (mode != SPECTRE_V2_USER_STRICT &&
++          boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
++              mode = SPECTRE_V2_USER_STRICT_PREFERRED;
++
+       /* Initialize Indirect Branch Prediction Barrier */
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+@@ -999,6 +1009,7 @@ void arch_smt_update(void)
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
++      case SPECTRE_V2_USER_STRICT_PREFERRED:
+               update_stibp_strict();
+               break;
+       case SPECTRE_V2_USER_PRCTL:
+@@ -1233,7 +1244,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+                * Indirect branch speculation is always disabled in strict
+                * mode.
+                */
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+@@ -1246,7 +1258,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+                */
+               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return 0;
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+@@ -1317,6 +1330,7 @@ static int ib_prctl_get(struct task_struct *task)
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       case SPECTRE_V2_USER_STRICT:
++      case SPECTRE_V2_USER_STRICT_PREFERRED:
+               return PR_SPEC_DISABLE;
+       default:
+               return PR_SPEC_NOT_AFFECTED;
+@@ -1564,6 +1578,8 @@ static char *stibp_state(void)
+               return ", STIBP: disabled";
+       case SPECTRE_V2_USER_STRICT:
+               return ", STIBP: forced";
++      case SPECTRE_V2_USER_STRICT_PREFERRED:
++              return ", STIBP: always-on";
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               if (static_key_enabled(&switch_to_cond_stibp))
+-- 
+2.25.1
+
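
As a reference point (illustrative, not part of the queue), a user-space probe
for the CPUID bit behind X86_FEATURE_AMD_STIBP_ALWAYS_ON. It assumes the usual
mapping of cpufeatures.h word 13 to CPUID leaf 0x80000008 EBX, i.e. bit 15 for
AMD_STIBP and bit 17 for the always-on preference that makes the kernel switch
to SPECTRE_V2_USER_STRICT_PREFERRED.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Extended leaf 0x80000008: speculation-control feature bits in EBX. */
        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0x80000008 not available");
                return 1;
        }

        printf("AMD_STIBP           : %s\n", (ebx >> 15) & 1 ? "yes" : "no");
        printf("AMD_STIBP_ALWAYS_ON : %s\n", (ebx >> 17) & 1 ? "yes" : "no");
        return 0;
}

On a kernel with this patch applied, the same preference is visible as
", STIBP: always-on" in /sys/devices/system/cpu/vulnerabilities/spectre_v2.
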
diff --git a/queue-4.19/x86-speculation-avoid-force-disabling-ibpb-based-on-.patch b/queue-4.19/x86-speculation-avoid-force-disabling-ibpb-based-on-.patch
new file mode 100644 (file)
index 0000000..53168de
--- /dev/null
@@ -0,0 +1,227 @@
+From 82c36685f1455028035965e97b0c137f656d8a6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 May 2020 06:40:42 -0700
+Subject: x86/speculation: Avoid force-disabling IBPB based on STIBP and
+ enhanced IBRS.
+
+From: Anthony Steinhauser <asteinhauser@google.com>
+
+[ Upstream commit 21998a351512eba4ed5969006f0c55882d995ada ]
+
+When STIBP is unavailable or enhanced IBRS is available, Linux
+force-disables the IBPB mitigation of Spectre-BTB even when simultaneous
+multithreading is disabled. While attempts to enable IBPB using
+prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, ...) fail with
+EPERM, the seccomp syscall (or its prctl(PR_SET_SECCOMP, ...) equivalent),
+which is used e.g. by Chromium or OpenSSH, succeeds with no errors, but the
+application remains silently vulnerable to cross-process Spectre v2 attacks
+(classical BTB poisoning). At the same time the SYSFS reporting
+(/sys/devices/system/cpu/vulnerabilities/spectre_v2) displays that IBPB is
+conditionally enabled when in fact it is unconditionally disabled.
+
+STIBP is useful only when SMT is enabled. When SMT is disabled and STIBP is
+unavailable, it makes no sense to also force-disable IBPB, because IBPB
+protects against cross-process Spectre-BTB attacks regardless of the SMT
+state. At the same time, since missing STIBP has only been observed on AMD
+CPUs, and AMD recommends using IBPB rather than STIBP, disabling IBPB
+because of missing STIBP goes directly against AMD's advice:
+https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf
+
+Similarly, enhanced IBRS is designed to protect against cross-core BTB
+poisoning and BTB-poisoning attacks from user space against the kernel (and
+BTB-poisoning attacks from a guest against the hypervisor); it is not designed
+to prevent cross-process (or cross-VM) BTB poisoning between processes (or
+VMs) running on the same core. Therefore, even with enhanced IBRS it is
+necessary to flush the BTB during context switches, so there is no reason
+to force-disable IBPB when enhanced IBRS is available.
+
+Enable the prctl control of IBPB even when STIBP is unavailable or enhanced
+IBRS is available.
+
+Fixes: 7cc765a67d8e ("x86/speculation: Enable prctl mode for spectre_v2_user")
+Signed-off-by: Anthony Steinhauser <asteinhauser@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 87 ++++++++++++++++++++++----------------
+ 1 file changed, 50 insertions(+), 37 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 1f1f342574a2..9f178423cbf0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
++      SPECTRE_V2_USER_NONE;
++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
+       SPECTRE_V2_USER_NONE;
+ #ifdef CONFIG_RETPOLINE
+@@ -727,15 +729,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+               break;
+       }
+-      /*
+-       * At this point, an STIBP mode other than "off" has been set.
+-       * If STIBP support is not being forced, check if STIBP always-on
+-       * is preferred.
+-       */
+-      if (mode != SPECTRE_V2_USER_STRICT &&
+-          boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+-              mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+-
+       /* Initialize Indirect Branch Prediction Barrier */
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+@@ -758,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+               pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+                       static_key_enabled(&switch_mm_always_ibpb) ?
+                       "always-on" : "conditional");
++
++              spectre_v2_user_ibpb = mode;
+       }
+-      /* If enhanced IBRS is enabled no STIBP required */
+-      if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++      /*
++       * If enhanced IBRS is enabled or SMT impossible, STIBP is not
++       * required.
++       */
++      if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return;
+       /*
+-       * If SMT is not possible or STIBP is not available clear the STIBP
+-       * mode.
++       * At this point, an STIBP mode other than "off" has been set.
++       * If STIBP support is not being forced, check if STIBP always-on
++       * is preferred.
++       */
++      if (mode != SPECTRE_V2_USER_STRICT &&
++          boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
++              mode = SPECTRE_V2_USER_STRICT_PREFERRED;
++
++      /*
++       * If STIBP is not available, clear the STIBP mode.
+        */
+-      if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
++      if (!boot_cpu_has(X86_FEATURE_STIBP))
+               mode = SPECTRE_V2_USER_NONE;
++
++      spectre_v2_user_stibp = mode;
++
+ set_mode:
+-      spectre_v2_user = mode;
+-      /* Only print the STIBP mode when SMT possible */
+-      if (smt_possible)
+-              pr_info("%s\n", spectre_v2_user_strings[mode]);
++      pr_info("%s\n", spectre_v2_user_strings[mode]);
+ }
+ static const char * const spectre_v2_strings[] = {
+@@ -1005,7 +1011,7 @@ void arch_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+-      switch (spectre_v2_user) {
++      switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
+@@ -1238,14 +1244,16 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+-              if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+                * mode.
+                */
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+@@ -1256,10 +1264,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+                * Indirect branch speculation is always allowed when
+                * mitigation is force disabled.
+                */
+-              if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+-              if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return 0;
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+@@ -1290,7 +1300,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+       if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+               ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+-      if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
++      if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
+               ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+@@ -1319,22 +1330,24 @@ static int ib_prctl_get(struct task_struct *task)
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return PR_SPEC_NOT_AFFECTED;
+-      switch (spectre_v2_user) {
+-      case SPECTRE_V2_USER_NONE:
++      if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++          spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+               return PR_SPEC_ENABLE;
+-      case SPECTRE_V2_USER_PRCTL:
+-      case SPECTRE_V2_USER_SECCOMP:
++      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++              return PR_SPEC_DISABLE;
++      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++          spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+               if (task_spec_ib_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ib_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+-      case SPECTRE_V2_USER_STRICT:
+-      case SPECTRE_V2_USER_STRICT_PREFERRED:
+-              return PR_SPEC_DISABLE;
+-      default:
++      } else
+               return PR_SPEC_NOT_AFFECTED;
+-      }
+ }
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+@@ -1573,7 +1586,7 @@ static char *stibp_state(void)
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return "";
+-      switch (spectre_v2_user) {
++      switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               return ", STIBP: disabled";
+       case SPECTRE_V2_USER_STRICT:
+-- 
+2.25.1
+
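
For illustration (not part of the queue), a minimal user-space sketch of the
prctl interface whose behaviour this patch corrects. It assumes headers recent
enough to define the speculation-control constants; PR_SPEC_INDIRECT_BRANCH is
guarded with a fallback just in case. On a fixed kernel running in prctl or
seccomp mode, the PR_SPEC_DISABLE request succeeds even when STIBP is
unavailable or enhanced IBRS is in use, instead of failing with EPERM.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_SPEC_INDIRECT_BRANCH 1       /* fallback for older headers */
#endif

int main(void)
{
        int state;

        /* Query the current indirect branch speculation state for this task. */
        state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
        printf("initial state: %#x\n", state);

        /* Request that indirect branch speculation be restricted for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_DISABLE, 0, 0))
                printf("PR_SPEC_DISABLE failed: %s\n", strerror(errno));
        else
                printf("indirect branch speculation disabled for this task\n");

        return 0;
}
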
diff --git a/queue-4.19/x86-speculation-change-misspelled-stipb-to-stibp.patch b/queue-4.19/x86-speculation-change-misspelled-stipb-to-stibp.patch
new file mode 100644 (file)
index 0000000..efb972e
--- /dev/null
@@ -0,0 +1,81 @@
+From bcbd8224a997088534cd2a56d3232466bdf96214 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Dec 2018 14:49:27 -0500
+Subject: x86/speculation: Change misspelled STIPB to STIBP
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit aa77bfb354c495fc4361199e63fc5765b9e1e783 ]
+
+STIBP stands for Single Thread Indirect Branch Predictors. The acronym,
+however, is easily misspelled as STIPB, perhaps due to the
+presence of another related term - IBPB (Indirect Branch Predictor
+Barrier).
+
+Fix the misspelling in the code.
+
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: KarimAllah Ahmed <karahmed@amazon.de>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1544039368-9009-1-git-send-email-longman@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 +++---
+ arch/x86/kernel/process.h  | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index cf07437cd106..0ea87f9095f0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,7 +61,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+ u64 __ro_after_init x86_amd_ls_cfg_base;
+ u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+-/* Control conditional STIPB in switch_to() */
++/* Control conditional STIBP in switch_to() */
+ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+ /* Control conditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+@@ -750,12 +750,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+                       "always-on" : "conditional");
+       }
+-      /* If enhanced IBRS is enabled no STIPB required */
++      /* If enhanced IBRS is enabled no STIBP required */
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return;
+       /*
+-       * If SMT is not possible or STIBP is not available clear the STIPB
++       * If SMT is not possible or STIBP is not available clear the STIBP
+        * mode.
+        */
+       if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
+index 898e97cf6629..320ab978fb1f 100644
+--- a/arch/x86/kernel/process.h
++++ b/arch/x86/kernel/process.h
+@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev,
+       if (IS_ENABLED(CONFIG_SMP)) {
+               /*
+                * Avoid __switch_to_xtra() invocation when conditional
+-               * STIPB is disabled and the only different bit is
++               * STIBP is disabled and the only different bit is
+                * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+                * in the TIF_WORK_CTXSW masks.
+                */
+-- 
+2.25.1
+
diff --git a/queue-4.19/x86-speculation-pr_spec_force_disable-enforcement-fo.patch b/queue-4.19/x86-speculation-pr_spec_force_disable-enforcement-fo.patch
new file mode 100644 (file)
index 0000000..139dadd
--- /dev/null
@@ -0,0 +1,54 @@
+From 6d414a9bc4568fb65096f7b1f3edb7f104e709f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Jun 2020 05:44:19 -0700
+Subject: x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect
+ branches.
+
+From: Anthony Steinhauser <asteinhauser@google.com>
+
+[ Upstream commit 4d8df8cbb9156b0a0ab3f802b80cb5db57acc0bf ]
+
+Currently, it is possible to enable indirect branch speculation even after
+it was force-disabled using the PR_SPEC_FORCE_DISABLE option. Moreover, the
+PR_GET_SPECULATION_CTRL command afterwards reports an incorrect result
+(force-disabled when it is in fact enabled). This is also inconsistent
+with STIBP and with the documentation, which clearly states that
+PR_SPEC_FORCE_DISABLE cannot be undone.
+
+Fix this by actually enforcing force-disabled indirect branch
+speculation. PR_SPEC_ENABLE called after PR_SPEC_FORCE_DISABLE now fails
+with -EPERM as described in the documentation.
+
+Fixes: 9137bb27e60e ("x86/speculation: Add prctl() control for indirect branch speculation")
+Signed-off-by: Anthony Steinhauser <asteinhauser@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 9f178423cbf0..bf554ed2fd51 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1249,11 +1249,14 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+-               * mode.
++               * mode. It can neither be enabled if it was force-disabled
++               * by a  previous prctl call.
++
+                */
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++                  task_spec_ib_force_disable(task))
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+-- 
+2.25.1
+
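
To close, a user-space sketch (illustrative only, same header assumptions as
the previous example, and it requires spectre_v2_user to be in prctl or
seccomp mode) of the semantics this patch enforces: once indirect branch
speculation has been force-disabled for a task, a later PR_SPEC_ENABLE must
fail with EPERM, and PR_GET_SPECULATION_CTRL keeps reporting the
force-disabled state.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_SPEC_INDIRECT_BRANCH 1       /* fallback for older headers */
#endif

int main(void)
{
        /* Permanently opt this task out of indirect branch speculation. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_FORCE_DISABLE, 0, 0)) {
                printf("force-disable failed: %s\n", strerror(errno));
                return 1;
        }

        /* With this fix, trying to undo the force-disable is rejected. */
        errno = 0;
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_ENABLE, 0, 0) == -1 && errno == EPERM)
                printf("re-enable correctly rejected with EPERM\n");
        else
                printf("re-enable was not rejected (unpatched kernel?)\n");

        printf("PR_GET_SPECULATION_CTRL reports: %#x\n",
               prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                     0, 0, 0));
        return 0;
}
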